diff --git a/BUILDING.txt b/BUILDING.txt
index 7b99537a26..3940a98be4 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -210,6 +210,7 @@ Requirements:
* Maven 3.0 or later
* Findbugs 1.3.9 (if running findbugs)
* ProtocolBuffer 2.5.0
+* CMake 2.6 or newer
* Windows SDK or Visual Studio 2010 Professional
* Unix command-line tools from GnuWin32 or Cygwin: sh, mkdir, rm, cp, tar, gzip
* zlib headers (if building native code bindings for zlib)
diff --git a/hadoop-common-project/hadoop-auth/pom.xml b/hadoop-common-project/hadoop-auth/pom.xml
index b3f83e34d6..2ff51d6ffe 100644
--- a/hadoop-common-project/hadoop-auth/pom.xml
+++ b/hadoop-common-project/hadoop-auth/pom.xml
@@ -144,6 +144,15 @@
<artifactId>maven-jar-plugin</artifactId>
+ <id>prepare-jar</id>
+ <phase>prepare-package</phase>
+ <goals>
+ <goal>jar</goal>
+ </goals>
+ </execution>
+ <execution>
+ <id>prepare-test-jar</id>
+ <phase>prepare-package</phase>
<goal>test-jar</goal>
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java
index a43a7c9f9c..cee951f815 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java
@@ -120,32 +120,6 @@ public String toString() {
return token;
}
- /**
- * Return the hashcode for the token.
- *
- * @return the hashcode for the token.
- */
- @Override
- public int hashCode() {
- return (token != null) ? token.hashCode() : 0;
- }
-
- /**
- * Return if two token instances are equal.
- *
- * @param o the other token instance.
- *
- * @return if this instance and the other instance are equal.
- */
- @Override
- public boolean equals(Object o) {
- boolean eq = false;
- if (o instanceof Token) {
- Token other = (Token) o;
- eq = (token == null && other.token == null) || (token != null && this.token.equals(other.token));
- }
- return eq;
- }
}
private static Class<? extends Authenticator> DEFAULT_AUTHENTICATOR = KerberosAuthenticator.class;
@@ -208,6 +182,16 @@ public AuthenticatedURL(Authenticator authenticator,
this.authenticator.setConnectionConfigurator(connConfigurator);
}
+ /**
+ * Returns the {@link Authenticator} instance used by the
+ * AuthenticatedURL.
+ *
+ * @return the {@link Authenticator} instance
+ */
+ protected Authenticator getAuthenticator() {
+ return authenticator;
+ }
+
/**
* Returns an authenticated {@link HttpURLConnection}.
*
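Note on the change above: the new protected getAuthenticator() accessor lets AuthenticatedURL subclasses reach the configured Authenticator. A minimal sketch of how a subclass might use it (the LoggingAuthenticatedURL class is illustrative, not part of this patch):

import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.Authenticator;

public class LoggingAuthenticatedURL extends AuthenticatedURL {
  public LoggingAuthenticatedURL(Authenticator authenticator) {
    super(authenticator);
  }

  // Uses the protected accessor introduced by this patch.
  public String describeAuthenticator() {
    return getAuthenticator().getClass().getSimpleName();
  }
}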
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
index 2a5d4aef85..316cd60a25 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
@@ -127,6 +127,7 @@ public class AuthenticationFilter implements Filter {
public static final String SIGNATURE_PROVIDER_ATTRIBUTE =
"org.apache.hadoop.security.authentication.util.SignerSecretProvider";
+ private Properties config;
private Signer signer;
private SignerSecretProvider secretProvider;
private AuthenticationHandler authHandler;
@@ -150,7 +151,7 @@ public class AuthenticationFilter implements Filter {
public void init(FilterConfig filterConfig) throws ServletException {
String configPrefix = filterConfig.getInitParameter(CONFIG_PREFIX);
configPrefix = (configPrefix != null) ? configPrefix + "." : "";
- Properties config = getConfiguration(configPrefix, filterConfig);
+ config = getConfiguration(configPrefix, filterConfig);
String authHandlerName = config.getProperty(AUTH_TYPE, null);
String authHandlerClassName;
if (authHandlerName == null) {
@@ -224,6 +225,17 @@ public void init(FilterConfig filterConfig) throws ServletException {
cookiePath = config.getProperty(COOKIE_PATH, null);
}
+ /**
+ * Returns the configuration properties of the {@link AuthenticationFilter}
+ * without the prefix. The returned properties are the same ones returned by
+ * the {@link #getConfiguration(String, FilterConfig)} method.
+ *
+ * @return the configuration properties.
+ */
+ protected Properties getConfiguration() {
+ return config;
+ }
+
/**
* Returns the authentication handler being used.
*
@@ -457,7 +469,7 @@ public Principal getUserPrincipal() {
createAuthCookie(httpResponse, signedToken, getCookieDomain(),
getCookiePath(), token.getExpires(), isHttps);
}
- filterChain.doFilter(httpRequest, httpResponse);
+ doFilter(filterChain, httpRequest, httpResponse);
}
} else {
unauthorizedResponse = false;
@@ -481,6 +493,15 @@ public Principal getUserPrincipal() {
}
}
+ /**
+ * Delegates the call to the servlet filter chain. Subclasses may override this
+ * method to perform pre and post filtering tasks.
+ */
+ protected void doFilter(FilterChain filterChain, HttpServletRequest request,
+ HttpServletResponse response) throws IOException, ServletException {
+ filterChain.doFilter(request, response);
+ }
+
/**
* Creates the Hadoop authentication HTTP cookie.
*
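Note on the change above: the new protected doFilter(FilterChain, HttpServletRequest, HttpServletResponse) hook is the extension point that DelegationTokenAuthenticationFilter overrides later in this patch. A minimal sketch of a subclass wrapping the chain with pre/post logic (the TimingAuthenticationFilter class and its logging target are illustrative):

import java.io.IOException;
import javax.servlet.FilterChain;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;

public class TimingAuthenticationFilter extends AuthenticationFilter {
  @Override
  protected void doFilter(FilterChain filterChain, HttpServletRequest request,
      HttpServletResponse response) throws IOException, ServletException {
    long start = System.currentTimeMillis();
    try {
      // pre-task could go here; the base implementation just delegates to the chain
      super.doFilter(filterChain, request, response);
    } finally {
      // post-task: report how long the downstream chain took (illustrative)
      System.err.println("chain took " + (System.currentTimeMillis() - start) + " ms");
    }
  }
}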
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
index 4827390542..98524608b4 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
@@ -142,11 +142,30 @@ public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
*/
public static final String NAME_RULES = TYPE + ".name.rules";
+ private String type;
private String keytab;
private GSSManager gssManager;
private Subject serverSubject = new Subject();
private List<LoginContext> loginContexts = new ArrayList<LoginContext>();
+ /**
+ * Creates a Kerberos SPNEGO authentication handler with the default
+ * auth-token type, kerberos.
+ */
+ public KerberosAuthenticationHandler() {
+ this(TYPE);
+ }
+
+ /**
+ * Creates a Kerberos SPNEGO authentication handler with a custom auth-token
+ * type.
+ *
+ * @param type auth-token type.
+ */
+ public KerberosAuthenticationHandler(String type) {
+ this.type = type;
+ }
+
/**
* Initializes the authentication handler instance.
*
@@ -249,7 +268,7 @@ public void destroy() {
*/
@Override
public String getType() {
- return TYPE;
+ return type;
}
/**
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/PseudoAuthenticationHandler.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/PseudoAuthenticationHandler.java
index 235081b961..0b329e04ce 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/PseudoAuthenticationHandler.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/PseudoAuthenticationHandler.java
@@ -55,6 +55,25 @@ public class PseudoAuthenticationHandler implements AuthenticationHandler {
private static final Charset UTF8_CHARSET = Charset.forName("UTF-8");
private boolean acceptAnonymous;
+ private String type;
+
+ /**
+ * Creates a Hadoop pseudo authentication handler with the default auth-token
+ * type, simple.
+ */
+ public PseudoAuthenticationHandler() {
+ this(TYPE);
+ }
+
+ /**
+ * Creates a Hadoop pseudo authentication handler with a custom auth-token
+ * type.
+ *
+ * @param type auth-token type.
+ */
+ public PseudoAuthenticationHandler(String type) {
+ this.type = type;
+ }
/**
* Initializes the authentication handler instance.
@@ -96,7 +115,7 @@ public void destroy() {
*/
@Override
public String getType() {
- return TYPE;
+ return type;
}
/**
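Note on the changes above: both the Kerberos and the pseudo handlers now accept the auth-token type through a constructor, so getType() can report a custom value. A minimal sketch of a wrapper re-branding the token type (the SimpleDTAuthenticationHandler class and the "simple-dt" value are illustrative):

import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;

public class SimpleDTAuthenticationHandler extends PseudoAuthenticationHandler {
  public SimpleDTAuthenticationHandler() {
    // getType() now returns "simple-dt" instead of the default "simple"
    super("simple-dt");
  }
}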
diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestAuthenticatedURL.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestAuthenticatedURL.java
index 5be0b382f2..b56fc65b25 100644
--- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestAuthenticatedURL.java
+++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestAuthenticatedURL.java
@@ -33,36 +33,6 @@ public void testToken() throws Exception {
token = new AuthenticatedURL.Token("foo");
Assert.assertTrue(token.isSet());
Assert.assertEquals("foo", token.toString());
-
- AuthenticatedURL.Token token1 = new AuthenticatedURL.Token();
- AuthenticatedURL.Token token2 = new AuthenticatedURL.Token();
- Assert.assertEquals(token1.hashCode(), token2.hashCode());
- Assert.assertTrue(token1.equals(token2));
-
- token1 = new AuthenticatedURL.Token();
- token2 = new AuthenticatedURL.Token("foo");
- Assert.assertNotSame(token1.hashCode(), token2.hashCode());
- Assert.assertFalse(token1.equals(token2));
-
- token1 = new AuthenticatedURL.Token("foo");
- token2 = new AuthenticatedURL.Token();
- Assert.assertNotSame(token1.hashCode(), token2.hashCode());
- Assert.assertFalse(token1.equals(token2));
-
- token1 = new AuthenticatedURL.Token("foo");
- token2 = new AuthenticatedURL.Token("foo");
- Assert.assertEquals(token1.hashCode(), token2.hashCode());
- Assert.assertTrue(token1.equals(token2));
-
- token1 = new AuthenticatedURL.Token("bar");
- token2 = new AuthenticatedURL.Token("foo");
- Assert.assertNotSame(token1.hashCode(), token2.hashCode());
- Assert.assertFalse(token1.equals(token2));
-
- token1 = new AuthenticatedURL.Token("foo");
- token2 = new AuthenticatedURL.Token("bar");
- Assert.assertNotSame(token1.hashCode(), token2.hashCode());
- Assert.assertFalse(token1.equals(token2));
}
@Test
@@ -137,4 +107,12 @@ public void testConnectionConfigurator() throws Exception {
Mockito.verify(connConf).configure(Mockito.any());
}
+ @Test
+ public void testGetAuthenticator() throws Exception {
+ Authenticator authenticator = Mockito.mock(Authenticator.class);
+
+ AuthenticatedURL aURL = new AuthenticatedURL(authenticator);
+ Assert.assertEquals(authenticator, aURL.getAuthenticator());
+ }
+
}
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index fb2741e049..c29934be44 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -199,6 +199,9 @@ Trunk (Unreleased)
HADOOP-10936. Change default KeyProvider bitlength to 128. (wang)
+ HADOOP-10224. JavaKeyStoreProvider has to protect against corrupting
+ underlying store. (asuresh via tucu)
+
BUG FIXES
HADOOP-9451. Fault single-layer config if node group topology is enabled.
@@ -421,6 +424,9 @@ Trunk (Unreleased)
HADOOP-10939. Fix TestKeyProviderFactory testcases to use default 128 bit
length keys. (Arun Suresh via wang)
+ HADOOP-10862. Miscellaneous trivial corrections to KMS classes.
+ (asuresh via tucu)
+
OPTIMIZATIONS
HADOOP-7761. Improve the performance of raw comparisons. (todd)
@@ -490,6 +496,12 @@ Release 2.6.0 - UNRELEASED
HADOOP-10791. AuthenticationFilter should support externalizing the
secret for signing and provide rotation support. (rkanter via tucu)
+ HADOOP-10771. Refactor HTTP delegation support out of httpfs to common.
+ (tucu)
+
+ HADOOP-10835. Implement HTTP proxyuser support in HTTP authentication
+ client/server libraries. (tucu)
+
OPTIMIZATIONS
BUG FIXES
@@ -521,9 +533,6 @@ Release 2.6.0 - UNRELEASED
HADOOP-10830. Missing lock in JavaKeyStoreProvider.createCredentialEntry.
(Benoy Antony via umamahesh)
- HADOOP-10876. The constructor of Path should not take an empty URL as a
- parameter. (Zhihai Xu via wang)
-
HADOOP-10928. Incorrect usage on `hadoop credential list`.
(Josh Elser via wang)
@@ -545,6 +554,12 @@ Release 2.6.0 - UNRELEASED
HADOOP-10931 compile error on tools/hadoop-openstack (xukun via stevel)
+ HADOOP-10929. Typo in Configuration.getPasswordFromCredentialProviders
+ (lmccay via brandonli)
+
+ HADOOP-10402. Configuration.getValByRegex does not substitute for
+ variables. (Robert Kanter via kasha)
+
Release 2.5.0 - UNRELEASED
INCOMPATIBLE CHANGES
diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml
index 0c90a9490d..09f1c5a2d3 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -203,6 +203,17 @@
<artifactId>hadoop-auth</artifactId>
<scope>compile</scope>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-auth</artifactId>
+ <type>test-jar</type>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.hadoop</groupId>
+ <artifactId>hadoop-minikdc</artifactId>
+ <scope>test</scope>
+ </dependency>
com.jcraft
jsch
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index 31c40f60f3..cf5ec1a53e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -1781,7 +1781,7 @@ public void setStrings(String name, String... values) {
public char[] getPassword(String name) throws IOException {
char[] pass = null;
- pass = getPasswordFromCredenitalProviders(name);
+ pass = getPasswordFromCredentialProviders(name);
if (pass == null) {
pass = getPasswordFromConfig(name);
@@ -1797,7 +1797,7 @@ public char[] getPassword(String name) throws IOException {
* @return password or null if not found
* @throws IOException
*/
- protected char[] getPasswordFromCredenitalProviders(String name)
+ protected char[] getPasswordFromCredentialProviders(String name)
throws IOException {
char[] pass = null;
try {
@@ -2755,7 +2755,8 @@ public Map<String,String> getValByRegex(String regex) {
item.getValue() instanceof String) {
m = p.matcher((String)item.getKey());
if(m.find()) { // match
- result.put((String) item.getKey(), (String) item.getValue());
+ result.put((String) item.getKey(),
+ substituteVars(getProps().getProperty((String) item.getKey())));
}
}
}
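Note on the change above: with HADOOP-10402 the values returned by getValByRegex() go through the same ${var} substitution as Configuration.get(). A minimal sketch of the expected behavior (the key names and values are illustrative):

import java.util.Map;
import org.apache.hadoop.conf.Configuration;

public class GetValByRegexExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.set("my.base.dir", "/data");
    conf.set("my.log.dir", "${my.base.dir}/logs");
    Map<String, String> matches = conf.getValByRegex("^my\\..*\\.dir$");
    // After HADOOP-10402 the value is substituted: "/data/logs", not "${my.base.dir}/logs"
    System.out.println(matches.get("my.log.dir"));
  }
}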
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java
index 529a21287c..250315177a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java
@@ -27,8 +27,11 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.ProviderUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import javax.crypto.spec.SecretKeySpec;
+
import java.io.IOException;
import java.io.InputStream;
import java.io.ObjectInputStream;
@@ -80,6 +83,9 @@
@InterfaceAudience.Private
public class JavaKeyStoreProvider extends KeyProvider {
private static final String KEY_METADATA = "KeyMetadata";
+ private static Logger LOG =
+ LoggerFactory.getLogger(JavaKeyStoreProvider.class);
+
public static final String SCHEME_NAME = "jceks";
public static final String KEYSTORE_PASSWORD_FILE_KEY =
@@ -115,6 +121,10 @@ private JavaKeyStoreProvider(URI uri, Configuration conf) throws IOException {
if (pwFile != null) {
ClassLoader cl = Thread.currentThread().getContextClassLoader();
URL pwdFile = cl.getResource(pwFile);
+ if (pwdFile == null) {
+ // Provided password file does not exist
+ throw new IOException("Password file does not exist");
+ }
if (pwdFile != null) {
InputStream is = pwdFile.openStream();
try {
@@ -129,19 +139,25 @@ private JavaKeyStoreProvider(URI uri, Configuration conf) throws IOException {
password = KEYSTORE_PASSWORD_DEFAULT;
}
try {
+ Path oldPath = constructOldPath(path);
+ Path newPath = constructNewPath(path);
keyStore = KeyStore.getInstance(SCHEME_NAME);
+ FsPermission perm = null;
if (fs.exists(path)) {
- // save off permissions in case we need to
- // rewrite the keystore in flush()
- FileStatus s = fs.getFileStatus(path);
- permissions = s.getPermission();
-
- keyStore.load(fs.open(path), password);
+ // flush did not proceed to completion
+ // _NEW should not exist
+ if (fs.exists(newPath)) {
+ throw new IOException(
+ String.format("Keystore not loaded due to some inconsistency "
+ + "('%s' and '%s' should not exist together)!!", path, newPath));
+ }
+ perm = tryLoadFromPath(path, oldPath);
} else {
- permissions = new FsPermission("700");
- // required to create an empty keystore. *sigh*
- keyStore.load(null, password);
+ perm = tryLoadIncompleteFlush(oldPath, newPath);
}
+ // Need to save off permissions in case we need to
+ // rewrite the keystore in flush()
+ permissions = perm;
} catch (KeyStoreException e) {
throw new IOException("Can't create keystore", e);
} catch (NoSuchAlgorithmException e) {
@@ -154,6 +170,136 @@ private JavaKeyStoreProvider(URI uri, Configuration conf) throws IOException {
writeLock = lock.writeLock();
}
+ /**
+ * Try loading from the user-specified path, else load from the backup
+ * path, provided the Exception is not due to a bad/wrong password
+ * @param path Actual path to load from
+ * @param backupPath Backup path (_OLD)
+ * @return The permissions of the loaded file
+ * @throws NoSuchAlgorithmException
+ * @throws CertificateException
+ * @throws IOException
+ */
+ private FsPermission tryLoadFromPath(Path path, Path backupPath)
+ throws NoSuchAlgorithmException, CertificateException,
+ IOException {
+ FsPermission perm = null;
+ try {
+ perm = loadFromPath(path, password);
+ // Remove _OLD if exists
+ if (fs.exists(backupPath)) {
+ fs.delete(backupPath, true);
+ }
+ LOG.debug("KeyStore loaded successfully !!");
+ } catch (IOException ioe) {
+ // If the file is corrupted for some reason other than
+ // a wrong password, try the _OLD file if it exists
+ if (!isBadorWrongPassword(ioe)) {
+ perm = loadFromPath(backupPath, password);
+ // Rename CURRENT to CORRUPTED
+ renameOrFail(path, new Path(path.toString() + "_CORRUPTED_"
+ + System.currentTimeMillis()));
+ renameOrFail(backupPath, path);
+ LOG.debug(String.format(
+ "KeyStore loaded successfully from '%s' since '%s'"
+ + "was corrupted !!", backupPath, path));
+ } else {
+ throw ioe;
+ }
+ }
+ return perm;
+ }
+
+ /**
+ * The KeyStore might have gone down during a flush, in which case either the
+ * _NEW or _OLD files might exist. This method tries to load the KeyStore
+ * from one of these intermediate files.
+ * @param oldPath the _OLD file created during flush
+ * @param newPath the _NEW file created during flush
+ * @return The permissions of the loaded file
+ * @throws IOException
+ * @throws NoSuchAlgorithmException
+ * @throws CertificateException
+ */
+ private FsPermission tryLoadIncompleteFlush(Path oldPath, Path newPath)
+ throws IOException, NoSuchAlgorithmException, CertificateException {
+ FsPermission perm = null;
+ // Check if _NEW exists (in case flush had finished writing but not
+ // completed the re-naming)
+ if (fs.exists(newPath)) {
+ perm = loadAndReturnPerm(newPath, oldPath);
+ }
+ // Try loading from _OLD (an earlier flush might not have
+ // completed writing)
+ if ((perm == null) && fs.exists(oldPath)) {
+ perm = loadAndReturnPerm(oldPath, newPath);
+ }
+ // If not loaded yet,
+ // required to create an empty keystore. *sigh*
+ if (perm == null) {
+ keyStore.load(null, password);
+ LOG.debug("KeyStore initialized anew successfully !!");
+ perm = new FsPermission("700");
+ }
+ return perm;
+ }
+
+ private FsPermission loadAndReturnPerm(Path pathToLoad, Path pathToDelete)
+ throws NoSuchAlgorithmException, CertificateException,
+ IOException {
+ FsPermission perm = null;
+ try {
+ perm = loadFromPath(pathToLoad, password);
+ renameOrFail(pathToLoad, path);
+ LOG.debug(String.format("KeyStore loaded successfully from '%s'!!",
+ pathToLoad));
+ if (fs.exists(pathToDelete)) {
+ fs.delete(pathToDelete, true);
+ }
+ } catch (IOException e) {
+ // Check for password issue : don't want to trash file due
+ // to wrong password
+ if (isBadorWrongPassword(e)) {
+ throw e;
+ }
+ }
+ return perm;
+ }
+
+ private boolean isBadorWrongPassword(IOException ioe) {
+ // As per documentation this is supposed to be the way to figure
+ // if password was correct
+ if (ioe.getCause() instanceof UnrecoverableKeyException) {
+ return true;
+ }
+ // Unfortunately that doesn't seem to work..
+ // Workaround :
+ if ((ioe.getCause() == null)
+ && (ioe.getMessage() != null)
+ && ((ioe.getMessage().contains("Keystore was tampered")) || (ioe
+ .getMessage().contains("password was incorrect")))) {
+ return true;
+ }
+ return false;
+ }
+
+ private FsPermission loadFromPath(Path p, char[] password)
+ throws IOException, NoSuchAlgorithmException, CertificateException {
+ FileStatus s = fs.getFileStatus(p);
+ keyStore.load(fs.open(p), password);
+ return s.getPermission();
+ }
+
+ private Path constructNewPath(Path path) {
+ Path newPath = new Path(path.toString() + "_NEW");
+ return newPath;
+ }
+
+ private Path constructOldPath(Path path) {
+ Path oldPath = new Path(path.toString() + "_OLD");
+ return oldPath;
+ }
+
@Override
public KeyVersion getKeyVersion(String versionName) throws IOException {
readLock.lock();
@@ -352,11 +498,22 @@ public KeyVersion rollNewVersion(String name,
@Override
public void flush() throws IOException {
+ Path newPath = constructNewPath(path);
+ Path oldPath = constructOldPath(path);
writeLock.lock();
try {
if (!changed) {
return;
}
+ // Might exist if a backup has been restored etc.
+ if (fs.exists(newPath)) {
+ renameOrFail(newPath, new Path(newPath.toString()
+ + "_ORPHANED_" + System.currentTimeMillis()));
+ }
+ if (fs.exists(oldPath)) {
+ renameOrFail(oldPath, new Path(oldPath.toString()
+ + "_ORPHANED_" + System.currentTimeMillis()));
+ }
// put all of the updates into the keystore
for(Map.Entry<String, Metadata> entry: cache.entrySet()) {
try {
@@ -366,25 +523,77 @@ public void flush() throws IOException {
throw new IOException("Can't set metadata key " + entry.getKey(),e );
}
}
+
+ // Save old File first
+ boolean fileExisted = backupToOld(oldPath);
// write out the keystore
- FSDataOutputStream out = FileSystem.create(fs, path, permissions);
+ // Write to _NEW path first :
try {
- keyStore.store(out, password);
- } catch (KeyStoreException e) {
- throw new IOException("Can't store keystore " + this, e);
- } catch (NoSuchAlgorithmException e) {
- throw new IOException("No such algorithm storing keystore " + this, e);
- } catch (CertificateException e) {
- throw new IOException("Certificate exception storing keystore " + this,
- e);
+ writeToNew(newPath);
+ } catch (IOException ioe) {
+ // rename _OLD back to current and throw the Exception
+ revertFromOld(oldPath, fileExisted);
+ throw ioe;
}
- out.close();
+ // Rename _NEW to CURRENT and delete _OLD
+ cleanupNewAndOld(newPath, oldPath);
changed = false;
} finally {
writeLock.unlock();
}
}
+ private void cleanupNewAndOld(Path newPath, Path oldPath) throws IOException {
+ // Rename _NEW to CURRENT
+ renameOrFail(newPath, path);
+ // Delete _OLD
+ if (fs.exists(oldPath)) {
+ fs.delete(oldPath, true);
+ }
+ }
+
+ private void writeToNew(Path newPath) throws IOException {
+ FSDataOutputStream out =
+ FileSystem.create(fs, newPath, permissions);
+ try {
+ keyStore.store(out, password);
+ } catch (KeyStoreException e) {
+ throw new IOException("Can't store keystore " + this, e);
+ } catch (NoSuchAlgorithmException e) {
+ throw new IOException(
+ "No such algorithm storing keystore " + this, e);
+ } catch (CertificateException e) {
+ throw new IOException(
+ "Certificate exception storing keystore " + this, e);
+ }
+ out.close();
+ }
+
+ private void revertFromOld(Path oldPath, boolean fileExisted)
+ throws IOException {
+ if (fileExisted) {
+ renameOrFail(oldPath, path);
+ }
+ }
+
+ private boolean backupToOld(Path oldPath)
+ throws IOException {
+ boolean fileExisted = false;
+ if (fs.exists(path)) {
+ renameOrFail(path, oldPath);
+ fileExisted = true;
+ }
+ return fileExisted;
+ }
+
+ private void renameOrFail(Path src, Path dest)
+ throws IOException {
+ if (!fs.rename(src, dest)) {
+ throw new IOException("Rename unsuccessful : "
+ + String.format("'%s' to '%s'", src, dest));
+ }
+ }
+
@Override
public String toString() {
return uri.toString();
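Note on the changes above: flush() now follows a write-new/backup-old/rename protocol so that a crash at any point leaves either the old or the new keystore recoverable, and the constructor knows how to pick up the pieces. A minimal, generic sketch of the same protocol using java.nio (class, method, and path names are illustrative, not the provider's API):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;

public class AtomicWriteSketch {
  // Write 'data' to 'current' so a crash leaves either the old or the new content on disk.
  static void atomicWrite(Path current, byte[] data) throws IOException {
    Path newPath = Paths.get(current.toString() + "_NEW");
    Path oldPath = Paths.get(current.toString() + "_OLD");
    boolean hadCurrent = Files.exists(current);
    if (hadCurrent) {
      Files.move(current, oldPath, StandardCopyOption.REPLACE_EXISTING); // back up CURRENT as _OLD
    }
    try {
      Files.write(newPath, data); // write the new content to a side file first
    } catch (IOException e) {
      if (hadCurrent) {
        Files.move(oldPath, current, StandardCopyOption.REPLACE_EXISTING); // revert from _OLD
      }
      throw e;
    }
    Files.move(newPath, current, StandardCopyOption.REPLACE_EXISTING); // promote _NEW to CURRENT
    Files.deleteIfExists(oldPath); // drop the backup
  }
}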
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index c5624ee1c4..cf5b11315f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -512,7 +512,7 @@ private List<String[]> createKeySets(String[] keyNames) {
List<String> batch = new ArrayList<String>();
int batchLen = 0;
for (String name : keyNames) {
- int additionalLen = KMSRESTConstants.KEY_OP.length() + 1 + name.length();
+ int additionalLen = KMSRESTConstants.KEY.length() + 1 + name.length();
batchLen += additionalLen;
// topping at 1500 to account for initial URL and encoded names
if (batchLen > 1500) {
@@ -536,7 +536,7 @@ public Metadata[] getKeysMetadata(String ... keyNames) throws IOException {
for (String[] keySet : keySets) {
if (keyNames.length > 0) {
Map<String, Object> queryStr = new HashMap<String, Object>();
- queryStr.put(KMSRESTConstants.KEY_OP, keySet);
+ queryStr.put(KMSRESTConstants.KEY, keySet);
URL url = createURL(KMSRESTConstants.KEYS_METADATA_RESOURCE, null,
null, queryStr);
HttpURLConnection conn = createConnection(url, HTTP_GET);
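Note on the change above: createKeySets() batches key names so each getKeysMetadata request stays around 1500 characters of query string, now measured against the corrected KEY constant. A minimal sketch of the same batching idea (class and parameter names are illustrative):

import java.util.ArrayList;
import java.util.List;

public class KeyNameBatcher {
  // Splits key names into batches whose combined "param=name" length stays under maxLen.
  static List<String[]> createKeySets(String[] keyNames, String paramName, int maxLen) {
    List<String[]> ret = new ArrayList<String[]>();
    List<String> batch = new ArrayList<String>();
    int batchLen = 0;
    for (String name : keyNames) {
      int additionalLen = paramName.length() + 1 + name.length();
      if (batchLen + additionalLen > maxLen && !batch.isEmpty()) {
        ret.add(batch.toArray(new String[batch.size()]));
        batch = new ArrayList<String>();
        batchLen = 0;
      }
      batch.add(name);
      batchLen += additionalLen;
    }
    if (!batch.isEmpty()) {
      ret.add(batch.toArray(new String[batch.size()]));
    }
    return ret;
  }
}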
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSRESTConstants.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSRESTConstants.java
index b949ab91b5..b7d7898735 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSRESTConstants.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSRESTConstants.java
@@ -37,7 +37,7 @@ public class KMSRESTConstants {
public static final String EEK_SUB_RESOURCE = "_eek";
public static final String CURRENT_VERSION_SUB_RESOURCE = "_currentversion";
- public static final String KEY_OP = "key";
+ public static final String KEY = "key";
public static final String EEK_OP = "eek_op";
public static final String EEK_GENERATE = "generate";
public static final String EEK_DECRYPT = "decrypt";
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
index 0e8db1df11..54ddedaff1 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
@@ -128,20 +128,7 @@ private void checkPathArg( String path ) throws IllegalArgumentException {
"Can not create a Path from an empty string");
}
}
-
- /** check URI parameter of Path constructor. */
- private void checkPathArg(URI aUri) throws IllegalArgumentException {
- // disallow construction of a Path from an empty URI
- if (aUri == null) {
- throw new IllegalArgumentException(
- "Can not create a Path from a null URI");
- }
- if (aUri.toString().isEmpty()) {
- throw new IllegalArgumentException(
- "Can not create a Path from an empty URI");
- }
- }
-
+
/** Construct a path from a String. Path strings are URIs, but with
* unescaped elements and some additional normalization. */
public Path(String pathString) throws IllegalArgumentException {
@@ -189,7 +176,6 @@ public Path(String pathString) throws IllegalArgumentException {
* Construct a path from a URI
*/
public Path(URI aUri) {
- checkPathArg(aUri);
uri = aUri.normalize();
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsCollectorImpl.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsCollectorImpl.java
index 5f53629802..be442edb1b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsCollectorImpl.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsCollectorImpl.java
@@ -21,14 +21,18 @@
import java.util.Iterator;
import java.util.List;
+import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Lists;
+import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsFilter;
import static org.apache.hadoop.metrics2.lib.Interns.*;
-class MetricsCollectorImpl implements MetricsCollector,
+@InterfaceAudience.Private
+@VisibleForTesting
+public class MetricsCollectorImpl implements MetricsCollector,
Iterable<MetricsRecordBuilderImpl> {
private final List<MetricsRecordBuilderImpl> rbs = Lists.newArrayList();
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java
index b8ba435bf4..ba377570ef 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java
@@ -89,6 +89,14 @@ public MutableStat(String name, String description,
this(name, description, sampleName, valueName, false);
}
+ /**
+ * Set whether to display the extended stats (stdev, min/max etc.) or not
+ * @param extended enable/disable displaying extended stats
+ */
+ public synchronized void setExtended(boolean extended) {
+ this.extended = extended;
+ }
+
/**
* Add a number of samples and their sum to the running stat
* @param numSamples number of samples
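Note on the change above: the new setExtended() setter toggles extended statistics (stdev, min/max, etc.) on a MutableStat after it has been created. A minimal sketch, assuming the four-argument MetricsRegistry.newStat() overload (registry and metric names are illustrative):

import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableStat;

public class ExtendedStatExample {
  public static void main(String[] args) {
    MetricsRegistry registry = new MetricsRegistry("example");
    MutableStat latency = registry.newStat("opLatency", "Operation latency", "Ops", "Millis");
    latency.setExtended(true); // also report stdev/min/max, using the setter added by this patch
    latency.add(10);
    latency.add(20);
  }
}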
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticatedURL.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticatedURL.java
new file mode 100644
index 0000000000..d955ada857
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticatedURL.java
@@ -0,0 +1,343 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security.token.delegation.web;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
+
+import java.io.IOException;
+import java.net.HttpURLConnection;
+import java.net.InetSocketAddress;
+import java.net.URL;
+import java.net.URLEncoder;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * The DelegationTokenAuthenticatedURL is a
+ * {@link AuthenticatedURL} sub-class with built-in Hadoop Delegation Token
+ * functionality.
+ *
+ * The authentication mechanisms supported by default are Hadoop Simple
+ * authentication (also known as pseudo authentication) and Kerberos SPNEGO
+ * authentication.
+ *
+ * Additional authentication mechanisms can be supported via {@link
+ * DelegationTokenAuthenticator} implementations.
+ *
+ * The default {@link DelegationTokenAuthenticator} is the {@link
+ * KerberosDelegationTokenAuthenticator} class which supports
+ * automatic fallback from Kerberos SPNEGO to Hadoop Simple authentication via
+ * the {@link PseudoDelegationTokenAuthenticator} class.
+ *
+ * AuthenticatedURL instances are not thread-safe.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+public class DelegationTokenAuthenticatedURL extends AuthenticatedURL {
+
+ /**
+ * Constant used in URL's query string to perform a proxy user request, the
+ * value of the DO_AS parameter is the user the request will be
+ * done on behalf of.
+ */
+ static final String DO_AS = "doAs";
+
+ /**
+ * Client side authentication token that handles Delegation Tokens.
+ */
+ @InterfaceAudience.Public
+ @InterfaceStability.Unstable
+ public static class Token extends AuthenticatedURL.Token {
+ private
+ org.apache.hadoop.security.token.Token<AbstractDelegationTokenIdentifier>
+ delegationToken;
+
+ org.apache.hadoop.security.token.Token<AbstractDelegationTokenIdentifier>
+ getDelegationToken() {
+ return delegationToken;
+ }
+
+ }
+
+ private static Class<? extends DelegationTokenAuthenticator>
+ DEFAULT_AUTHENTICATOR = KerberosDelegationTokenAuthenticator.class;
+
+ /**
+ * Sets the default {@link DelegationTokenAuthenticator} class to use when an
+ * {@link DelegationTokenAuthenticatedURL} instance is created without
+ * specifying one.
+ *
+ * The default class is {@link KerberosDelegationTokenAuthenticator}
+ *
+ * @param authenticator the authenticator class to use as default.
+ */
+ public static void setDefaultDelegationTokenAuthenticator(
+ Class<? extends DelegationTokenAuthenticator> authenticator) {
+ DEFAULT_AUTHENTICATOR = authenticator;
+ }
+
+ /**
+ * Returns the default {@link DelegationTokenAuthenticator} class to use when
+ * an {@link DelegationTokenAuthenticatedURL} instance is created without
+ * specifying one.
+ *
+ * The default class is {@link KerberosDelegationTokenAuthenticator}
+ *
+ * @return the delegation token authenticator class to use as default.
+ */
+ public static Class<? extends DelegationTokenAuthenticator>
+ getDefaultDelegationTokenAuthenticator() {
+ return DEFAULT_AUTHENTICATOR;
+ }
+
+ private static DelegationTokenAuthenticator
+ obtainDelegationTokenAuthenticator(DelegationTokenAuthenticator dta) {
+ try {
+ return (dta != null) ? dta : DEFAULT_AUTHENTICATOR.newInstance();
+ } catch (Exception ex) {
+ throw new IllegalArgumentException(ex);
+ }
+ }
+
+ /**
+ * Creates a DelegationTokenAuthenticatedURL.
+ *
+ * An instance of the default {@link DelegationTokenAuthenticator} will be
+ * used.
+ */
+ public DelegationTokenAuthenticatedURL() {
+ this(null, null);
+ }
+
+ /**
+ * Creates a DelegationTokenAuthenticatedURL.
+ *
+ * @param authenticator the {@link DelegationTokenAuthenticator} instance to
+ * use, if null the default one will be used.
+ */
+ public DelegationTokenAuthenticatedURL(
+ DelegationTokenAuthenticator authenticator) {
+ this(authenticator, null);
+ }
+
+ /**
+ * Creates a DelegationTokenAuthenticatedURL using the default
+ * {@link DelegationTokenAuthenticator} class.
+ *
+ * @param connConfigurator a connection configurator.
+ */
+ public DelegationTokenAuthenticatedURL(
+ ConnectionConfigurator connConfigurator) {
+ this(null, connConfigurator);
+ }
+
+ /**
+ * Creates a DelegationTokenAuthenticatedURL.
+ *
+ * @param authenticator the {@link DelegationTokenAuthenticator} instance to
+ * use, if null the default one will be used.
+ * @param connConfigurator a connection configurator.
+ */
+ public DelegationTokenAuthenticatedURL(
+ DelegationTokenAuthenticator authenticator,
+ ConnectionConfigurator connConfigurator) {
+ super(obtainDelegationTokenAuthenticator(authenticator), connConfigurator);
+ }
+
+ /**
+ * Returns an authenticated {@link HttpURLConnection}, it uses a Delegation
+ * Token only if the given auth token is an instance of {@link Token} and
+ * it contains a Delegation Token, otherwise use the configured
+ * {@link DelegationTokenAuthenticator} to authenticate the connection.
+ *
+ * @param url the URL to connect to. Only HTTP/S URLs are supported.
+ * @param token the authentication token being used for the user.
+ * @return an authenticated {@link HttpURLConnection}.
+ * @throws IOException if an IO error occurred.
+ * @throws AuthenticationException if an authentication exception occurred.
+ */
+ @Override
+ public HttpURLConnection openConnection(URL url, AuthenticatedURL.Token token)
+ throws IOException, AuthenticationException {
+ return (token instanceof Token) ? openConnection(url, (Token) token)
+ : super.openConnection(url, token);
+ }
+
+ /**
+ * Returns an authenticated {@link HttpURLConnection}. If the Delegation
+ * Token is present, it will be used taking precedence over the configured
+ * Authenticator.
+ *
+ * @param url the URL to connect to. Only HTTP/S URLs are supported.
+ * @param token the authentication token being used for the user.
+ * @return an authenticated {@link HttpURLConnection}.
+ * @throws IOException if an IO error occurred.
+ * @throws AuthenticationException if an authentication exception occurred.
+ */
+ public HttpURLConnection openConnection(URL url, Token token)
+ throws IOException, AuthenticationException {
+ return openConnection(url, token, null);
+ }
+
+ private URL augmentURL(URL url, Map<String, String> params)
+ throws IOException {
+ if (params != null && params.size() > 0) {
+ String urlStr = url.toExternalForm();
+ StringBuilder sb = new StringBuilder(urlStr);
+ String separator = (urlStr.contains("?")) ? "&" : "?";
+ for (Map.Entry<String, String> param : params.entrySet()) {
+ sb.append(separator).append(param.getKey()).append("=").append(
+ param.getValue());
+ separator = "&";
+ }
+ url = new URL(sb.toString());
+ }
+ return url;
+ }
+
+ /**
+ * Returns an authenticated {@link HttpURLConnection}. If the Delegation
+ * Token is present, it will be used taking precedence over the configured
+ * Authenticator. If the doAs parameter is not NULL,
+ * the request will be done on behalf of the specified doAs user.
+ *
+ * @param url the URL to connect to. Only HTTP/S URLs are supported.
+ * @param token the authentication token being used for the user.
+ * @param doAs user to do the request on behalf of, if NULL the request is
+ * as self.
+ * @return an authenticated {@link HttpURLConnection}.
+ * @throws IOException if an IO error occurred.
+ * @throws AuthenticationException if an authentication exception occurred.
+ */
+ public HttpURLConnection openConnection(URL url, Token token, String doAs)
+ throws IOException, AuthenticationException {
+ Preconditions.checkNotNull(url, "url");
+ Preconditions.checkNotNull(token, "token");
+ Map<String, String> extraParams = new HashMap<String, String>();
+
+ // delegation token
+ Credentials creds = UserGroupInformation.getCurrentUser().getCredentials();
+ if (!creds.getAllTokens().isEmpty()) {
+ InetSocketAddress serviceAddr = new InetSocketAddress(url.getHost(),
+ url.getPort());
+ Text service = SecurityUtil.buildTokenService(serviceAddr);
+ org.apache.hadoop.security.token.Token<? extends TokenIdentifier> dt =
+ creds.getToken(service);
+ if (dt != null) {
+ extraParams.put(KerberosDelegationTokenAuthenticator.DELEGATION_PARAM,
+ dt.encodeToUrlString());
+ }
+ }
+
+ // proxyuser
+ if (doAs != null) {
+ extraParams.put(DO_AS, URLEncoder.encode(doAs, "UTF-8"));
+ }
+
+ url = augmentURL(url, extraParams);
+ return super.openConnection(url, token);
+ }
+
+ /**
+ * Requests a delegation token using the configured Authenticator
+ * for authentication.
+ *
+ * @param url the URL to get the delegation token from. Only HTTP/S URLs are
+ * supported.
+ * @param token the authentication token being used for the user where the
+ * Delegation token will be stored.
+ * @return a delegation token.
+ * @throws IOException if an IO error occurred.
+ * @throws AuthenticationException if an authentication exception occurred.
+ */
+ public org.apache.hadoop.security.token.Token<AbstractDelegationTokenIdentifier>
+ getDelegationToken(URL url, Token token, String renewer)
+ throws IOException, AuthenticationException {
+ Preconditions.checkNotNull(url, "url");
+ Preconditions.checkNotNull(token, "token");
+ try {
+ token.delegationToken =
+ ((KerberosDelegationTokenAuthenticator) getAuthenticator()).
+ getDelegationToken(url, token, renewer);
+ return token.delegationToken;
+ } catch (IOException ex) {
+ token.delegationToken = null;
+ throw ex;
+ }
+ }
+
+ /**
+ * Renews a delegation token from the server end-point using the
+ * configured Authenticator for authentication.
+ *
+ * @param url the URL to renew the delegation token from. Only HTTP/S URLs are
+ * supported.
+ * @param token the authentication token with the Delegation Token to renew.
+ * @throws IOException if an IO error occurred.
+ * @throws AuthenticationException if an authentication exception occurred.
+ */
+ public long renewDelegationToken(URL url, Token token)
+ throws IOException, AuthenticationException {
+ Preconditions.checkNotNull(url, "url");
+ Preconditions.checkNotNull(token, "token");
+ Preconditions.checkNotNull(token.delegationToken,
+ "No delegation token available");
+ try {
+ return ((KerberosDelegationTokenAuthenticator) getAuthenticator()).
+ renewDelegationToken(url, token, token.delegationToken);
+ } catch (IOException ex) {
+ token.delegationToken = null;
+ throw ex;
+ }
+ }
+
+ /**
+ * Cancels a delegation token from the server end-point. It does not require
+ * being authenticated by the configured Authenticator.
+ *
+ * @param url the URL to cancel the delegation token from. Only HTTP/S URLs
+ * are supported.
+ * @param token the authentication token with the Delegation Token to cancel.
+ * @throws IOException if an IO error occurred.
+ */
+ public void cancelDelegationToken(URL url, Token token)
+ throws IOException {
+ Preconditions.checkNotNull(url, "url");
+ Preconditions.checkNotNull(token, "token");
+ Preconditions.checkNotNull(token.delegationToken,
+ "No delegation token available");
+ try {
+ ((KerberosDelegationTokenAuthenticator) getAuthenticator()).
+ cancelDelegationToken(url, token, token.delegationToken);
+ } finally {
+ token.delegationToken = null;
+ }
+ }
+
+}
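Note on the new class above: a typical client opens connections through it, optionally on behalf of another user, and fetches, renews, and cancels a delegation token. A minimal sketch using the methods defined in this file (the endpoint URL, renewer, and doAs user are illustrative):

import java.net.HttpURLConnection;
import java.net.URL;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;

public class DTAuthenticatedURLExample {
  public static void main(String[] args) throws Exception {
    URL url = new URL("http://host.example.com:14000/webhdfs/v1/"); // illustrative endpoint
    DelegationTokenAuthenticatedURL.Token token =
        new DelegationTokenAuthenticatedURL.Token();
    DelegationTokenAuthenticatedURL aUrl = new DelegationTokenAuthenticatedURL();

    // Fetch a delegation token (Kerberos SPNEGO with simple-auth fallback by default).
    aUrl.getDelegationToken(url, token, "renewer@EXAMPLE.COM");

    // Later connections reuse the token stored in 'token'; the doAs argument is optional.
    HttpURLConnection conn = aUrl.openConnection(url, token, "proxied-user");
    System.out.println(conn.getResponseCode());

    aUrl.renewDelegationToken(url, token);
    aUrl.cancelDelegationToken(url, token);
  }
}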
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java
new file mode 100644
index 0000000000..2411d3fbf4
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java
@@ -0,0 +1,274 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security.token.delegation.web;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.SaslRpcServer;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
+import org.apache.hadoop.security.authentication.server.AuthenticationHandler;
+import org.apache.hadoop.security.authentication.server.AuthenticationToken;
+import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
+import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
+import org.apache.hadoop.security.authorize.AuthorizationException;
+import org.apache.hadoop.security.authorize.ProxyUsers;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
+import org.apache.http.NameValuePair;
+import org.apache.http.client.utils.URLEncodedUtils;
+import org.codehaus.jackson.map.ObjectMapper;
+
+import javax.servlet.FilterChain;
+import javax.servlet.FilterConfig;
+import javax.servlet.ServletContext;
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletRequestWrapper;
+import javax.servlet.http.HttpServletResponse;
+import java.io.IOException;
+import java.io.Writer;
+import java.nio.charset.Charset;
+import java.security.Principal;
+import java.util.Enumeration;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+
+/**
+ * The DelegationTokenAuthenticationFilter is a
+ * {@link AuthenticationFilter} with Hadoop Delegation Token support.
+ *
+ * By default it uses its own instance of the {@link
+ * AbstractDelegationTokenSecretManager}. For situations where an external
+ * AbstractDelegationTokenSecretManager is required (i.e. one that
+ * shares the secret with an AbstractDelegationTokenSecretManager
+ * instance running in other services), the external
+ * AbstractDelegationTokenSecretManager must be set as an
+ * attribute in the {@link ServletContext} of the web application using the
+ * {@link #DELEGATION_TOKEN_SECRET_MANAGER_ATTR} attribute name (
+ * 'hadoop.http.delegation-token-secret-manager').
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class DelegationTokenAuthenticationFilter
+ extends AuthenticationFilter {
+
+ private static final String APPLICATION_JSON_MIME = "application/json";
+ private static final String ERROR_EXCEPTION_JSON = "exception";
+ private static final String ERROR_MESSAGE_JSON = "message";
+
+ /**
+ * Sets an external DelegationTokenSecretManager instance to
+ * manage creation and verification of Delegation Tokens.
+ *
+ * This is useful for use cases where secrets must be shared across multiple
+ * services.
+ */
+
+ public static final String DELEGATION_TOKEN_SECRET_MANAGER_ATTR =
+ "hadoop.http.delegation-token-secret-manager";
+
+ private static final Charset UTF8_CHARSET = Charset.forName("UTF-8");
+
+ private static final ThreadLocal<UserGroupInformation> UGI_TL =
+ new ThreadLocal<UserGroupInformation>();
+ public static final String PROXYUSER_PREFIX = "proxyuser";
+
+ private SaslRpcServer.AuthMethod handlerAuthMethod;
+
+ /**
+ * It delegates to
+ * {@link AuthenticationFilter#getConfiguration(String, FilterConfig)} and
+ * then overrides the {@link AuthenticationHandler} to use if authentication
+ * type is set to simple or kerberos in order to use
+ * the corresponding implementation with delegation token support.
+ *
+ * @param configPrefix parameter not used.
+ * @param filterConfig parameter not used.
+ * @return hadoop-auth de-prefixed configuration for the filter and handler.
+ */
+ @Override
+ protected Properties getConfiguration(String configPrefix,
+ FilterConfig filterConfig) throws ServletException {
+ Properties props = super.getConfiguration(configPrefix, filterConfig);
+ String authType = props.getProperty(AUTH_TYPE);
+ if (authType.equals(PseudoAuthenticationHandler.TYPE)) {
+ props.setProperty(AUTH_TYPE,
+ PseudoDelegationTokenAuthenticationHandler.class.getName());
+ } else if (authType.equals(KerberosAuthenticationHandler.TYPE)) {
+ props.setProperty(AUTH_TYPE,
+ KerberosDelegationTokenAuthenticationHandler.class.getName());
+ }
+ return props;
+ }
+
+ /**
+ * Returns the proxyuser configuration. All returned properties must start
+ * with proxyuser.
+ *
+ * Subclasses may override this method if the proxyuser configuration is
+ * read from a source other than the filter init parameters.
+ *
+ * @param filterConfig filter configuration object
+ * @return the proxyuser configuration properties.
+ * @throws ServletException thrown if the configuration could not be created.
+ */
+ protected Configuration getProxyuserConfiguration(FilterConfig filterConfig)
+ throws ServletException {
+ // this filter class gets the configuration from the filter configs, we are
+ // creating an empty configuration and injecting the proxyuser settings in
+ // it. In the initialization of the filter, the returned configuration is
+ // passed to ProxyUsers, which only looks for 'proxyuser.' properties.
+ Configuration conf = new Configuration(false);
+ Enumeration<?> names = filterConfig.getInitParameterNames();
+ while (names.hasMoreElements()) {
+ String name = (String) names.nextElement();
+ if (name.startsWith(PROXYUSER_PREFIX + ".")) {
+ String value = filterConfig.getInitParameter(name);
+ conf.set(name, value);
+ }
+ }
+ return conf;
+ }
+
+
+ @Override
+ public void init(FilterConfig filterConfig) throws ServletException {
+ super.init(filterConfig);
+ AuthenticationHandler handler = getAuthenticationHandler();
+ AbstractDelegationTokenSecretManager dtSecretManager =
+ (AbstractDelegationTokenSecretManager) filterConfig.getServletContext().
+ getAttribute(DELEGATION_TOKEN_SECRET_MANAGER_ATTR);
+ if (dtSecretManager != null && handler
+ instanceof DelegationTokenAuthenticationHandler) {
+ DelegationTokenAuthenticationHandler dtHandler =
+ (DelegationTokenAuthenticationHandler) getAuthenticationHandler();
+ dtHandler.setExternalDelegationTokenSecretManager(dtSecretManager);
+ }
+ if (handler instanceof PseudoAuthenticationHandler ||
+ handler instanceof PseudoDelegationTokenAuthenticationHandler) {
+ setHandlerAuthMethod(SaslRpcServer.AuthMethod.SIMPLE);
+ }
+ if (handler instanceof KerberosAuthenticationHandler ||
+ handler instanceof KerberosDelegationTokenAuthenticationHandler) {
+ setHandlerAuthMethod(SaslRpcServer.AuthMethod.KERBEROS);
+ }
+
+ // proxyuser configuration
+ Configuration conf = getProxyuserConfiguration(filterConfig);
+ ProxyUsers.refreshSuperUserGroupsConfiguration(conf, PROXYUSER_PREFIX);
+ }
+
+ protected void setHandlerAuthMethod(SaslRpcServer.AuthMethod authMethod) {
+ this.handlerAuthMethod = authMethod;
+ }
+
+ @VisibleForTesting
+ static String getDoAs(HttpServletRequest request) {
+ List<NameValuePair> list = URLEncodedUtils.parse(request.getQueryString(),
+ UTF8_CHARSET);
+ if (list != null) {
+ for (NameValuePair nv : list) {
+ if (DelegationTokenAuthenticatedURL.DO_AS.equals(nv.getName())) {
+ return nv.getValue();
+ }
+ }
+ }
+ return null;
+ }
+
+ static UserGroupInformation getHttpUserGroupInformationInContext() {
+ return UGI_TL.get();
+ }
+
+ @Override
+ protected void doFilter(FilterChain filterChain, HttpServletRequest request,
+ HttpServletResponse response) throws IOException, ServletException {
+ boolean requestCompleted = false;
+ UserGroupInformation ugi = null;
+ AuthenticationToken authToken = (AuthenticationToken)
+ request.getUserPrincipal();
+ if (authToken != null && authToken != AuthenticationToken.ANONYMOUS) {
+ // if the request was authenticated because of a delegation token,
+ // then we ignore proxyuser (this is the same as the RPC behavior).
+ ugi = (UserGroupInformation) request.getAttribute(
+ DelegationTokenAuthenticationHandler.DELEGATION_TOKEN_UGI_ATTRIBUTE);
+ if (ugi == null) {
+ String realUser = request.getUserPrincipal().getName();
+ ugi = UserGroupInformation.createRemoteUser(realUser,
+ handlerAuthMethod);
+ String doAsUser = getDoAs(request);
+ if (doAsUser != null) {
+ ugi = UserGroupInformation.createProxyUser(doAsUser, ugi);
+ try {
+ ProxyUsers.authorize(ugi, request.getRemoteHost());
+ } catch (AuthorizationException ex) {
+ String msg = String.format(
+ "User '%s' from host '%s' not allowed to impersonate user '%s'",
+ realUser, request.getRemoteHost(), doAsUser);
+ response.setStatus(HttpServletResponse.SC_FORBIDDEN);
+ response.setContentType(APPLICATION_JSON_MIME);
+ Map<String, String> json = new HashMap<String, String>();
+ json.put(ERROR_EXCEPTION_JSON,
+ AuthorizationException.class.getName());
+ json.put(ERROR_MESSAGE_JSON, msg);
+ Writer writer = response.getWriter();
+ ObjectMapper jsonMapper = new ObjectMapper();
+ jsonMapper.writeValue(writer, json);
+ requestCompleted = true;
+ }
+ }
+ }
+ UGI_TL.set(ugi);
+ }
+ if (!requestCompleted) {
+ final UserGroupInformation ugiF = ugi;
+ try {
+ request = new HttpServletRequestWrapper(request) {
+
+ @Override
+ public String getAuthType() {
+ return (ugiF != null) ? handlerAuthMethod.toString() : null;
+ }
+
+ @Override
+ public String getRemoteUser() {
+ return (ugiF != null) ? ugiF.getShortUserName() : null;
+ }
+
+ @Override
+ public Principal getUserPrincipal() {
+ return (ugiF != null) ? new Principal() {
+ @Override
+ public String getName() {
+ return ugiF.getUserName();
+ }
+ } : null;
+ }
+ };
+ super.doFilter(filterChain, request, response);
+ } finally {
+ UGI_TL.remove();
+ }
+ }
+ }
+
+}
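Note on the new filter above: its proxyuser policy comes straight from the filter init parameters, and only names starting with "proxyuser." are copied into the Configuration handed to ProxyUsers. A minimal sketch of the kind of parameters getProxyuserConfiguration() picks up (the user, hosts, and groups values are illustrative):

import java.util.Properties;

public class ProxyuserParamsSketch {
  // Illustrative init parameters for DelegationTokenAuthenticationFilter.
  static Properties exampleParams() {
    Properties props = new Properties();
    props.setProperty("type", "kerberos");               // AUTH_TYPE, swapped to the DT handler by getConfiguration()
    props.setProperty("proxyuser.hue.hosts", "*");       // hosts the 'hue' user may proxy from
    props.setProperty("proxyuser.hue.groups", "admins"); // groups the 'hue' user may impersonate
    return props;
  }
}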
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
new file mode 100644
index 0000000000..3b6c289df5
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
@@ -0,0 +1,359 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security.token.delegation.web;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.server.AuthenticationHandler;
+import org.apache.hadoop.security.authentication.server.AuthenticationToken;
+import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
+import org.codehaus.jackson.map.ObjectMapper;
+
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import javax.ws.rs.core.MediaType;
+import java.io.IOException;
+import java.io.Writer;
+import java.text.MessageFormat;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+
+/**
+ * An {@link AuthenticationHandler} that implements Kerberos SPNEGO mechanism
+ * for HTTP and supports Delegation Token functionality.
+ *
+ * In addition to the wrapped {@link AuthenticationHandler} configuration
+ * properties, this handler supports the following properties prefixed
+ * with the type of the wrapped AuthenticationHandler:
+ *
+ * delegation-token.token-kind: the token kind for generated tokens
+ * (no default, required property).
+ * delegation-token.update-interval.sec: secret manager master key
+ * update interval in seconds (default 1 day).
+ * delegation-token.max-lifetime.sec: maximum life of a delegation
+ * token in seconds (default 7 days).
+ * delegation-token.renewal-interval.sec: renewal interval for
+ * delegation tokens in seconds (default 1 day).
+ * delegation-token.removal-scan-interval.sec: delegation tokens
+ * removal scan interval in seconds (default 1 hour).
+ *
+ *
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public abstract class DelegationTokenAuthenticationHandler
+ implements AuthenticationHandler {
+
+ protected static final String TYPE_POSTFIX = "-dt";
+
+ public static final String PREFIX = "delegation-token.";
+
+ public static final String TOKEN_KIND = PREFIX + "token-kind.sec";
+
+ public static final String UPDATE_INTERVAL = PREFIX + "update-interval.sec";
+ public static final long UPDATE_INTERVAL_DEFAULT = 24 * 60 * 60;
+
+ public static final String MAX_LIFETIME = PREFIX + "max-lifetime.sec";
+ public static final long MAX_LIFETIME_DEFAULT = 7 * 24 * 60 * 60;
+
+ public static final String RENEW_INTERVAL = PREFIX + "renew-interval.sec";
+ public static final long RENEW_INTERVAL_DEFAULT = 24 * 60 * 60;
+
+ public static final String REMOVAL_SCAN_INTERVAL = PREFIX +
+ "removal-scan-interval.sec";
+ public static final long REMOVAL_SCAN_INTERVAL_DEFAULT = 60 * 60;
+
+ private static final Set DELEGATION_TOKEN_OPS = new HashSet();
+
+ static final String DELEGATION_TOKEN_UGI_ATTRIBUTE =
+ "hadoop.security.delegation-token.ugi";
+
+ static {
+ DELEGATION_TOKEN_OPS.add(KerberosDelegationTokenAuthenticator.
+ DelegationTokenOperation.GETDELEGATIONTOKEN.toString());
+ DELEGATION_TOKEN_OPS.add(KerberosDelegationTokenAuthenticator.
+ DelegationTokenOperation.RENEWDELEGATIONTOKEN.toString());
+ DELEGATION_TOKEN_OPS.add(KerberosDelegationTokenAuthenticator.
+ DelegationTokenOperation.CANCELDELEGATIONTOKEN.toString());
+ }
+
+ private AuthenticationHandler authHandler;
+ private DelegationTokenManager tokenManager;
+ private String authType;
+
+ public DelegationTokenAuthenticationHandler(AuthenticationHandler handler) {
+ authHandler = handler;
+ authType = handler.getType();
+ }
+
+ @VisibleForTesting
+ DelegationTokenManager getTokenManager() {
+ return tokenManager;
+ }
+
+ @Override
+ public void init(Properties config) throws ServletException {
+ authHandler.init(config);
+ initTokenManager(config);
+ }
+
+ /**
+ * Sets an external DelegationTokenSecretManager instance to
+ * manage creation and verification of Delegation Tokens.
+ *
+ * This is useful for use cases where secrets must be shared across multiple
+ * services.
+ *
+ * @param secretManager a DelegationTokenSecretManager instance
+ */
+ public void setExternalDelegationTokenSecretManager(
+ AbstractDelegationTokenSecretManager secretManager) {
+ tokenManager.setExternalDelegationTokenSecretManager(secretManager);
+ }
+
+ @VisibleForTesting
+ @SuppressWarnings("unchecked")
+ public void initTokenManager(Properties config) {
+ String configPrefix = authHandler.getType() + ".";
+ Configuration conf = new Configuration(false);
+ for (Map.Entry entry : config.entrySet()) {
+ conf.set((String) entry.getKey(), (String) entry.getValue());
+ }
+ String tokenKind = conf.get(TOKEN_KIND);
+ if (tokenKind == null) {
+ throw new IllegalArgumentException(
+ "The configuration does not define the token kind");
+ }
+ tokenKind = tokenKind.trim();
+ long updateInterval = conf.getLong(configPrefix + UPDATE_INTERVAL,
+ UPDATE_INTERVAL_DEFAULT);
+ long maxLifeTime = conf.getLong(configPrefix + MAX_LIFETIME,
+ MAX_LIFETIME_DEFAULT);
+ long renewInterval = conf.getLong(configPrefix + RENEW_INTERVAL,
+ RENEW_INTERVAL_DEFAULT);
+ long removalScanInterval = conf.getLong(
+ configPrefix + REMOVAL_SCAN_INTERVAL, REMOVAL_SCAN_INTERVAL_DEFAULT);
+ tokenManager = new DelegationTokenManager(new Text(tokenKind),
+ updateInterval * 1000, maxLifeTime * 1000, renewInterval * 1000,
+ removalScanInterval * 1000);
+ tokenManager.init();
+ }
+
+ @Override
+ public void destroy() {
+ tokenManager.destroy();
+ authHandler.destroy();
+ }
+
+ @Override
+ public String getType() {
+ return authType;
+ }
+
+ private static final String ENTER = System.getProperty("line.separator");
+
+ @Override
+ @SuppressWarnings("unchecked")
+ public boolean managementOperation(AuthenticationToken token,
+ HttpServletRequest request, HttpServletResponse response)
+ throws IOException, AuthenticationException {
+ boolean requestContinues = true;
+ String op = ServletUtils.getParameter(request,
+ KerberosDelegationTokenAuthenticator.OP_PARAM);
+ op = (op != null) ? op.toUpperCase() : null;
+ if (DELEGATION_TOKEN_OPS.contains(op) &&
+ !request.getMethod().equals("OPTIONS")) {
+ KerberosDelegationTokenAuthenticator.DelegationTokenOperation dtOp =
+ KerberosDelegationTokenAuthenticator.
+ DelegationTokenOperation.valueOf(op);
+ if (dtOp.getHttpMethod().equals(request.getMethod())) {
+ boolean doManagement;
+ if (dtOp.requiresKerberosCredentials() && token == null) {
+ token = authenticate(request, response);
+ if (token == null) {
+ requestContinues = false;
+ doManagement = false;
+ } else {
+ doManagement = true;
+ }
+ } else {
+ doManagement = true;
+ }
+ if (doManagement) {
+ UserGroupInformation requestUgi = (token != null)
+ ? UserGroupInformation.createRemoteUser(token.getUserName())
+ : null;
+ Map map = null;
+ switch (dtOp) {
+ case GETDELEGATIONTOKEN:
+ if (requestUgi == null) {
+ throw new IllegalStateException("request UGI cannot be NULL");
+ }
+ String renewer = ServletUtils.getParameter(request,
+ KerberosDelegationTokenAuthenticator.RENEWER_PARAM);
+ try {
+ Token<?> dToken = tokenManager.createToken(requestUgi, renewer);
+ map = delegationTokenToJSON(dToken);
+ } catch (IOException ex) {
+ throw new AuthenticationException(ex.toString(), ex);
+ }
+ break;
+ case RENEWDELEGATIONTOKEN:
+ if (requestUgi == null) {
+ throw new IllegalStateException("request UGI cannot be NULL");
+ }
+ String tokenToRenew = ServletUtils.getParameter(request,
+ KerberosDelegationTokenAuthenticator.TOKEN_PARAM);
+ if (tokenToRenew == null) {
+ response.sendError(HttpServletResponse.SC_BAD_REQUEST,
+ MessageFormat.format(
+ "Operation [{0}] requires the parameter [{1}]", dtOp,
+ KerberosDelegationTokenAuthenticator.TOKEN_PARAM)
+ );
+ requestContinues = false;
+ } else {
+ Token<DelegationTokenIdentifier> dt =
+ new Token<DelegationTokenIdentifier>();
+ try {
+ dt.decodeFromUrlString(tokenToRenew);
+ long expirationTime = tokenManager.renewToken(dt,
+ requestUgi.getShortUserName());
+ map = new HashMap();
+ map.put("long", expirationTime);
+ } catch (IOException ex) {
+ throw new AuthenticationException(ex.toString(), ex);
+ }
+ }
+ break;
+ case CANCELDELEGATIONTOKEN:
+ String tokenToCancel = ServletUtils.getParameter(request,
+ KerberosDelegationTokenAuthenticator.TOKEN_PARAM);
+ if (tokenToCancel == null) {
+ response.sendError(HttpServletResponse.SC_BAD_REQUEST,
+ MessageFormat.format(
+ "Operation [{0}] requires the parameter [{1}]", dtOp,
+ KerberosDelegationTokenAuthenticator.TOKEN_PARAM)
+ );
+ requestContinues = false;
+ } else {
+ Token<DelegationTokenIdentifier> dt =
+ new Token<DelegationTokenIdentifier>();
+ try {
+ dt.decodeFromUrlString(tokenToCancel);
+ tokenManager.cancelToken(dt, (requestUgi != null)
+ ? requestUgi.getShortUserName() : null);
+ } catch (IOException ex) {
+ response.sendError(HttpServletResponse.SC_NOT_FOUND,
+ "Invalid delegation token, cannot cancel");
+ requestContinues = false;
+ }
+ }
+ break;
+ }
+ if (requestContinues) {
+ response.setStatus(HttpServletResponse.SC_OK);
+ if (map != null) {
+ response.setContentType(MediaType.APPLICATION_JSON);
+ Writer writer = response.getWriter();
+ ObjectMapper jsonMapper = new ObjectMapper();
+ jsonMapper.writeValue(writer, map);
+ writer.write(ENTER);
+ writer.flush();
+ }
+ requestContinues = false;
+ }
+ }
+ } else {
+ response.sendError(HttpServletResponse.SC_BAD_REQUEST,
+ MessageFormat.format(
+ "Wrong HTTP method [{0}] for operation [{1}], it should be " +
+ "[{2}]", request.getMethod(), dtOp, dtOp.getHttpMethod()));
+ requestContinues = false;
+ }
+ }
+ return requestContinues;
+ }
+
+ @SuppressWarnings("unchecked")
+ private static Map delegationTokenToJSON(Token token) throws IOException {
+ Map json = new LinkedHashMap();
+ json.put(
+ KerberosDelegationTokenAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON,
+ token.encodeToUrlString());
+ Map response = new LinkedHashMap();
+ response.put(KerberosDelegationTokenAuthenticator.DELEGATION_TOKEN_JSON,
+ json);
+ return response;
+ }
+
+ /**
+ * Authenticates a request looking for the delegation
+ * query-string parameter and verifying it is a valid token. If there is no
+ * delegation query-string parameter, it delegates the
+ * authentication to the {@link KerberosAuthenticationHandler} unless it is
+ * disabled.
+ *
+ * @param request the HTTP client request.
+ * @param response the HTTP client response.
+ * @return the authentication token for the authenticated request.
+ * @throws IOException thrown if an IO error occurred.
+ * @throws AuthenticationException thrown if the authentication failed.
+ */
+ @Override
+ public AuthenticationToken authenticate(HttpServletRequest request,
+ HttpServletResponse response)
+ throws IOException, AuthenticationException {
+ AuthenticationToken token;
+ String delegationParam = ServletUtils.getParameter(request,
+ KerberosDelegationTokenAuthenticator.DELEGATION_PARAM);
+ if (delegationParam != null) {
+ try {
+ Token<DelegationTokenIdentifier> dt =
+ new Token<DelegationTokenIdentifier>();
+ dt.decodeFromUrlString(delegationParam);
+ UserGroupInformation ugi = tokenManager.verifyToken(dt);
+ final String shortName = ugi.getShortUserName();
+
+ // creating an ephemeral token
+ token = new AuthenticationToken(shortName, ugi.getUserName(),
+ getType());
+ token.setExpires(0);
+ request.setAttribute(DELEGATION_TOKEN_UGI_ATTRIBUTE, ugi);
+ } catch (Throwable ex) {
+ throw new AuthenticationException("Could not verify DelegationToken, " +
+ ex.toString(), ex);
+ }
+ } else {
+ token = authHandler.authenticate(request, response);
+ }
+ return token;
+ }
+
+}
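
Usage illustration (a minimal sketch, not part of the patch): wiring the handler above outside a servlet container, assuming the classes added by this patch are on the classpath. It uses the PseudoDelegationTokenAuthenticationHandler subclass added further below; "EXAMPLE_KIND" is an arbitrary token kind.

    import java.util.Properties;

    import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticationHandler;
    import org.apache.hadoop.security.token.delegation.web.PseudoDelegationTokenAuthenticationHandler;

    public class DelegationTokenHandlerConfigSketch {
      public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // The token kind is the only required property; it is looked up without
        // the handler-type prefix (see initTokenManager above), and the interval
        // properties fall back to the defaults listed in the class javadoc.
        props.setProperty(DelegationTokenAuthenticationHandler.TOKEN_KIND,
            "EXAMPLE_KIND");
        DelegationTokenAuthenticationHandler handler =
            new PseudoDelegationTokenAuthenticationHandler();
        handler.init(props);
        System.out.println("handler type: " + handler.getType());
        handler.destroy();
      }
    }
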
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
new file mode 100644
index 0000000000..ec192dab8c
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
@@ -0,0 +1,250 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security.token.delegation.web;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.client.Authenticator;
+import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.net.HttpURLConnection;
+import java.net.InetSocketAddress;
+import java.net.URL;
+import java.net.URLEncoder;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * {@link Authenticator} wrapper that enhances an {@link Authenticator} with
+ * Delegation Token support.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public abstract class DelegationTokenAuthenticator implements Authenticator {
+ private static Logger LOG =
+ LoggerFactory.getLogger(DelegationTokenAuthenticator.class);
+
+ private static final String CONTENT_TYPE = "Content-Type";
+ private static final String APPLICATION_JSON_MIME = "application/json";
+
+ private static final String HTTP_GET = "GET";
+ private static final String HTTP_PUT = "PUT";
+
+ public static final String OP_PARAM = "op";
+
+ public static final String DELEGATION_PARAM = "delegation";
+ public static final String TOKEN_PARAM = "token";
+ public static final String RENEWER_PARAM = "renewer";
+ public static final String DELEGATION_TOKEN_JSON = "Token";
+ public static final String DELEGATION_TOKEN_URL_STRING_JSON = "urlString";
+ public static final String RENEW_DELEGATION_TOKEN_JSON = "long";
+
+ /**
+ * DelegationToken operations.
+ */
+ @InterfaceAudience.Private
+ public static enum DelegationTokenOperation {
+ GETDELEGATIONTOKEN(HTTP_GET, true),
+ RENEWDELEGATIONTOKEN(HTTP_PUT, true),
+ CANCELDELEGATIONTOKEN(HTTP_PUT, false);
+
+ private String httpMethod;
+ private boolean requiresKerberosCredentials;
+
+ private DelegationTokenOperation(String httpMethod,
+ boolean requiresKerberosCredentials) {
+ this.httpMethod = httpMethod;
+ this.requiresKerberosCredentials = requiresKerberosCredentials;
+ }
+
+ public String getHttpMethod() {
+ return httpMethod;
+ }
+
+ public boolean requiresKerberosCredentials() {
+ return requiresKerberosCredentials;
+ }
+ }
+
+ private Authenticator authenticator;
+
+ public DelegationTokenAuthenticator(Authenticator authenticator) {
+ this.authenticator = authenticator;
+ }
+
+ @Override
+ public void setConnectionConfigurator(ConnectionConfigurator configurator) {
+ authenticator.setConnectionConfigurator(configurator);
+ }
+
+ private boolean hasDelegationToken(URL url) {
+ String queryStr = url.getQuery();
+ return (queryStr != null) && queryStr.contains(DELEGATION_PARAM + "=");
+ }
+
+ @Override
+ public void authenticate(URL url, AuthenticatedURL.Token token)
+ throws IOException, AuthenticationException {
+ if (!hasDelegationToken(url)) {
+ authenticator.authenticate(url, token);
+ }
+ }
+
+ /**
+ * Requests a delegation token using the configured Authenticator
+ * for authentication.
+ *
+ * @param url the URL to get the delegation token from. Only HTTP/S URLs are
+ * supported.
+ * @param token the authentication token being used for the user where the
+ * Delegation token will be stored.
+ * @throws IOException if an IO error occurred.
+ * @throws AuthenticationException if an authentication exception occurred.
+ */
+ public Token getDelegationToken(URL url,
+ AuthenticatedURL.Token token, String renewer)
+ throws IOException, AuthenticationException {
+ Map json = doDelegationTokenOperation(url, token,
+ DelegationTokenOperation.GETDELEGATIONTOKEN, renewer, null, true);
+ json = (Map) json.get(DELEGATION_TOKEN_JSON);
+ String tokenStr = (String) json.get(DELEGATION_TOKEN_URL_STRING_JSON);
+ Token<AbstractDelegationTokenIdentifier> dToken =
+ new Token<AbstractDelegationTokenIdentifier>();
+ dToken.decodeFromUrlString(tokenStr);
+ InetSocketAddress service = new InetSocketAddress(url.getHost(),
+ url.getPort());
+ SecurityUtil.setTokenService(dToken, service);
+ return dToken;
+ }
+
+ /**
+ * Renews a delegation token from the server end-point using the
+ * configured Authenticator for authentication.
+ *
+ * @param url the URL to renew the delegation token from. Only HTTP/S URLs are
+ * supported.
+ * @param token the authentication token with the Delegation Token to renew.
+ * @throws IOException if an IO error occurred.
+ * @throws AuthenticationException if an authentication exception occurred.
+ */
+ public long renewDelegationToken(URL url,
+ AuthenticatedURL.Token token,
+ Token dToken)
+ throws IOException, AuthenticationException {
+ Map json = doDelegationTokenOperation(url, token,
+ DelegationTokenOperation.RENEWDELEGATIONTOKEN, null, dToken, true);
+ return (Long) json.get(RENEW_DELEGATION_TOKEN_JSON);
+ }
+
+ /**
+ * Cancels a delegation token from the server end-point. It does not require
+ * being authenticated by the configured Authenticator.
+ *
+ * @param url the URL to cancel the delegation token from. Only HTTP/S URLs
+ * are supported.
+ * @param token the authentication token with the Delegation Token to cancel.
+ * @throws IOException if an IO error occurred.
+ */
+ public void cancelDelegationToken(URL url,
+ AuthenticatedURL.Token token,
+ Token dToken)
+ throws IOException {
+ try {
+ doDelegationTokenOperation(url, token,
+ DelegationTokenOperation.CANCELDELEGATIONTOKEN, null, dToken, false);
+ } catch (AuthenticationException ex) {
+ throw new IOException("This should not happen: " + ex.getMessage(), ex);
+ }
+ }
+
+ private Map doDelegationTokenOperation(URL url,
+ AuthenticatedURL.Token token, DelegationTokenOperation operation,
+ String renewer, Token<?> dToken, boolean hasResponse)
+ throws IOException, AuthenticationException {
+ Map ret = null;
+ Map params = new HashMap();
+ params.put(OP_PARAM, operation.toString());
+ if (renewer != null) {
+ params.put(RENEWER_PARAM, renewer);
+ }
+ if (dToken != null) {
+ params.put(TOKEN_PARAM, dToken.encodeToUrlString());
+ }
+ String urlStr = url.toExternalForm();
+ StringBuilder sb = new StringBuilder(urlStr);
+ String separator = (urlStr.contains("?")) ? "&" : "?";
+ for (Map.Entry entry : params.entrySet()) {
+ sb.append(separator).append(entry.getKey()).append("=").
+ append(URLEncoder.encode(entry.getValue(), "UTF8"));
+ separator = "&";
+ }
+ url = new URL(sb.toString());
+ AuthenticatedURL aUrl = new AuthenticatedURL(this);
+ HttpURLConnection conn = aUrl.openConnection(url, token);
+ conn.setRequestMethod(operation.getHttpMethod());
+ validateResponse(conn, HttpURLConnection.HTTP_OK);
+ if (hasResponse) {
+ String contentType = conn.getHeaderField(CONTENT_TYPE);
+ contentType = (contentType != null) ? contentType.toLowerCase()
+ : null;
+ if (contentType != null &&
+ contentType.contains(APPLICATION_JSON_MIME)) {
+ try {
+ ObjectMapper mapper = new ObjectMapper();
+ ret = mapper.readValue(conn.getInputStream(), Map.class);
+ } catch (Exception ex) {
+ throw new AuthenticationException(String.format(
+ "'%s' did not handle the '%s' delegation token operation: %s",
+ url.getAuthority(), operation, ex.getMessage()), ex);
+ }
+ } else {
+ throw new AuthenticationException(String.format("'%s' did not " +
+ "respond with JSON to the '%s' delegation token operation",
+ url.getAuthority(), operation));
+ }
+ }
+ return ret;
+ }
+
+ @SuppressWarnings("unchecked")
+ private static void validateResponse(HttpURLConnection conn, int expected)
+ throws IOException {
+ int status = conn.getResponseCode();
+ if (status != expected) {
+ try {
+ conn.getInputStream().close();
+ } catch (IOException ex) {
+ //NOP
+ }
+ String msg = String.format("HTTP status, expected [%d], got [%d]: %s",
+ expected, status, conn.getResponseMessage());
+ LOG.debug(msg);
+ throw new IOException(msg);
+ }
+ }
+
+}
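
Usage illustration (a minimal client-side sketch, not part of the patch): the endpoint URL and renewer name are assumptions, and the server is assumed to run one of the DelegationTokenAuthenticationHandler subclasses added by this patch. It exercises the three operations implemented by doDelegationTokenOperation() above.

    import java.net.URL;

    import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
    import org.apache.hadoop.security.token.Token;
    import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
    import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticator;

    public class DelegationTokenClientSketch {
      public static void main(String[] args) throws Exception {
        URL url = new URL("http://localhost:8080/service"); // assumed endpoint
        AuthenticatedURL.Token authToken = new AuthenticatedURL.Token();
        KerberosDelegationTokenAuthenticator authenticator =
            new KerberosDelegationTokenAuthenticator();

        // GETDELEGATIONTOKEN, RENEWDELEGATIONTOKEN and CANCELDELEGATIONTOKEN
        // round trips against the remote endpoint.
        Token<AbstractDelegationTokenIdentifier> dt =
            authenticator.getDelegationToken(url, authToken, "renewer-user");
        long expiration = authenticator.renewDelegationToken(url, authToken, dt);
        System.out.println("renewed until: " + expiration);
        authenticator.cancelDelegationToken(url, authToken, dt);
      }
    }
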
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/DelegationTokenIdentifier.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenIdentifier.java
similarity index 76%
rename from hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/DelegationTokenIdentifier.java
rename to hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenIdentifier.java
index baa4603bc4..2836b9ab73 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/DelegationTokenIdentifier.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenIdentifier.java
@@ -15,21 +15,24 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.lib.service;
+package org.apache.hadoop.security.token.delegation.web;
import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
+import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
/**
- * HttpFS DelegationTokenIdentifier implementation.
+ * Concrete delegation token identifier used by {@link DelegationTokenManager},
+ * {@link KerberosDelegationTokenAuthenticationHandler} and
+ * {@link DelegationTokenAuthenticationFilter}.
*/
@InterfaceAudience.Private
+@InterfaceStability.Evolving
public class DelegationTokenIdentifier
- extends AbstractDelegationTokenIdentifier {
+ extends AbstractDelegationTokenIdentifier {
- private Text kind = WebHdfsFileSystem.TOKEN_KIND;
+ private Text kind;
public DelegationTokenIdentifier(Text kind) {
this.kind = kind;
@@ -50,8 +53,8 @@ public DelegationTokenIdentifier(Text kind, Text owner, Text renewer,
}
/**
- * Returns the kind, TOKEN_KIND.
- * @return returns TOKEN_KIND.
+ * Return the delegation token kind.
+ * @return the delegation token kind.
*/
@Override
public Text getKind() {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenManager.java
new file mode 100644
index 0000000000..2e6b46e413
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenManager.java
@@ -0,0 +1,153 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security.token.delegation.web;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
+
+import java.io.ByteArrayInputStream;
+import java.io.DataInputStream;
+import java.io.IOException;
+
+/**
+ * Delegation Token Manager used by the
+ * {@link KerberosDelegationTokenAuthenticationHandler}.
+ *
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+class DelegationTokenManager {
+
+ private static class DelegationTokenSecretManager
+ extends AbstractDelegationTokenSecretManager {
+
+ private Text tokenKind;
+
+ public DelegationTokenSecretManager(Text tokenKind,
+ long delegationKeyUpdateInterval,
+ long delegationTokenMaxLifetime,
+ long delegationTokenRenewInterval,
+ long delegationTokenRemoverScanInterval) {
+ super(delegationKeyUpdateInterval, delegationTokenMaxLifetime,
+ delegationTokenRenewInterval, delegationTokenRemoverScanInterval);
+ this.tokenKind = tokenKind;
+ }
+
+ @Override
+ public DelegationTokenIdentifier createIdentifier() {
+ return new DelegationTokenIdentifier(tokenKind);
+ }
+
+ }
+
+ private AbstractDelegationTokenSecretManager secretManager = null;
+ private boolean managedSecretManager;
+ private Text tokenKind;
+
+ public DelegationTokenManager(Text tokenKind,
+ long delegationKeyUpdateInterval,
+ long delegationTokenMaxLifetime,
+ long delegationTokenRenewInterval,
+ long delegationTokenRemoverScanInterval) {
+ this.secretManager = new DelegationTokenSecretManager(tokenKind,
+ delegationKeyUpdateInterval, delegationTokenMaxLifetime,
+ delegationTokenRenewInterval, delegationTokenRemoverScanInterval);
+ this.tokenKind = tokenKind;
+ managedSecretManager = true;
+ }
+
+ /**
+ * Sets an external DelegationTokenSecretManager instance to
+ * manage creation and verification of Delegation Tokens.
+ *
+ * This is useful for use cases where secrets must be shared across multiple
+ * services.
+ *
+ * @param secretManager a DelegationTokenSecretManager instance
+ */
+ public void setExternalDelegationTokenSecretManager(
+ AbstractDelegationTokenSecretManager secretManager) {
+ this.secretManager.stopThreads();
+ this.secretManager = secretManager;
+ this.tokenKind = secretManager.createIdentifier().getKind();
+ managedSecretManager = false;
+ }
+
+ public void init() {
+ if (managedSecretManager) {
+ try {
+ secretManager.startThreads();
+ } catch (IOException ex) {
+ throw new RuntimeException("Could not start " +
+ secretManager.getClass() + ": " + ex.toString(), ex);
+ }
+ }
+ }
+
+ public void destroy() {
+ if (managedSecretManager) {
+ secretManager.stopThreads();
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ public Token createToken(UserGroupInformation ugi,
+ String renewer) {
+ renewer = (renewer == null) ? ugi.getShortUserName() : renewer;
+ String user = ugi.getUserName();
+ Text owner = new Text(user);
+ Text realUser = null;
+ if (ugi.getRealUser() != null) {
+ realUser = new Text(ugi.getRealUser().getUserName());
+ }
+ DelegationTokenIdentifier tokenIdentifier = new DelegationTokenIdentifier(
+ tokenKind, owner, new Text(renewer), realUser);
+ return new Token(tokenIdentifier, secretManager);
+ }
+
+ @SuppressWarnings("unchecked")
+ public long renewToken(Token token, String renewer)
+ throws IOException {
+ return secretManager.renewToken(token, renewer);
+ }
+
+ @SuppressWarnings("unchecked")
+ public void cancelToken(Token token,
+ String canceler) throws IOException {
+ canceler = (canceler != null) ? canceler :
+ verifyToken(token).getShortUserName();
+ secretManager.cancelToken(token, canceler);
+ }
+
+ @SuppressWarnings("unchecked")
+ public UserGroupInformation verifyToken(Token<DelegationTokenIdentifier>
+ token) throws IOException {
+ ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
+ DataInputStream dis = new DataInputStream(buf);
+ DelegationTokenIdentifier id = new DelegationTokenIdentifier(tokenKind);
+ id.readFields(dis);
+ dis.close();
+ secretManager.verifyToken(id, token.getPassword());
+ return id.getUser();
+ }
+
+}
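
Usage illustration (a minimal sketch, not part of the patch): because DelegationTokenManager is package-private, the sketch assumes it lives in the same org.apache.hadoop.security.token.delegation.web package; the "EXAMPLE_KIND" token kind and the interval values are illustrative only.

    package org.apache.hadoop.security.token.delegation.web;

    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.security.token.Token;

    public class DelegationTokenManagerSketch {
      public static void main(String[] args) throws Exception {
        // The constructor takes intervals in milliseconds.
        DelegationTokenManager mgr = new DelegationTokenManager(
            new Text("EXAMPLE_KIND"),
            24 * 60 * 60 * 1000L,      // master key update interval: 1 day
            7 * 24 * 60 * 60 * 1000L,  // max token lifetime: 7 days
            24 * 60 * 60 * 1000L,      // renew interval: 1 day
            60 * 60 * 1000L);          // removal scan interval: 1 hour
        mgr.init();
        try {
          Token<DelegationTokenIdentifier> token = mgr.createToken(
              UserGroupInformation.getCurrentUser(), "renewer");
          UserGroupInformation owner = mgr.verifyToken(token);
          System.out.println("token issued to: " + owner.getUserName());
          mgr.cancelToken(token, owner.getShortUserName());
        } finally {
          mgr.destroy();
        }
      }
    }
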
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSPseudoAuthenticator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/HttpUserGroupInformation.java
similarity index 58%
rename from hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSPseudoAuthenticator.java
rename to hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/HttpUserGroupInformation.java
index 180149c8e2..614c0d3b36 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSPseudoAuthenticator.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/HttpUserGroupInformation.java
@@ -15,33 +15,29 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-
-package org.apache.hadoop.fs.http.client;
+package org.apache.hadoop.security.token.delegation.web;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authentication.client.PseudoAuthenticator;
-import java.io.IOException;
+import javax.servlet.http.HttpServletRequest;
/**
- * A PseudoAuthenticator subclass that uses FileSystemAccess's
- * UserGroupInformation to obtain the client user name (the UGI's login user).
+ * Util class that returns the remote {@link UserGroupInformation} in scope
+ * for the HTTP request.
*/
@InterfaceAudience.Private
-public class HttpFSPseudoAuthenticator extends PseudoAuthenticator {
+public class HttpUserGroupInformation {
/**
- * Return the client user name.
+ * Returns the remote {@link UserGroupInformation} in context for the current
+ * HTTP request, taking into account proxy user requests.
*
- * @return the client user name.
+ * @return the remote {@link UserGroupInformation}, NULL if none.
*/
- @Override
- protected String getUserName() {
- try {
- return UserGroupInformation.getLoginUser().getUserName();
- } catch (IOException ex) {
- throw new SecurityException("Could not obtain current user, " + ex.getMessage(), ex);
- }
+ public static UserGroupInformation get() {
+ return DelegationTokenAuthenticationFilter.
+ getHttpUserGroupInformationInContext();
}
+
}
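
Usage illustration (a hypothetical servlet sketch, not part of the patch): it shows how application code running behind the DelegationTokenAuthenticationFilter could obtain the remote user; the servlet class and its mapping are assumptions.

    import java.io.IOException;
    import javax.servlet.http.HttpServlet;
    import javax.servlet.http.HttpServletRequest;
    import javax.servlet.http.HttpServletResponse;

    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.security.token.delegation.web.HttpUserGroupInformation;

    public class WhoAmIServlet extends HttpServlet {
      @Override
      protected void doGet(HttpServletRequest req, HttpServletResponse resp)
          throws IOException {
        // Only set while the request is being processed inside the filter.
        UserGroupInformation ugi = HttpUserGroupInformation.get();
        if (ugi == null) {
          resp.sendError(HttpServletResponse.SC_UNAUTHORIZED);
          return;
        }
        resp.getWriter().println("remote user: " + ugi.getShortUserName());
      }
    }
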
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/KerberosDelegationTokenAuthenticationHandler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/KerberosDelegationTokenAuthenticationHandler.java
new file mode 100644
index 0000000000..395d2f2f27
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/KerberosDelegationTokenAuthenticationHandler.java
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security.token.delegation.web;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.security.authentication.server.AuthenticationHandler;
+import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
+
+/**
+ * An {@link AuthenticationHandler} that implements Kerberos SPNEGO mechanism
+ * for HTTP and supports Delegation Token functionality.
+ *
+ * In addition to the {@link KerberosAuthenticationHandler} configuration
+ * properties, this handler supports:
+ *
+ * kerberos.delegation-token.token-kind: the token kind for generated tokens
+ * (no default, required property).
+ * kerberos.delegation-token.update-interval.sec: secret manager master key
+ * update interval in seconds (default 1 day).
+ * kerberos.delegation-token.max-lifetime.sec: maximum life of a delegation
+ * token in seconds (default 7 days).
+ * kerberos.delegation-token.renewal-interval.sec: renewal interval for
+ * delegation tokens in seconds (default 1 day).
+ * kerberos.delegation-token.removal-scan-interval.sec: delegation tokens
+ * removal scan interval in seconds (default 1 hour).
+ *
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class KerberosDelegationTokenAuthenticationHandler
+ extends DelegationTokenAuthenticationHandler {
+
+ public KerberosDelegationTokenAuthenticationHandler() {
+ super(new KerberosAuthenticationHandler(KerberosAuthenticationHandler.TYPE +
+ TYPE_POSTFIX));
+ }
+
+}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/KerberosDelegationTokenAuthenticator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/KerberosDelegationTokenAuthenticator.java
new file mode 100644
index 0000000000..7e0e266109
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/KerberosDelegationTokenAuthenticator.java
@@ -0,0 +1,46 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security.token.delegation.web;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.security.authentication.client.Authenticator;
+import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
+
+/**
+ * The KerberosDelegationTokenAuthenticator provides support for
+ * Kerberos SPNEGO authentication mechanism and support for Hadoop Delegation
+ * Token operations.
+ *
+ * It falls back to the {@link PseudoDelegationTokenAuthenticator} if the HTTP
+ * endpoint does not trigger a SPNEGO authentication.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class KerberosDelegationTokenAuthenticator
+ extends DelegationTokenAuthenticator {
+
+ public KerberosDelegationTokenAuthenticator() {
+ super(new KerberosAuthenticator() {
+ @Override
+ protected Authenticator getFallBackAuthenticator() {
+ return new PseudoDelegationTokenAuthenticator();
+ }
+ });
+ }
+}
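
Usage illustration (a minimal sketch, not part of the patch): when a previously obtained delegation token is placed in the "delegation" query-string parameter, DelegationTokenAuthenticator.authenticate() above skips the SPNEGO/pseudo handshake and the server-side handler verifies the token instead. The endpoint URL is an assumption and dtUrlString stands for a token's encodeToUrlString() value.

    import java.net.HttpURLConnection;
    import java.net.URL;

    import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
    import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticator;

    public class DelegationParamSketch {
      public static void main(String[] args) throws Exception {
        String dtUrlString = args[0]; // assumed: encodeToUrlString() of a token
        URL url = new URL("http://localhost:8080/service?delegation="
            + dtUrlString);
        AuthenticatedURL.Token authToken = new AuthenticatedURL.Token();
        AuthenticatedURL aUrl =
            new AuthenticatedURL(new KerberosDelegationTokenAuthenticator());
        HttpURLConnection conn = aUrl.openConnection(url, authToken);
        System.out.println("HTTP status: " + conn.getResponseCode());
      }
    }
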
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/PseudoDelegationTokenAuthenticationHandler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/PseudoDelegationTokenAuthenticationHandler.java
new file mode 100644
index 0000000000..6846fdb87e
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/PseudoDelegationTokenAuthenticationHandler.java
@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security.token.delegation.web;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.security.authentication.server.AuthenticationHandler;
+import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
+import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
+
+/**
+ * An {@link AuthenticationHandler} that implements Hadoop's pseudo/simple
+ * authentication mechanism for HTTP and supports Delegation Token
+ * functionality.
+ *
+ * In addition to the {@link PseudoAuthenticationHandler} configuration
+ * properties, this handler supports:
+ *
+ * simple.delegation-token.token-kind: the token kind for generated tokens
+ * (no default, required property).
+ * simple.delegation-token.update-interval.sec: secret manager master key
+ * update interval in seconds (default 1 day).
+ * simple.delegation-token.max-lifetime.sec: maximum life of a delegation
+ * token in seconds (default 7 days).
+ * simple.delegation-token.renewal-interval.sec: renewal interval for
+ * delegation tokens in seconds (default 1 day).
+ * simple.delegation-token.removal-scan-interval.sec: delegation tokens
+ * removal scan interval in seconds (default 1 hour).
+ *
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class PseudoDelegationTokenAuthenticationHandler
+ extends DelegationTokenAuthenticationHandler {
+
+ public PseudoDelegationTokenAuthenticationHandler() {
+ super(new PseudoAuthenticationHandler(PseudoAuthenticationHandler.TYPE +
+ TYPE_POSTFIX));
+ }
+
+}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/PseudoDelegationTokenAuthenticator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/PseudoDelegationTokenAuthenticator.java
new file mode 100644
index 0000000000..8713aa47b8
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/PseudoDelegationTokenAuthenticator.java
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security.token.delegation.web;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.client.PseudoAuthenticator;
+
+import java.io.IOException;
+
+/**
+ * The PseudoDelegationTokenAuthenticator provides support for
+ * Hadoop's pseudo authentication mechanism that accepts
+ * the user name specified as a query string parameter and support for Hadoop
+ * Delegation Token operations.
+ *
+ * This mimics the model of Hadoop Simple authentication trusting the
+ * {@link UserGroupInformation#getCurrentUser()} value.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class PseudoDelegationTokenAuthenticator
+ extends DelegationTokenAuthenticator {
+
+ public PseudoDelegationTokenAuthenticator() {
+ super(new PseudoAuthenticator() {
+ @Override
+ protected String getUserName() {
+ try {
+ return UserGroupInformation.getCurrentUser().getShortUserName();
+ } catch (IOException ex) {
+ throw new RuntimeException(ex);
+ }
+ }
+ });
+ }
+
+}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/ServletUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/ServletUtils.java
new file mode 100644
index 0000000000..16137accc8
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/ServletUtils.java
@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security.token.delegation.web;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.http.NameValuePair;
+import org.apache.http.client.utils.URLEncodedUtils;
+
+import javax.servlet.http.HttpServletRequest;
+import java.io.IOException;
+import java.nio.charset.Charset;
+import java.util.List;
+
+/**
+ * Servlet utility methods.
+ */
+@InterfaceAudience.Private
+class ServletUtils {
+ private static final Charset UTF8_CHARSET = Charset.forName("UTF-8");
+
+ /**
+ * Extract a query string parameter without triggering HTTP parameter
+ * processing by the servlet container.
+ *
+ * @param request the request.
+ * @param name the name of the parameter to get the value of.
+ * @return the parameter value, or NULL if the parameter is not
+ * defined.
+ * @throws IOException thrown if there was an error parsing the query string.
+ */
+ public static String getParameter(HttpServletRequest request, String name)
+ throws IOException {
+ List list = URLEncodedUtils.parse(request.getQueryString(),
+ UTF8_CHARSET);
+ if (list != null) {
+ for (NameValuePair nv : list) {
+ if (name.equals(nv.getName())) {
+ return nv.getValue();
+ }
+ }
+ }
+ return null;
+ }
+}
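
Usage illustration (a minimal sketch, not part of the patch): ServletUtils is package-private, so the sketch assumes it lives in the same package. Parsing the raw query string avoids request.getParameter(), which would also make the container consume a form-encoded POST body.

    package org.apache.hadoop.security.token.delegation.web;

    import java.io.IOException;
    import javax.servlet.http.HttpServletRequest;

    class ServletUtilsSketch {
      static String operationOf(HttpServletRequest request) throws IOException {
        // For "?op=GETDELEGATIONTOKEN&renewer=bob" this returns
        // "GETDELEGATIONTOKEN"; it returns null when "op" is absent.
        return ServletUtils.getParameter(request, "op");
      }
    }
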
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
index 60b86e4442..08c1791008 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
@@ -178,6 +178,14 @@ public void testVariableSubstitution() throws IOException {
// check that expansion also occurs for getInt()
assertTrue(conf.getInt("intvar", -1) == 42);
assertTrue(conf.getInt("my.int", -1) == 42);
+
+ Map results = conf.getValByRegex("^my.*file$");
+ assertTrue(results.keySet().contains("my.relfile"));
+ assertTrue(results.keySet().contains("my.fullfile"));
+ assertTrue(results.keySet().contains("my.file"));
+ assertEquals(-1, results.get("my.relfile").indexOf("${"));
+ assertEquals(-1, results.get("my.fullfile").indexOf("${"));
+ assertEquals(-1, results.get("my.file").indexOf("${"));
}
public void testFinalParam() throws IOException {
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderFactory.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderFactory.java
index 7efaa8333b..d72ac51999 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderFactory.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderFactory.java
@@ -220,11 +220,76 @@ public void testJksProvider() throws Exception {
assertTrue(s.getPermission().toString().equals("rwx------"));
assertTrue(file + " should exist", file.isFile());
+ // Corrupt the file and check if JKS can reload from the _OLD file
+ File oldFile = new File(file.getPath() + "_OLD");
+ file.renameTo(oldFile);
+ file.delete();
+ file.createNewFile();
+ assertTrue(oldFile.exists());
+ KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0);
+ assertTrue(file.exists());
+ assertTrue(oldFile + " should be deleted", !oldFile.exists());
+ verifyAfterReload(file, provider);
+ assertTrue(!oldFile.exists());
+
+ // _NEW and current file should not exist together
+ File newFile = new File(file.getPath() + "_NEW");
+ newFile.createNewFile();
+ try {
+ provider = KeyProviderFactory.getProviders(conf).get(0);
+ Assert.fail("_NEW and current file should not exist together !!");
+ } catch (Exception e) {
+ // Ignore
+ } finally {
+ if (newFile.exists()) {
+ newFile.delete();
+ }
+ }
+
+ // Load from _NEW file
+ file.renameTo(newFile);
+ file.delete();
+ try {
+ provider = KeyProviderFactory.getProviders(conf).get(0);
+ Assert.assertFalse(newFile.exists());
+ Assert.assertFalse(oldFile.exists());
+ } catch (Exception e) {
+ Assert.fail("JKS should load from _NEW file !!");
+ // Ignore
+ }
+ verifyAfterReload(file, provider);
+
+ // _NEW exists but corrupt.. must load from _OLD
+ newFile.createNewFile();
+ file.renameTo(oldFile);
+ file.delete();
+ try {
+ provider = KeyProviderFactory.getProviders(conf).get(0);
+ Assert.assertFalse(newFile.exists());
+ Assert.assertFalse(oldFile.exists());
+ } catch (Exception e) {
+ Assert.fail("JKS should load from _OLD file !!");
+ // Ignore
+ } finally {
+ if (newFile.exists()) {
+ newFile.delete();
+ }
+ }
+ verifyAfterReload(file, provider);
+
// check permission retention after explicit change
fs.setPermission(path, new FsPermission("777"));
checkPermissionRetention(conf, ourUrl, path);
}
+ private void verifyAfterReload(File file, KeyProvider provider)
+ throws IOException {
+ List existingKeys = provider.getKeys();
+ assertTrue(existingKeys.contains("key4"));
+ assertTrue(existingKeys.contains("key3"));
+ assertTrue(file.exists());
+ }
+
public void checkPermissionRetention(Configuration conf, String ourUrl, Path path) throws Exception {
KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0);
// let's add a new key and flush and check that permissions are still set to 777
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestPath.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestPath.java
index 54d25c995b..94908da7a3 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestPath.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestPath.java
@@ -26,13 +26,11 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.AvroTestUtil;
-import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Shell;
import com.google.common.base.Joiner;
import junit.framework.TestCase;
-import static org.junit.Assert.fail;
public class TestPath extends TestCase {
/**
@@ -307,28 +305,6 @@ public void testURI() throws URISyntaxException, IOException {
// if the child uri is absolute path
assertEquals("foo://bar/fud#boo", new Path(new Path(new URI(
"foo://bar/baz#bud")), new Path(new URI("/fud#boo"))).toString());
-
- // empty URI
- URI uri3 = new URI("");
- assertEquals("", uri3.toString());
- try {
- path = new Path(uri3);
- fail("Expected exception for empty URI");
- } catch (IllegalArgumentException e) {
- // expect to receive an IllegalArgumentException
- GenericTestUtils.assertExceptionContains("Can not create a Path"
- + " from an empty URI", e);
- }
- // null URI
- uri3 = null;
- try {
- path = new Path(uri3);
- fail("Expected exception for null URI");
- } catch (IllegalArgumentException e) {
- // expect to receive an IllegalArgumentException
- GenericTestUtils.assertExceptionContains("Can not create a Path"
- + " from a null URI", e);
- }
}
/** Test URIs created from Path objects */
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestDelegationTokenAuthenticationHandlerWithMocks.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestDelegationTokenAuthenticationHandlerWithMocks.java
new file mode 100644
index 0000000000..c9d255dc5a
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestDelegationTokenAuthenticationHandlerWithMocks.java
@@ -0,0 +1,326 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security.token.delegation.web;
+
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
+import org.apache.hadoop.security.authentication.server.AuthenticationHandler;
+import org.apache.hadoop.security.authentication.server.AuthenticationToken;
+import org.apache.hadoop.security.token.SecretManager;
+import org.apache.hadoop.security.token.Token;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import javax.ws.rs.core.MediaType;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.util.Map;
+import java.util.Properties;
+
+public class TestDelegationTokenAuthenticationHandlerWithMocks {
+
+ public static class MockDelegationTokenAuthenticationHandler
+ extends DelegationTokenAuthenticationHandler {
+
+ public MockDelegationTokenAuthenticationHandler() {
+ super(new AuthenticationHandler() {
+ @Override
+ public String getType() {
+ return "T";
+ }
+
+ @Override
+ public void init(Properties config) throws ServletException {
+
+ }
+
+ @Override
+ public void destroy() {
+
+ }
+
+ @Override
+ public boolean managementOperation(AuthenticationToken token,
+ HttpServletRequest request, HttpServletResponse response)
+ throws IOException, AuthenticationException {
+ return false;
+ }
+
+ @Override
+ public AuthenticationToken authenticate(HttpServletRequest request,
+ HttpServletResponse response)
+ throws IOException, AuthenticationException {
+ response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
+ response.setHeader(KerberosAuthenticator.WWW_AUTHENTICATE, "mock");
+ return null;
+ }
+ });
+ }
+
+ }
+
+ private DelegationTokenAuthenticationHandler handler;
+
+ @Before
+ public void setUp() throws Exception {
+ Properties conf = new Properties();
+
+ conf.put(KerberosDelegationTokenAuthenticationHandler.TOKEN_KIND, "foo");
+ handler = new MockDelegationTokenAuthenticationHandler();
+ handler.initTokenManager(conf);
+ }
+
+ @After
+ public void cleanUp() {
+ handler.destroy();
+ }
+
+ @Test
+ public void testManagementOperations() throws Exception {
+ testNonManagementOperation();
+ testManagementOperationErrors();
+ testGetToken(null, new Text("foo"));
+ testGetToken("bar", new Text("foo"));
+ testCancelToken();
+ testRenewToken();
+ }
+
+ private void testNonManagementOperation() throws Exception {
+ HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+ Mockito.when(request.getParameter(
+ DelegationTokenAuthenticator.OP_PARAM)).thenReturn(null);
+ Assert.assertTrue(handler.managementOperation(null, request, null));
+ Mockito.when(request.getParameter(
+ DelegationTokenAuthenticator.OP_PARAM)).thenReturn("CREATE");
+ Assert.assertTrue(handler.managementOperation(null, request, null));
+ }
+
+ private void testManagementOperationErrors() throws Exception {
+ HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+ HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+ Mockito.when(request.getQueryString()).thenReturn(
+ DelegationTokenAuthenticator.OP_PARAM + "=" +
+ DelegationTokenAuthenticator.DelegationTokenOperation.
+ GETDELEGATIONTOKEN.toString()
+ );
+ Mockito.when(request.getMethod()).thenReturn("FOO");
+ Assert.assertFalse(handler.managementOperation(null, request, response));
+ Mockito.verify(response).sendError(
+ Mockito.eq(HttpServletResponse.SC_BAD_REQUEST),
+ Mockito.startsWith("Wrong HTTP method"));
+
+ Mockito.reset(response);
+ Mockito.when(request.getMethod()).thenReturn(
+ DelegationTokenAuthenticator.DelegationTokenOperation.
+ GETDELEGATIONTOKEN.getHttpMethod()
+ );
+ Assert.assertFalse(handler.managementOperation(null, request, response));
+ Mockito.verify(response).setStatus(
+ Mockito.eq(HttpServletResponse.SC_UNAUTHORIZED));
+ Mockito.verify(response).setHeader(
+ Mockito.eq(KerberosAuthenticator.WWW_AUTHENTICATE),
+ Mockito.eq("mock"));
+ }
+
+ private void testGetToken(String renewer, Text expectedTokenKind)
+ throws Exception {
+ DelegationTokenAuthenticator.DelegationTokenOperation op =
+ DelegationTokenAuthenticator.DelegationTokenOperation.
+ GETDELEGATIONTOKEN;
+ HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+ HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+ Mockito.when(request.getQueryString()).
+ thenReturn(DelegationTokenAuthenticator.OP_PARAM + "=" + op.toString());
+ Mockito.when(request.getMethod()).thenReturn(op.getHttpMethod());
+
+ AuthenticationToken token = Mockito.mock(AuthenticationToken.class);
+ Mockito.when(token.getUserName()).thenReturn("user");
+ Mockito.when(response.getWriter()).thenReturn(new PrintWriter(
+ new StringWriter()));
+ Assert.assertFalse(handler.managementOperation(token, request, response));
+
+ Mockito.when(request.getQueryString()).
+ thenReturn(DelegationTokenAuthenticator.OP_PARAM + "=" + op.toString() +
+ "&" + DelegationTokenAuthenticator.RENEWER_PARAM + "=" + renewer);
+
+ Mockito.reset(response);
+ Mockito.reset(token);
+ Mockito.when(token.getUserName()).thenReturn("user");
+ StringWriter writer = new StringWriter();
+ PrintWriter pwriter = new PrintWriter(writer);
+ Mockito.when(response.getWriter()).thenReturn(pwriter);
+ Assert.assertFalse(handler.managementOperation(token, request, response));
+ if (renewer == null) {
+ Mockito.verify(token).getUserName();
+ } else {
+ Mockito.verify(token).getUserName();
+ }
+ Mockito.verify(response).setStatus(HttpServletResponse.SC_OK);
+ Mockito.verify(response).setContentType(MediaType.APPLICATION_JSON);
+ pwriter.close();
+ String responseOutput = writer.toString();
+ String tokenLabel = DelegationTokenAuthenticator.
+ DELEGATION_TOKEN_JSON;
+ Assert.assertTrue(responseOutput.contains(tokenLabel));
+ Assert.assertTrue(responseOutput.contains(
+ DelegationTokenAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON));
+ ObjectMapper jsonMapper = new ObjectMapper();
+ Map json = jsonMapper.readValue(responseOutput, Map.class);
+ json = (Map) json.get(tokenLabel);
+ String tokenStr;
+ tokenStr = (String) json.get(DelegationTokenAuthenticator.
+ DELEGATION_TOKEN_URL_STRING_JSON);
+ Token dt = new Token();
+ dt.decodeFromUrlString(tokenStr);
+ handler.getTokenManager().verifyToken(dt);
+ Assert.assertEquals(expectedTokenKind, dt.getKind());
+ }
+
+ private void testCancelToken() throws Exception {
+ DelegationTokenAuthenticator.DelegationTokenOperation op =
+ DelegationTokenAuthenticator.DelegationTokenOperation.
+ CANCELDELEGATIONTOKEN;
+ HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+ HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+ Mockito.when(request.getQueryString()).thenReturn(
+ DelegationTokenAuthenticator.OP_PARAM + "=" + op.toString());
+ Mockito.when(request.getMethod()).
+ thenReturn(op.getHttpMethod());
+
+ Assert.assertFalse(handler.managementOperation(null, request, response));
+ Mockito.verify(response).sendError(
+ Mockito.eq(HttpServletResponse.SC_BAD_REQUEST),
+ Mockito.contains("requires the parameter [token]"));
+
+ Mockito.reset(response);
+ Token token =
+ handler.getTokenManager().createToken(
+ UserGroupInformation.getCurrentUser(), "foo");
+ Mockito.when(request.getQueryString()).thenReturn(
+ DelegationTokenAuthenticator.OP_PARAM + "=" + op.toString() + "&" +
+ DelegationTokenAuthenticator.TOKEN_PARAM + "=" +
+ token.encodeToUrlString());
+ Assert.assertFalse(handler.managementOperation(null, request, response));
+ Mockito.verify(response).setStatus(HttpServletResponse.SC_OK);
+ try {
+ handler.getTokenManager().verifyToken(token);
+ Assert.fail();
+ } catch (SecretManager.InvalidToken ex) {
+ //NOP
+ } catch (Throwable ex) {
+ Assert.fail();
+ }
+ }
+
+ private void testRenewToken() throws Exception {
+ DelegationTokenAuthenticator.DelegationTokenOperation op =
+ DelegationTokenAuthenticator.DelegationTokenOperation.
+ RENEWDELEGATIONTOKEN;
+ HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+ HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+ Mockito.when(request.getQueryString()).
+ thenReturn(DelegationTokenAuthenticator.OP_PARAM + "=" + op.toString());
+ Mockito.when(request.getMethod()).
+ thenReturn(op.getHttpMethod());
+
+ Assert.assertFalse(handler.managementOperation(null, request, response));
+ Mockito.verify(response).setStatus(
+ Mockito.eq(HttpServletResponse.SC_UNAUTHORIZED));
+ Mockito.verify(response).setHeader(Mockito.eq(
+ KerberosAuthenticator.WWW_AUTHENTICATE),
+ Mockito.eq("mock")
+ );
+
+ Mockito.reset(response);
+ AuthenticationToken token = Mockito.mock(AuthenticationToken.class);
+ Mockito.when(token.getUserName()).thenReturn("user");
+ Assert.assertFalse(handler.managementOperation(token, request, response));
+ Mockito.verify(response).sendError(
+ Mockito.eq(HttpServletResponse.SC_BAD_REQUEST),
+ Mockito.contains("requires the parameter [token]"));
+
+ Mockito.reset(response);
+ StringWriter writer = new StringWriter();
+ PrintWriter pwriter = new PrintWriter(writer);
+ Mockito.when(response.getWriter()).thenReturn(pwriter);
+ Token dToken =
+ handler.getTokenManager().createToken(
+ UserGroupInformation.getCurrentUser(), "user");
+ Mockito.when(request.getQueryString()).
+ thenReturn(DelegationTokenAuthenticator.OP_PARAM + "=" + op.toString() +
+ "&" + DelegationTokenAuthenticator.TOKEN_PARAM + "=" +
+ dToken.encodeToUrlString());
+ Assert.assertFalse(handler.managementOperation(token, request, response));
+ Mockito.verify(response).setStatus(HttpServletResponse.SC_OK);
+ pwriter.close();
+ Assert.assertTrue(writer.toString().contains("long"));
+ handler.getTokenManager().verifyToken(dToken);
+ }
+
+ @Test
+ public void testAuthenticate() throws Exception {
+ testValidDelegationToken();
+ testInvalidDelegationToken();
+ }
+
+ private void testValidDelegationToken() throws Exception {
+ HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+ HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+ Token dToken =
+ handler.getTokenManager().createToken(
+ UserGroupInformation.getCurrentUser(), "user");
+ Mockito.when(request.getQueryString()).thenReturn(
+ DelegationTokenAuthenticator.DELEGATION_PARAM + "=" +
+ dToken.encodeToUrlString());
+
+ AuthenticationToken token = handler.authenticate(request, response);
+ Assert.assertEquals(UserGroupInformation.getCurrentUser().
+ getShortUserName(), token.getUserName());
+ Assert.assertEquals(0, token.getExpires());
+ Assert.assertEquals(handler.getType(),
+ token.getType());
+ Assert.assertTrue(token.isExpired());
+ }
+
+ private void testInvalidDelegationToken() throws Exception {
+ HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+ HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+ Mockito.when(request.getQueryString()).thenReturn(
+ DelegationTokenAuthenticator.DELEGATION_PARAM + "=invalid");
+
+ try {
+ handler.authenticate(request, response);
+ Assert.fail();
+ } catch (AuthenticationException ex) {
+ //NOP
+ } catch (Exception ex) {
+ Assert.fail();
+ }
+ }
+
+}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestDelegationTokenManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestDelegationTokenManager.java
new file mode 100644
index 0000000000..4a0e8342f2
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestDelegationTokenManager.java
@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security.token.delegation.web;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.StringUtils;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.util.Arrays;
+
+public class TestDelegationTokenManager {
+
+ private static final long DAY_IN_SECS = 86400;
+
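+ // Exercises the full delegation token life cycle: create, verify, renew, cancel, then verify again to confirm cancellation.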
+ @Test
+ public void testDTManager() throws Exception {
+ DelegationTokenManager tm = new DelegationTokenManager(new Text("foo"),
+ DAY_IN_SECS, DAY_IN_SECS, DAY_IN_SECS, DAY_IN_SECS);
+ tm.init();
+ Token token =
+ tm.createToken(UserGroupInformation.getCurrentUser(), "foo");
+ Assert.assertNotNull(token);
+ tm.verifyToken(token);
+ Assert.assertTrue(tm.renewToken(token, "foo") > System.currentTimeMillis());
+ tm.cancelToken(token, "foo");
+ try {
+ tm.verifyToken(token);
+ Assert.fail();
+ } catch (IOException ex) {
+ //NOP
+ } catch (Exception ex) {
+ Assert.fail();
+ }
+ tm.destroy();
+ }
+
+}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java
new file mode 100644
index 0000000000..1b452f1824
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java
@@ -0,0 +1,869 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security.token.delegation.web;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.minikdc.MiniKdc;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
+import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
+import org.apache.hadoop.security.authentication.server.AuthenticationHandler;
+import org.apache.hadoop.security.authentication.server.AuthenticationToken;
+import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
+import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
+import org.apache.hadoop.security.authentication.util.KerberosUtil;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.mortbay.jetty.Connector;
+import org.mortbay.jetty.Server;
+import org.mortbay.jetty.servlet.Context;
+import org.mortbay.jetty.servlet.FilterHolder;
+import org.mortbay.jetty.servlet.ServletHolder;
+
+import javax.security.auth.Subject;
+import javax.security.auth.kerberos.KerberosPrincipal;
+import javax.security.auth.login.AppConfigurationEntry;
+import javax.security.auth.login.Configuration;
+import javax.security.auth.login.LoginContext;
+import javax.servlet.Filter;
+import javax.servlet.FilterConfig;
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import java.io.File;
+import java.io.IOException;
+import java.io.Writer;
+import java.net.HttpURLConnection;
+import java.net.InetAddress;
+import java.net.ServerSocket;
+import java.net.URL;
+import java.security.Principal;
+import java.security.PrivilegedActionException;
+import java.security.PrivilegedExceptionAction;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+import java.util.UUID;
+import java.util.concurrent.Callable;
+
+public class TestWebDelegationToken {
+ private static final String OK_USER = "ok-user";
+ private static final String FAIL_USER = "fail-user";
+ private static final String FOO_USER = "foo";
+
+ private Server jetty;
+
+ public static class DummyAuthenticationHandler
+ implements AuthenticationHandler {
+ @Override
+ public String getType() {
+ return "dummy";
+ }
+
+ @Override
+ public void init(Properties config) throws ServletException {
+ }
+
+ @Override
+ public void destroy() {
+ }
+
+ @Override
+ public boolean managementOperation(AuthenticationToken token,
+ HttpServletRequest request, HttpServletResponse response)
+ throws IOException, AuthenticationException {
+ return false;
+ }
+
+ @Override
+ public AuthenticationToken authenticate(HttpServletRequest request,
+ HttpServletResponse response)
+ throws IOException, AuthenticationException {
+ AuthenticationToken token = null;
+ if (request.getParameter("authenticated") != null) {
+ token = new AuthenticationToken(request.getParameter("authenticated"),
+ "U", "test");
+ } else {
+ response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
+ response.setHeader(KerberosAuthenticator.WWW_AUTHENTICATE, "dummy");
+ }
+ return token;
+ }
+ }
+
+ public static class DummyDelegationTokenAuthenticationHandler extends
+ DelegationTokenAuthenticationHandler {
+ public DummyDelegationTokenAuthenticationHandler() {
+ super(new DummyAuthenticationHandler());
+ }
+
+ @Override
+ public void init(Properties config) throws ServletException {
+ Properties conf = new Properties(config);
+ conf.setProperty(TOKEN_KIND, "token-kind");
+ initTokenManager(conf);
+ }
+ }
+
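+ // Test filter that is hard-wired to the dummy delegation token handler, ignoring the servlet filter configuration.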
+ public static class AFilter extends DelegationTokenAuthenticationFilter {
+
+ @Override
+ protected Properties getConfiguration(String configPrefix,
+ FilterConfig filterConfig) {
+ Properties conf = new Properties();
+ conf.setProperty(AUTH_TYPE,
+ DummyDelegationTokenAuthenticationHandler.class.getName());
+ return conf;
+ }
+ }
+
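+ // Protected test resource: responds with "ping" on GET and echoes the request body on POST.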
+ public static class PingServlet extends HttpServlet {
+
+ @Override
+ protected void doGet(HttpServletRequest req, HttpServletResponse resp)
+ throws ServletException, IOException {
+ resp.setStatus(HttpServletResponse.SC_OK);
+ resp.getWriter().write("ping");
+ }
+
+ @Override
+ protected void doPost(HttpServletRequest req, HttpServletResponse resp)
+ throws ServletException, IOException {
+ Writer writer = resp.getWriter();
+ writer.write("ping: ");
+ IOUtils.copy(req.getReader(), writer);
+ resp.setStatus(HttpServletResponse.SC_OK);
+ }
+ }
+
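+ // Finds a free local port and binds an embedded Jetty server to it for the HTTP round-trip tests.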
+ protected Server createJettyServer() {
+ try {
+ InetAddress localhost = InetAddress.getLocalHost();
+ ServerSocket ss = new ServerSocket(0, 50, localhost);
+ int port = ss.getLocalPort();
+ ss.close();
+ jetty = new Server(0);
+ jetty.getConnectors()[0].setHost("localhost");
+ jetty.getConnectors()[0].setPort(port);
+ return jetty;
+ } catch (Exception ex) {
+ throw new RuntimeException("Could not setup Jetty: " + ex.getMessage(),
+ ex);
+ }
+ }
+
+ protected String getJettyURL() {
+ Connector c = jetty.getConnectors()[0];
+ return "http://" + c.getHost() + ":" + c.getPort();
+ }
+
+ @Before
+ public void setUp() throws Exception {
+ // resetting hadoop security to simple
+ org.apache.hadoop.conf.Configuration conf =
+ new org.apache.hadoop.conf.Configuration();
+ UserGroupInformation.setConfiguration(conf);
+
+ jetty = createJettyServer();
+ }
+
+ @After
+ public void cleanUp() throws Exception {
+ jetty.stop();
+
+ // resetting hadoop security to simple
+ org.apache.hadoop.conf.Configuration conf =
+ new org.apache.hadoop.conf.Configuration();
+ UserGroupInformation.setConfiguration(conf);
+ }
+
+ protected Server getJetty() {
+ return jetty;
+ }
+
+ @Test
+ public void testRawHttpCalls() throws Exception {
+ final Server jetty = createJettyServer();
+ Context context = new Context();
+ context.setContextPath("/foo");
+ jetty.setHandler(context);
+ context.addFilter(new FilterHolder(AFilter.class), "/*", 0);
+ context.addServlet(new ServletHolder(PingServlet.class), "/bar");
+ try {
+ jetty.start();
+ URL nonAuthURL = new URL(getJettyURL() + "/foo/bar");
+ URL authURL = new URL(getJettyURL() + "/foo/bar?authenticated=foo");
+
+ // unauthenticated access to URL
+ HttpURLConnection conn = (HttpURLConnection) nonAuthURL.openConnection();
+ Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,
+ conn.getResponseCode());
+
+ // authenticated access to URL
+ conn = (HttpURLConnection) authURL.openConnection();
+ Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
+
+ // unauthenticated access to get delegation token
+ URL url = new URL(nonAuthURL.toExternalForm() + "?op=GETDELEGATIONTOKEN");
+ conn = (HttpURLConnection) url.openConnection();
+ Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,
+ conn.getResponseCode());
+
+ // authenticated access to get delegation token
+ url = new URL(authURL.toExternalForm() +
+ "&op=GETDELEGATIONTOKEN&renewer=foo");
+ conn = (HttpURLConnection) url.openConnection();
+ Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
+ ObjectMapper mapper = new ObjectMapper();
+ Map map = mapper.readValue(conn.getInputStream(), Map.class);
+ String dt = (String) ((Map) map.get("Token")).get("urlString");
+ Assert.assertNotNull(dt);
+
+ // delegation token access to URL
+ url = new URL(nonAuthURL.toExternalForm() + "?delegation=" + dt);
+ conn = (HttpURLConnection) url.openConnection();
+ Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
+
+ // delegation token and authenticated access to URL
+ url = new URL(authURL.toExternalForm() + "&delegation=" + dt);
+ conn = (HttpURLConnection) url.openConnection();
+ Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
+
+ // renew delegation token, unauthenticated access to URL
+ url = new URL(nonAuthURL.toExternalForm() +
+ "?op=RENEWDELEGATIONTOKEN&token=" + dt);
+ conn = (HttpURLConnection) url.openConnection();
+ conn.setRequestMethod("PUT");
+ Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,
+ conn.getResponseCode());
+
+ // renew delegation token, authenticated access to URL
+ url = new URL(authURL.toExternalForm() +
+ "&op=RENEWDELEGATIONTOKEN&token=" + dt);
+ conn = (HttpURLConnection) url.openConnection();
+ conn.setRequestMethod("PUT");
+ Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
+
+ // renew delegation token, authenticated access to URL, not the renewer
+ url = new URL(getJettyURL() +
+ "/foo/bar?authenticated=bar&op=RENEWDELEGATIONTOKEN&token=" + dt);
+ conn = (HttpURLConnection) url.openConnection();
+ conn.setRequestMethod("PUT");
+ Assert.assertEquals(HttpURLConnection.HTTP_FORBIDDEN,
+ conn.getResponseCode());
+
+ // cancel delegation token, unauthenticated access to URL
+ url = new URL(nonAuthURL.toExternalForm() +
+ "?op=CANCELDELEGATIONTOKEN&token=" + dt);
+ conn = (HttpURLConnection) url.openConnection();
+ conn.setRequestMethod("PUT");
+ Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
+
+ // cancel already-canceled delegation token, unauthenticated access to URL
+ url = new URL(nonAuthURL.toExternalForm() +
+ "?op=CANCELDELEGATIONTOKEN&token=" + dt);
+ conn = (HttpURLConnection) url.openConnection();
+ conn.setRequestMethod("PUT");
+ Assert.assertEquals(HttpURLConnection.HTTP_NOT_FOUND,
+ conn.getResponseCode());
+
+ // get new delegation token
+ url = new URL(authURL.toExternalForm() +
+ "&op=GETDELEGATIONTOKEN&renewer=foo");
+ conn = (HttpURLConnection) url.openConnection();
+ Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
+ mapper = new ObjectMapper();
+ map = mapper.readValue(conn.getInputStream(), Map.class);
+ dt = (String) ((Map) map.get("Token")).get("urlString");
+ Assert.assertNotNull(dt);
+
+ // cancel delegation token, authenticated access to URL
+ url = new URL(authURL.toExternalForm() +
+ "&op=CANCELDELEGATIONTOKEN&token=" + dt);
+ conn = (HttpURLConnection) url.openConnection();
+ conn.setRequestMethod("PUT");
+ Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
+ } finally {
+ jetty.stop();
+ }
+ }
+
+ @Test
+ public void testDelegationTokenAuthenticatorCalls() throws Exception {
+ final Server jetty = createJettyServer();
+ Context context = new Context();
+ context.setContextPath("/foo");
+ jetty.setHandler(context);
+ context.addFilter(new FilterHolder(AFilter.class), "/*", 0);
+ context.addServlet(new ServletHolder(PingServlet.class), "/bar");
+
+ try {
+ jetty.start();
+ URL nonAuthURL = new URL(getJettyURL() + "/foo/bar");
+ URL authURL = new URL(getJettyURL() + "/foo/bar?authenticated=foo");
+ URL authURL2 = new URL(getJettyURL() + "/foo/bar?authenticated=bar");
+
+ DelegationTokenAuthenticatedURL.Token token =
+ new DelegationTokenAuthenticatedURL.Token();
+ DelegationTokenAuthenticatedURL aUrl =
+ new DelegationTokenAuthenticatedURL();
+
+ try {
+ aUrl.getDelegationToken(nonAuthURL, token, FOO_USER);
+ Assert.fail();
+ } catch (Exception ex) {
+ Assert.assertTrue(ex.getMessage().contains("401"));
+ }
+
+ aUrl.getDelegationToken(authURL, token, FOO_USER);
+ Assert.assertNotNull(token.getDelegationToken());
+ Assert.assertEquals(new Text("token-kind"),
+ token.getDelegationToken().getKind());
+
+ aUrl.renewDelegationToken(authURL, token);
+
+ try {
+ aUrl.renewDelegationToken(nonAuthURL, token);
+ Assert.fail();
+ } catch (Exception ex) {
+ Assert.assertTrue(ex.getMessage().contains("401"));
+ }
+
+ aUrl.getDelegationToken(authURL, token, FOO_USER);
+
+ try {
+ aUrl.renewDelegationToken(authURL2, token);
+ Assert.fail();
+ } catch (Exception ex) {
+ Assert.assertTrue(ex.getMessage().contains("403"));
+ }
+
+ aUrl.getDelegationToken(authURL, token, FOO_USER);
+
+ aUrl.cancelDelegationToken(authURL, token);
+
+ aUrl.getDelegationToken(authURL, token, FOO_USER);
+
+ aUrl.cancelDelegationToken(nonAuthURL, token);
+
+ aUrl.getDelegationToken(authURL, token, FOO_USER);
+
+ try {
+ aUrl.renewDelegationToken(nonAuthURL, token);
+ } catch (Exception ex) {
+ Assert.assertTrue(ex.getMessage().contains("401"));
+ }
+
+ } finally {
+ jetty.stop();
+ }
+ }
+
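+ // Minimal secret manager that issues identifiers of kind "fooKind"; used to verify the external secret manager hook.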
+ private static class DummyDelegationTokenSecretManager
+ extends AbstractDelegationTokenSecretManager<DelegationTokenIdentifier> {
+
+ public DummyDelegationTokenSecretManager() {
+ super(10000, 10000, 10000, 10000);
+ }
+
+ @Override
+ public DelegationTokenIdentifier createIdentifier() {
+ return new DelegationTokenIdentifier(new Text("fooKind"));
+ }
+
+ }
+
+ @Test
+ public void testExternalDelegationTokenSecretManager() throws Exception {
+ DummyDelegationTokenSecretManager secretMgr
+ = new DummyDelegationTokenSecretManager();
+ final Server jetty = createJettyServer();
+ Context context = new Context();
+ context.setContextPath("/foo");
+ jetty.setHandler(context);
+ context.addFilter(new FilterHolder(AFilter.class), "/*", 0);
+ context.addServlet(new ServletHolder(PingServlet.class), "/bar");
+ try {
+ secretMgr.startThreads();
+ context.setAttribute(DelegationTokenAuthenticationFilter.
+ DELEGATION_TOKEN_SECRET_MANAGER_ATTR, secretMgr);
+ jetty.start();
+ URL authURL = new URL(getJettyURL() + "/foo/bar?authenticated=foo");
+
+ DelegationTokenAuthenticatedURL.Token token =
+ new DelegationTokenAuthenticatedURL.Token();
+ DelegationTokenAuthenticatedURL aUrl =
+ new DelegationTokenAuthenticatedURL();
+
+ aUrl.getDelegationToken(authURL, token, FOO_USER);
+ Assert.assertNotNull(token.getDelegationToken());
+ Assert.assertEquals(new Text("fooKind"),
+ token.getDelegationToken().getKind());
+
+ } finally {
+ jetty.stop();
+ secretMgr.stopThreads();
+ }
+ }
+
+ public static class NoDTFilter extends AuthenticationFilter {
+
+ @Override
+ protected Properties getConfiguration(String configPrefix,
+ FilterConfig filterConfig) {
+ Properties conf = new Properties();
+ conf.setProperty(AUTH_TYPE, PseudoAuthenticationHandler.TYPE);
+ return conf;
+ }
+ }
+
+
+ public static class NoDTHandlerDTAFilter
+ extends DelegationTokenAuthenticationFilter {
+
+ @Override
+ protected Properties getConfiguration(String configPrefix,
+ FilterConfig filterConfig) {
+ Properties conf = new Properties();
+ conf.setProperty(AUTH_TYPE, PseudoAuthenticationHandler.TYPE);
+ return conf;
+ }
+ }
+
+ public static class UserServlet extends HttpServlet {
+
+ @Override
+ protected void doGet(HttpServletRequest req, HttpServletResponse resp)
+ throws ServletException, IOException {
+ resp.setStatus(HttpServletResponse.SC_OK);
+ resp.getWriter().write(req.getUserPrincipal().getName());
+ }
+ }
+
+ @Test
+ public void testDelegationTokenAuthenticationURLWithNoDTFilter()
+ throws Exception {
+ testDelegationTokenAuthenticatedURLWithNoDT(NoDTFilter.class);
+ }
+
+ @Test
+ public void testDelegationTokenAuthenticationURLWithNoDTHandler()
+ throws Exception {
+ testDelegationTokenAuthenticatedURLWithNoDT(NoDTHandlerDTAFilter.class);
+ }
+
+ // This also implicitly tests the KerberosDelegationTokenAuthenticator fallback.
+ private void testDelegationTokenAuthenticatedURLWithNoDT(
+ Class<? extends Filter> filterClass) throws Exception {
+ final Server jetty = createJettyServer();
+ Context context = new Context();
+ context.setContextPath("/foo");
+ jetty.setHandler(context);
+ context.addFilter(new FilterHolder(filterClass), "/*", 0);
+ context.addServlet(new ServletHolder(UserServlet.class), "/bar");
+
+ try {
+ jetty.start();
+ final URL url = new URL(getJettyURL() + "/foo/bar");
+
+ UserGroupInformation ugi = UserGroupInformation.createRemoteUser(FOO_USER);
+ ugi.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override
+ public Void run() throws Exception {
+ DelegationTokenAuthenticatedURL.Token token =
+ new DelegationTokenAuthenticatedURL.Token();
+ DelegationTokenAuthenticatedURL aUrl =
+ new DelegationTokenAuthenticatedURL();
+ HttpURLConnection conn = aUrl.openConnection(url, token);
+ Assert.assertEquals(HttpURLConnection.HTTP_OK,
+ conn.getResponseCode());
+ List<String> ret = IOUtils.readLines(conn.getInputStream());
+ Assert.assertEquals(1, ret.size());
+ Assert.assertEquals(FOO_USER, ret.get(0));
+
+ try {
+ aUrl.getDelegationToken(url, token, FOO_USER);
+ Assert.fail();
+ } catch (AuthenticationException ex) {
+ Assert.assertTrue(ex.getMessage().contains(
+ "delegation token operation"));
+ }
+ return null;
+ }
+ });
+ } finally {
+ jetty.stop();
+ }
+ }
+
+ public static class PseudoDTAFilter
+ extends DelegationTokenAuthenticationFilter {
+
+ @Override
+ protected Properties getConfiguration(String configPrefix,
+ FilterConfig filterConfig) {
+ Properties conf = new Properties();
+ conf.setProperty(AUTH_TYPE,
+ PseudoDelegationTokenAuthenticationHandler.class.getName());
+ conf.setProperty(DelegationTokenAuthenticationHandler.TOKEN_KIND,
+ "token-kind");
+ return conf;
+ }
+
+ @Override
+ protected org.apache.hadoop.conf.Configuration getProxyuserConfiguration(
+ FilterConfig filterConfig) throws ServletException {
+ org.apache.hadoop.conf.Configuration conf =
+ new org.apache.hadoop.conf.Configuration(false);
+ conf.set("proxyuser.foo.users", OK_USER);
+ conf.set("proxyuser.foo.hosts", "localhost");
+ return conf;
+ }
+ }
+
+ @Test
+ public void testFallbackToPseudoDelegationTokenAuthenticator()
+ throws Exception {
+ final Server jetty = createJettyServer();
+ Context context = new Context();
+ context.setContextPath("/foo");
+ jetty.setHandler(context);
+ context.addFilter(new FilterHolder(PseudoDTAFilter.class), "/*", 0);
+ context.addServlet(new ServletHolder(UserServlet.class), "/bar");
+
+ try {
+ jetty.start();
+ final URL url = new URL(getJettyURL() + "/foo/bar");
+
+ UserGroupInformation ugi = UserGroupInformation.createRemoteUser(FOO_USER);
+ ugi.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override
+ public Void run() throws Exception {
+ DelegationTokenAuthenticatedURL.Token token =
+ new DelegationTokenAuthenticatedURL.Token();
+ DelegationTokenAuthenticatedURL aUrl =
+ new DelegationTokenAuthenticatedURL();
+ HttpURLConnection conn = aUrl.openConnection(url, token);
+ Assert.assertEquals(HttpURLConnection.HTTP_OK,
+ conn.getResponseCode());
+ List<String> ret = IOUtils.readLines(conn.getInputStream());
+ Assert.assertEquals(1, ret.size());
+ Assert.assertEquals(FOO_USER, ret.get(0));
+
+ aUrl.getDelegationToken(url, token, FOO_USER);
+ Assert.assertNotNull(token.getDelegationToken());
+ Assert.assertEquals(new Text("token-kind"),
+ token.getDelegationToken().getKind());
+ return null;
+ }
+ });
+ } finally {
+ jetty.stop();
+ }
+ }
+
+ public static class KDTAFilter extends DelegationTokenAuthenticationFilter {
+ static String keytabFile;
+
+ @Override
+ protected Properties getConfiguration(String configPrefix,
+ FilterConfig filterConfig) {
+ Properties conf = new Properties();
+ conf.setProperty(AUTH_TYPE,
+ KerberosDelegationTokenAuthenticationHandler.class.getName());
+ conf.setProperty(KerberosAuthenticationHandler.KEYTAB, keytabFile);
+ conf.setProperty(KerberosAuthenticationHandler.PRINCIPAL,
+ "HTTP/localhost");
+ conf.setProperty(KerberosDelegationTokenAuthenticationHandler.TOKEN_KIND,
+ "token-kind");
+ return conf;
+ }
+ }
+
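+ // JAAS configuration that performs a keytab-based Kerberos login for the given principal.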
+ private static class KerberosConfiguration extends Configuration {
+ private String principal;
+ private String keytab;
+
+ public KerberosConfiguration(String principal, String keytab) {
+ this.principal = principal;
+ this.keytab = keytab;
+ }
+
+ @Override
+ public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
+ Map<String, String> options = new HashMap<String, String>();
+ options.put("principal", principal);
+ options.put("keyTab", keytab);
+ options.put("useKeyTab", "true");
+ options.put("storeKey", "true");
+ options.put("doNotPrompt", "true");
+ options.put("useTicketCache", "true");
+ options.put("renewTGT", "true");
+ options.put("refreshKrb5Config", "true");
+ options.put("isInitiator", "true");
+ String ticketCache = System.getenv("KRB5CCNAME");
+ if (ticketCache != null) {
+ options.put("ticketCache", ticketCache);
+ }
+ options.put("debug", "true");
+
+ return new AppConfigurationEntry[]{
+ new AppConfigurationEntry(KerberosUtil.getKrb5LoginModuleName(),
+ AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
+ options),};
+ }
+ }
+
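+ // Logs in through JAAS with the given principal/keytab and runs the callable inside that Kerberos subject.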
+ public static <T> T doAsKerberosUser(String principal, String keytab,
+ final Callable<T> callable) throws Exception {
+ LoginContext loginContext = null;
+ try {
+ Set<Principal> principals = new HashSet<Principal>();
+ principals.add(new KerberosPrincipal(principal));
+ Subject subject = new Subject(false, principals, new HashSet<Object>(),
+ new HashSet<Object>());
+ loginContext = new LoginContext("", subject, null,
+ new KerberosConfiguration(principal, keytab));
+ loginContext.login();
+ subject = loginContext.getSubject();
+ return Subject.doAs(subject, new PrivilegedExceptionAction<T>() {
+ @Override
+ public T run() throws Exception {
+ return callable.call();
+ }
+ });
+ } catch (PrivilegedActionException ex) {
+ throw ex.getException();
+ } finally {
+ if (loginContext != null) {
+ loginContext.logout();
+ }
+ }
+ }
+
+ @Test
+ public void testKerberosDelegationTokenAuthenticator() throws Exception {
+ // setting hadoop security to kerberos
+ org.apache.hadoop.conf.Configuration conf =
+ new org.apache.hadoop.conf.Configuration();
+ conf.set("hadoop.security.authentication", "kerberos");
+ UserGroupInformation.setConfiguration(conf);
+
+ File testDir = new File("target/" + UUID.randomUUID().toString());
+ Assert.assertTrue(testDir.mkdirs());
+ MiniKdc kdc = new MiniKdc(MiniKdc.createConf(), testDir);
+ final Server jetty = createJettyServer();
+ Context context = new Context();
+ context.setContextPath("/foo");
+ jetty.setHandler(context);
+ context.addFilter(new FilterHolder(KDTAFilter.class), "/*", 0);
+ context.addServlet(new ServletHolder(UserServlet.class), "/bar");
+ try {
+ kdc.start();
+ File keytabFile = new File(testDir, "test.keytab");
+ kdc.createPrincipal(keytabFile, "client", "HTTP/localhost");
+ KDTAFilter.keytabFile = keytabFile.getAbsolutePath();
+ jetty.start();
+
+ final DelegationTokenAuthenticatedURL.Token token =
+ new DelegationTokenAuthenticatedURL.Token();
+ final DelegationTokenAuthenticatedURL aUrl =
+ new DelegationTokenAuthenticatedURL();
+ final URL url = new URL(getJettyURL() + "/foo/bar");
+
+ try {
+ aUrl.getDelegationToken(url, token, FOO_USER);
+ Assert.fail();
+ } catch (AuthenticationException ex) {
+ Assert.assertTrue(ex.getMessage().contains("GSSException"));
+ }
+
+ doAsKerberosUser("client", keytabFile.getAbsolutePath(),
+ new Callable<Void>() {
+ @Override
+ public Void call() throws Exception {
+ aUrl.getDelegationToken(url, token, "client");
+ Assert.assertNotNull(token.getDelegationToken());
+
+ aUrl.renewDelegationToken(url, token);
+ Assert.assertNotNull(token.getDelegationToken());
+
+ aUrl.getDelegationToken(url, token, FOO_USER);
+ Assert.assertNotNull(token.getDelegationToken());
+
+ try {
+ aUrl.renewDelegationToken(url, token);
+ Assert.fail();
+ } catch (Exception ex) {
+ Assert.assertTrue(ex.getMessage().contains("403"));
+ }
+
+ aUrl.getDelegationToken(url, token, FOO_USER);
+
+ aUrl.cancelDelegationToken(url, token);
+ Assert.assertNull(token.getDelegationToken());
+
+ return null;
+ }
+ });
+ } finally {
+ jetty.stop();
+ kdc.stop();
+ }
+ }
+
+ @Test
+ public void testProxyUser() throws Exception {
+ final Server jetty = createJettyServer();
+ Context context = new Context();
+ context.setContextPath("/foo");
+ jetty.setHandler(context);
+ context.addFilter(new FilterHolder(PseudoDTAFilter.class), "/*", 0);
+ context.addServlet(new ServletHolder(UserServlet.class), "/bar");
+
+ try {
+ jetty.start();
+ final URL url = new URL(getJettyURL() + "/foo/bar");
+
+ UserGroupInformation ugi = UserGroupInformation.createRemoteUser(FOO_USER);
+ ugi.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override
+ public Void run() throws Exception {
+ DelegationTokenAuthenticatedURL.Token token =
+ new DelegationTokenAuthenticatedURL.Token();
+ DelegationTokenAuthenticatedURL aUrl =
+ new DelegationTokenAuthenticatedURL();
+
+ // proxyuser using authentication handler authentication
+ HttpURLConnection conn = aUrl.openConnection(url, token, OK_USER);
+ Assert.assertEquals(HttpURLConnection.HTTP_OK,
+ conn.getResponseCode());
+ List<String> ret = IOUtils.readLines(conn.getInputStream());
+ Assert.assertEquals(1, ret.size());
+ Assert.assertEquals(OK_USER, ret.get(0));
+
+ // unauthorized proxy user using authentication handler authentication
+ conn = aUrl.openConnection(url, token, FAIL_USER);
+ Assert.assertEquals(HttpURLConnection.HTTP_FORBIDDEN,
+ conn.getResponseCode());
+
+ // proxy using delegation token authentication
+ aUrl.getDelegationToken(url, token, FOO_USER);
+
+ UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+ ugi.addToken(token.getDelegationToken());
+ token = new DelegationTokenAuthenticatedURL.Token();
+
+ // requests using delegation token as auth do not honor doAs
+ conn = aUrl.openConnection(url, token, OK_USER);
+ Assert.assertEquals(HttpURLConnection.HTTP_OK,
+ conn.getResponseCode());
+ ret = IOUtils.readLines(conn.getInputStream());
+ Assert.assertEquals(1, ret.size());
+ Assert.assertEquals(FOO_USER, ret.get(0));
+
+ return null;
+ }
+ });
+ } finally {
+ jetty.stop();
+ }
+ }
+
+
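+ // Servlet that reports the remote user and the UGI resolved via HttpUserGroupInformation, including the real user for proxy requests.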
+ public static class UGIServlet extends HttpServlet {
+
+ @Override
+ protected void doGet(HttpServletRequest req, HttpServletResponse resp)
+ throws ServletException, IOException {
+ UserGroupInformation ugi = HttpUserGroupInformation.get();
+ if (ugi != null) {
+ String ret = "remoteuser=" + req.getRemoteUser() + ":ugi=" +
+ ugi.getShortUserName();
+ if (ugi.getAuthenticationMethod() ==
+ UserGroupInformation.AuthenticationMethod.PROXY) {
+ ret = "realugi=" + ugi.getRealUser().getShortUserName() + ":" + ret;
+ }
+ resp.setStatus(HttpServletResponse.SC_OK);
+ resp.getWriter().write(ret);
+ } else {
+ resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
+ }
+ }
+ }
+
+ @Test
+ public void testHttpUGI() throws Exception {
+ final Server jetty = createJettyServer();
+ Context context = new Context();
+ context.setContextPath("/foo");
+ jetty.setHandler(context);
+ context.addFilter(new FilterHolder(PseudoDTAFilter.class), "/*", 0);
+ context.addServlet(new ServletHolder(UGIServlet.class), "/bar");
+
+ try {
+ jetty.start();
+ final URL url = new URL(getJettyURL() + "/foo/bar");
+
+ UserGroupInformation ugi = UserGroupInformation.createRemoteUser(FOO_USER);
+ ugi.doAs(new PrivilegedExceptionAction<Void>() {
+ @Override
+ public Void run() throws Exception {
+ DelegationTokenAuthenticatedURL.Token token =
+ new DelegationTokenAuthenticatedURL.Token();
+ DelegationTokenAuthenticatedURL aUrl =
+ new DelegationTokenAuthenticatedURL();
+
+ // user foo
+ HttpURLConnection conn = aUrl.openConnection(url, token);
+ Assert.assertEquals(HttpURLConnection.HTTP_OK,
+ conn.getResponseCode());
+ List<String> ret = IOUtils.readLines(conn.getInputStream());
+ Assert.assertEquals(1, ret.size());
+ Assert.assertEquals("remoteuser=" + FOO_USER+ ":ugi=" + FOO_USER,
+ ret.get(0));
+
+ // user ok-user via proxyuser foo
+ conn = aUrl.openConnection(url, token, OK_USER);
+ Assert.assertEquals(HttpURLConnection.HTTP_OK,
+ conn.getResponseCode());
+ ret = IOUtils.readLines(conn.getInputStream());
+ Assert.assertEquals(1, ret.size());
+ Assert.assertEquals("realugi=" + FOO_USER +":remoteuser=" + OK_USER +
+ ":ugi=" + OK_USER, ret.get(0));
+
+ return null;
+ }
+ });
+ } finally {
+ jetty.stop();
+ }
+ }
+
+}
diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
index 9c4e794092..08cb91f650 100644
--- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
+++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
@@ -47,7 +47,6 @@
import java.net.URI;
import java.net.URISyntaxException;
import java.security.Principal;
-import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
@@ -59,19 +58,14 @@
@Path(KMSRESTConstants.SERVICE_VERSION)
@InterfaceAudience.Private
public class KMS {
- public static final String CREATE_KEY = "CREATE_KEY";
- public static final String DELETE_KEY = "DELETE_KEY";
- public static final String ROLL_NEW_VERSION = "ROLL_NEW_VERSION";
- public static final String GET_KEYS = "GET_KEYS";
- public static final String GET_KEYS_METADATA = "GET_KEYS_METADATA";
- public static final String GET_KEY_VERSIONS = "GET_KEY_VERSIONS";
- public static final String GET_METADATA = "GET_METADATA";
- public static final String GET_KEY_VERSION = "GET_KEY_VERSION";
- public static final String GET_CURRENT_KEY = "GET_CURRENT_KEY";
- public static final String GENERATE_EEK = "GENERATE_EEK";
- public static final String DECRYPT_EEK = "DECRYPT_EEK";
-
+ public static enum KMSOp {
+ CREATE_KEY, DELETE_KEY, ROLL_NEW_VERSION,
+ GET_KEYS, GET_KEYS_METADATA,
+ GET_KEY_VERSIONS, GET_METADATA, GET_KEY_VERSION, GET_CURRENT_KEY,
+ GENERATE_EEK, DECRYPT_EEK
+ }
+
private KeyProviderCryptoExtension provider;
private KMSAudit kmsAudit;
@@ -91,22 +85,22 @@ private static Principal getPrincipal(SecurityContext securityContext)
private static final String UNAUTHORIZED_MSG_WITH_KEY =
- "User:{0} not allowed to do ''{1}'' on ''{2}''";
+ "User:%s not allowed to do '%s' on '%s'";
private static final String UNAUTHORIZED_MSG_WITHOUT_KEY =
- "User:{0} not allowed to do ''{1}''";
+ "User:%s not allowed to do '%s'";
private void assertAccess(KMSACLs.Type aclType, Principal principal,
- String operation) throws AccessControlException {
+ KMSOp operation) throws AccessControlException {
assertAccess(aclType, principal, operation, null);
}
private void assertAccess(KMSACLs.Type aclType, Principal principal,
- String operation, String key) throws AccessControlException {
+ KMSOp operation, String key) throws AccessControlException {
if (!KMSWebApp.getACLs().hasAccess(aclType, principal.getName())) {
KMSWebApp.getUnauthorizedCallsMeter().mark();
kmsAudit.unauthorized(principal, operation, key);
- throw new AuthorizationException(MessageFormat.format(
+ throw new AuthorizationException(String.format(
(key != null) ? UNAUTHORIZED_MSG_WITH_KEY
: UNAUTHORIZED_MSG_WITHOUT_KEY,
principal.getName(), operation, key));
@@ -135,7 +129,7 @@ public Response createKey(@Context SecurityContext securityContext,
Principal user = getPrincipal(securityContext);
String name = (String) jsonKey.get(KMSRESTConstants.NAME_FIELD);
KMSClientProvider.checkNotEmpty(name, KMSRESTConstants.NAME_FIELD);
- assertAccess(KMSACLs.Type.CREATE, user, CREATE_KEY, name);
+ assertAccess(KMSACLs.Type.CREATE, user, KMSOp.CREATE_KEY, name);
String cipher = (String) jsonKey.get(KMSRESTConstants.CIPHER_FIELD);
String material = (String) jsonKey.get(KMSRESTConstants.MATERIAL_FIELD);
int length = (jsonKey.containsKey(KMSRESTConstants.LENGTH_FIELD))
@@ -146,7 +140,7 @@ public Response createKey(@Context SecurityContext securityContext,
jsonKey.get(KMSRESTConstants.ATTRIBUTES_FIELD);
if (material != null) {
assertAccess(KMSACLs.Type.SET_KEY_MATERIAL, user,
- CREATE_KEY + " with user provided material", name);
+ KMSOp.CREATE_KEY, name);
}
KeyProvider.Options options = new KeyProvider.Options(
KMSWebApp.getConfiguration());
@@ -165,7 +159,7 @@ public Response createKey(@Context SecurityContext securityContext,
provider.flush();
- kmsAudit.ok(user, CREATE_KEY, name, "UserProvidedMaterial:" +
+ kmsAudit.ok(user, KMSOp.CREATE_KEY, name, "UserProvidedMaterial:" +
(material != null) + " Description:" + description);
if (!KMSWebApp.getACLs().hasAccess(KMSACLs.Type.GET, user.getName())) {
@@ -186,12 +180,12 @@ public Response deleteKey(@Context SecurityContext securityContext,
@PathParam("name") String name) throws Exception {
KMSWebApp.getAdminCallsMeter().mark();
Principal user = getPrincipal(securityContext);
- assertAccess(KMSACLs.Type.DELETE, user, DELETE_KEY, name);
+ assertAccess(KMSACLs.Type.DELETE, user, KMSOp.DELETE_KEY, name);
KMSClientProvider.checkNotEmpty(name, "name");
provider.deleteKey(name);
provider.flush();
- kmsAudit.ok(user, DELETE_KEY, name, "");
+ kmsAudit.ok(user, KMSOp.DELETE_KEY, name, "");
return Response.ok().build();
}
@@ -205,13 +199,13 @@ public Response rolloverKey(@Context SecurityContext securityContext,
throws Exception {
KMSWebApp.getAdminCallsMeter().mark();
Principal user = getPrincipal(securityContext);
- assertAccess(KMSACLs.Type.ROLLOVER, user, ROLL_NEW_VERSION, name);
+ assertAccess(KMSACLs.Type.ROLLOVER, user, KMSOp.ROLL_NEW_VERSION, name);
KMSClientProvider.checkNotEmpty(name, "name");
String material = (String)
jsonMaterial.get(KMSRESTConstants.MATERIAL_FIELD);
if (material != null) {
assertAccess(KMSACLs.Type.SET_KEY_MATERIAL, user,
- ROLL_NEW_VERSION + " with user provided material", name);
+ KMSOp.ROLL_NEW_VERSION, name);
}
KeyProvider.KeyVersion keyVersion = (material != null)
? provider.rollNewVersion(name, Base64.decodeBase64(material))
@@ -219,7 +213,7 @@ public Response rolloverKey(@Context SecurityContext securityContext,
provider.flush();
- kmsAudit.ok(user, ROLL_NEW_VERSION, name, "UserProvidedMaterial:" +
+ kmsAudit.ok(user, KMSOp.ROLL_NEW_VERSION, name, "UserProvidedMaterial:" +
(material != null) + " NewVersion:" + keyVersion.getVersionName());
if (!KMSWebApp.getACLs().hasAccess(KMSACLs.Type.GET, user.getName())) {
@@ -233,15 +227,15 @@ public Response rolloverKey(@Context SecurityContext securityContext,
@Path(KMSRESTConstants.KEYS_METADATA_RESOURCE)
@Produces(MediaType.APPLICATION_JSON)
public Response getKeysMetadata(@Context SecurityContext securityContext,
- @QueryParam(KMSRESTConstants.KEY_OP) List<String> keyNamesList)
+ @QueryParam(KMSRESTConstants.KEY) List<String> keyNamesList)
throws Exception {
KMSWebApp.getAdminCallsMeter().mark();
Principal user = getPrincipal(securityContext);
String[] keyNames = keyNamesList.toArray(new String[keyNamesList.size()]);
- assertAccess(KMSACLs.Type.GET_METADATA, user, GET_KEYS_METADATA);
+ assertAccess(KMSACLs.Type.GET_METADATA, user, KMSOp.GET_KEYS_METADATA);
KeyProvider.Metadata[] keysMeta = provider.getKeysMetadata(keyNames);
Object json = KMSServerJSONUtils.toJSON(keyNames, keysMeta);
- kmsAudit.ok(user, GET_KEYS_METADATA, "");
+ kmsAudit.ok(user, KMSOp.GET_KEYS_METADATA, "");
return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
}
@@ -252,9 +246,9 @@ public Response getKeyNames(@Context SecurityContext securityContext)
throws Exception {
KMSWebApp.getAdminCallsMeter().mark();
Principal user = getPrincipal(securityContext);
- assertAccess(KMSACLs.Type.GET_KEYS, user, GET_KEYS);
+ assertAccess(KMSACLs.Type.GET_KEYS, user, KMSOp.GET_KEYS);
Object json = provider.getKeys();
- kmsAudit.ok(user, GET_KEYS, "");
+ kmsAudit.ok(user, KMSOp.GET_KEYS, "");
return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
}
@@ -276,9 +270,9 @@ public Response getMetadata(@Context SecurityContext securityContext,
Principal user = getPrincipal(securityContext);
KMSClientProvider.checkNotEmpty(name, "name");
KMSWebApp.getAdminCallsMeter().mark();
- assertAccess(KMSACLs.Type.GET_METADATA, user, GET_METADATA, name);
+ assertAccess(KMSACLs.Type.GET_METADATA, user, KMSOp.GET_METADATA, name);
Object json = KMSServerJSONUtils.toJSON(name, provider.getMetadata(name));
- kmsAudit.ok(user, GET_METADATA, name, "");
+ kmsAudit.ok(user, KMSOp.GET_METADATA, name, "");
return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
}
@@ -292,9 +286,9 @@ public Response getCurrentVersion(@Context SecurityContext securityContext,
Principal user = getPrincipal(securityContext);
KMSClientProvider.checkNotEmpty(name, "name");
KMSWebApp.getKeyCallsMeter().mark();
- assertAccess(KMSACLs.Type.GET, user, GET_CURRENT_KEY, name);
+ assertAccess(KMSACLs.Type.GET, user, KMSOp.GET_CURRENT_KEY, name);
Object json = KMSServerJSONUtils.toJSON(provider.getCurrentKey(name));
- kmsAudit.ok(user, GET_CURRENT_KEY, name, "");
+ kmsAudit.ok(user, KMSOp.GET_CURRENT_KEY, name, "");
return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
}
@@ -308,9 +302,9 @@ public Response getKeyVersion(@Context SecurityContext securityContext,
KMSClientProvider.checkNotEmpty(versionName, "versionName");
KMSWebApp.getKeyCallsMeter().mark();
KeyVersion keyVersion = provider.getKeyVersion(versionName);
- assertAccess(KMSACLs.Type.GET, user, GET_KEY_VERSION);
+ assertAccess(KMSACLs.Type.GET, user, KMSOp.GET_KEY_VERSION);
if (keyVersion != null) {
- kmsAudit.ok(user, GET_KEY_VERSION, keyVersion.getName(), "");
+ kmsAudit.ok(user, KMSOp.GET_KEY_VERSION, keyVersion.getName(), "");
}
Object json = KMSServerJSONUtils.toJSON(keyVersion);
return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
@@ -334,7 +328,7 @@ public Response generateEncryptedKeys(
Object retJSON;
if (edekOp.equals(KMSRESTConstants.EEK_GENERATE)) {
- assertAccess(KMSACLs.Type.GENERATE_EEK, user, GENERATE_EEK, name);
+ assertAccess(KMSACLs.Type.GENERATE_EEK, user, KMSOp.GENERATE_EEK, name);
+ List<EncryptedKeyVersion> retEdeks =
+ new LinkedList<EncryptedKeyVersion>();
@@ -345,7 +339,7 @@ public Response generateEncryptedKeys(
} catch (Exception e) {
throw new IOException(e);
}
- kmsAudit.ok(user, GENERATE_EEK, name, "");
+ kmsAudit.ok(user, KMSOp.GENERATE_EEK, name, "");
retJSON = new ArrayList();
for (EncryptedKeyVersion edek : retEdeks) {
((ArrayList)retJSON).add(KMSServerJSONUtils.toJSON(edek));
@@ -380,7 +374,7 @@ public Response decryptEncryptedKey(@Context SecurityContext securityContext,
(String) jsonPayload.get(KMSRESTConstants.MATERIAL_FIELD);
Object retJSON;
if (eekOp.equals(KMSRESTConstants.EEK_DECRYPT)) {
- assertAccess(KMSACLs.Type.DECRYPT_EEK, user, DECRYPT_EEK, keyName);
+ assertAccess(KMSACLs.Type.DECRYPT_EEK, user, KMSOp.DECRYPT_EEK, keyName);
KMSClientProvider.checkNotNull(ivStr, KMSRESTConstants.IV_FIELD);
byte[] iv = Base64.decodeBase64(ivStr);
KMSClientProvider.checkNotNull(encMaterialStr,
@@ -391,7 +385,7 @@ public Response decryptEncryptedKey(@Context SecurityContext securityContext,
new KMSClientProvider.KMSEncryptedKeyVersion(keyName, versionName,
iv, KeyProviderCryptoExtension.EEK, encMaterial));
retJSON = KMSServerJSONUtils.toJSON(retKeyVersion);
- kmsAudit.ok(user, DECRYPT_EEK, keyName, "");
+ kmsAudit.ok(user, KMSOp.DECRYPT_EEK, keyName, "");
} else {
throw new IllegalArgumentException("Wrong " + KMSRESTConstants.EEK_OP +
" value, it must be " + KMSRESTConstants.EEK_GENERATE + " or " +
@@ -412,9 +406,9 @@ public Response getKeyVersions(@Context SecurityContext securityContext,
Principal user = getPrincipal(securityContext);
KMSClientProvider.checkNotEmpty(name, "name");
KMSWebApp.getKeyCallsMeter().mark();
- assertAccess(KMSACLs.Type.GET, user, GET_KEY_VERSIONS, name);
+ assertAccess(KMSACLs.Type.GET, user, KMSOp.GET_KEY_VERSIONS, name);
Object json = KMSServerJSONUtils.toJSON(provider.getKeyVersions(name));
- kmsAudit.ok(user, GET_KEY_VERSIONS, name, "");
+ kmsAudit.ok(user, KMSOp.GET_KEY_VERSIONS, name, "");
return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
}
diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAudit.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAudit.java
index 3d387eb354..30d340d785 100644
--- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAudit.java
+++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAudit.java
@@ -50,11 +50,11 @@ private static class AuditEvent {
private final AtomicLong accessCount = new AtomicLong(-1);
private final String keyName;
private final String user;
- private final String op;
+ private final KMS.KMSOp op;
private final String extraMsg;
private final long startTime = System.currentTimeMillis();
- private AuditEvent(String keyName, String user, String op, String msg) {
+ private AuditEvent(String keyName, String user, KMS.KMSOp op, String msg) {
this.keyName = keyName;
this.user = user;
this.op = op;
@@ -77,7 +77,7 @@ public String getUser() {
return user;
}
- public String getOp() {
+ public KMS.KMSOp getOp() {
return op;
}
@@ -90,8 +90,9 @@ public static enum OpStatus {
OK, UNAUTHORIZED, UNAUTHENTICATED, ERROR;
}
- private static Set<String> AGGREGATE_OPS_WHITELIST = Sets.newHashSet(
- KMS.GET_KEY_VERSION, KMS.GET_CURRENT_KEY, KMS.DECRYPT_EEK, KMS.GENERATE_EEK
+ private static Set<KMS.KMSOp> AGGREGATE_OPS_WHITELIST = Sets.newHashSet(
+ KMS.KMSOp.GET_KEY_VERSION, KMS.KMSOp.GET_CURRENT_KEY,
+ KMS.KMSOp.DECRYPT_EEK, KMS.KMSOp.GENERATE_EEK
);
private Cache<String, AuditEvent> cache;
@@ -137,10 +138,10 @@ private void logEvent(AuditEvent event) {
event.getExtraMsg());
}
- private void op(OpStatus opStatus, final String op, final String user,
+ private void op(OpStatus opStatus, final KMS.KMSOp op, final String user,
final String key, final String extraMsg) {
if (!Strings.isNullOrEmpty(user) && !Strings.isNullOrEmpty(key)
- && !Strings.isNullOrEmpty(op)
+ && (op != null)
&& AGGREGATE_OPS_WHITELIST.contains(op)) {
String cacheKey = createCacheKey(user, key, op);
if (opStatus == OpStatus.UNAUTHORIZED) {
@@ -167,7 +168,7 @@ public AuditEvent call() throws Exception {
}
} else {
List<String> kvs = new LinkedList<String>();
- if (!Strings.isNullOrEmpty(op)) {
+ if (op != null) {
kvs.add("op=" + op);
}
if (!Strings.isNullOrEmpty(key)) {
@@ -185,16 +186,16 @@ public AuditEvent call() throws Exception {
}
}
- public void ok(Principal user, String op, String key,
+ public void ok(Principal user, KMS.KMSOp op, String key,
String extraMsg) {
op(OpStatus.OK, op, user.getName(), key, extraMsg);
}
- public void ok(Principal user, String op, String extraMsg) {
+ public void ok(Principal user, KMS.KMSOp op, String extraMsg) {
op(OpStatus.OK, op, user.getName(), null, extraMsg);
}
- public void unauthorized(Principal user, String op, String key) {
+ public void unauthorized(Principal user, KMS.KMSOp op, String key) {
op(OpStatus.UNAUTHORIZED, op, user.getName(), key, "");
}
@@ -211,7 +212,7 @@ public void unauthenticated(String remoteHost, String method,
+ " URL:" + url + " ErrorMsg:'" + extraMsg + "'");
}
- private static String createCacheKey(String user, String key, String op) {
+ private static String createCacheKey(String user, String key, KMS.KMSOp op) {
return user + "#" + key + "#" + op;
}
diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
index 30d742e7fe..35dccfc489 100644
--- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
+++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.crypto.key.kms.server;
+import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import java.io.File;
@@ -26,6 +27,7 @@
/**
* Utility class to load KMS configuration files.
*/
+@InterfaceAudience.Private
public class KMSConfiguration {
public static final String KMS_CONFIG_DIR = "kms.config.dir";
diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSJMXServlet.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSJMXServlet.java
index c8556af193..6918015a90 100644
--- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSJMXServlet.java
+++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSJMXServlet.java
@@ -17,12 +17,15 @@
*/
package org.apache.hadoop.crypto.key.kms.server;
+import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.jmx.JMXJsonServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
+
import java.io.IOException;
+@InterfaceAudience.Private
public class KMSJMXServlet extends JMXJsonServlet {
@Override
diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAudit.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAudit.java
index b5d9a36d19..857884d4e1 100644
--- a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAudit.java
+++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAudit.java
@@ -23,6 +23,7 @@
import java.io.PrintStream;
import java.security.Principal;
+import org.apache.hadoop.crypto.key.kms.server.KMS.KMSOp;
import org.apache.log4j.LogManager;
import org.apache.log4j.PropertyConfigurator;
import org.junit.After;
@@ -82,16 +83,16 @@ private String getAndResetLogOutput() {
public void testAggregation() throws Exception {
Principal luser = Mockito.mock(Principal.class);
Mockito.when(luser.getName()).thenReturn("luser");
- kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg");
- kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg");
- kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg");
- kmsAudit.ok(luser, KMS.DELETE_KEY, "k1", "testmsg");
- kmsAudit.ok(luser, KMS.ROLL_NEW_VERSION, "k1", "testmsg");
- kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg");
- kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg");
- kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg");
+ kmsAudit.ok(luser, KMSOp.DECRYPT_EEK, "k1", "testmsg");
+ kmsAudit.ok(luser, KMSOp.DECRYPT_EEK, "k1", "testmsg");
+ kmsAudit.ok(luser, KMSOp.DECRYPT_EEK, "k1", "testmsg");
+ kmsAudit.ok(luser, KMSOp.DELETE_KEY, "k1", "testmsg");
+ kmsAudit.ok(luser, KMSOp.ROLL_NEW_VERSION, "k1", "testmsg");
+ kmsAudit.ok(luser, KMSOp.DECRYPT_EEK, "k1", "testmsg");
+ kmsAudit.ok(luser, KMSOp.DECRYPT_EEK, "k1", "testmsg");
+ kmsAudit.ok(luser, KMSOp.DECRYPT_EEK, "k1", "testmsg");
Thread.sleep(1500);
- kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg");
+ kmsAudit.ok(luser, KMSOp.DECRYPT_EEK, "k1", "testmsg");
Thread.sleep(1500);
String out = getAndResetLogOutput();
System.out.println(out);
@@ -110,15 +111,15 @@ public void testAggregation() throws Exception {
public void testAggregationUnauth() throws Exception {
Principal luser = Mockito.mock(Principal.class);
Mockito.when(luser.getName()).thenReturn("luser");
- kmsAudit.unauthorized(luser, KMS.GENERATE_EEK, "k2");
+ kmsAudit.unauthorized(luser, KMSOp.GENERATE_EEK, "k2");
Thread.sleep(1000);
- kmsAudit.ok(luser, KMS.GENERATE_EEK, "k3", "testmsg");
- kmsAudit.ok(luser, KMS.GENERATE_EEK, "k3", "testmsg");
- kmsAudit.ok(luser, KMS.GENERATE_EEK, "k3", "testmsg");
- kmsAudit.ok(luser, KMS.GENERATE_EEK, "k3", "testmsg");
- kmsAudit.ok(luser, KMS.GENERATE_EEK, "k3", "testmsg");
- kmsAudit.unauthorized(luser, KMS.GENERATE_EEK, "k3");
- kmsAudit.ok(luser, KMS.GENERATE_EEK, "k3", "testmsg");
+ kmsAudit.ok(luser, KMSOp.GENERATE_EEK, "k3", "testmsg");
+ kmsAudit.ok(luser, KMSOp.GENERATE_EEK, "k3", "testmsg");
+ kmsAudit.ok(luser, KMSOp.GENERATE_EEK, "k3", "testmsg");
+ kmsAudit.ok(luser, KMSOp.GENERATE_EEK, "k3", "testmsg");
+ kmsAudit.ok(luser, KMSOp.GENERATE_EEK, "k3", "testmsg");
+ kmsAudit.unauthorized(luser, KMSOp.GENERATE_EEK, "k3");
+ kmsAudit.ok(luser, KMSOp.GENERATE_EEK, "k3", "testmsg");
Thread.sleep(2000);
String out = getAndResetLogOutput();
System.out.println(out);
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
index e637972cb1..3749bc333a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
@@ -39,12 +39,14 @@
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.lib.wsrs.EnumSetParam;
-import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
-import org.apache.hadoop.security.authentication.client.Authenticator;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
+import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
+import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
+import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticator;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;
@@ -67,7 +69,6 @@
import java.io.InputStream;
import java.io.OutputStream;
import java.net.HttpURLConnection;
-import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
@@ -75,7 +76,6 @@
import java.text.MessageFormat;
import java.util.HashMap;
import java.util.Map;
-import java.util.concurrent.Callable;
/**
* HttpFSServer implementation of the FileSystemAccess FileSystem.
@@ -217,34 +217,15 @@ public String getMethod() {
}
-
- private AuthenticatedURL.Token authToken = new AuthenticatedURL.Token();
+ private DelegationTokenAuthenticatedURL authURL;
+ private DelegationTokenAuthenticatedURL.Token authToken =
+ new DelegationTokenAuthenticatedURL.Token();
private URI uri;
- private InetSocketAddress httpFSAddr;
private Path workingDir;
private UserGroupInformation realUser;
private String doAs;
- private Token<?> delegationToken;
- //This method enables handling UGI doAs with SPNEGO, we have to
- //fallback to the realuser who logged in with Kerberos credentials
- private <T> T doAsRealUserIfNecessary(final Callable<T> callable)
- throws IOException {
- try {
- if (realUser.getShortUserName().equals(doAs)) {
- return callable.call();
- } else {
- return realUser.doAs(new PrivilegedExceptionAction<T>() {
- @Override
- public T run() throws Exception {
- return callable.call();
- }
- });
- }
- } catch (Exception ex) {
- throw new IOException(ex.toString(), ex);
- }
- }
+
/**
* Convenience method that creates a <code>HttpURLConnection</code> for the
@@ -291,20 +272,26 @@ private HttpURLConnection getConnection(final String method,
private HttpURLConnection getConnection(final String method,
Map<String, String> params, Map<String, List<String>> multiValuedParams,
Path path, boolean makeQualified) throws IOException {
- if (!realUser.getShortUserName().equals(doAs)) {
- params.put(DO_AS_PARAM, doAs);
- }
- HttpFSKerberosAuthenticator.injectDelegationToken(params, delegationToken);
if (makeQualified) {
path = makeQualified(path);
}
final URL url = HttpFSUtils.createURL(path, params, multiValuedParams);
- return doAsRealUserIfNecessary(new Callable<HttpURLConnection>() {
- @Override
- public HttpURLConnection call() throws Exception {
- return getConnection(url, method);
+ try {
+ return UserGroupInformation.getCurrentUser().doAs(
+ new PrivilegedExceptionAction<HttpURLConnection>() {
+ @Override
+ public HttpURLConnection run() throws Exception {
+ return getConnection(url, method);
+ }
+ }
+ );
+ } catch (Exception ex) {
+ if (ex instanceof IOException) {
+ throw (IOException) ex;
+ } else {
+ throw new IOException(ex);
}
- });
+ }
}
/**
@@ -321,12 +308,8 @@ public HttpURLConnection call() throws Exception {
* @throws IOException thrown if an IO error occurrs.
*/
private HttpURLConnection getConnection(URL url, String method) throws IOException {
- Class<? extends Authenticator> klass =
- getConf().getClass("httpfs.authenticator.class",
- HttpFSKerberosAuthenticator.class, Authenticator.class);
- Authenticator authenticator = ReflectionUtils.newInstance(klass, getConf());
try {
- HttpURLConnection conn = new AuthenticatedURL(authenticator).openConnection(url, authToken);
+ HttpURLConnection conn = authURL.openConnection(url, authToken);
conn.setRequestMethod(method);
if (method.equals(HTTP_POST) || method.equals(HTTP_PUT)) {
conn.setDoOutput(true);
@@ -357,10 +340,17 @@ public void initialize(URI name, Configuration conf) throws IOException {
super.initialize(name, conf);
try {
uri = new URI(name.getScheme() + "://" + name.getAuthority());
- httpFSAddr = NetUtils.createSocketAddr(getCanonicalUri().toString());
} catch (URISyntaxException ex) {
throw new IOException(ex);
}
+
+ Class<? extends DelegationTokenAuthenticator> klass =
+ getConf().getClass("httpfs.authenticator.class",
+ KerberosDelegationTokenAuthenticator.class,
+ DelegationTokenAuthenticator.class);
+ DelegationTokenAuthenticator authenticator =
+ ReflectionUtils.newInstance(klass, getConf());
+ authURL = new DelegationTokenAuthenticatedURL(authenticator);
}
@Override
@@ -1059,38 +1049,57 @@ public void readFields(DataInput in) throws IOException {
@Override
public Token<?> getDelegationToken(final String renewer)
throws IOException {
- return doAsRealUserIfNecessary(new Callable<Token<?>>() {
- @Override
- public Token<?> call() throws Exception {
- return HttpFSKerberosAuthenticator.
- getDelegationToken(uri, httpFSAddr, authToken, renewer);
+ try {
+ return UserGroupInformation.getCurrentUser().doAs(
+ new PrivilegedExceptionAction<Token<?>>() {
+ @Override
+ public Token<?> run() throws Exception {
+ return authURL.getDelegationToken(uri.toURL(), authToken,
+ renewer);
+ }
+ }
+ );
+ } catch (Exception ex) {
+ if (ex instanceof IOException) {
+ throw (IOException) ex;
+ } else {
+ throw new IOException(ex);
}
- });
+ }
}
public long renewDelegationToken(final Token<?> token) throws IOException {
- return doAsRealUserIfNecessary(new Callable<Long>() {
- @Override
- public Long call() throws Exception {
- return HttpFSKerberosAuthenticator.
- renewDelegationToken(uri, authToken, token);
+ try {
+ return UserGroupInformation.getCurrentUser().doAs(
+ new PrivilegedExceptionAction<Long>() {
+ @Override
+ public Long run() throws Exception {
+ return authURL.renewDelegationToken(uri.toURL(), authToken);
+ }
+ }
+ );
+ } catch (Exception ex) {
+ if (ex instanceof IOException) {
+ throw (IOException) ex;
+ } else {
+ throw new IOException(ex);
}
- });
+ }
}
public void cancelDelegationToken(final Token<?> token) throws IOException {
- HttpFSKerberosAuthenticator.
- cancelDelegationToken(uri, authToken, token);
+ authURL.cancelDelegationToken(uri.toURL(), authToken);
}
@Override
public Token<?> getRenewToken() {
- return delegationToken;
+ return null; //TODO : for renewer
}
@Override
+ @SuppressWarnings("unchecked")
public <T extends TokenIdentifier> void setDelegationToken(Token<T> token) {
- delegationToken = token;
+ //TODO : for renewer
}
@Override
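
The rewrite above replaces the HttpFS-specific SPNEGO and delegation-token plumbing with the shared hadoop-auth delegation token classes. A minimal sketch of the resulting client pattern, assuming only the DelegationTokenAuthenticatedURL calls that appear in the hunk (openConnection, getDelegationToken) plus standard UserGroupInformation.doAs; the class and method names in the sketch are illustrative, not part of the patch:

    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.security.PrivilegedExceptionAction;

    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.security.token.Token;
    import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
    import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticator;

    // Illustrative client: one authenticated-URL helper and one token cache
    // per file system instance, mirroring authURL/authToken above.
    public class DelegationTokenClientSketch {
      private final DelegationTokenAuthenticatedURL authURL =
          new DelegationTokenAuthenticatedURL(new KerberosDelegationTokenAuthenticator());
      private final DelegationTokenAuthenticatedURL.Token authToken =
          new DelegationTokenAuthenticatedURL.Token();

      // Open a connection as the current UGI; SPNEGO and delegation-token
      // handling are left to DelegationTokenAuthenticatedURL.
      public HttpURLConnection open(final URL url, final String method) throws Exception {
        return UserGroupInformation.getCurrentUser().doAs(
            new PrivilegedExceptionAction<HttpURLConnection>() {
              @Override
              public HttpURLConnection run() throws Exception {
                HttpURLConnection conn = authURL.openConnection(url, authToken);
                conn.setRequestMethod(method);
                return conn;
              }
            });
      }

      // Fetch a delegation token for the given renewer, again as the current UGI.
      public Token<?> fetchDelegationToken(final URL fsURL, final String renewer)
          throws Exception {
        return UserGroupInformation.getCurrentUser().doAs(
            new PrivilegedExceptionAction<Token<?>>() {
              @Override
              public Token<?> run() throws Exception {
                return authURL.getDelegationToken(fsURL, authToken, renewer);
              }
            });
      }
    }

Wrapping the calls in UserGroupInformation.getCurrentUser().doAs() is what takes over from the removed doAsRealUserIfNecessary helper: proxy-user handling now rides on the current UGI rather than on an explicit DO_AS_PARAM query parameter.
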
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSKerberosAuthenticator.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSKerberosAuthenticator.java
deleted file mode 100644
index a6f7c54a9a..0000000000
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSKerberosAuthenticator.java
+++ /dev/null
@@ -1,188 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.http.client;
-
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import org.apache.hadoop.security.authentication.client.Authenticator;
-import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
-import org.json.simple.JSONObject;
-
-import java.io.IOException;
-import java.net.HttpURLConnection;
-import java.net.InetSocketAddress;
-import java.net.URI;
-import java.net.URL;
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * A <code>KerberosAuthenticator</code> subclass that falls back to
- * {@link HttpFSPseudoAuthenticator}.
- */
-@InterfaceAudience.Private
-public class HttpFSKerberosAuthenticator extends KerberosAuthenticator {
-
- /**
- * Returns the fallback authenticator if the server does not use
- * Kerberos SPNEGO HTTP authentication.
- *
- * @return a {@link HttpFSPseudoAuthenticator} instance.
- */
- @Override
- protected Authenticator getFallBackAuthenticator() {
- return new HttpFSPseudoAuthenticator();
- }
-
- private static final String HTTP_GET = "GET";
- private static final String HTTP_PUT = "PUT";
-
- public static final String DELEGATION_PARAM = "delegation";
- public static final String TOKEN_PARAM = "token";
- public static final String RENEWER_PARAM = "renewer";
- public static final String DELEGATION_TOKEN_JSON = "Token";
- public static final String DELEGATION_TOKEN_URL_STRING_JSON = "urlString";
- public static final String RENEW_DELEGATION_TOKEN_JSON = "long";
-
- /**
- * DelegationToken operations.
- */
- @InterfaceAudience.Private
- public static enum DelegationTokenOperation {
- GETDELEGATIONTOKEN(HTTP_GET, true),
- RENEWDELEGATIONTOKEN(HTTP_PUT, true),
- CANCELDELEGATIONTOKEN(HTTP_PUT, false);
-
- private String httpMethod;
- private boolean requiresKerberosCredentials;
-
- private DelegationTokenOperation(String httpMethod,
- boolean requiresKerberosCredentials) {
- this.httpMethod = httpMethod;
- this.requiresKerberosCredentials = requiresKerberosCredentials;
- }
-
- public String getHttpMethod() {
- return httpMethod;
- }
-
- public boolean requiresKerberosCredentials() {
- return requiresKerberosCredentials;
- }
-
- }
-
- public static void injectDelegationToken(Map<String, String> params,
- Token<?> dtToken)
- throws IOException {
- if (dtToken != null) {
- params.put(DELEGATION_PARAM, dtToken.encodeToUrlString());
- }
- }
-
- private boolean hasDelegationToken(URL url) {
- return url.getQuery().contains(DELEGATION_PARAM + "=");
- }
-
- @Override
- public void authenticate(URL url, AuthenticatedURL.Token token)
- throws IOException, AuthenticationException {
- if (!hasDelegationToken(url)) {
- super.authenticate(url, token);
- }
- }
-
- public static final String OP_PARAM = "op";
-
- public static Token<?> getDelegationToken(URI fsURI,
- InetSocketAddress httpFSAddr, AuthenticatedURL.Token token,
- String renewer) throws IOException {
- DelegationTokenOperation op =
- DelegationTokenOperation.GETDELEGATIONTOKEN;
- Map<String, String> params = new HashMap<String, String>();
- params.put(OP_PARAM, op.toString());
- params.put(RENEWER_PARAM,renewer);
- URL url = HttpFSUtils.createURL(new Path(fsURI), params);
- AuthenticatedURL aUrl =
- new AuthenticatedURL(new HttpFSKerberosAuthenticator());
- try {
- HttpURLConnection conn = aUrl.openConnection(url, token);
- conn.setRequestMethod(op.getHttpMethod());
- HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
- JSONObject json = (JSONObject) ((JSONObject)
- HttpFSUtils.jsonParse(conn)).get(DELEGATION_TOKEN_JSON);
- String tokenStr = (String)
- json.get(DELEGATION_TOKEN_URL_STRING_JSON);
- Token<AbstractDelegationTokenIdentifier> dToken =
- new Token<AbstractDelegationTokenIdentifier>();
- dToken.decodeFromUrlString(tokenStr);
- SecurityUtil.setTokenService(dToken, httpFSAddr);
- return dToken;
- } catch (AuthenticationException ex) {
- throw new IOException(ex.toString(), ex);
- }
- }
-
- public static long renewDelegationToken(URI fsURI,
- AuthenticatedURL.Token token, Token<?> dToken) throws IOException {
- Map<String, String> params = new HashMap<String, String>();
- params.put(OP_PARAM,
- DelegationTokenOperation.RENEWDELEGATIONTOKEN.toString());
- params.put(TOKEN_PARAM, dToken.encodeToUrlString());
- URL url = HttpFSUtils.createURL(new Path(fsURI), params);
- AuthenticatedURL aUrl =
- new AuthenticatedURL(new HttpFSKerberosAuthenticator());
- try {
- HttpURLConnection conn = aUrl.openConnection(url, token);
- conn.setRequestMethod(
- DelegationTokenOperation.RENEWDELEGATIONTOKEN.getHttpMethod());
- HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
- JSONObject json = (JSONObject) ((JSONObject)
- HttpFSUtils.jsonParse(conn)).get(DELEGATION_TOKEN_JSON);
- return (Long)(json.get(RENEW_DELEGATION_TOKEN_JSON));
- } catch (AuthenticationException ex) {
- throw new IOException(ex.toString(), ex);
- }
- }
-
- public static void cancelDelegationToken(URI fsURI,
- AuthenticatedURL.Token token, Token<?> dToken) throws IOException {
- Map<String, String> params = new HashMap<String, String>();
- params.put(OP_PARAM,
- DelegationTokenOperation.CANCELDELEGATIONTOKEN.toString());
- params.put(TOKEN_PARAM, dToken.encodeToUrlString());
- URL url = HttpFSUtils.createURL(new Path(fsURI), params);
- AuthenticatedURL aUrl =
- new AuthenticatedURL(new HttpFSKerberosAuthenticator());
- try {
- HttpURLConnection conn = aUrl.openConnection(url, token);
- conn.setRequestMethod(
- DelegationTokenOperation.CANCELDELEGATIONTOKEN.getHttpMethod());
- HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
- } catch (AuthenticationException ex) {
- throw new IOException(ex.toString(), ex);
- }
- }
-
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSAuthenticationFilter.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSAuthenticationFilter.java
index 545654c2f7..d65616d45c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSAuthenticationFilter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSAuthenticationFilter.java
@@ -20,7 +20,10 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
+import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticationFilter;
+
import javax.servlet.FilterConfig;
+import javax.servlet.ServletException;
import java.io.FileReader;
import java.io.IOException;
import java.io.Reader;
@@ -32,7 +35,9 @@
* from HttpFSServer's server configuration.
*/
@InterfaceAudience.Private
-public class HttpFSAuthenticationFilter extends AuthenticationFilter {
+public class HttpFSAuthenticationFilter
+ extends DelegationTokenAuthenticationFilter {
+
private static final String CONF_PREFIX = "httpfs.authentication.";
private static final String SIGNATURE_SECRET_FILE = SIGNATURE_SECRET + ".file";
@@ -50,7 +55,8 @@ public class HttpFSAuthenticationFilter extends AuthenticationFilter {
* @return hadoop-auth configuration read from HttpFSServer's configuration.
*/
@Override
- protected Properties getConfiguration(String configPrefix, FilterConfig filterConfig) {
+ protected Properties getConfiguration(String configPrefix,
+ FilterConfig filterConfig) throws ServletException{
Properties props = new Properties();
Configuration conf = HttpFSServerWebApp.get().getConfig();
@@ -64,11 +70,6 @@ protected Properties getConfiguration(String configPrefix, FilterConfig filterCo
}
}
- if (props.getProperty(AUTH_TYPE).equals("kerberos")) {
- props.setProperty(AUTH_TYPE,
- HttpFSKerberosAuthenticationHandler.class.getName());
- }
-
String signatureSecretFile = props.getProperty(SIGNATURE_SECRET_FILE, null);
if (signatureSecretFile == null) {
throw new RuntimeException("Undefined property: " + SIGNATURE_SECRET_FILE);
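
With HttpFSAuthenticationFilter now extending DelegationTokenAuthenticationFilter, the remaining HttpFS-specific work is mapping server configuration into hadoop-auth properties. A minimal sketch of that pattern, using a hypothetical prefix and property source (only the getConfiguration signature, including the new throws ServletException, is taken from the hunk):

    import java.util.Properties;
    import javax.servlet.FilterConfig;
    import javax.servlet.ServletException;

    import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticationFilter;

    // Hypothetical filter: builds the hadoop-auth properties from system
    // properties instead of servlet filter init parameters.
    public class ExampleAuthFilter extends DelegationTokenAuthenticationFilter {

      private static final String CONF_PREFIX = "example.authentication.";

      @Override
      protected Properties getConfiguration(String configPrefix,
          FilterConfig filterConfig) throws ServletException {
        Properties props = new Properties();
        // Copy every "example.authentication.*" property, stripping the prefix,
        // so hadoop-auth sees keys such as "type" and "signature.secret".
        for (String name : System.getProperties().stringPropertyNames()) {
          if (name.startsWith(CONF_PREFIX)) {
            props.setProperty(name.substring(CONF_PREFIX.length()),
                System.getProperty(name));
          }
        }
        return props;
      }
    }
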
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSKerberosAuthenticationHandler.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSKerberosAuthenticationHandler.java
deleted file mode 100644
index fc2649ce79..0000000000
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSKerberosAuthenticationHandler.java
+++ /dev/null
@@ -1,230 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.http.server;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
-import org.apache.hadoop.fs.http.client.HttpFSKerberosAuthenticator;
-import org.apache.hadoop.fs.http.client.HttpFSKerberosAuthenticator.DelegationTokenOperation;
-import org.apache.hadoop.lib.service.DelegationTokenIdentifier;
-import org.apache.hadoop.lib.service.DelegationTokenManager;
-import org.apache.hadoop.lib.service.DelegationTokenManagerException;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import org.apache.hadoop.security.authentication.server.AuthenticationToken;
-import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
-import org.apache.hadoop.security.token.Token;
-import org.json.simple.JSONObject;
-
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-import javax.ws.rs.core.MediaType;
-import java.io.IOException;
-import java.io.Writer;
-import java.text.MessageFormat;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedHashMap;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * Server side <code>AuthenticationHandler</code> that authenticates requests
- * using the incoming delegation token as a 'delegation' query string parameter.
- *
- * If not delegation token is present in the request it delegates to the
- * {@link KerberosAuthenticationHandler}
- */
-@InterfaceAudience.Private
-public class HttpFSKerberosAuthenticationHandler
- extends KerberosAuthenticationHandler {
-
- static final Set<String> DELEGATION_TOKEN_OPS =
- new HashSet<String>();
-
- static {
- DELEGATION_TOKEN_OPS.add(
- DelegationTokenOperation.GETDELEGATIONTOKEN.toString());
- DELEGATION_TOKEN_OPS.add(
- DelegationTokenOperation.RENEWDELEGATIONTOKEN.toString());
- DELEGATION_TOKEN_OPS.add(
- DelegationTokenOperation.CANCELDELEGATIONTOKEN.toString());
- }
-
- public static final String TYPE = "kerberos-dt";
-
- /**
- * Returns authentication type of the handler.
- *
- * @return delegationtoken-kerberos
- */
- @Override
- public String getType() {
- return TYPE;
- }
-
- private static final String ENTER = System.getProperty("line.separator");
-
- @Override
- @SuppressWarnings("unchecked")
- public boolean managementOperation(AuthenticationToken token,
- HttpServletRequest request, HttpServletResponse response)
- throws IOException, AuthenticationException {
- boolean requestContinues = true;
- String op = request.getParameter(HttpFSFileSystem.OP_PARAM);
- op = (op != null) ? op.toUpperCase() : null;
- if (DELEGATION_TOKEN_OPS.contains(op) &&
- !request.getMethod().equals("OPTIONS")) {
- DelegationTokenOperation dtOp =
- DelegationTokenOperation.valueOf(op);
- if (dtOp.getHttpMethod().equals(request.getMethod())) {
- if (dtOp.requiresKerberosCredentials() && token == null) {
- response.sendError(HttpServletResponse.SC_UNAUTHORIZED,
- MessageFormat.format(
- "Operation [{0}] requires SPNEGO authentication established",
- dtOp));
- requestContinues = false;
- } else {
- DelegationTokenManager tokenManager =
- HttpFSServerWebApp.get().get(DelegationTokenManager.class);
- try {
- Map map = null;
- switch (dtOp) {
- case GETDELEGATIONTOKEN:
- String renewerParam =
- request.getParameter(HttpFSKerberosAuthenticator.RENEWER_PARAM);
- if (renewerParam == null) {
- renewerParam = token.getUserName();
- }
- Token<?> dToken = tokenManager.createToken(
- UserGroupInformation.getCurrentUser(), renewerParam);
- map = delegationTokenToJSON(dToken);
- break;
- case RENEWDELEGATIONTOKEN:
- case CANCELDELEGATIONTOKEN:
- String tokenParam =
- request.getParameter(HttpFSKerberosAuthenticator.TOKEN_PARAM);
- if (tokenParam == null) {
- response.sendError(HttpServletResponse.SC_BAD_REQUEST,
- MessageFormat.format(
- "Operation [{0}] requires the parameter [{1}]",
- dtOp, HttpFSKerberosAuthenticator.TOKEN_PARAM));
- requestContinues = false;
- } else {
- if (dtOp == DelegationTokenOperation.CANCELDELEGATIONTOKEN) {
- Token<DelegationTokenIdentifier> dt =
- new Token<DelegationTokenIdentifier>();
- dt.decodeFromUrlString(tokenParam);
- tokenManager.cancelToken(dt,
- UserGroupInformation.getCurrentUser().getUserName());
- } else {
- Token<DelegationTokenIdentifier> dt =
- new Token<DelegationTokenIdentifier>();
- dt.decodeFromUrlString(tokenParam);
- long expirationTime =
- tokenManager.renewToken(dt, token.getUserName());
- map = new HashMap();
- map.put("long", expirationTime);
- }
- }
- break;
- }
- if (requestContinues) {
- response.setStatus(HttpServletResponse.SC_OK);
- if (map != null) {
- response.setContentType(MediaType.APPLICATION_JSON);
- Writer writer = response.getWriter();
- JSONObject.writeJSONString(map, writer);
- writer.write(ENTER);
- writer.flush();
-
- }
- requestContinues = false;
- }
- } catch (DelegationTokenManagerException ex) {
- throw new AuthenticationException(ex.toString(), ex);
- }
- }
- } else {
- response.sendError(HttpServletResponse.SC_BAD_REQUEST,
- MessageFormat.format(
- "Wrong HTTP method [{0}] for operation [{1}], it should be [{2}]",
- request.getMethod(), dtOp, dtOp.getHttpMethod()));
- requestContinues = false;
- }
- }
- return requestContinues;
- }
-
- @SuppressWarnings("unchecked")
- private static Map delegationTokenToJSON(Token<?> token) throws IOException {
- Map json = new LinkedHashMap();
- json.put(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON,
- token.encodeToUrlString());
- Map response = new LinkedHashMap();
- response.put(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_JSON, json);
- return response;
- }
-
- /**
- * Authenticates a request looking for the delegation
- * query-string parameter and verifying it is a valid token. If there is not
- * <code>delegation</code> query-string parameter, it delegates the
- * authentication to the {@link KerberosAuthenticationHandler} unless it is
- * disabled.
- *
- * @param request the HTTP client request.
- * @param response the HTTP client response.
- *
- * @return the authentication token for the authenticated request.
- * @throws IOException thrown if an IO error occurred.
- * @throws AuthenticationException thrown if the authentication failed.
- */
- @Override
- public AuthenticationToken authenticate(HttpServletRequest request,
- HttpServletResponse response)
- throws IOException, AuthenticationException {
- AuthenticationToken token;
- String delegationParam =
- request.getParameter(HttpFSKerberosAuthenticator.DELEGATION_PARAM);
- if (delegationParam != null) {
- try {
- Token<DelegationTokenIdentifier> dt =
- new Token<DelegationTokenIdentifier>();
- dt.decodeFromUrlString(delegationParam);
- DelegationTokenManager tokenManager =
- HttpFSServerWebApp.get().get(DelegationTokenManager.class);
- UserGroupInformation ugi = tokenManager.verifyToken(dt);
- final String shortName = ugi.getShortUserName();
-
- // creating a ephemeral token
- token = new AuthenticationToken(shortName, ugi.getUserName(),
- getType());
- token.setExpires(0);
- } catch (Throwable ex) {
- throw new AuthenticationException("Could not verify DelegationToken, " +
- ex.toString(), ex);
- }
- } else {
- token = super.authenticate(request, response);
- }
- return token;
- }
-
-
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/DelegationTokenManager.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/DelegationTokenManager.java
deleted file mode 100644
index a163baf16c..0000000000
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/DelegationTokenManager.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.lib.service;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
-
-/**
- * Service interface to manage HttpFS delegation tokens.
- */
-@InterfaceAudience.Private
-public interface DelegationTokenManager {
-
- /**
- * Creates a delegation token.
- *
- * @param ugi UGI creating the token.
- * @param renewer token renewer.
- * @return new delegation token.
- * @throws DelegationTokenManagerException thrown if the token could not be
- * created.
- */
- public Token<DelegationTokenIdentifier> createToken(UserGroupInformation ugi,
- String renewer)
- throws DelegationTokenManagerException;
-
- /**
- * Renews a delegation token.
- *
- * @param token delegation token to renew.
- * @param renewer token renewer.
- * @return epoc expiration time.
- * @throws DelegationTokenManagerException thrown if the token could not be
- * renewed.
- */
- public long renewToken(Token<DelegationTokenIdentifier> token, String renewer)
- throws DelegationTokenManagerException;
-
- /**
- * Cancels a delegation token.
- *
- * @param token delegation token to cancel.
- * @param canceler token canceler.
- * @throws DelegationTokenManagerException thrown if the token could not be
- * canceled.
- */
- public void cancelToken(Token<DelegationTokenIdentifier> token,
- String canceler)
- throws DelegationTokenManagerException;
-
- /**
- * Verifies a delegation token.
- *
- * @param token delegation token to verify.
- * @return the UGI for the token.
- * @throws DelegationTokenManagerException thrown if the token could not be
- * verified.
- */
- public UserGroupInformation verifyToken(Token<DelegationTokenIdentifier> token)
- throws DelegationTokenManagerException;
-
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/DelegationTokenManagerException.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/DelegationTokenManagerException.java
deleted file mode 100644
index 62ec2f920b..0000000000
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/DelegationTokenManagerException.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.lib.service;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.lib.lang.XException;
-
-/**
- * Exception thrown by the {@link DelegationTokenManager} service implementation.
- */
-@InterfaceAudience.Private
-public class DelegationTokenManagerException extends XException {
-
- public enum ERROR implements XException.ERROR {
- DT01("Could not verify delegation token, {0}"),
- DT02("Could not renew delegation token, {0}"),
- DT03("Could not cancel delegation token, {0}"),
- DT04("Could not create delegation token, {0}");
-
- private String template;
-
- ERROR(String template) {
- this.template = template;
- }
-
- @Override
- public String getTemplate() {
- return template;
- }
- }
-
- public DelegationTokenManagerException(ERROR error, Object... params) {
- super(error, params);
- }
-
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/security/DelegationTokenManagerService.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/security/DelegationTokenManagerService.java
deleted file mode 100644
index dca13d4a07..0000000000
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/security/DelegationTokenManagerService.java
+++ /dev/null
@@ -1,242 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.lib.service.security;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.fs.http.server.HttpFSServerWebApp;
-import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
-import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.lib.server.BaseService;
-import org.apache.hadoop.lib.server.ServerException;
-import org.apache.hadoop.lib.server.ServiceException;
-import org.apache.hadoop.lib.service.DelegationTokenIdentifier;
-import org.apache.hadoop.lib.service.DelegationTokenManager;
-import org.apache.hadoop.lib.service.DelegationTokenManagerException;
-import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
-
-import java.io.ByteArrayInputStream;
-import java.io.DataInputStream;
-import java.io.IOException;
-
-/**
- * DelegationTokenManager service implementation.
- */
-@InterfaceAudience.Private
-public class DelegationTokenManagerService extends BaseService
- implements DelegationTokenManager {
-
- private static final String PREFIX = "delegation.token.manager";
-
- private static final String UPDATE_INTERVAL = "update.interval";
-
- private static final String MAX_LIFETIME = "max.lifetime";
-
- private static final String RENEW_INTERVAL = "renew.interval";
-
- private static final long HOUR = 60 * 60 * 1000;
- private static final long DAY = 24 * HOUR;
-
- DelegationTokenSecretManager secretManager = null;
-
- private Text tokenKind;
-
- public DelegationTokenManagerService() {
- super(PREFIX);
- }
-
- /**
- * Initializes the service.
- *
- * @throws ServiceException thrown if the service could not be initialized.
- */
- @Override
- protected void init() throws ServiceException {
-
- long updateInterval = getServiceConfig().getLong(UPDATE_INTERVAL, DAY);
- long maxLifetime = getServiceConfig().getLong(MAX_LIFETIME, 7 * DAY);
- long renewInterval = getServiceConfig().getLong(RENEW_INTERVAL, DAY);
- tokenKind = (HttpFSServerWebApp.get().isSslEnabled())
- ? SWebHdfsFileSystem.TOKEN_KIND : WebHdfsFileSystem.TOKEN_KIND;
- secretManager = new DelegationTokenSecretManager(tokenKind, updateInterval,
- maxLifetime,
- renewInterval, HOUR);
- try {
- secretManager.startThreads();
- } catch (IOException ex) {
- throw new ServiceException(ServiceException.ERROR.S12,
- DelegationTokenManager.class.getSimpleName(),
- ex.toString(), ex);
- }
- }
-
- /**
- * Destroys the service.
- */
- @Override
- public void destroy() {
- secretManager.stopThreads();
- super.destroy();
- }
-
- /**
- * Returns the service interface.
- *
- * @return the service interface.
- */
- @Override
- public Class getInterface() {
- return DelegationTokenManager.class;
- }
-
- /**
- * Creates a delegation token.
- *
- * @param ugi UGI creating the token.
- * @param renewer token renewer.
- * @return new delegation token.
- * @throws DelegationTokenManagerException thrown if the token could not be
- * created.
- */
- @Override
- public Token<DelegationTokenIdentifier> createToken(UserGroupInformation ugi,
- String renewer)
- throws DelegationTokenManagerException {
- renewer = (renewer == null) ? ugi.getShortUserName() : renewer;
- String user = ugi.getUserName();
- Text owner = new Text(user);
- Text realUser = null;
- if (ugi.getRealUser() != null) {
- realUser = new Text(ugi.getRealUser().getUserName());
- }
- DelegationTokenIdentifier tokenIdentifier =
- new DelegationTokenIdentifier(tokenKind, owner, new Text(renewer), realUser);
- Token<DelegationTokenIdentifier> token =
- new Token<DelegationTokenIdentifier>(tokenIdentifier, secretManager);
- try {
- SecurityUtil.setTokenService(token,
- HttpFSServerWebApp.get().getAuthority());
- } catch (ServerException ex) {
- throw new DelegationTokenManagerException(
- DelegationTokenManagerException.ERROR.DT04, ex.toString(), ex);
- }
- return token;
- }
-
- /**
- * Renews a delegation token.
- *
- * @param token delegation token to renew.
- * @param renewer token renewer.
- * @return epoc expiration time.
- * @throws DelegationTokenManagerException thrown if the token could not be
- * renewed.
- */
- @Override
- public long renewToken(Token<DelegationTokenIdentifier> token, String renewer)
- throws DelegationTokenManagerException {
- try {
- return secretManager.renewToken(token, renewer);
- } catch (IOException ex) {
- throw new DelegationTokenManagerException(
- DelegationTokenManagerException.ERROR.DT02, ex.toString(), ex);
- }
- }
-
- /**
- * Cancels a delegation token.
- *
- * @param token delegation token to cancel.
- * @param canceler token canceler.
- * @throws DelegationTokenManagerException thrown if the token could not be
- * canceled.
- */
- @Override
- public void cancelToken(Token<DelegationTokenIdentifier> token,
- String canceler)
- throws DelegationTokenManagerException {
- try {
- secretManager.cancelToken(token, canceler);
- } catch (IOException ex) {
- throw new DelegationTokenManagerException(
- DelegationTokenManagerException.ERROR.DT03, ex.toString(), ex);
- }
- }
-
- /**
- * Verifies a delegation token.
- *
- * @param token delegation token to verify.
- * @return the UGI for the token.
- * @throws DelegationTokenManagerException thrown if the token could not be
- * verified.
- */
- @Override
- public UserGroupInformation verifyToken(Token<DelegationTokenIdentifier> token)
- throws DelegationTokenManagerException {
- ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
- DataInputStream dis = new DataInputStream(buf);
- DelegationTokenIdentifier id = new DelegationTokenIdentifier(tokenKind);
- try {
- id.readFields(dis);
- dis.close();
- secretManager.verifyToken(id, token.getPassword());
- } catch (Exception ex) {
- throw new DelegationTokenManagerException(
- DelegationTokenManagerException.ERROR.DT01, ex.toString(), ex);
- }
- return id.getUser();
- }
-
- private static class DelegationTokenSecretManager
- extends AbstractDelegationTokenSecretManager<DelegationTokenIdentifier> {
-
- private Text tokenKind;
-
- /**
- * Create a secret manager
- *
- * @param delegationKeyUpdateInterval the number of seconds for rolling new
- * secret keys.
- * @param delegationTokenMaxLifetime the maximum lifetime of the delegation
- * tokens
- * @param delegationTokenRenewInterval how often the tokens must be renewed
- * @param delegationTokenRemoverScanInterval how often the tokens are
- * scanned
- * for expired tokens
- */
- public DelegationTokenSecretManager(Text tokenKind, long delegationKeyUpdateInterval,
- long delegationTokenMaxLifetime,
- long delegationTokenRenewInterval,
- long delegationTokenRemoverScanInterval) {
- super(delegationKeyUpdateInterval, delegationTokenMaxLifetime,
- delegationTokenRenewInterval, delegationTokenRemoverScanInterval);
- this.tokenKind = tokenKind;
- }
-
- @Override
- public DelegationTokenIdentifier createIdentifier() {
- return new DelegationTokenIdentifier(tokenKind);
- }
-
- }
-
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs-default.xml
index 87cd73020d..05e1400ca9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/resources/httpfs-default.xml
@@ -35,7 +35,6 @@
org.apache.hadoop.lib.service.scheduler.SchedulerService,
org.apache.hadoop.lib.service.security.GroupsService,
org.apache.hadoop.lib.service.security.ProxyUserService,
- org.apache.hadoop.lib.service.security.DelegationTokenManagerService,
org.apache.hadoop.lib.service.hadoop.FileSystemAccessService
@@ -226,12 +225,4 @@
- <property>
-   <name>httpfs.user.provider.user.pattern</name>
-   <value>^[A-Za-z_][A-Za-z0-9._-]*[$]?$</value>
-   <description>
-     Valid pattern for user and group names, it must be a valid java regex.
-   </description>
- </property>
-
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/HttpFSKerberosAuthenticationHandlerForTesting.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/HttpFSKerberosAuthenticationHandlerForTesting.java
index 760cfd548a..9a51bd386b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/HttpFSKerberosAuthenticationHandlerForTesting.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/HttpFSKerberosAuthenticationHandlerForTesting.java
@@ -17,15 +17,19 @@
*/
package org.apache.hadoop.fs.http.server;
+import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticationHandler;
+
import javax.servlet.ServletException;
import java.util.Properties;
public class HttpFSKerberosAuthenticationHandlerForTesting
- extends HttpFSKerberosAuthenticationHandler {
+ extends KerberosDelegationTokenAuthenticationHandler {
@Override
public void init(Properties config) throws ServletException {
//NOP overwrite to avoid Kerberos initialization
+ config.setProperty(TOKEN_KIND, "t");
+ initTokenManager(config);
}
@Override
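
The change above keeps the test handler from touching Kerberos while still initializing the delegation token machinery. A short sketch of the same idea (the TOKEN_KIND constant and initTokenManager call come from the hunk; the class name is hypothetical):

    import java.util.Properties;
    import javax.servlet.ServletException;

    import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticationHandler;

    // Hypothetical test-only handler: skip super.init(), which would need a
    // keytab/principal, but still create the delegation token manager.
    public class TestingDelegationTokenHandler
        extends KerberosDelegationTokenAuthenticationHandler {

      @Override
      public void init(Properties config) throws ServletException {
        config.setProperty(TOKEN_KIND, "t"); // arbitrary token kind for tests
        initTokenManager(config);
      }
    }
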
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSCustomUserName.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSCustomUserName.java
deleted file mode 100644
index e8407fc30c..0000000000
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSCustomUserName.java
+++ /dev/null
@@ -1,94 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.http.server;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.http.client.HttpFSKerberosAuthenticator;
-import org.apache.hadoop.lib.server.Service;
-import org.apache.hadoop.lib.server.ServiceException;
-import org.apache.hadoop.lib.service.Groups;
-import org.apache.hadoop.lib.wsrs.UserProvider;
-import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
-import org.apache.hadoop.security.authentication.server.AuthenticationToken;
-import org.apache.hadoop.security.authentication.util.Signer;
-import org.apache.hadoop.test.HFSTestCase;
-import org.apache.hadoop.test.HadoopUsersConfTestHelper;
-import org.apache.hadoop.test.TestDir;
-import org.apache.hadoop.test.TestDirHelper;
-import org.apache.hadoop.test.TestHdfs;
-import org.apache.hadoop.test.TestHdfsHelper;
-import org.apache.hadoop.test.TestJetty;
-import org.apache.hadoop.test.TestJettyHelper;
-import org.json.simple.JSONObject;
-import org.json.simple.parser.JSONParser;
-import org.junit.Assert;
-import org.junit.Test;
-import org.mortbay.jetty.Server;
-import org.mortbay.jetty.webapp.WebAppContext;
-
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-import java.io.OutputStream;
-import java.io.Writer;
-import java.net.HttpURLConnection;
-import java.net.URL;
-import java.text.MessageFormat;
-import java.util.Arrays;
-import java.util.List;
-
-public class TestHttpFSCustomUserName extends HFSTestCase {
-
- @Test
- @TestDir
- @TestJetty
- public void defaultUserName() throws Exception {
- String dir = TestDirHelper.getTestDir().getAbsolutePath();
-
- Configuration httpfsConf = new Configuration(false);
- HttpFSServerWebApp server =
- new HttpFSServerWebApp(dir, dir, dir, dir, httpfsConf);
- server.init();
- Assert.assertEquals(UserProvider.USER_PATTERN_DEFAULT,
- UserProvider.getUserPattern().pattern());
- server.destroy();
- }
-
- @Test
- @TestDir
- @TestJetty
- public void customUserName() throws Exception {
- String dir = TestDirHelper.getTestDir().getAbsolutePath();
-
- Configuration httpfsConf = new Configuration(false);
- httpfsConf.set(UserProvider.USER_PATTERN_KEY, "1");
- HttpFSServerWebApp server =
- new HttpFSServerWebApp(dir, dir, dir, dir, httpfsConf);
- server.init();
- Assert.assertEquals("1", UserProvider.getUserPattern().pattern());
- server.destroy();
- }
-
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSKerberosAuthenticationHandler.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSKerberosAuthenticationHandler.java
deleted file mode 100644
index 25612a0f3c..0000000000
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSKerberosAuthenticationHandler.java
+++ /dev/null
@@ -1,316 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.http.server;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
-import org.apache.hadoop.fs.http.client.HttpFSKerberosAuthenticator;
-import org.apache.hadoop.fs.http.client.HttpFSKerberosAuthenticator.DelegationTokenOperation;
-import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
-import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.lib.service.DelegationTokenIdentifier;
-import org.apache.hadoop.lib.service.DelegationTokenManager;
-import org.apache.hadoop.lib.service.DelegationTokenManagerException;
-import org.apache.hadoop.lib.servlet.ServerWebApp;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import org.apache.hadoop.security.authentication.server.AuthenticationHandler;
-import org.apache.hadoop.security.authentication.server.AuthenticationToken;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.test.HFSTestCase;
-import org.apache.hadoop.test.TestDir;
-import org.apache.hadoop.test.TestDirHelper;
-import org.json.simple.JSONObject;
-import org.json.simple.parser.JSONParser;
-import org.junit.Assert;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-import javax.ws.rs.core.MediaType;
-import java.io.PrintWriter;
-import java.io.StringWriter;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-
-public class TestHttpFSKerberosAuthenticationHandler extends HFSTestCase {
-
- @Test
- @TestDir
- public void testManagementOperationsWebHdfsFileSystem() throws Exception {
- testManagementOperations(WebHdfsFileSystem.TOKEN_KIND);
- }
-
- @Test
- @TestDir
- public void testManagementOperationsSWebHdfsFileSystem() throws Exception {
- try {
- System.setProperty(HttpFSServerWebApp.NAME +
- ServerWebApp.SSL_ENABLED, "true");
- testManagementOperations(SWebHdfsFileSystem.TOKEN_KIND);
- } finally {
- System.getProperties().remove(HttpFSServerWebApp.NAME +
- ServerWebApp.SSL_ENABLED);
- }
- }
-
- private void testManagementOperations(Text expectedTokenKind) throws Exception {
- String dir = TestDirHelper.getTestDir().getAbsolutePath();
-
- Configuration httpfsConf = new Configuration(false);
- HttpFSServerWebApp server =
- new HttpFSServerWebApp(dir, dir, dir, dir, httpfsConf);
- server.setAuthority(new InetSocketAddress(InetAddress.getLocalHost(),
- 14000));
- AuthenticationHandler handler =
- new HttpFSKerberosAuthenticationHandlerForTesting();
- try {
- server.init();
- handler.init(null);
-
- testNonManagementOperation(handler);
- testManagementOperationErrors(handler);
- testGetToken(handler, null, expectedTokenKind);
- testGetToken(handler, "foo", expectedTokenKind);
- testCancelToken(handler);
- testRenewToken(handler);
-
- } finally {
- if (handler != null) {
- handler.destroy();
- }
- server.destroy();
- }
- }
-
- private void testNonManagementOperation(AuthenticationHandler handler)
- throws Exception {
- HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
- Mockito.when(request.getParameter(HttpFSFileSystem.OP_PARAM)).
- thenReturn(null);
- Assert.assertTrue(handler.managementOperation(null, request, null));
- Mockito.when(request.getParameter(HttpFSFileSystem.OP_PARAM)).
- thenReturn(HttpFSFileSystem.Operation.CREATE.toString());
- Assert.assertTrue(handler.managementOperation(null, request, null));
- }
-
- private void testManagementOperationErrors(AuthenticationHandler handler)
- throws Exception {
- HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
- HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
- Mockito.when(request.getParameter(HttpFSFileSystem.OP_PARAM)).
- thenReturn(DelegationTokenOperation.GETDELEGATIONTOKEN.toString());
- Mockito.when(request.getMethod()).thenReturn("FOO");
- Assert.assertFalse(handler.managementOperation(null, request, response));
- Mockito.verify(response).sendError(
- Mockito.eq(HttpServletResponse.SC_BAD_REQUEST),
- Mockito.startsWith("Wrong HTTP method"));
-
- Mockito.reset(response);
- Mockito.when(request.getMethod()).
- thenReturn(DelegationTokenOperation.GETDELEGATIONTOKEN.getHttpMethod());
- Assert.assertFalse(handler.managementOperation(null, request, response));
- Mockito.verify(response).sendError(
- Mockito.eq(HttpServletResponse.SC_UNAUTHORIZED),
- Mockito.contains("requires SPNEGO"));
- }
-
- private void testGetToken(AuthenticationHandler handler, String renewer,
- Text expectedTokenKind) throws Exception {
- DelegationTokenOperation op = DelegationTokenOperation.GETDELEGATIONTOKEN;
- HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
- HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
- Mockito.when(request.getParameter(HttpFSFileSystem.OP_PARAM)).
- thenReturn(op.toString());
- Mockito.when(request.getMethod()).
- thenReturn(op.getHttpMethod());
-
- AuthenticationToken token = Mockito.mock(AuthenticationToken.class);
- Mockito.when(token.getUserName()).thenReturn("user");
- Assert.assertFalse(handler.managementOperation(null, request, response));
- Mockito.when(request.getParameter(HttpFSKerberosAuthenticator.RENEWER_PARAM)).
- thenReturn(renewer);
-
- Mockito.reset(response);
- StringWriter writer = new StringWriter();
- PrintWriter pwriter = new PrintWriter(writer);
- Mockito.when(response.getWriter()).thenReturn(pwriter);
- Assert.assertFalse(handler.managementOperation(token, request, response));
- if (renewer == null) {
- Mockito.verify(token).getUserName();
- } else {
- Mockito.verify(token, Mockito.never()).getUserName();
- }
- Mockito.verify(response).setStatus(HttpServletResponse.SC_OK);
- Mockito.verify(response).setContentType(MediaType.APPLICATION_JSON);
- pwriter.close();
- String responseOutput = writer.toString();
- String tokenLabel = HttpFSKerberosAuthenticator.DELEGATION_TOKEN_JSON;
- Assert.assertTrue(responseOutput.contains(tokenLabel));
- Assert.assertTrue(responseOutput.contains(
- HttpFSKerberosAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON));
- JSONObject json = (JSONObject) new JSONParser().parse(responseOutput);
- json = (JSONObject) json.get(tokenLabel);
- String tokenStr;
- tokenStr = (String)
- json.get(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON);
- Token<DelegationTokenIdentifier> dt = new Token<DelegationTokenIdentifier>();
- dt.decodeFromUrlString(tokenStr);
- HttpFSServerWebApp.get().get(DelegationTokenManager.class).verifyToken(dt);
- Assert.assertEquals(expectedTokenKind, dt.getKind());
- }
-
- private void testCancelToken(AuthenticationHandler handler)
- throws Exception {
- DelegationTokenOperation op =
- DelegationTokenOperation.CANCELDELEGATIONTOKEN;
- HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
- HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
- Mockito.when(request.getParameter(HttpFSFileSystem.OP_PARAM)).
- thenReturn(op.toString());
- Mockito.when(request.getMethod()).
- thenReturn(op.getHttpMethod());
-
- Assert.assertFalse(handler.managementOperation(null, request, response));
- Mockito.verify(response).sendError(
- Mockito.eq(HttpServletResponse.SC_BAD_REQUEST),
- Mockito.contains("requires the parameter [token]"));
-
- Mockito.reset(response);
- Token<DelegationTokenIdentifier> token =
- HttpFSServerWebApp.get().get(DelegationTokenManager.class).createToken(
- UserGroupInformation.getCurrentUser(), "foo");
- Mockito.when(request.getParameter(HttpFSKerberosAuthenticator.TOKEN_PARAM)).
- thenReturn(token.encodeToUrlString());
- Assert.assertFalse(handler.managementOperation(null, request, response));
- Mockito.verify(response).setStatus(HttpServletResponse.SC_OK);
- try {
- HttpFSServerWebApp.get().get(DelegationTokenManager.class).verifyToken(token);
- Assert.fail();
- }
- catch (DelegationTokenManagerException ex) {
- Assert.assertTrue(ex.toString().contains("DT01"));
- }
- }
-
- private void testRenewToken(AuthenticationHandler handler)
- throws Exception {
- DelegationTokenOperation op =
- DelegationTokenOperation.RENEWDELEGATIONTOKEN;
- HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
- HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
- Mockito.when(request.getParameter(HttpFSFileSystem.OP_PARAM)).
- thenReturn(op.toString());
- Mockito.when(request.getMethod()).
- thenReturn(op.getHttpMethod());
-
- Assert.assertFalse(handler.managementOperation(null, request, response));
- Mockito.verify(response).sendError(
- Mockito.eq(HttpServletResponse.SC_UNAUTHORIZED),
- Mockito.contains("equires SPNEGO authentication established"));
-
- Mockito.reset(response);
- AuthenticationToken token = Mockito.mock(AuthenticationToken.class);
- Mockito.when(token.getUserName()).thenReturn("user");
- Assert.assertFalse(handler.managementOperation(token, request, response));
- Mockito.verify(response).sendError(
- Mockito.eq(HttpServletResponse.SC_BAD_REQUEST),
- Mockito.contains("requires the parameter [token]"));
-
- Mockito.reset(response);
- StringWriter writer = new StringWriter();
- PrintWriter pwriter = new PrintWriter(writer);
- Mockito.when(response.getWriter()).thenReturn(pwriter);
- Token<DelegationTokenIdentifier> dToken =
- HttpFSServerWebApp.get().get(DelegationTokenManager.class).createToken(
- UserGroupInformation.getCurrentUser(), "user");
- Mockito.when(request.getParameter(HttpFSKerberosAuthenticator.TOKEN_PARAM)).
- thenReturn(dToken.encodeToUrlString());
- Assert.assertFalse(handler.managementOperation(token, request, response));
- Mockito.verify(response).setStatus(HttpServletResponse.SC_OK);
- pwriter.close();
- Assert.assertTrue(writer.toString().contains("long"));
- HttpFSServerWebApp.get().get(DelegationTokenManager.class).verifyToken(dToken);
- }
-
- @Test
- @TestDir
- public void testAuthenticate() throws Exception {
- String dir = TestDirHelper.getTestDir().getAbsolutePath();
-
- Configuration httpfsConf = new Configuration(false);
- HttpFSServerWebApp server =
- new HttpFSServerWebApp(dir, dir, dir, dir, httpfsConf);
- server.setAuthority(new InetSocketAddress(InetAddress.getLocalHost(),
- 14000));
- AuthenticationHandler handler =
- new HttpFSKerberosAuthenticationHandlerForTesting();
- try {
- server.init();
- handler.init(null);
-
- testValidDelegationToken(handler);
- testInvalidDelegationToken(handler);
- } finally {
- if (handler != null) {
- handler.destroy();
- }
- server.destroy();
- }
- }
-
- private void testValidDelegationToken(AuthenticationHandler handler)
- throws Exception {
- HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
- HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
- Token<DelegationTokenIdentifier> dToken =
- HttpFSServerWebApp.get().get(DelegationTokenManager.class).createToken(
- UserGroupInformation.getCurrentUser(), "user");
- Mockito.when(request.getParameter(HttpFSKerberosAuthenticator.DELEGATION_PARAM)).
- thenReturn(dToken.encodeToUrlString());
-
- AuthenticationToken token = handler.authenticate(request, response);
- Assert.assertEquals(UserGroupInformation.getCurrentUser().getShortUserName(),
- token.getUserName());
- Assert.assertEquals(0, token.getExpires());
- Assert.assertEquals(HttpFSKerberosAuthenticationHandler.TYPE,
- token.getType());
- Assert.assertTrue(token.isExpired());
- }
-
- private void testInvalidDelegationToken(AuthenticationHandler handler)
- throws Exception {
- HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
- HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
- Mockito.when(request.getParameter(HttpFSKerberosAuthenticator.DELEGATION_PARAM)).
- thenReturn("invalid");
-
- try {
- handler.authenticate(request, response);
- Assert.fail();
- } catch (AuthenticationException ex) {
- //NOP
- } catch (Exception ex) {
- Assert.fail();
- }
- }
-
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
index 3e08662a9b..c6c0d19d2a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
@@ -18,6 +18,8 @@
package org.apache.hadoop.fs.http.server;
import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
+import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticationHandler;
import org.json.simple.JSONArray;
import org.junit.Assert;
@@ -43,7 +45,6 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.XAttrCodec;
-import org.apache.hadoop.fs.http.client.HttpFSKerberosAuthenticator;
import org.apache.hadoop.lib.server.Service;
import org.apache.hadoop.lib.server.ServiceException;
import org.apache.hadoop.lib.service.Groups;
@@ -682,7 +683,7 @@ public void testDelegationTokenOperations() throws Exception {
AuthenticationToken token =
new AuthenticationToken("u", "p",
- HttpFSKerberosAuthenticationHandlerForTesting.TYPE);
+ new KerberosDelegationTokenAuthenticationHandler().getType());
token.setExpires(System.currentTimeMillis() + 100000000);
Signer signer = new Signer(new StringSignerSecretProvider("secret"));
String tokenSigned = signer.sign(token.toString());
@@ -706,9 +707,9 @@ public void testDelegationTokenOperations() throws Exception {
JSONObject json = (JSONObject)
new JSONParser().parse(new InputStreamReader(conn.getInputStream()));
json = (JSONObject)
- json.get(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_JSON);
+ json.get(DelegationTokenAuthenticator.DELEGATION_TOKEN_JSON);
String tokenStr = (String)
- json.get(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON);
+ json.get(DelegationTokenAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON);
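
The two assertions above now pull the delegation token out of the JSON reply through the shared DelegationTokenAuthenticator constants instead of the removed HttpFSKerberosAuthenticator ones; the context lines that follow then append the encoded token as the delegation query parameter. A standalone sketch of that parsing step, assuming a WebHDFS-style body of the form {"Token":{"urlString":"..."}} (the literal field names are assumptions standing in for DELEGATION_TOKEN_JSON and DELEGATION_TOKEN_URL_STRING_JSON):

  import org.json.simple.JSONObject;
  import org.json.simple.parser.JSONParser;
  import org.json.simple.parser.ParseException;

  public class DelegationTokenJsonSketch {
    public static void main(String[] args) throws ParseException {
      // Hypothetical response body; a real server returns the encoded token here.
      String body = "{\"Token\":{\"urlString\":\"ABCDEF0123\"}}";
      JSONObject json = (JSONObject) new JSONParser().parse(body);
      JSONObject token = (JSONObject) json.get("Token");      // DELEGATION_TOKEN_JSON
      String tokenStr = (String) token.get("urlString");      // DELEGATION_TOKEN_URL_STRING_JSON
      // The encoded token is then appended to later requests as &delegation=<tokenStr>.
      System.out.println("op=GETHOMEDIRECTORY&delegation=" + tokenStr);
    }
  }
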
url = new URL(TestJettyHelper.getJettyURL(),
"/webhdfs/v1/?op=GETHOMEDIRECTORY&delegation=" + tokenStr);
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSWithKerberos.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSWithKerberos.java
index 45ce8ed730..757e3fd7e7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSWithKerberos.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSWithKerberos.java
@@ -23,11 +23,11 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
-import org.apache.hadoop.fs.http.client.HttpFSKerberosAuthenticator;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
import org.apache.hadoop.test.HFSTestCase;
import org.apache.hadoop.test.KerberosTestUtils;
import org.apache.hadoop.test.TestDir;
@@ -166,9 +166,9 @@ public Void call() throws Exception {
.parse(new InputStreamReader(conn.getInputStream()));
json =
(JSONObject) json
- .get(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_JSON);
+ .get(DelegationTokenAuthenticator.DELEGATION_TOKEN_JSON);
String tokenStr = (String) json
- .get(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON);
+ .get(DelegationTokenAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON);
//access httpfs using the delegation token
url = new URL(TestJettyHelper.getJettyURL(),
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/TestDelegationTokenManagerService.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/TestDelegationTokenManagerService.java
deleted file mode 100644
index da588e0114..0000000000
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/service/security/TestDelegationTokenManagerService.java
+++ /dev/null
@@ -1,89 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.lib.service.security;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.http.server.HttpFSServerWebApp;
-import org.apache.hadoop.lib.server.Server;
-import org.apache.hadoop.lib.service.DelegationTokenManager;
-import org.apache.hadoop.lib.service.DelegationTokenManagerException;
-import org.apache.hadoop.lib.service.hadoop.FileSystemAccessService;
-import org.apache.hadoop.lib.service.instrumentation.InstrumentationService;
-import org.apache.hadoop.lib.service.scheduler.SchedulerService;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.test.HTestCase;
-import org.apache.hadoop.test.TestDir;
-import org.apache.hadoop.test.TestDirHelper;
-import org.apache.hadoop.util.StringUtils;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-import java.util.Arrays;
-
-public class TestDelegationTokenManagerService extends HTestCase {
-
- @Test
- @TestDir
- public void service() throws Exception {
- String dir = TestDirHelper.getTestDir().getAbsolutePath();
- Configuration conf = new Configuration(false);
- conf.set("httpfs.services", StringUtils.join(",",
- Arrays.asList(InstrumentationService.class.getName(),
- SchedulerService.class.getName(),
- FileSystemAccessService.class.getName(),
- DelegationTokenManagerService.class.getName())));
- Server server = new HttpFSServerWebApp(dir, dir, dir, dir, conf);
- server.init();
- DelegationTokenManager tm = server.get(DelegationTokenManager.class);
- Assert.assertNotNull(tm);
- server.destroy();
- }
-
- @Test
- @TestDir
- @SuppressWarnings("unchecked")
- public void tokens() throws Exception {
- String dir = TestDirHelper.getTestDir().getAbsolutePath();
- Configuration conf = new Configuration(false);
- conf.set("server.services", StringUtils.join(",",
- Arrays.asList(DelegationTokenManagerService.class.getName())));
- HttpFSServerWebApp server = new HttpFSServerWebApp(dir, dir, dir, dir, conf);
- server.setAuthority(new InetSocketAddress(InetAddress.getLocalHost(), 14000));
- server.init();
- DelegationTokenManager tm = server.get(DelegationTokenManager.class);
- Token token = tm.createToken(UserGroupInformation.getCurrentUser(), "foo");
- Assert.assertNotNull(token);
- tm.verifyToken(token);
- Assert.assertTrue(tm.renewToken(token, "foo") > System.currentTimeMillis());
- tm.cancelToken(token, "foo");
- try {
- tm.verifyToken(token);
- Assert.fail();
- } catch (DelegationTokenManagerException ex) {
- //NOP
- } catch (Exception ex) {
- Assert.fail();
- }
- server.destroy();
- }
-
-}
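
The test file deleted above exercised the HttpFS-private DelegationTokenManager through its full lifecycle: create a token, verify it, renew it, cancel it, and expect verification to fail afterwards. A hedged sketch of that contract against a purely hypothetical interface (the real replacement is the shared hadoop-common delegation-token code wired in elsewhere in this patch):

  // Hypothetical interface and driver, illustrative only.
  interface TokenManagerSketch<T> {
    T create(String renewer) throws Exception;
    void verify(T token) throws Exception;                 // throws if invalid or cancelled
    long renew(T token, String renewer) throws Exception;  // returns the new expiry time
    void cancel(T token, String canceller) throws Exception;
  }

  class TokenLifecycleSketch {
    static <T> void exercise(TokenManagerSketch<T> tm) throws Exception {
      T token = tm.create("foo");
      tm.verify(token);                               // freshly created token is valid
      long newExpiry = tm.renew(token, "foo");        // expiry moves into the future
      assert newExpiry > System.currentTimeMillis();
      tm.cancel(token, "foo");
      try {
        tm.verify(token);                             // must now be rejected
        throw new AssertionError("cancelled token passed verification");
      } catch (Exception expected) {
        // expected: verification of a cancelled token fails
      }
    }
  }
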
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
index cccc464e55..3ef9240263 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
@@ -724,6 +724,10 @@ READ3Response read(XDR xdr, SecurityHandler securityHandler,
FSDataInputStream fis = clientCache.getDfsInputStream(userName,
Nfs3Utils.getFileIdPath(handle));
+ if (fis == null) {
+ return new READ3Response(Nfs3Status.NFS3ERR_ACCES);
+ }
+
try {
readCount = fis.read(offset, readbuffer, 0, count);
} catch (IOException e) {
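
The hunk above adds a null guard: when the client cache cannot hand back a DFS input stream for the calling user, the read answers NFS3ERR_ACCES instead of dereferencing a null stream, which is exactly what the re-enabled test in the next file checks. A minimal sketch of that fail-fast shape, with hypothetical stand-ins for the NFS3 types:

  // Hypothetical names; the real code returns new READ3Response(Nfs3Status.NFS3ERR_ACCES).
  class ReadGuardSketch {
    static final int NFS3_OK = 0;
    static final int NFS3ERR_ACCES = 13;

    static int read(java.util.Map<String, java.io.InputStream> streamCache,
                    String userName, String fileIdPath) {
      java.io.InputStream fis = streamCache.get(userName + ":" + fileIdPath);
      if (fis == null) {
        // No stream could be opened for this user: report an access error
        // instead of hitting a NullPointerException further down the read path.
        return NFS3ERR_ACCES;
      }
      // ... perform the positional read here ...
      return NFS3_OK;
    }
  }
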
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java
index e89929b889..3fc0d99188 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java
@@ -278,13 +278,11 @@ public void testRead() throws Exception {
readReq.serialize(xdr_req);
// Attempt by an unpriviledged user should fail.
- /* Hits HDFS-6582. It needs to be fixed first.
READ3Response response1 = nfsd.read(xdr_req.asReadOnlyWrap(),
securityHandlerUnpriviledged,
new InetSocketAddress("localhost", 1234));
assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
response1.getStatus());
- */
// Attempt by a priviledged user should pass.
READ3Response response2 = nfsd.read(xdr_req.asReadOnlyWrap(),
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4047a51905..65253c299f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -370,12 +370,38 @@ Release 2.6.0 - UNRELEASED
HDFS-6781. Separate HDFS commands from CommandsManual.apt.vm. (Akira
Ajisaka via Arpit Agarwal)
+ HDFS-6728. Dynamically add new volumes to DataStorage, formatted if
+ necessary. (Lei Xu via atm)
+
+ HDFS-6740. Make FSDataset support adding data volumes dynamically. (Lei
+ Xu via atm)
+
+ HDFS-6722. Display readable last contact time for dead nodes on NN webUI.
+ (Ming Ma via wheat9)
+
+ HDFS-6772. Get DN storages out of blockContentsStale state faster after
+ NN restarts. (Ming Ma via Arpit Agarwal)
+
+ HDFS-573. Porting libhdfs to Windows. (cnauroth)
+
+ HDFS-6828. Separate block replica dispatching from Balancer. (szetszwo via
+ jing9)
+
+ HDFS-6837. Code cleanup for Balancer and Dispatcher. (szetszwo via
+ jing9)
+
+ HDFS-6838. Code cleanup for unnecessary INode replacement.
+ (Jing Zhao via wheat9)
+
OPTIMIZATIONS
HDFS-6690. Deduplicate xattr names in memory. (wang)
BUG FIXES
+ HDFS-6823. dfs.web.authentication.kerberos.principal shows up in logs for
+ insecure HDFS (Allen Wittenauer via raviprak)
+
HDFS-6617. Flake TestDFSZKFailoverController.testManualFailoverWithDFSHAAdmin
due to a long edit log sync op. (Liang Xie via cnauroth)
@@ -462,6 +488,9 @@ Release 2.6.0 - UNRELEASED
HDFS-6791. A block could remain under replicated if all of its replicas are on
decommissioned nodes. (Ming Ma via jing9)
+ HDFS-6582. Missing null check in RpcProgramNfs3#read(XDR, SecurityHandler)
+ (Abhiraj Butala via brandonli)
+
Release 2.5.0 - UNRELEASED
INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index e910695f17..9b026f2bdb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -361,16 +361,97 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
- windows
+ native-win
false
windows
-
- true
-
+
+
+
+ org.apache.maven.plugins
+ maven-enforcer-plugin
+
+
+ enforce-os
+
+ enforce
+
+
+
+
+ windows
+ native-win build only supported on Windows
+
+
+ true
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-antrun-plugin
+
+
+ make
+ compile
+
+ run
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ native_tests
+ test
+ run
+
+ ${skipTests}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
native
@@ -408,21 +489,25 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
test
run
+ ${skipTests}
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
index fc5ebea4c0..854988b9c5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
@@ -76,9 +76,39 @@ if (NOT GENERATED_JAVAH)
MESSAGE(FATAL_ERROR "You must set the CMake variable GENERATED_JAVAH")
endif (NOT GENERATED_JAVAH)
-set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -Wall -O2")
-set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_REENTRANT -D_GNU_SOURCE")
-set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64")
+if (WIN32)
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /O2")
+
+ # Set warning level 4.
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /W4")
+
+ # Skip "unreferenced formal parameter".
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /wd4100")
+
+ # Skip "conditional expression is constant".
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /wd4127")
+
+ # Skip deprecated POSIX function warnings.
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_CRT_NONSTDC_NO_DEPRECATE")
+
+ # Skip CRT non-secure function warnings. If we can convert usage of
+ # strerror, getenv and ctime to their secure CRT equivalents, then we can
+ # re-enable the CRT non-secure function warnings.
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_CRT_SECURE_NO_WARNINGS")
+
+ # Omit unneeded headers.
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DWIN32_LEAN_AND_MEAN")
+
+ set(OS_DIR main/native/libhdfs/os/windows)
+ set(OUT_DIR target/bin)
+else (WIN32)
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -Wall -O2")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_REENTRANT -D_GNU_SOURCE")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64")
+ set(OS_DIR main/native/libhdfs/os/posix)
+ set(OS_LINK_LIBRARIES pthread)
+ set(OUT_DIR target/usr/local/lib)
+endif (WIN32)
include_directories(
${GENERATED_JAVAH}
@@ -87,6 +117,7 @@ include_directories(
${JNI_INCLUDE_DIRS}
main/native
main/native/libhdfs
+ ${OS_DIR}
)
set(_FUSE_DFS_VERSION 0.1.0)
@@ -96,6 +127,9 @@ add_dual_library(hdfs
main/native/libhdfs/exception.c
main/native/libhdfs/jni_helper.c
main/native/libhdfs/hdfs.c
+ main/native/libhdfs/common/htable.c
+ ${OS_DIR}/mutexes.c
+ ${OS_DIR}/thread_local_storage.c
)
if (NEED_LINK_DL)
set(LIB_DL dl)
@@ -104,17 +138,14 @@ endif(NEED_LINK_DL)
target_link_dual_libraries(hdfs
${JAVA_JVM_LIBRARY}
${LIB_DL}
- pthread
+ ${OS_LINK_LIBRARIES}
)
-dual_output_directory(hdfs target/usr/local/lib)
+
+dual_output_directory(hdfs ${OUT_DIR})
set(LIBHDFS_VERSION "0.0.0")
set_target_properties(hdfs PROPERTIES
SOVERSION ${LIBHDFS_VERSION})
-add_library(posix_util
- main/native/util/posix_util.c
-)
-
add_executable(test_libhdfs_ops
main/native/libhdfs/test/test_libhdfs_ops.c
)
@@ -156,11 +187,12 @@ target_link_libraries(test_native_mini_dfs
add_executable(test_libhdfs_threaded
main/native/libhdfs/expect.c
main/native/libhdfs/test_libhdfs_threaded.c
+ ${OS_DIR}/thread.c
)
target_link_libraries(test_libhdfs_threaded
hdfs
native_mini_dfs
- pthread
+ ${OS_LINK_LIBRARIES}
)
add_executable(test_libhdfs_zerocopy
@@ -170,17 +202,21 @@ add_executable(test_libhdfs_zerocopy
target_link_libraries(test_libhdfs_zerocopy
hdfs
native_mini_dfs
- pthread
+ ${OS_LINK_LIBRARIES}
)
-add_executable(test_libhdfs_vecsum
- main/native/libhdfs/test/vecsum.c
-)
-target_link_libraries(test_libhdfs_vecsum
- hdfs
- pthread
- rt
-)
+# Skip vecsum on Windows. This could be made to work in the future by
+# introducing an abstraction layer over the sys/mman.h functions.
+if (NOT WIN32)
+ add_executable(test_libhdfs_vecsum
+ main/native/libhdfs/test/vecsum.c
+ )
+ target_link_libraries(test_libhdfs_vecsum
+ hdfs
+ pthread
+ rt
+ )
+endif(NOT WIN32)
IF(REQUIRE_LIBWEBHDFS)
add_subdirectory(contrib/libwebhdfs)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index 99a5cfcfca..5559e0dbc9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -1671,9 +1671,11 @@ public static HttpServer2.Builder httpServerTemplateForNNAndJN(
.setKeytabConfKey(getSpnegoKeytabKey(conf, spnegoKeytabFileKey));
// initialize the webserver for uploading/downloading files.
- LOG.info("Starting web server as: "
- + SecurityUtil.getServerPrincipal(conf.get(spnegoUserNameKey),
- httpAddr.getHostName()));
+ if (UserGroupInformation.isSecurityEnabled()) {
+ LOG.info("Starting web server as: "
+ + SecurityUtil.getServerPrincipal(conf.get(spnegoUserNameKey),
+ httpAddr.getHostName()));
+ }
if (policy.isHttpEnabled()) {
if (httpAddr.getPort() == 0) {
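
The guard above makes DFSUtil log the SPNEGO server principal only when Kerberos is enabled, so dfs.web.authentication.kerberos.principal no longer shows up in the logs of insecure clusters (the HDFS-6823 entry in the CHANGES.txt hunk). A small illustrative sketch of the same pattern, with the security check and principal lookup stubbed out:

  // Sketch only: isSecurityEnabled() stands in for UserGroupInformation.isSecurityEnabled().
  class SecureLogSketch {
    static boolean securityEnabled = false;   // would come from the site configuration

    static boolean isSecurityEnabled() { return securityEnabled; }

    static void logStartup(String principal) {
      if (isSecurityEnabled()) {
        // Mention the Kerberos principal only when it is actually in use.
        System.out.println("Starting web server as: " + principal);
      }
    }
  }
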
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
index 33f7a0aacf..7661d25ee7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
@@ -18,19 +18,9 @@
package org.apache.hadoop.hdfs.server.balancer;
import static com.google.common.base.Preconditions.checkArgument;
-import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT;
-import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY;
-import static org.apache.hadoop.hdfs.protocolPB.PBHelper.vintPrefixed;
-import java.io.BufferedInputStream;
-import java.io.BufferedOutputStream;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
import java.io.PrintStream;
-import java.net.Socket;
import java.net.URI;
import java.text.DateFormat;
import java.util.ArrayList;
@@ -38,20 +28,11 @@
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
-import java.util.EnumMap;
import java.util.Formatter;
-import java.util.HashMap;
-import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
-import java.util.Map;
import java.util.Set;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -63,31 +44,16 @@
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.StorageType;
-import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
-import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
-import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
-import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
-import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil;
-import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient;
-import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
-import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.hdfs.server.balancer.Dispatcher.DDatanode;
+import org.apache.hadoop.hdfs.server.balancer.Dispatcher.DDatanode.StorageGroup;
+import org.apache.hadoop.hdfs.server.balancer.Dispatcher.Source;
+import org.apache.hadoop.hdfs.server.balancer.Dispatcher.Task;
+import org.apache.hadoop.hdfs.server.balancer.Dispatcher.Util;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException;
-import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.net.NetworkTopology;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.util.HostsFileReader;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.Tool;
@@ -200,15 +166,7 @@ public class Balancer {
private static final long GB = 1L << 30; //1GB
private static final long MAX_SIZE_TO_MOVE = 10*GB;
- private static final long MAX_BLOCKS_SIZE_TO_FETCH = 2*GB;
- /** The maximum number of concurrent blocks moves for
- * balancing purpose at a datanode
- */
- private static final int MAX_NO_PENDING_BLOCK_ITERATIONS = 5;
- public static final long DELAY_AFTER_ERROR = 10 * 1000L; //10 seconds
- public static final int BLOCK_MOVE_READ_TIMEOUT=20*60*1000; // 20 minutes
-
private static final String USAGE = "Usage: java "
+ Balancer.class.getSimpleName()
+ "\n\t[-policy ]\tthe balancing policy: "
@@ -220,652 +178,17 @@ public class Balancer {
+ "\n\t[-include [-f | comma-sperated list of hosts]]"
+ "\tIncludes only the specified datanodes.";
- private final NameNodeConnector nnc;
- private final KeyManager keyManager;
-
+ private final Dispatcher dispatcher;
private final BalancingPolicy policy;
- private final SaslDataTransferClient saslClient;
private final double threshold;
- // set of data nodes to be excluded from balancing operations.
- Set<String> nodesToBeExcluded;
- //Restrict balancing to the following nodes.
- Set<String> nodesToBeIncluded;
// all data node lists
private final Collection<Source> overUtilized = new LinkedList<Source>();
private final Collection<Source> aboveAvgUtilized = new LinkedList<Source>();
- private final Collection<BalancerDatanode.StorageGroup> belowAvgUtilized
- = new LinkedList<BalancerDatanode.StorageGroup>();
- private final Collection<BalancerDatanode.StorageGroup> underUtilized
- = new LinkedList<BalancerDatanode.StorageGroup>();
-
- private final Collection<Source> sources = new HashSet<Source>();
- private final Collection<BalancerDatanode.StorageGroup> targets
- = new HashSet<BalancerDatanode.StorageGroup>();
-
- private final Map<Block, BalancerBlock> globalBlockList
- = new HashMap<Block, BalancerBlock>();
- private final MovedBlocks movedBlocks;
-
- /** Map (datanodeUuid,storageType -> StorageGroup) */
- private final StorageGroupMap storageGroupMap = new StorageGroupMap();
-
- private NetworkTopology cluster;
-
- private final ExecutorService moverExecutor;
- private final ExecutorService dispatcherExecutor;
- private final int maxConcurrentMovesPerNode;
-
-
- private static class StorageGroupMap {
- private static String toKey(String datanodeUuid, StorageType storageType) {
- return datanodeUuid + ":" + storageType;
- }
-
- private final Map<String, BalancerDatanode.StorageGroup> map
- = new HashMap<String, BalancerDatanode.StorageGroup>();
-
- BalancerDatanode.StorageGroup get(String datanodeUuid, StorageType storageType) {
- return map.get(toKey(datanodeUuid, storageType));
- }
-
- void put(BalancerDatanode.StorageGroup g) {
- final String key = toKey(g.getDatanode().getDatanodeUuid(), g.storageType);
- final BalancerDatanode.StorageGroup existing = map.put(key, g);
- Preconditions.checkState(existing == null);
- }
-
- int size() {
- return map.size();
- }
-
- void clear() {
- map.clear();
- }
- }
- /* This class keeps track of a scheduled block move */
- private class PendingBlockMove {
- private BalancerBlock block;
- private Source source;
- private BalancerDatanode proxySource;
- private BalancerDatanode.StorageGroup target;
-
- /** constructor */
- private PendingBlockMove() {
- }
-
- @Override
- public String toString() {
- final Block b = block.getBlock();
- return b + " with size=" + b.getNumBytes() + " from "
- + source.getDisplayName() + " to " + target.getDisplayName()
- + " through " + proxySource.datanode;
- }
-
- /* choose a block & a proxy source for this pendingMove
- * whose source & target have already been chosen.
- *
- * Return true if a block and its proxy are chosen; false otherwise
- */
- private boolean chooseBlockAndProxy() {
- // iterate all source's blocks until find a good one
- for (Iterator<BalancerBlock> blocks=
- source.getBlockIterator(); blocks.hasNext();) {
- if (markMovedIfGoodBlock(blocks.next())) {
- blocks.remove();
- return true;
- }
- }
- return false;
- }
-
- /* Return true if the given block is good for the tentative move;
- * If it is good, add it to the moved list to marked as "Moved".
- * A block is good if
- * 1. it is a good candidate; see isGoodBlockCandidate
- * 2. can find a proxy source that's not busy for this move
- */
- private boolean markMovedIfGoodBlock(BalancerBlock block) {
- synchronized(block) {
- synchronized(movedBlocks) {
- if (isGoodBlockCandidate(source, target, block)) {
- this.block = block;
- if ( chooseProxySource() ) {
- movedBlocks.put(block);
- if (LOG.isDebugEnabled()) {
- LOG.debug("Decided to move " + this);
- }
- return true;
- }
- }
- }
- }
- return false;
- }
-
- /* Now we find out source, target, and block, we need to find a proxy
- *
- * @return true if a proxy is found; otherwise false
- */
- private boolean chooseProxySource() {
- final DatanodeInfo targetDN = target.getDatanode();
- // if node group is supported, first try add nodes in the same node group
- if (cluster.isNodeGroupAware()) {
- for (BalancerDatanode.StorageGroup loc : block.getLocations()) {
- if (cluster.isOnSameNodeGroup(loc.getDatanode(), targetDN) && addTo(loc)) {
- return true;
- }
- }
- }
- // check if there is replica which is on the same rack with the target
- for (BalancerDatanode.StorageGroup loc : block.getLocations()) {
- if (cluster.isOnSameRack(loc.getDatanode(), targetDN) && addTo(loc)) {
- return true;
- }
- }
- // find out a non-busy replica
- for (BalancerDatanode.StorageGroup loc : block.getLocations()) {
- if (addTo(loc)) {
- return true;
- }
- }
- return false;
- }
-
- /** add to a proxy source for specific block movement */
- private boolean addTo(BalancerDatanode.StorageGroup g) {
- final BalancerDatanode bdn = g.getBalancerDatanode();
- if (bdn.addPendingBlock(this)) {
- proxySource = bdn;
- return true;
- }
- return false;
- }
-
- /* Dispatch the block move task to the proxy source & wait for the response
- */
- private void dispatch() {
- Socket sock = new Socket();
- DataOutputStream out = null;
- DataInputStream in = null;
- try {
- sock.connect(
- NetUtils.createSocketAddr(target.getDatanode().getXferAddr()),
- HdfsServerConstants.READ_TIMEOUT);
- /* Unfortunately we don't have a good way to know if the Datanode is
- * taking a really long time to move a block, OR something has
- * gone wrong and it's never going to finish. To deal with this
- * scenario, we set a long timeout (20 minutes) to avoid hanging
- * the balancer indefinitely.
- */
- sock.setSoTimeout(BLOCK_MOVE_READ_TIMEOUT);
-
- sock.setKeepAlive(true);
-
- OutputStream unbufOut = sock.getOutputStream();
- InputStream unbufIn = sock.getInputStream();
- ExtendedBlock eb = new ExtendedBlock(nnc.getBlockpoolID(), block.getBlock());
- Token<BlockTokenIdentifier> accessToken = keyManager.getAccessToken(eb);
- IOStreamPair saslStreams = saslClient.socketSend(sock, unbufOut,
- unbufIn, keyManager, accessToken, target.getDatanode());
- unbufOut = saslStreams.out;
- unbufIn = saslStreams.in;
- out = new DataOutputStream(new BufferedOutputStream(unbufOut,
- HdfsConstants.IO_FILE_BUFFER_SIZE));
- in = new DataInputStream(new BufferedInputStream(unbufIn,
- HdfsConstants.IO_FILE_BUFFER_SIZE));
-
- sendRequest(out, eb, StorageType.DEFAULT, accessToken);
- receiveResponse(in);
- bytesMoved.addAndGet(block.getNumBytes());
- LOG.info("Successfully moved " + this);
- } catch (IOException e) {
- LOG.warn("Failed to move " + this + ": " + e.getMessage());
- /* proxy or target may have an issue, insert a small delay
- * before using these nodes further. This avoids a potential storm
- * of "threads quota exceeded" Warnings when the balancer
- * gets out of sync with work going on in datanode.
- */
- proxySource.activateDelay(DELAY_AFTER_ERROR);
- target.getBalancerDatanode().activateDelay(DELAY_AFTER_ERROR);
- } finally {
- IOUtils.closeStream(out);
- IOUtils.closeStream(in);
- IOUtils.closeSocket(sock);
-
- proxySource.removePendingBlock(this);
- target.getBalancerDatanode().removePendingBlock(this);
-
- synchronized (this ) {
- reset();
- }
- synchronized (Balancer.this) {
- Balancer.this.notifyAll();
- }
- }
- }
-
- /* Send a block replace request to the output stream*/
- private void sendRequest(DataOutputStream out, ExtendedBlock eb,
- StorageType storageType,
- Token<BlockTokenIdentifier> accessToken) throws IOException {
- new Sender(out).replaceBlock(eb, storageType, accessToken,
- source.getDatanode().getDatanodeUuid(), proxySource.datanode);
- }
-
- /* Receive a block copy response from the input stream */
- private void receiveResponse(DataInputStream in) throws IOException {
- BlockOpResponseProto response = BlockOpResponseProto.parseFrom(
- vintPrefixed(in));
- if (response.getStatus() != Status.SUCCESS) {
- if (response.getStatus() == Status.ERROR_ACCESS_TOKEN)
- throw new IOException("block move failed due to access token error");
- throw new IOException("block move is failed: " +
- response.getMessage());
- }
- }
-
- /* reset the object */
- private void reset() {
- block = null;
- source = null;
- proxySource = null;
- target = null;
- }
-
- /* start a thread to dispatch the block move */
- private void scheduleBlockMove() {
- moverExecutor.execute(new Runnable() {
- @Override
- public void run() {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Start moving " + PendingBlockMove.this);
- }
- dispatch();
- }
- });
- }
- }
-
- /* A class for keeping track of blocks in the Balancer */
- static class BalancerBlock extends MovedBlocks.Locations {
- BalancerBlock(Block block) {
- super(block);
- }
- }
-
- /* The class represents a desired move of bytes between two nodes
- * and the target.
- * An object of this class is stored in a source.
- */
- static private class Task {
- private final BalancerDatanode.StorageGroup target;
- private long size; //bytes scheduled to move
-
- /* constructor */
- private Task(BalancerDatanode.StorageGroup target, long size) {
- this.target = target;
- this.size = size;
- }
- }
-
-
- /* A class that keeps track of a datanode in Balancer */
- private static class BalancerDatanode {
-
- /** A group of storages in a datanode with the same storage type. */
- private class StorageGroup {
- final StorageType storageType;
- final double utilization;
- final long maxSize2Move;
- private long scheduledSize = 0L;
-
- private StorageGroup(StorageType storageType, double utilization,
- long maxSize2Move) {
- this.storageType = storageType;
- this.utilization = utilization;
- this.maxSize2Move = maxSize2Move;
- }
-
- BalancerDatanode getBalancerDatanode() {
- return BalancerDatanode.this;
- }
-
- DatanodeInfo getDatanode() {
- return BalancerDatanode.this.datanode;
- }
-
- /** Decide if still need to move more bytes */
- protected synchronized boolean hasSpaceForScheduling() {
- return availableSizeToMove() > 0L;
- }
-
- /** @return the total number of bytes that need to be moved */
- synchronized long availableSizeToMove() {
- return maxSize2Move - scheduledSize;
- }
-
- /** increment scheduled size */
- synchronized void incScheduledSize(long size) {
- scheduledSize += size;
- }
-
- /** @return scheduled size */
- synchronized long getScheduledSize() {
- return scheduledSize;
- }
-
- /** Reset scheduled size to zero. */
- synchronized void resetScheduledSize() {
- scheduledSize = 0L;
- }
-
- /** @return the name for display */
- String getDisplayName() {
- return datanode + ":" + storageType;
- }
-
- @Override
- public String toString() {
- return "" + utilization;
- }
- }
-
- final DatanodeInfo datanode;
- final EnumMap<StorageType, StorageGroup> storageMap
- = new EnumMap<StorageType, StorageGroup>(StorageType.class);
- protected long delayUntil = 0L;
- // blocks being moved but not confirmed yet
- private final List<PendingBlockMove> pendingBlocks;
- private final int maxConcurrentMoves;
-
- @Override
- public String toString() {
- return getClass().getSimpleName() + ":" + datanode + ":" + storageMap;
- }
-
- /* Constructor
- * Depending on avgutil & threshold, calculate maximum bytes to move
- */
- private BalancerDatanode(DatanodeStorageReport report,
- double threshold, int maxConcurrentMoves) {
- this.datanode = report.getDatanodeInfo();
- this.maxConcurrentMoves = maxConcurrentMoves;
- this.pendingBlocks = new ArrayList<PendingBlockMove>(maxConcurrentMoves);
- }
-
- private void put(StorageType storageType, StorageGroup g) {
- final StorageGroup existing = storageMap.put(storageType, g);
- Preconditions.checkState(existing == null);
- }
-
- StorageGroup addStorageGroup(StorageType storageType, double utilization,
- long maxSize2Move) {
- final StorageGroup g = new StorageGroup(storageType, utilization,
- maxSize2Move);
- put(storageType, g);
- return g;
- }
-
- Source addSource(StorageType storageType, double utilization,
- long maxSize2Move, Balancer balancer) {
- final Source s = balancer.new Source(storageType, utilization,
- maxSize2Move, this);
- put(storageType, s);
- return s;
- }
-
- synchronized private void activateDelay(long delta) {
- delayUntil = Time.now() + delta;
- }
-
- synchronized private boolean isDelayActive() {
- if (delayUntil == 0 || Time.now() > delayUntil){
- delayUntil = 0;
- return false;
- }
- return true;
- }
-
- /* Check if the node can schedule more blocks to move */
- synchronized private boolean isPendingQNotFull() {
- if ( pendingBlocks.size() < this.maxConcurrentMoves ) {
- return true;
- }
- return false;
- }
-
- /* Check if all the dispatched moves are done */
- synchronized private boolean isPendingQEmpty() {
- return pendingBlocks.isEmpty();
- }
-
- /* Add a scheduled block move to the node */
- private synchronized boolean addPendingBlock(
- PendingBlockMove pendingBlock) {
- if (!isDelayActive() && isPendingQNotFull()) {
- return pendingBlocks.add(pendingBlock);
- }
- return false;
- }
-
- /* Remove a scheduled block move from the node */
- private synchronized boolean removePendingBlock(
- PendingBlockMove pendingBlock) {
- return pendingBlocks.remove(pendingBlock);
- }
- }
-
- /** A node that can be the sources of a block move */
- private class Source extends BalancerDatanode.StorageGroup {
-
- /* A thread that initiates a block move
- * and waits for block move to complete */
- private class BlockMoveDispatcher implements Runnable {
- @Override
- public void run() {
- dispatchBlocks();
- }
- }
-
- private final List<Task> tasks = new ArrayList<Task>(2);
- private long blocksToReceive = 0L;
- /* source blocks point to balancerBlocks in the global list because
- * we want to keep one copy of a block in balancer and be aware that
- * the locations are changing over time.
- */
- private final List<BalancerBlock> srcBlockList
- = new ArrayList<BalancerBlock>();
-
- /* constructor */
- private Source(StorageType storageType, double utilization,
- long maxSize2Move, BalancerDatanode dn) {
- dn.super(storageType, utilization, maxSize2Move);
- }
-
- /** Add a task */
- private void addTask(Task task) {
- Preconditions.checkState(task.target != this,
- "Source and target are the same storage group " + getDisplayName());
- incScheduledSize(task.size);
- tasks.add(task);
- }
-
- /* Return an iterator to this source's blocks */
- private Iterator<BalancerBlock> getBlockIterator() {
- return srcBlockList.iterator();
- }
-
- /* fetch new blocks of this source from namenode and
- * update this source's block list & the global block list
- * Return the total size of the received blocks in the number of bytes.
- */
- private long getBlockList() throws IOException {
- final long size = Math.min(MAX_BLOCKS_SIZE_TO_FETCH, blocksToReceive);
- final BlockWithLocations[] newBlocks = nnc.getNamenode().getBlocks(
- getDatanode(), size).getBlocks();
-
- long bytesReceived = 0;
- for (BlockWithLocations blk : newBlocks) {
- bytesReceived += blk.getBlock().getNumBytes();
- BalancerBlock block;
- synchronized(globalBlockList) {
- block = globalBlockList.get(blk.getBlock());
- if (block==null) {
- block = new BalancerBlock(blk.getBlock());
- globalBlockList.put(blk.getBlock(), block);
- } else {
- block.clearLocations();
- }
-
- synchronized (block) {
- // update locations
- final String[] datanodeUuids = blk.getDatanodeUuids();
- final StorageType[] storageTypes = blk.getStorageTypes();
- for (int i = 0; i < datanodeUuids.length; i++) {
- final BalancerDatanode.StorageGroup g = storageGroupMap.get(
- datanodeUuids[i], storageTypes[i]);
- if (g != null) { // not unknown
- block.addLocation(g);
- }
- }
- }
- if (!srcBlockList.contains(block) && isGoodBlockCandidate(block)) {
- // filter bad candidates
- srcBlockList.add(block);
- }
- }
- }
- return bytesReceived;
- }
-
- /* Decide if the given block is a good candidate to move or not */
- private boolean isGoodBlockCandidate(BalancerBlock block) {
- for (Task t : tasks) {
- if (Balancer.this.isGoodBlockCandidate(this, t.target, block)) {
- return true;
- }
- }
- return false;
- }
-
- /* Return a block that's good for the source thread to dispatch immediately
- * The block's source, target, and proxy source are determined too.
- * When choosing proxy and target, source & target throttling
- * has been considered. They are chosen only when they have the capacity
- * to support this block move.
- * The block should be dispatched immediately after this method is returned.
- */
- private PendingBlockMove chooseNextBlockToMove() {
- for (Iterator<Task> i = tasks.iterator(); i.hasNext();) {
- final Task task = i.next();
- final BalancerDatanode target = task.target.getBalancerDatanode();
- PendingBlockMove pendingBlock = new PendingBlockMove();
- if (target.addPendingBlock(pendingBlock)) {
- // target is not busy, so do a tentative block allocation
- pendingBlock.source = this;
- pendingBlock.target = task.target;
- if ( pendingBlock.chooseBlockAndProxy() ) {
- long blockSize = pendingBlock.block.getNumBytes();
- incScheduledSize(-blockSize);
- task.size -= blockSize;
- if (task.size == 0) {
- i.remove();
- }
- return pendingBlock;
- } else {
- // cancel the tentative move
- target.removePendingBlock(pendingBlock);
- }
- }
- }
- return null;
- }
-
- /* iterate all source's blocks to remove moved ones */
- private void filterMovedBlocks() {
- for (Iterator<BalancerBlock> blocks=getBlockIterator();
- blocks.hasNext();) {
- if (movedBlocks.contains(blocks.next().getBlock())) {
- blocks.remove();
- }
- }
- }
-
- private static final int SOURCE_BLOCK_LIST_MIN_SIZE=5;
- /* Return if should fetch more blocks from namenode */
- private boolean shouldFetchMoreBlocks() {
- return srcBlockList.size()<SOURCE_BLOCK_LIST_MIN_SIZE && blocksToReceive>0;
- }
-
- /* This method iteratively does the following:
- * it first selects a block to move,
- * then sends a request to the proxy source to start the block move
- * when the source's block list falls below a threshold, it asks
- * the namenode for more blocks.
- * It terminates when it has dispatch enough block move tasks or
- * it has received enough blocks from the namenode, or
- * the elapsed time of the iteration has exceeded the max time limit.
- */
- private static final long MAX_ITERATION_TIME = 20*60*1000L; //20 mins
- private void dispatchBlocks() {
- long startTime = Time.now();
- long scheduledSize = getScheduledSize();
- this.blocksToReceive = 2*scheduledSize;
- boolean isTimeUp = false;
- int noPendingBlockIteration = 0;
- while(!isTimeUp && getScheduledSize()>0 &&
- (!srcBlockList.isEmpty() || blocksToReceive>0)) {
- PendingBlockMove pendingBlock = chooseNextBlockToMove();
- if (pendingBlock != null) {
- // move the block
- pendingBlock.scheduleBlockMove();
- continue;
- }
-
- /* Since we can not schedule any block to move,
- * filter any moved blocks from the source block list and
- * check if we should fetch more blocks from the namenode
- */
- filterMovedBlocks(); // filter already moved blocks
- if (shouldFetchMoreBlocks()) {
- // fetch new blocks
- try {
- blocksToReceive -= getBlockList();
- continue;
- } catch (IOException e) {
- LOG.warn("Exception while getting block list", e);
- return;
- }
- } else {
- // source node cannot find a pendingBlockToMove, iteration +1
- noPendingBlockIteration++;
- // in case no blocks can be moved for source node's task,
- // jump out of while-loop after 5 iterations.
- if (noPendingBlockIteration >= MAX_NO_PENDING_BLOCK_ITERATIONS) {
- resetScheduledSize();
- }
- }
-
- // check if time is up or not
- if (Time.now()-startTime > MAX_ITERATION_TIME) {
- isTimeUp = true;
- continue;
- }
-
- /* Now we can not schedule any block to move and there are
- * no new blocks added to the source block list, so we wait.
- */
- try {
- synchronized(Balancer.this) {
- Balancer.this.wait(1000); // wait for targets/sources to be idle
- }
- } catch (InterruptedException ignored) {
- }
- }
- }
- }
+ private final Collection<StorageGroup> belowAvgUtilized
+ = new LinkedList<StorageGroup>();
+ private final Collection<StorageGroup> underUtilized
+ = new LinkedList<StorageGroup>();
/* Check that this Balancer is compatible with the Block Placement Policy
* used by the Namenode.
@@ -887,38 +210,26 @@ private static void checkReplicationPolicyCompatibility(Configuration conf
* when connection fails.
*/
Balancer(NameNodeConnector theblockpool, Parameters p, Configuration conf) {
+ final long movedWinWidth = conf.getLong(
+ DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_KEY,
+ DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_DEFAULT);
+ final int moverThreads = conf.getInt(
+ DFSConfigKeys.DFS_BALANCER_MOVERTHREADS_KEY,
+ DFSConfigKeys.DFS_BALANCER_MOVERTHREADS_DEFAULT);
+ final int dispatcherThreads = conf.getInt(
+ DFSConfigKeys.DFS_BALANCER_DISPATCHERTHREADS_KEY,
+ DFSConfigKeys.DFS_BALANCER_DISPATCHERTHREADS_DEFAULT);
+ final int maxConcurrentMovesPerNode = conf.getInt(
+ DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
+ DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT);
+
+ this.dispatcher = new Dispatcher(theblockpool, p.nodesToBeIncluded,
+ p.nodesToBeExcluded, movedWinWidth, moverThreads, dispatcherThreads,
+ maxConcurrentMovesPerNode, conf);
this.threshold = p.threshold;
this.policy = p.policy;
- this.nodesToBeExcluded = p.nodesToBeExcluded;
- this.nodesToBeIncluded = p.nodesToBeIncluded;
- this.nnc = theblockpool;
- this.keyManager = nnc.getKeyManager();
-
- final long movedWinWidth = conf.getLong(
- DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_KEY,
- DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_DEFAULT);
- movedBlocks = new MovedBlocks(movedWinWidth);
-
- cluster = NetworkTopology.getInstance(conf);
-
- this.moverExecutor = Executors.newFixedThreadPool(
- conf.getInt(DFSConfigKeys.DFS_BALANCER_MOVERTHREADS_KEY,
- DFSConfigKeys.DFS_BALANCER_MOVERTHREADS_DEFAULT));
- this.dispatcherExecutor = Executors.newFixedThreadPool(
- conf.getInt(DFSConfigKeys.DFS_BALANCER_DISPATCHERTHREADS_KEY,
- DFSConfigKeys.DFS_BALANCER_DISPATCHERTHREADS_DEFAULT));
- this.maxConcurrentMovesPerNode =
- conf.getInt(DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
- DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT);
- this.saslClient = new SaslDataTransferClient(
- DataTransferSaslUtil.getSaslPropertiesResolver(conf),
- TrustedChannelResolver.getInstance(conf),
- conf.getBoolean(
- IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY,
- IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT));
}
-
private static long getCapacity(DatanodeStorageReport report, StorageType t) {
long capacity = 0L;
for(StorageReport r : report.getStorageReports()) {
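
After this hunk the constructor only reads the balancer tuning keys and hands them to a single Dispatcher; the thread pools, SASL client, moved-blocks window, and include/exclude sets it used to own now live behind that object. A compact sketch of the same read-the-tunables-then-build-one-collaborator shape, with invented key names, defaults, and types:

  // Illustrative only: key names, defaults and DispatcherSketch are made up for the example.
  import java.util.Properties;

  class DispatcherSketch {
    DispatcherSketch(long movedWinWidth, int moverThreads, int dispatcherThreads,
                     int maxMovesPerNode) {
      // would own the executors, moved-blocks window, SASL client, ...
    }
  }

  class BalancerCtorSketch {
    private final DispatcherSketch dispatcher;

    BalancerCtorSketch(Properties conf) {
      long movedWinWidth = Long.parseLong(
          conf.getProperty("balancer.moved.win.width", "5400000"));
      int moverThreads = Integer.parseInt(
          conf.getProperty("balancer.mover.threads", "1000"));
      int dispatcherThreads = Integer.parseInt(
          conf.getProperty("balancer.dispatcher.threads", "200"));
      int maxMovesPerNode = Integer.parseInt(
          conf.getProperty("balancer.max.concurrent.moves", "5"));
      // Everything move-related is delegated to one collaborator.
      this.dispatcher = new DispatcherSketch(
          movedWinWidth, moverThreads, dispatcherThreads, maxMovesPerNode);
    }
  }
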
@@ -939,26 +250,6 @@ private static long getRemaining(DatanodeStorageReport report, StorageType t) {
return remaining;
}
- private boolean shouldIgnore(DatanodeInfo dn) {
- //ignore decommissioned nodes
- final boolean decommissioned = dn.isDecommissioned();
- //ignore decommissioning nodes
- final boolean decommissioning = dn.isDecommissionInProgress();
- // ignore nodes in exclude list
- final boolean excluded = Util.shouldBeExcluded(nodesToBeExcluded, dn);
- // ignore nodes not in the include list (if include list is not empty)
- final boolean notIncluded = !Util.shouldBeIncluded(nodesToBeIncluded, dn);
-
- if (decommissioned || decommissioning || excluded || notIncluded) {
- if (LOG.isTraceEnabled()) {
- LOG.trace("Excluding datanode " + dn + ": " + decommissioned + ", "
- + decommissioning + ", " + excluded + ", " + notIncluded);
- }
- return true;
- }
- return false;
- }
-
/**
* Given a datanode storage set, build a network topology and decide
* over-utilized storages, above average utilized storages,
@@ -966,16 +257,11 @@ private boolean shouldIgnore(DatanodeInfo dn) {
* The input datanode storage set is shuffled in order to randomize
* to the storage matching later on.
*
- * @return the total number of bytes that are
- * needed to move to make the cluster balanced.
- * @param reports a set of datanode storage reports
+ * @return the number of bytes needed to move in order to balance the cluster.
*/
- private long init(DatanodeStorageReport[] reports) {
+ private long init(List<DatanodeStorageReport> reports) {
// compute average utilization
for (DatanodeStorageReport r : reports) {
- if (shouldIgnore(r.getDatanodeInfo())) {
- continue;
- }
policy.accumulateSpaces(r);
}
policy.initAvgUtilization();
@@ -983,15 +269,8 @@ private long init(DatanodeStorageReport[] reports) {
// create network topology and classify utilization collections:
// over-utilized, above-average, below-average and under-utilized.
long overLoadedBytes = 0L, underLoadedBytes = 0L;
- for(DatanodeStorageReport r : DFSUtil.shuffle(reports)) {
- final DatanodeInfo datanode = r.getDatanodeInfo();
- if (shouldIgnore(datanode)) {
- continue; // ignore decommissioning or decommissioned nodes
- }
- cluster.add(datanode);
-
- final BalancerDatanode dn = new BalancerDatanode(r, underLoadedBytes,
- maxConcurrentMovesPerNode);
+ for(DatanodeStorageReport r : reports) {
+ final DDatanode dn = dispatcher.newDatanode(r);
for(StorageType t : StorageType.asList()) {
final Double utilization = policy.getUtilization(r, t);
if (utilization == null) { // datanode does not have such storage type
@@ -1004,9 +283,9 @@ private long init(DatanodeStorageReport[] reports) {
final long maxSize2Move = computeMaxSize2Move(capacity,
getRemaining(r, t), utilizationDiff, threshold);
- final BalancerDatanode.StorageGroup g;
+ final StorageGroup g;
if (utilizationDiff > 0) {
- final Source s = dn.addSource(t, utilization, maxSize2Move, this);
+ final Source s = dn.addSource(t, maxSize2Move, dispatcher);
if (thresholdDiff <= 0) { // within threshold
aboveAvgUtilized.add(s);
} else {
@@ -1015,7 +294,7 @@ private long init(DatanodeStorageReport[] reports) {
}
g = s;
} else {
- g = dn.addStorageGroup(t, utilization, maxSize2Move);
+ g = dn.addStorageGroup(t, maxSize2Move);
if (thresholdDiff <= 0) { // within threshold
belowAvgUtilized.add(g);
} else {
@@ -1023,14 +302,15 @@ private long init(DatanodeStorageReport[] reports) {
underUtilized.add(g);
}
}
- storageGroupMap.put(g);
+ dispatcher.getStorageGroupMap().put(g);
}
}
logUtilizationCollections();
- Preconditions.checkState(storageGroupMap.size() == overUtilized.size()
- + underUtilized.size() + aboveAvgUtilized.size() + belowAvgUtilized.size(),
+ Preconditions.checkState(dispatcher.getStorageGroupMap().size()
+ == overUtilized.size() + underUtilized.size() + aboveAvgUtilized.size()
+ + belowAvgUtilized.size(),
"Mismatched number of storage groups");
// return number of bytes to be moved in order to make the cluster balanced
@@ -1063,7 +343,7 @@ private void logUtilizationCollections() {
logUtilizationCollection("underutilized", underUtilized);
}
- private static <T extends BalancerDatanode.StorageGroup>
+ private static <T extends StorageGroup>
void logUtilizationCollection(String name, Collection<T> items) {
LOG.info(items.size() + " " + name + ": " + items);
}
@@ -1077,7 +357,7 @@ void logUtilizationCollection(String name, Collection items) {
*/
private long chooseStorageGroups() {
// First, match nodes on the same node group if cluster is node group aware
- if (cluster.isNodeGroupAware()) {
+ if (dispatcher.getCluster().isNodeGroupAware()) {
chooseStorageGroups(Matcher.SAME_NODE_GROUP);
}
@@ -1086,15 +366,7 @@ private long chooseStorageGroups() {
// At last, match all remaining nodes
chooseStorageGroups(Matcher.ANY_OTHER);
- Preconditions.checkState(storageGroupMap.size() >= sources.size() + targets.size(),
- "Mismatched number of datanodes (" + storageGroupMap.size() + " < "
- + sources.size() + " sources, " + targets.size() + " targets)");
-
- long bytesToMove = 0L;
- for (Source src : sources) {
- bytesToMove += src.getScheduledSize();
- }
- return bytesToMove;
+ return dispatcher.bytesToMove();
}
/** Decide all pairs according to the matcher. */
@@ -1124,8 +396,7 @@ private void chooseStorageGroups(final Matcher matcher) {
* datanodes or the candidates are source nodes with (utilization > Avg), and
* the others are target nodes with (utilization < Avg).
*/
- private <G extends BalancerDatanode.StorageGroup, C extends BalancerDatanode.StorageGroup>
+ private <G extends StorageGroup, C extends StorageGroup>
void chooseStorageGroups(Collection<G> groups, Collection<C> candidates,
Matcher matcher) {
for(final Iterator i = groups.iterator(); i.hasNext();) {
@@ -1141,9 +412,8 @@ void chooseStorageGroups(Collection groups, Collection candidates,
* For the given datanode, choose a candidate and then schedule it.
* @return true if a candidate is chosen; false if no candidates is chosen.
*/
- private <C extends BalancerDatanode.StorageGroup>
- boolean choose4One(BalancerDatanode.StorageGroup g,
- Collection<C> candidates, Matcher matcher) {
+ private <C extends StorageGroup> boolean choose4One(StorageGroup g,
+ Collection<C> candidates, Matcher matcher) {
final Iterator<C> i = candidates.iterator();
final C chosen = chooseCandidate(g, i, matcher);
@@ -1161,28 +431,26 @@ boolean choose4One(BalancerDatanode.StorageGroup g,
return true;
}
- private void matchSourceWithTargetToMove(Source source,
- BalancerDatanode.StorageGroup target) {
+ private void matchSourceWithTargetToMove(Source source, StorageGroup target) {
long size = Math.min(source.availableSizeToMove(), target.availableSizeToMove());
final Task task = new Task(target, size);
source.addTask(task);
- target.incScheduledSize(task.size);
- sources.add(source);
- targets.add(target);
+ target.incScheduledSize(task.getSize());
+ dispatcher.add(source, target);
LOG.info("Decided to move "+StringUtils.byteDesc(size)+" bytes from "
+ source.getDisplayName() + " to " + target.getDisplayName());
}
/** Choose a candidate for the given datanode. */
- private <G extends BalancerDatanode.StorageGroup, C extends BalancerDatanode.StorageGroup>
+ private <G extends StorageGroup, C extends StorageGroup>
C chooseCandidate(G g, Iterator<C> candidates, Matcher matcher) {
if (g.hasSpaceForScheduling()) {
for(; candidates.hasNext(); ) {
final C c = candidates.next();
if (!c.hasSpaceForScheduling()) {
candidates.remove();
- } else if (matcher.match(cluster, g.getDatanode(), c.getDatanode())) {
+ } else if (matcher.match(dispatcher.getCluster(),
+ g.getDatanodeInfo(), c.getDatanodeInfo())) {
return c;
}
}
@@ -1190,203 +458,25 @@ C chooseCandidate(G g, Iterator candidates, Matcher matcher) {
return null;
}
- private final AtomicLong bytesMoved = new AtomicLong();
-
- /* Start a thread to dispatch block moves for each source.
- * The thread selects blocks to move & sends request to proxy source to
- * initiate block move. The process is flow controlled. Block selection is
- * blocked if there are too many un-confirmed block moves.
- * Return the total number of bytes successfully moved in this iteration.
- */
- private long dispatchBlockMoves() throws InterruptedException {
- long bytesLastMoved = bytesMoved.get();
- Future<?>[] futures = new Future<?>[sources.size()];
- int i=0;
- for (Source source : sources) {
- futures[i++] = dispatcherExecutor.submit(source.new BlockMoveDispatcher());
- }
-
- // wait for all dispatcher threads to finish
- for (Future<?> future : futures) {
- try {
- future.get();
- } catch (ExecutionException e) {
- LOG.warn("Dispatcher thread failed", e.getCause());
- }
- }
-
- // wait for all block moving to be done
- waitForMoveCompletion();
-
- return bytesMoved.get()-bytesLastMoved;
- }
-
- // The sleeping period before checking if block move is completed again
- static private long blockMoveWaitTime = 30000L;
-
- /** set the sleeping period for block move completion check */
- static void setBlockMoveWaitTime(long time) {
- blockMoveWaitTime = time;
- }
-
- /* wait for all block move confirmations
- * by checking each target's pendingMove queue
- */
- private void waitForMoveCompletion() {
- boolean shouldWait;
- do {
- shouldWait = false;
- for (BalancerDatanode.StorageGroup target : targets) {
- if (!target.getBalancerDatanode().isPendingQEmpty()) {
- shouldWait = true;
- break;
- }
- }
- if (shouldWait) {
- try {
- Thread.sleep(blockMoveWaitTime);
- } catch (InterruptedException ignored) {
- }
- }
- } while (shouldWait);
- }
-
- /* Decide if it is OK to move the given block from source to target
- * A block is a good candidate if
- * 1. the block is not in the process of being moved/has not been moved;
- * 2. the block does not have a replica on the target;
- * 3. doing the move does not reduce the number of racks that the block has
- */
- private boolean isGoodBlockCandidate(Source source,
- BalancerDatanode.StorageGroup target, BalancerBlock block) {
- if (source.storageType != target.storageType) {
- return false;
- }
- // check if the block is moved or not
- if (movedBlocks.contains(block.getBlock())) {
- return false;
- }
- if (block.isLocatedOn(target)) {
- return false;
- }
- if (cluster.isNodeGroupAware() &&
- isOnSameNodeGroupWithReplicas(target, block, source)) {
- return false;
- }
-
- boolean goodBlock = false;
- if (cluster.isOnSameRack(source.getDatanode(), target.getDatanode())) {
- // good if source and target are on the same rack
- goodBlock = true;
- } else {
- boolean notOnSameRack = true;
- synchronized (block) {
- for (BalancerDatanode.StorageGroup loc : block.getLocations()) {
- if (cluster.isOnSameRack(loc.getDatanode(), target.getDatanode())) {
- notOnSameRack = false;
- break;
- }
- }
- }
- if (notOnSameRack) {
- // good if target is target is not on the same rack as any replica
- goodBlock = true;
- } else {
- // good if source is on the same rack as on of the replicas
- for (BalancerDatanode.StorageGroup loc : block.getLocations()) {
- if (loc != source &&
- cluster.isOnSameRack(loc.getDatanode(), source.getDatanode())) {
- goodBlock = true;
- break;
- }
- }
- }
- }
- return goodBlock;
- }
-
- /**
- * Check if there are any replica (other than source) on the same node group
- * with target. If true, then target is not a good candidate for placing
- * specific block replica as we don't want 2 replicas under the same nodegroup
- * after balance.
- * @param target targetDataNode
- * @param block dataBlock
- * @param source sourceDataNode
- * @return true if there are any replica (other than source) on the same node
- * group with target
- */
- private boolean isOnSameNodeGroupWithReplicas(BalancerDatanode.StorageGroup target,
- BalancerBlock block, Source source) {
- final DatanodeInfo targetDn = target.getDatanode();
- for (BalancerDatanode.StorageGroup loc : block.getLocations()) {
- if (loc != source &&
- cluster.isOnSameNodeGroup(loc.getDatanode(), targetDn)) {
- return true;
- }
- }
- return false;
- }
-
/* reset all fields in a balancer preparing for the next iteration */
private void resetData(Configuration conf) {
- this.cluster = NetworkTopology.getInstance(conf);
this.overUtilized.clear();
this.aboveAvgUtilized.clear();
this.belowAvgUtilized.clear();
this.underUtilized.clear();
- this.storageGroupMap.clear();
- this.sources.clear();
- this.targets.clear();
this.policy.reset();
- cleanGlobalBlockList();
- this.movedBlocks.cleanup();
+ dispatcher.reset(conf);
}
- /* Remove all blocks from the global block list except for the ones in the
- * moved list.
- */
- private void cleanGlobalBlockList() {
- for (Iterator<Block> globalBlockListIterator=globalBlockList.keySet().iterator();
- globalBlockListIterator.hasNext();) {
- Block block = globalBlockListIterator.next();
- if(!movedBlocks.contains(block)) {
- globalBlockListIterator.remove();
- }
- }
- }
-
- // Exit status
- enum ReturnStatus {
- // These int values will map directly to the balancer process's exit code.
- SUCCESS(0),
- IN_PROGRESS(1),
- ALREADY_RUNNING(-1),
- NO_MOVE_BLOCK(-2),
- NO_MOVE_PROGRESS(-3),
- IO_EXCEPTION(-4),
- ILLEGAL_ARGS(-5),
- INTERRUPTED(-6);
-
- final int code;
-
- ReturnStatus(int code) {
- this.code = code;
- }
- }
-
/** Run an iteration for all datanodes. */
- private ReturnStatus run(int iteration, Formatter formatter,
+ private ExitStatus run(int iteration, Formatter formatter,
Configuration conf) {
try {
- /* get all live datanodes of a cluster and their disk usage
- * decide the number of bytes need to be moved
- */
- final long bytesLeftToMove = init(
- nnc.getClient().getDatanodeStorageReport(DatanodeReportType.LIVE));
+ final List<DatanodeStorageReport> reports = dispatcher.init();
+ final long bytesLeftToMove = init(reports);
if (bytesLeftToMove == 0) {
System.out.println("The cluster is balanced. Exiting...");
- return ReturnStatus.SUCCESS;
+ return ExitStatus.SUCCESS;
} else {
LOG.info( "Need to move "+ StringUtils.byteDesc(bytesLeftToMove)
+ " to make the cluster balanced." );
@@ -1400,7 +490,7 @@ private ReturnStatus run(int iteration, Formatter formatter,
final long bytesToMove = chooseStorageGroups();
if (bytesToMove == 0) {
System.out.println("No block can be moved. Exiting...");
- return ReturnStatus.NO_MOVE_BLOCK;
+ return ExitStatus.NO_MOVE_BLOCK;
} else {
LOG.info( "Will move " + StringUtils.byteDesc(bytesToMove) +
" in this iteration");
@@ -1409,7 +499,7 @@ private ReturnStatus run(int iteration, Formatter formatter,
formatter.format("%-24s %10d %19s %18s %17s%n",
DateFormat.getDateTimeInstance().format(new Date()),
iteration,
- StringUtils.byteDesc(bytesMoved.get()),
+ StringUtils.byteDesc(dispatcher.getBytesMoved()),
StringUtils.byteDesc(bytesLeftToMove),
StringUtils.byteDesc(bytesToMove)
);
@@ -1420,24 +510,22 @@ private ReturnStatus run(int iteration, Formatter formatter,
* available to move.
* Exit if no byte has been moved for 5 consecutive iterations.
*/
- if (!this.nnc.shouldContinue(dispatchBlockMoves())) {
- return ReturnStatus.NO_MOVE_PROGRESS;
+ if (!dispatcher.dispatchAndCheckContinue()) {
+ return ExitStatus.NO_MOVE_PROGRESS;
}
- return ReturnStatus.IN_PROGRESS;
+ return ExitStatus.IN_PROGRESS;
} catch (IllegalArgumentException e) {
System.out.println(e + ". Exiting ...");
- return ReturnStatus.ILLEGAL_ARGS;
+ return ExitStatus.ILLEGAL_ARGUMENTS;
} catch (IOException e) {
System.out.println(e + ". Exiting ...");
- return ReturnStatus.IO_EXCEPTION;
+ return ExitStatus.IO_EXCEPTION;
} catch (InterruptedException e) {
System.out.println(e + ". Exiting ...");
- return ReturnStatus.INTERRUPTED;
+ return ExitStatus.INTERRUPTED;
} finally {
- // shutdown thread pools
- dispatcherExecutor.shutdownNow();
- moverExecutor.shutdownNow();
+ dispatcher.shutdownNow();
}
}
@@ -1474,14 +562,14 @@ static int run(Collection namenodes, final Parameters p,
Collections.shuffle(connectors);
for(NameNodeConnector nnc : connectors) {
final Balancer b = new Balancer(nnc, p, conf);
- final ReturnStatus r = b.run(iteration, formatter, conf);
+ final ExitStatus r = b.run(iteration, formatter, conf);
// clean all lists
b.resetData(conf);
- if (r == ReturnStatus.IN_PROGRESS) {
+ if (r == ExitStatus.IN_PROGRESS) {
done = false;
- } else if (r != ReturnStatus.SUCCESS) {
+ } else if (r != ExitStatus.SUCCESS) {
//must be an error status, return.
- return r.code;
+ return r.getExitCode();
}
}
@@ -1494,7 +582,7 @@ static int run(Collection namenodes, final Parameters p,
nnc.close();
}
}
- return ReturnStatus.SUCCESS.code;
+ return ExitStatus.SUCCESS.getExitCode();
}
/* Given elapsedTime in ms, return a printable string */
@@ -1546,76 +634,6 @@ public String toString() {
}
}
- static class Util {
-
- /**
- * @param datanode
- * @return returns true if data node is part of the excludedNodes.
- */
- static boolean shouldBeExcluded(Set excludedNodes, DatanodeInfo datanode) {
- return isIn(excludedNodes, datanode);
- }
-
- /**
- * @param datanode
- * @return returns true if includedNodes is empty or data node is part of the includedNodes.
- */
- static boolean shouldBeIncluded(Set includedNodes, DatanodeInfo datanode) {
- return (includedNodes.isEmpty() ||
- isIn(includedNodes, datanode));
- }
- /**
- * Match is checked using host name , ip address with and without port number.
- * @param datanodeSet
- * @param datanode
- * @return true if the datanode's transfer address matches the set of nodes.
- */
- private static boolean isIn(Set datanodeSet, DatanodeInfo datanode) {
- return isIn(datanodeSet, datanode.getPeerHostName(), datanode.getXferPort()) ||
- isIn(datanodeSet, datanode.getIpAddr(), datanode.getXferPort()) ||
- isIn(datanodeSet, datanode.getHostName(), datanode.getXferPort());
- }
-
- /**
- * returns true if nodes contains host or host:port
- * @param nodes
- * @param host
- * @param port
- * @return
- */
- private static boolean isIn(Set nodes, String host, int port) {
- if (host == null) {
- return false;
- }
- return (nodes.contains(host) || nodes.contains(host +":"+ port));
- }
-
- /**
- * parse a comma separated string to obtain set of host names
- * @param string
- * @return
- */
- static Set parseHostList(String string) {
- String[] addrs = StringUtils.getTrimmedStrings(string);
- return new HashSet(Arrays.asList(addrs));
- }
-
- /**
- * read set of host names from a file
- * @param fileName
- * @return
- */
- static Set getHostListFromFile(String fileName) {
- Set nodes = new HashSet ();
- try {
- HostsFileReader.readFileToSet("nodes", fileName, nodes);
- return StringUtils.getTrimmedStrings(nodes);
- } catch (IOException e) {
- throw new IllegalArgumentException("Unable to open file: " + fileName);
- }
- }
- }
-
static class Cli extends Configured implements Tool {
/**
* Parse arguments and then run Balancer.
@@ -1635,10 +653,10 @@ public int run(String[] args) {
return Balancer.run(namenodes, parse(args), conf);
} catch (IOException e) {
System.out.println(e + ". Exiting ...");
- return ReturnStatus.IO_EXCEPTION.code;
+ return ExitStatus.IO_EXCEPTION.getExitCode();
} catch (InterruptedException e) {
System.out.println(e + ". Exiting ...");
- return ReturnStatus.INTERRUPTED.code;
+ return ExitStatus.INTERRUPTED.getExitCode();
} finally {
System.out.format("%-24s ", DateFormat.getDateTimeInstance().format(new Date()));
System.out.println("Balancing took " + time2Str(Time.now()-startTime));
@@ -1688,7 +706,7 @@ static Parameters parse(String[] args) {
checkArgument(++i < args.length,
"File containing nodes to exclude is not specified: args = "
+ Arrays.toString(args));
- nodesTobeExcluded = Util.getHostListFromFile(args[i]);
+ nodesTobeExcluded = Util.getHostListFromFile(args[i], "exclude");
} else {
nodesTobeExcluded = Util.parseHostList(args[i]);
}
@@ -1700,7 +718,7 @@ static Parameters parse(String[] args) {
checkArgument(++i < args.length,
"File containing nodes to include is not specified: args = "
+ Arrays.toString(args));
- nodesTobeIncluded = Util.getHostListFromFile(args[i]);
+ nodesTobeIncluded = Util.getHostListFromFile(args[i], "include");
} else {
nodesTobeIncluded = Util.parseHostList(args[i]);
}
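
Editorial aside, not part of the patch itself: the -exclude/-include handling above accepts either an inline comma-separated host list or a file, and a datanode matches if the set contains its hostname or IP, with or without the transfer port. A minimal, self-contained sketch of that matching logic, with illustrative names only:

import java.util.HashSet;
import java.util.Set;

class HostListSketch {
  // Parse "dn1.example.com, 10.0.0.5:50010" into a trimmed host set.
  static Set<String> parseHostList(String commaSeparated) {
    Set<String> hosts = new HashSet<String>();
    for (String s : commaSeparated.split(",")) {
      String t = s.trim();
      if (!t.isEmpty()) {
        hosts.add(t);
      }
    }
    return hosts;
  }

  // A node is a member if the set names either "host" or "host:port".
  static boolean isIn(Set<String> nodes, String host, int port) {
    return host != null && (nodes.contains(host) || nodes.contains(host + ":" + port));
  }

  public static void main(String[] args) {
    Set<String> excluded = parseHostList("dn1.example.com, 10.0.0.5:50010");
    System.out.println(isIn(excluded, "dn1.example.com", 50010)); // true
    System.out.println(isIn(excluded, "10.0.0.5", 50010));        // true
    System.out.println(isIn(excluded, "10.0.0.6", 50010));        // false
  }
}
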
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
new file mode 100644
index 0000000000..4a6f96be7e
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
@@ -0,0 +1,1034 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.balancer;
+
+import static org.apache.hadoop.hdfs.protocolPB.PBHelper.vintPrefixed;
+
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.Socket;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.EnumMap;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.StorageType;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
+import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
+import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
+import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil;
+import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.hdfs.server.balancer.Dispatcher.DDatanode.StorageGroup;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
+import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.net.NetworkTopology;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.HostsFileReader;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
+
+import com.google.common.base.Preconditions;
+
+/** Dispatching block replica moves between datanodes. */
+@InterfaceAudience.Private
+public class Dispatcher {
+ static final Log LOG = LogFactory.getLog(Dispatcher.class);
+
+ private static final long GB = 1L << 30; // 1GB
+ private static final long MAX_BLOCKS_SIZE_TO_FETCH = 2 * GB;
+
+ private static final int MAX_NO_PENDING_MOVE_ITERATIONS = 5;
+ private static final long DELAY_AFTER_ERROR = 10 * 1000L; // 10 seconds
+ private static final int BLOCK_MOVE_READ_TIMEOUT = 20 * 60 * 1000; // 20
+ // minutes
+
+ private final NameNodeConnector nnc;
+ private final SaslDataTransferClient saslClient;
+
+ /** Set of datanodes to be excluded. */
+ private final Set<String> excludedNodes;
+ /** Restrict to the following nodes. */
+ private final Set<String> includedNodes;
+
+ private final Collection<Source> sources = new HashSet<Source>();
+ private final Collection<StorageGroup> targets = new HashSet<StorageGroup>();
+
+ private final GlobalBlockMap globalBlocks = new GlobalBlockMap();
+ private final MovedBlocks<StorageGroup> movedBlocks;
+
+ /** Map (datanodeUuid,storageType -> StorageGroup) */
+ private final StorageGroupMap storageGroupMap = new StorageGroupMap();
+
+ private NetworkTopology cluster;
+
+ private final ExecutorService moveExecutor;
+ private final ExecutorService dispatchExecutor;
+ /** The maximum number of concurrent blocks moves at a datanode */
+ private final int maxConcurrentMovesPerNode;
+
+ private final AtomicLong bytesMoved = new AtomicLong();
+
+ private static class GlobalBlockMap {
+ private final Map<Block, DBlock> map = new HashMap<Block, DBlock>();
+
+ /**
+ * Get the block from the map;
+ * if the block is not found, create a new block and put it in the map.
+ */
+ private DBlock get(Block b) {
+ DBlock block = map.get(b);
+ if (block == null) {
+ block = new DBlock(b);
+ map.put(b, block);
+ }
+ return block;
+ }
+
+ /** Remove all blocks except for the moved blocks. */
+ private void removeAllButRetain(MovedBlocks<StorageGroup> movedBlocks) {
+ for (Iterator<Block> i = map.keySet().iterator(); i.hasNext();) {
+ if (!movedBlocks.contains(i.next())) {
+ i.remove();
+ }
+ }
+ }
+ }
+
+ static class StorageGroupMap {
+ private static String toKey(String datanodeUuid, StorageType storageType) {
+ return datanodeUuid + ":" + storageType;
+ }
+
+ private final Map<String, StorageGroup> map = new HashMap<String, StorageGroup>();
+
+ StorageGroup get(String datanodeUuid, StorageType storageType) {
+ return map.get(toKey(datanodeUuid, storageType));
+ }
+
+ void put(StorageGroup g) {
+ final String key = toKey(g.getDatanodeInfo().getDatanodeUuid(), g.storageType);
+ final StorageGroup existing = map.put(key, g);
+ Preconditions.checkState(existing == null);
+ }
+
+ int size() {
+ return map.size();
+ }
+
+ void clear() {
+ map.clear();
+ }
+ }
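
Editorial aside, not part of the patch itself: StorageGroupMap above keys a single flat map by datanodeUuid plus storage type instead of nesting one map per datanode. A minimal sketch of the same composite-key idea, using plain strings for the value type:

import java.util.HashMap;
import java.util.Map;

class CompositeKeyMapSketch {
  private final Map<String, String> map = new HashMap<String, String>();

  // Same shape as StorageGroupMap#toKey: "<datanodeUuid>:<storageType>".
  private static String toKey(String datanodeUuid, String storageType) {
    return datanodeUuid + ":" + storageType;
  }

  void put(String datanodeUuid, String storageType, String group) {
    map.put(toKey(datanodeUuid, storageType), group);
  }

  String get(String datanodeUuid, String storageType) {
    return map.get(toKey(datanodeUuid, storageType));
  }

  public static void main(String[] args) {
    CompositeKeyMapSketch m = new CompositeKeyMapSketch();
    m.put("uuid-1", "DISK", "group-A");
    System.out.println(m.get("uuid-1", "DISK")); // group-A
    System.out.println(m.get("uuid-1", "SSD"));  // null
  }
}
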
+
+ /** This class keeps track of a scheduled block move */
+ private class PendingMove {
+ private DBlock block;
+ private Source source;
+ private DDatanode proxySource;
+ private StorageGroup target;
+
+ private PendingMove() {
+ }
+
+ @Override
+ public String toString() {
+ final Block b = block.getBlock();
+ return b + " with size=" + b.getNumBytes() + " from "
+ + source.getDisplayName() + " to " + target.getDisplayName()
+ + " through " + proxySource.datanode;
+ }
+
+ /**
+ * Choose a block & a proxy source for this pendingMove whose source &
+ * target have already been chosen.
+ *
+ * @return true if a block and its proxy are chosen; false otherwise
+ */
+ private boolean chooseBlockAndProxy() {
+ // iterate all source's blocks until find a good one
+ for (Iterator<DBlock> i = source.getBlockIterator(); i.hasNext();) {
+ if (markMovedIfGoodBlock(i.next())) {
+ i.remove();
+ return true;
+ }
+ }
+ return false;
+ }
+
+ /**
+ * @return true if the given block is good for the tentative move.
+ */
+ private boolean markMovedIfGoodBlock(DBlock block) {
+ synchronized (block) {
+ synchronized (movedBlocks) {
+ if (isGoodBlockCandidate(source, target, block)) {
+ this.block = block;
+ if (chooseProxySource()) {
+ movedBlocks.put(block);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Decided to move " + this);
+ }
+ return true;
+ }
+ }
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Choose a proxy source.
+ *
+ * @return true if a proxy is found; otherwise false
+ */
+ private boolean chooseProxySource() {
+ final DatanodeInfo targetDN = target.getDatanodeInfo();
+ // if node group is supported, first try add nodes in the same node group
+ if (cluster.isNodeGroupAware()) {
+ for (StorageGroup loc : block.getLocations()) {
+ if (cluster.isOnSameNodeGroup(loc.getDatanodeInfo(), targetDN)
+ && addTo(loc)) {
+ return true;
+ }
+ }
+ }
+ // check if there is replica which is on the same rack with the target
+ for (StorageGroup loc : block.getLocations()) {
+ if (cluster.isOnSameRack(loc.getDatanodeInfo(), targetDN) && addTo(loc)) {
+ return true;
+ }
+ }
+ // find out a non-busy replica
+ for (StorageGroup loc : block.getLocations()) {
+ if (addTo(loc)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ /** add to a proxy source for specific block movement */
+ private boolean addTo(StorageGroup g) {
+ final DDatanode dn = g.getDDatanode();
+ if (dn.addPendingBlock(this)) {
+ proxySource = dn;
+ return true;
+ }
+ return false;
+ }
+
+ /** Dispatch the move to the proxy source & wait for the response. */
+ private void dispatch() {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Start moving " + this);
+ }
+
+ Socket sock = new Socket();
+ DataOutputStream out = null;
+ DataInputStream in = null;
+ try {
+ sock.connect(
+ NetUtils.createSocketAddr(target.getDatanodeInfo().getXferAddr()),
+ HdfsServerConstants.READ_TIMEOUT);
+ /*
+ * Unfortunately we don't have a good way to know if the Datanode is
+ * taking a really long time to move a block, OR something has gone
+ * wrong and it's never going to finish. To deal with this scenario, we
+ * set a long timeout (20 minutes) to avoid hanging indefinitely.
+ */
+ sock.setSoTimeout(BLOCK_MOVE_READ_TIMEOUT);
+
+ sock.setKeepAlive(true);
+
+ OutputStream unbufOut = sock.getOutputStream();
+ InputStream unbufIn = sock.getInputStream();
+ ExtendedBlock eb = new ExtendedBlock(nnc.getBlockpoolID(),
+ block.getBlock());
+ final KeyManager km = nnc.getKeyManager();
+ Token<BlockTokenIdentifier> accessToken = km.getAccessToken(eb);
+ IOStreamPair saslStreams = saslClient.socketSend(sock, unbufOut,
+ unbufIn, km, accessToken, target.getDatanodeInfo());
+ unbufOut = saslStreams.out;
+ unbufIn = saslStreams.in;
+ out = new DataOutputStream(new BufferedOutputStream(unbufOut,
+ HdfsConstants.IO_FILE_BUFFER_SIZE));
+ in = new DataInputStream(new BufferedInputStream(unbufIn,
+ HdfsConstants.IO_FILE_BUFFER_SIZE));
+
+ sendRequest(out, eb, accessToken);
+ receiveResponse(in);
+ bytesMoved.addAndGet(block.getNumBytes());
+ LOG.info("Successfully moved " + this);
+ } catch (IOException e) {
+ LOG.warn("Failed to move " + this + ": " + e.getMessage());
+ // Proxy or target may have some issues, delay before using these nodes
+ // further in order to avoid a potential storm of "threads quota
+ // exceeded" warnings when the dispatcher gets out of sync with work
+ // going on in datanodes.
+ proxySource.activateDelay(DELAY_AFTER_ERROR);
+ target.getDDatanode().activateDelay(DELAY_AFTER_ERROR);
+ } finally {
+ IOUtils.closeStream(out);
+ IOUtils.closeStream(in);
+ IOUtils.closeSocket(sock);
+
+ proxySource.removePendingBlock(this);
+ target.getDDatanode().removePendingBlock(this);
+
+ synchronized (this) {
+ reset();
+ }
+ synchronized (Dispatcher.this) {
+ Dispatcher.this.notifyAll();
+ }
+ }
+ }
+
+ /** Send a block replace request to the output stream */
+ private void sendRequest(DataOutputStream out, ExtendedBlock eb,
+ Token<BlockTokenIdentifier> accessToken) throws IOException {
+ new Sender(out).replaceBlock(eb, target.storageType, accessToken,
+ source.getDatanodeInfo().getDatanodeUuid(), proxySource.datanode);
+ }
+
+ /** Receive a block copy response from the input stream */
+ private void receiveResponse(DataInputStream in) throws IOException {
+ BlockOpResponseProto response = BlockOpResponseProto
+ .parseFrom(vintPrefixed(in));
+ if (response.getStatus() != Status.SUCCESS) {
+ if (response.getStatus() == Status.ERROR_ACCESS_TOKEN) {
+ throw new IOException("block move failed due to access token error");
+ }
+ throw new IOException("block move is failed: " + response.getMessage());
+ }
+ }
+
+ /** reset the object */
+ private void reset() {
+ block = null;
+ source = null;
+ proxySource = null;
+ target = null;
+ }
+ }
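
Editorial aside, not part of the patch itself: PendingMove#dispatch() follows a common pattern, connect with a bounded read timeout, send a request, block on the response, and let the caller back off before reusing a node that failed. A stripped-down sketch of that pattern over a plain socket, with an invented one-byte status in place of the real replaceBlock protocol:

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;

class MoveRequestSketch {
  // Connect with bounded timeouts, write a request, read a one-byte status,
  // and report failure so the caller can apply its error back-off delay.
  static boolean sendMove(String host, int port, byte[] request) {
    Socket sock = new Socket();
    try {
      sock.connect(new InetSocketAddress(host, port), 60 * 1000); // connect timeout
      sock.setSoTimeout(20 * 60 * 1000);                          // long read timeout, as in the patch
      DataOutputStream out = new DataOutputStream(sock.getOutputStream());
      DataInputStream in = new DataInputStream(sock.getInputStream());
      out.write(request);
      out.flush();
      return in.readByte() == 0;                                  // 0 == success in this sketch
    } catch (IOException e) {
      return false;                                               // caller backs off (DELAY_AFTER_ERROR)
    } finally {
      try { sock.close(); } catch (IOException ignored) { }
    }
  }
}
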
+
+ /** A class for keeping track of block locations in the dispatcher. */
+ private static class DBlock extends MovedBlocks.Locations<StorageGroup> {
+ DBlock(Block block) {
+ super(block);
+ }
+ }
+
+ /** The class represents a desired move. */
+ static class Task {
+ private final StorageGroup target;
+ private long size; // bytes scheduled to move
+
+ Task(StorageGroup target, long size) {
+ this.target = target;
+ this.size = size;
+ }
+
+ long getSize() {
+ return size;
+ }
+ }
+
+ /** A class that keeps track of a datanode. */
+ static class DDatanode {
+
+ /** A group of storages in a datanode with the same storage type. */
+ class StorageGroup {
+ final StorageType storageType;
+ final long maxSize2Move;
+ private long scheduledSize = 0L;
+
+ private StorageGroup(StorageType storageType, long maxSize2Move) {
+ this.storageType = storageType;
+ this.maxSize2Move = maxSize2Move;
+ }
+
+ private DDatanode getDDatanode() {
+ return DDatanode.this;
+ }
+
+ DatanodeInfo getDatanodeInfo() {
+ return DDatanode.this.datanode;
+ }
+
+ /** Decide if still need to move more bytes */
+ synchronized boolean hasSpaceForScheduling() {
+ return availableSizeToMove() > 0L;
+ }
+
+ /** @return the total number of bytes that need to be moved */
+ synchronized long availableSizeToMove() {
+ return maxSize2Move - scheduledSize;
+ }
+
+ /** increment scheduled size */
+ synchronized void incScheduledSize(long size) {
+ scheduledSize += size;
+ }
+
+ /** @return scheduled size */
+ synchronized long getScheduledSize() {
+ return scheduledSize;
+ }
+
+ /** Reset scheduled size to zero. */
+ synchronized void resetScheduledSize() {
+ scheduledSize = 0L;
+ }
+
+ /** @return the name for display */
+ String getDisplayName() {
+ return datanode + ":" + storageType;
+ }
+
+ @Override
+ public String toString() {
+ return getDisplayName();
+ }
+ }
+
+ final DatanodeInfo datanode;
+ final EnumMap<StorageType, StorageGroup> storageMap
+ = new EnumMap<StorageType, StorageGroup>(StorageType.class);
+ protected long delayUntil = 0L;
+ /** blocks being moved but not confirmed yet */
+ private final List<PendingMove> pendings;
+ private final int maxConcurrentMoves;
+
+ @Override
+ public String toString() {
+ return getClass().getSimpleName() + ":" + datanode + ":" + storageMap.values();
+ }
+
+ private DDatanode(DatanodeStorageReport r, int maxConcurrentMoves) {
+ this.datanode = r.getDatanodeInfo();
+ this.maxConcurrentMoves = maxConcurrentMoves;
+ this.pendings = new ArrayList<PendingMove>(maxConcurrentMoves);
+ }
+
+ private void put(StorageType storageType, StorageGroup g) {
+ final StorageGroup existing = storageMap.put(storageType, g);
+ Preconditions.checkState(existing == null);
+ }
+
+ StorageGroup addStorageGroup(StorageType storageType, long maxSize2Move) {
+ final StorageGroup g = new StorageGroup(storageType, maxSize2Move);
+ put(storageType, g);
+ return g;
+ }
+
+ Source addSource(StorageType storageType, long maxSize2Move, Dispatcher d) {
+ final Source s = d.new Source(storageType, maxSize2Move, this);
+ put(storageType, s);
+ return s;
+ }
+
+ synchronized private void activateDelay(long delta) {
+ delayUntil = Time.monotonicNow() + delta;
+ }
+
+ synchronized private boolean isDelayActive() {
+ if (delayUntil == 0 || Time.monotonicNow() > delayUntil) {
+ delayUntil = 0;
+ return false;
+ }
+ return true;
+ }
+
+ /** Check if the node can schedule more blocks to move */
+ synchronized boolean isPendingQNotFull() {
+ return pendings.size() < maxConcurrentMoves;
+ }
+
+ /** Check if all the dispatched moves are done */
+ synchronized boolean isPendingQEmpty() {
+ return pendings.isEmpty();
+ }
+
+ /** Add a scheduled block move to the node */
+ synchronized boolean addPendingBlock(PendingMove pendingBlock) {
+ if (!isDelayActive() && isPendingQNotFull()) {
+ return pendings.add(pendingBlock);
+ }
+ return false;
+ }
+
+ /** Remove a scheduled block move from the node */
+ synchronized boolean removePendingBlock(PendingMove pendingBlock) {
+ return pendings.remove(pendingBlock);
+ }
+ }
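
Editorial aside, not part of the patch itself: DDatanode above throttles moves per node with two gates, an error back-off window (activateDelay/isDelayActive) and a bounded pending queue (maxConcurrentMoves). A minimal sketch of that admission logic with hypothetical names:

import java.util.ArrayList;
import java.util.List;

class NodeThrottleSketch {
  private final List<Object> pendings = new ArrayList<Object>();
  private final int maxConcurrentMoves;
  private long delayUntilMillis = 0L;

  NodeThrottleSketch(int maxConcurrentMoves) {
    this.maxConcurrentMoves = maxConcurrentMoves;
  }

  // Mirror of activateDelay(): refuse new work for deltaMillis after an error.
  synchronized void backOff(long deltaMillis) {
    delayUntilMillis = System.currentTimeMillis() + deltaMillis;
  }

  // Mirror of addPendingBlock(): admit a move only when the back-off window
  // has passed and fewer than maxConcurrentMoves are outstanding.
  synchronized boolean tryAdd(Object move) {
    if (System.currentTimeMillis() < delayUntilMillis) {
      return false;
    }
    if (pendings.size() >= maxConcurrentMoves) {
      return false;
    }
    return pendings.add(move);
  }

  synchronized boolean remove(Object move) {
    return pendings.remove(move);
  }
}
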
+
+ /** A node that can be the sources of a block move */
+ class Source extends DDatanode.StorageGroup {
+
+ private final List<Task> tasks = new ArrayList<Task>(2);
+ private long blocksToReceive = 0L;
+ /**
+ * Source blocks point to the objects in {@link Dispatcher#globalBlocks}
+ * because we want to keep one copy of a block and be aware that the
+ * locations are changing over time.
+ */
+ private final List<DBlock> srcBlocks = new ArrayList<DBlock>();
+
+ private Source(StorageType storageType, long maxSize2Move, DDatanode dn) {
+ dn.super(storageType, maxSize2Move);
+ }
+
+ /** Add a task */
+ void addTask(Task task) {
+ Preconditions.checkState(task.target != this,
+ "Source and target are the same storage group " + getDisplayName());
+ incScheduledSize(task.size);
+ tasks.add(task);
+ }
+
+ /** @return an iterator to this source's blocks */
+ Iterator<DBlock> getBlockIterator() {
+ return srcBlocks.iterator();
+ }
+
+ /**
+ * Fetch new blocks of this source from namenode and update this source's
+ * block list & {@link Dispatcher#globalBlocks}.
+ *
+ * @return the total size of the received blocks, in bytes.
+ */
+ private long getBlockList() throws IOException {
+ final long size = Math.min(MAX_BLOCKS_SIZE_TO_FETCH, blocksToReceive);
+ final BlocksWithLocations newBlocks = nnc.getBlocks(getDatanodeInfo(), size);
+
+ long bytesReceived = 0;
+ for (BlockWithLocations blk : newBlocks.getBlocks()) {
+ bytesReceived += blk.getBlock().getNumBytes();
+ synchronized (globalBlocks) {
+ final DBlock block = globalBlocks.get(blk.getBlock());
+ synchronized (block) {
+ block.clearLocations();
+
+ // update locations
+ final String[] datanodeUuids = blk.getDatanodeUuids();
+ final StorageType[] storageTypes = blk.getStorageTypes();
+ for (int i = 0; i < datanodeUuids.length; i++) {
+ final StorageGroup g = storageGroupMap.get(
+ datanodeUuids[i], storageTypes[i]);
+ if (g != null) { // not unknown
+ block.addLocation(g);
+ }
+ }
+ }
+ if (!srcBlocks.contains(block) && isGoodBlockCandidate(block)) {
+ // filter bad candidates
+ srcBlocks.add(block);
+ }
+ }
+ }
+ return bytesReceived;
+ }
+
+ /** Decide if the given block is a good candidate to move or not */
+ private boolean isGoodBlockCandidate(DBlock block) {
+ for (Task t : tasks) {
+ if (Dispatcher.this.isGoodBlockCandidate(this, t.target, block)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Choose a move for the source. The block's source, target, and proxy
+ * are determined too. When choosing the proxy and the target, source and
+ * target throttling is taken into account. They are chosen only when they
+ * have the capacity to support this block move. The block should be
+ * dispatched immediately after this method returns.
+ *
+ * @return a move that's good for the source to dispatch immediately.
+ */
+ private PendingMove chooseNextMove() {
+ for (Iterator<Task> i = tasks.iterator(); i.hasNext();) {
+ final Task task = i.next();
+ final DDatanode target = task.target.getDDatanode();
+ PendingMove pendingBlock = new PendingMove();
+ if (target.addPendingBlock(pendingBlock)) {
+ // target is not busy, so do a tentative block allocation
+ pendingBlock.source = this;
+ pendingBlock.target = task.target;
+ if (pendingBlock.chooseBlockAndProxy()) {
+ long blockSize = pendingBlock.block.getNumBytes();
+ incScheduledSize(-blockSize);
+ task.size -= blockSize;
+ if (task.size == 0) {
+ i.remove();
+ }
+ return pendingBlock;
+ } else {
+ // cancel the tentative move
+ target.removePendingBlock(pendingBlock);
+ }
+ }
+ }
+ return null;
+ }
+
+ /** Iterate all source's blocks to remove moved ones */
+ private void removeMovedBlocks() {
+ for (Iterator<DBlock> i = getBlockIterator(); i.hasNext();) {
+ if (movedBlocks.contains(i.next().getBlock())) {
+ i.remove();
+ }
+ }
+ }
+
+ private static final int SOURCE_BLOCKS_MIN_SIZE = 5;
+
+ /** @return if should fetch more blocks from namenode */
+ private boolean shouldFetchMoreBlocks() {
+ return srcBlocks.size() < SOURCE_BLOCKS_MIN_SIZE && blocksToReceive > 0;
+ }
+
+ private static final long MAX_ITERATION_TIME = 20 * 60 * 1000L; // 20 mins
+
+ /**
+ * This method iteratively does the following: it first selects a block to
+ * move, then sends a request to the proxy source to start the block move.
+ * When the source's block list falls below a threshold, it asks the
+ * namenode for more blocks. It terminates when it has dispatched enough
+ * block move tasks, received enough blocks from the namenode, or the
+ * elapsed time of the iteration has exceeded the max time limit.
+ */
+ private void dispatchBlocks() {
+ final long startTime = Time.monotonicNow();
+ this.blocksToReceive = 2 * getScheduledSize();
+ boolean isTimeUp = false;
+ int noPendingMoveIteration = 0;
+ while (!isTimeUp && getScheduledSize() > 0
+ && (!srcBlocks.isEmpty() || blocksToReceive > 0)) {
+ final PendingMove p = chooseNextMove();
+ if (p != null) {
+ // move the block
+ moveExecutor.execute(new Runnable() {
+ @Override
+ public void run() {
+ p.dispatch();
+ }
+ });
+ continue;
+ }
+
+ // Since we cannot schedule any block to move,
+ // remove any moved blocks from the source block list.
+ removeMovedBlocks(); // filter already moved blocks
+ // check if we should fetch more blocks from the namenode
+ if (shouldFetchMoreBlocks()) {
+ // fetch new blocks
+ try {
+ blocksToReceive -= getBlockList();
+ continue;
+ } catch (IOException e) {
+ LOG.warn("Exception while getting block list", e);
+ return;
+ }
+ } else {
+ // source node cannot find a pending block to move, iteration +1
+ noPendingMoveIteration++;
+ // in case no blocks can be moved for source node's task,
+ // jump out of while-loop after 5 iterations.
+ if (noPendingMoveIteration >= MAX_NO_PENDING_MOVE_ITERATIONS) {
+ resetScheduledSize();
+ }
+ }
+
+ // check if time is up or not
+ if (Time.monotonicNow() - startTime > MAX_ITERATION_TIME) {
+ isTimeUp = true;
+ continue;
+ }
+
+ // Now we cannot schedule any block to move and there are
+ // no new blocks added to the source block list, so we wait.
+ try {
+ synchronized (Dispatcher.this) {
+ Dispatcher.this.wait(1000); // wait for targets/sources to be idle
+ }
+ } catch (InterruptedException ignored) {
+ }
+ }
+ }
+ }
+
+ public Dispatcher(NameNodeConnector nnc, Set<String> includedNodes,
+ Set<String> excludedNodes, long movedWinWidth, int moverThreads,
+ int dispatcherThreads, int maxConcurrentMovesPerNode, Configuration conf) {
+ this.nnc = nnc;
+ this.excludedNodes = excludedNodes;
+ this.includedNodes = includedNodes;
+ this.movedBlocks = new MovedBlocks<StorageGroup>(movedWinWidth);
+
+ this.cluster = NetworkTopology.getInstance(conf);
+
+ this.moveExecutor = Executors.newFixedThreadPool(moverThreads);
+ this.dispatchExecutor = Executors.newFixedThreadPool(dispatcherThreads);
+ this.maxConcurrentMovesPerNode = maxConcurrentMovesPerNode;
+
+ final boolean fallbackToSimpleAuthAllowed = conf.getBoolean(
+ CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY,
+ CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT);
+ this.saslClient = new SaslDataTransferClient(
+ DataTransferSaslUtil.getSaslPropertiesResolver(conf),
+ TrustedChannelResolver.getInstance(conf), fallbackToSimpleAuthAllowed);
+ }
+
+ StorageGroupMap getStorageGroupMap() {
+ return storageGroupMap;
+ }
+
+ NetworkTopology getCluster() {
+ return cluster;
+ }
+
+ long getBytesMoved() {
+ return bytesMoved.get();
+ }
+
+ long bytesToMove() {
+ Preconditions.checkState(
+ storageGroupMap.size() >= sources.size() + targets.size(),
+ "Mismatched number of storage groups (" + storageGroupMap.size()
+ + " < " + sources.size() + " sources + " + targets.size()
+ + " targets)");
+
+ long b = 0L;
+ for (Source src : sources) {
+ b += src.getScheduledSize();
+ }
+ return b;
+ }
+
+ void add(Source source, StorageGroup target) {
+ sources.add(source);
+ targets.add(target);
+ }
+
+ private boolean shouldIgnore(DatanodeInfo dn) {
+ // ignore decommissioned nodes
+ final boolean decommissioned = dn.isDecommissioned();
+ // ignore decommissioning nodes
+ final boolean decommissioning = dn.isDecommissionInProgress();
+ // ignore nodes in exclude list
+ final boolean excluded = Util.isExcluded(excludedNodes, dn);
+ // ignore nodes not in the include list (if include list is not empty)
+ final boolean notIncluded = !Util.isIncluded(includedNodes, dn);
+
+ if (decommissioned || decommissioning || excluded || notIncluded) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("Excluding datanode " + dn + ": " + decommissioned + ", "
+ + decommissioning + ", " + excluded + ", " + notIncluded);
+ }
+ return true;
+ }
+ return false;
+ }
+
+ /** Get live datanode storage reports and then build the network topology. */
+ List<DatanodeStorageReport> init() throws IOException {
+ final DatanodeStorageReport[] reports = nnc.getLiveDatanodeStorageReport();
+ final List<DatanodeStorageReport> trimmed = new ArrayList<DatanodeStorageReport>();
+ // create network topology and classify utilization collections:
+ // over-utilized, above-average, below-average and under-utilized.
+ for (DatanodeStorageReport r : DFSUtil.shuffle(reports)) {
+ final DatanodeInfo datanode = r.getDatanodeInfo();
+ if (shouldIgnore(datanode)) {
+ continue;
+ }
+ trimmed.add(r);
+ cluster.add(datanode);
+ }
+ return trimmed;
+ }
+
+ public DDatanode newDatanode(DatanodeStorageReport r) {
+ return new DDatanode(r, maxConcurrentMovesPerNode);
+ }
+
+ public boolean dispatchAndCheckContinue() throws InterruptedException {
+ return nnc.shouldContinue(dispatchBlockMoves());
+ }
+
+ /**
+ * Dispatch block moves for each source. A dispatcher thread selects blocks
+ * to move and sends a request to the proxy source to initiate the block
+ * move. The process is flow controlled: block selection is blocked if there
+ * are too many unconfirmed block moves.
+ *
+ * @return the total number of bytes successfully moved in this iteration.
+ */
+ private long dispatchBlockMoves() throws InterruptedException {
+ final long bytesLastMoved = bytesMoved.get();
+ final Future<?>[] futures = new Future<?>[sources.size()];
+
+ final Iterator<Source> i = sources.iterator();
+ for (int j = 0; j < futures.length; j++) {
+ final Source s = i.next();
+ futures[j] = dispatchExecutor.submit(new Runnable() {
+ @Override
+ public void run() {
+ s.dispatchBlocks();
+ }
+ });
+ }
+
+ // wait for all dispatcher threads to finish
+ for (Future<?> future : futures) {
+ try {
+ future.get();
+ } catch (ExecutionException e) {
+ LOG.warn("Dispatcher thread failed", e.getCause());
+ }
+ }
+
+ // wait for all block moving to be done
+ waitForMoveCompletion();
+
+ return bytesMoved.get() - bytesLastMoved;
+ }
+
+ /** The sleeping period before checking if block move is completed again */
+ static private long blockMoveWaitTime = 30000L;
+
+ /** set the sleeping period for block move completion check */
+ static void setBlockMoveWaitTime(long time) {
+ blockMoveWaitTime = time;
+ }
+
+ /** Wait for all block move confirmations. */
+ private void waitForMoveCompletion() {
+ for(;;) {
+ boolean empty = true;
+ for (StorageGroup t : targets) {
+ if (!t.getDDatanode().isPendingQEmpty()) {
+ empty = false;
+ break;
+ }
+ }
+ if (empty) {
+ return; //all pending queues are empty
+ }
+ try {
+ Thread.sleep(blockMoveWaitTime);
+ } catch (InterruptedException ignored) {
+ }
+ }
+ }
+
+ /**
+ * Decide if the block is a good candidate to be moved from source to target.
+ * A block is a good candidate if
+ * 1. the block is not in the process of being moved/has not been moved;
+ * 2. the block does not have a replica on the target;
+ * 3. doing the move does not reduce the number of racks that the block has
+ */
+ private boolean isGoodBlockCandidate(Source source, StorageGroup target,
+ DBlock block) {
+ if (source.storageType != target.storageType) {
+ return false;
+ }
+ // check if the block is moved or not
+ if (movedBlocks.contains(block.getBlock())) {
+ return false;
+ }
+ if (block.isLocatedOn(target)) {
+ return false;
+ }
+ if (cluster.isNodeGroupAware()
+ && isOnSameNodeGroupWithReplicas(target, block, source)) {
+ return false;
+ }
+ if (reduceNumOfRacks(source, target, block)) {
+ return false;
+ }
+ return true;
+ }
+
+ /**
+ * Determine whether moving the given block replica from source to target
+ * would reduce the number of racks of the block replicas.
+ */
+ private boolean reduceNumOfRacks(Source source, StorageGroup target,
+ DBlock block) {
+ final DatanodeInfo sourceDn = source.getDatanodeInfo();
+ if (cluster.isOnSameRack(sourceDn, target.getDatanodeInfo())) {
+ // source and target are on the same rack
+ return false;
+ }
+ boolean notOnSameRack = true;
+ synchronized (block) {
+ for (StorageGroup loc : block.getLocations()) {
+ if (cluster.isOnSameRack(loc.getDatanodeInfo(), target.getDatanodeInfo())) {
+ notOnSameRack = false;
+ break;
+ }
+ }
+ }
+ if (notOnSameRack) {
+ // target is not on the same rack as any replica
+ return false;
+ }
+ for (StorageGroup g : block.getLocations()) {
+ if (g != source && cluster.isOnSameRack(g.getDatanodeInfo(), sourceDn)) {
+ // source is on the same rack of another replica
+ return false;
+ }
+ }
+ return true;
+ }
+
+ /**
+ * Check if there are any replica (other than source) on the same node group
+ * with target. If true, then target is not a good candidate for placing
+ * specific replica as we don't want 2 replicas under the same nodegroup.
+ *
+ * @return true if there are any replica (other than source) on the same node
+ * group with target
+ */
+ private boolean isOnSameNodeGroupWithReplicas(
+ StorageGroup target, DBlock block, Source source) {
+ final DatanodeInfo targetDn = target.getDatanodeInfo();
+ for (StorageGroup g : block.getLocations()) {
+ if (g != source && cluster.isOnSameNodeGroup(g.getDatanodeInfo(), targetDn)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ /** Reset all fields in order to prepare for the next iteration */
+ void reset(Configuration conf) {
+ cluster = NetworkTopology.getInstance(conf);
+ storageGroupMap.clear();
+ sources.clear();
+ targets.clear();
+ globalBlocks.removeAllButRetain(movedBlocks);
+ movedBlocks.cleanup();
+ }
+
+ /** shutdown thread pools */
+ void shutdownNow() {
+ dispatchExecutor.shutdownNow();
+ moveExecutor.shutdownNow();
+ }
+
+ static class Util {
+ /** @return true if data node is part of the excludedNodes. */
+ static boolean isExcluded(Set<String> excludedNodes, DatanodeInfo dn) {
+ return isIn(excludedNodes, dn);
+ }
+
+ /**
+ * @return true if includedNodes is empty or data node is part of the
+ * includedNodes.
+ */
+ static boolean isIncluded(Set<String> includedNodes, DatanodeInfo dn) {
+ return (includedNodes.isEmpty() || isIn(includedNodes, dn));
+ }
+
+ /**
+ * Match is checked using the host name or IP address, with and without the
+ * port number.
+ *
+ * @return true if the datanode's transfer address matches the set of nodes.
+ */
+ private static boolean isIn(Set<String> datanodes, DatanodeInfo dn) {
+ return isIn(datanodes, dn.getPeerHostName(), dn.getXferPort())
+ || isIn(datanodes, dn.getIpAddr(), dn.getXferPort())
+ || isIn(datanodes, dn.getHostName(), dn.getXferPort());
+ }
+
+ /** @return true if nodes contains host or host:port */
+ private static boolean isIn(Set<String> nodes, String host, int port) {
+ if (host == null) {
+ return false;
+ }
+ return (nodes.contains(host) || nodes.contains(host + ":" + port));
+ }
+
+ /**
+ * Parse a comma separated string to obtain set of host names
+ *
+ * @return set of host names
+ */
+ static Set<String> parseHostList(String string) {
+ String[] addrs = StringUtils.getTrimmedStrings(string);
+ return new HashSet<String>(Arrays.asList(addrs));
+ }
+
+ /**
+ * Read set of host names from a file
+ *
+ * @return set of host names
+ */
+ static Set<String> getHostListFromFile(String fileName, String type) {
+ Set<String> nodes = new HashSet<String>();
+ try {
+ HostsFileReader.readFileToSet(type, fileName, nodes);
+ return StringUtils.getTrimmedStrings(nodes);
+ } catch (IOException e) {
+ throw new IllegalArgumentException(
+ "Failed to read host list from file: " + fileName);
+ }
+ }
+ }
+}
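
Editorial aside, not part of the patch itself: with this refactoring, Balancer#run drives the Dispatcher once per iteration: init() collects live storage reports and builds the topology, dispatchAndCheckContinue() runs the moves, and shutdownNow() always runs in finally. A sketch of that call order against a hypothetical DispatcherLike stand-in; the numeric returns mirror the ExitStatus codes introduced below:

import java.io.IOException;
import java.util.List;

interface DispatcherLike {
  List<?> init() throws IOException;
  boolean dispatchAndCheckContinue() throws InterruptedException;
  void shutdownNow();
}

class IterationSketch {
  // One balancer iteration, following the order used in Balancer#run above.
  static int runOnce(DispatcherLike dispatcher) {
    try {
      dispatcher.init();                            // live storage reports + topology
      if (!dispatcher.dispatchAndCheckContinue()) {
        return -3;                                  // NO_MOVE_PROGRESS
      }
      return 1;                                     // IN_PROGRESS
    } catch (IOException e) {
      return -4;                                    // IO_EXCEPTION
    } catch (InterruptedException e) {
      return -6;                                    // INTERRUPTED
    } finally {
      dispatcher.shutdownNow();                     // always stop the thread pools
    }
  }
}
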
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/event/RMAppAttemptNewSavedEvent.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/ExitStatus.java
similarity index 52%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/event/RMAppAttemptNewSavedEvent.java
rename to hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/ExitStatus.java
index 97611bc34a..e36258ffca 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/event/RMAppAttemptNewSavedEvent.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/ExitStatus.java
@@ -1,4 +1,4 @@
-/*
+/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -15,25 +15,30 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+package org.apache.hadoop.hdfs.server.balancer;
-package org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event;
+/**
+ * Exit status - the value associated with each exit status maps directly
+ * to the process's exit code on the command line.
+ */
+public enum ExitStatus {
+ SUCCESS(0),
+ IN_PROGRESS(1),
+ ALREADY_RUNNING(-1),
+ NO_MOVE_BLOCK(-2),
+ NO_MOVE_PROGRESS(-3),
+ IO_EXCEPTION(-4),
+ ILLEGAL_ARGUMENTS(-5),
+ INTERRUPTED(-6);
-import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType;
+ private final int code;
-public class RMAppAttemptNewSavedEvent extends RMAppAttemptEvent {
-
- final Exception storedException;
-
- public RMAppAttemptNewSavedEvent(ApplicationAttemptId appAttemptId,
- Exception storedException) {
- super(appAttemptId, RMAppAttemptEventType.ATTEMPT_NEW_SAVED);
- this.storedException = storedException;
+ private ExitStatus(int code) {
+ this.code = code;
}
- public Exception getStoredException() {
- return storedException;
+ /** @return the command line exit code. */
+ public int getExitCode() {
+ return code;
}
-
-}
+}
\ No newline at end of file
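
Editorial aside, not part of the patch itself: ExitStatus attaches a numeric code to each outcome so callers can feed it straight to the process exit. A small usage sketch with a simplified enum of the same shape:

class ExitStatusSketch {
  enum Status {
    SUCCESS(0), IN_PROGRESS(1), IO_EXCEPTION(-4);

    private final int code;

    Status(int code) {
      this.code = code;
    }

    int getExitCode() {
      return code;
    }
  }

  public static void main(String[] args) {
    Status r = Status.SUCCESS;
    // A command-line driver would typically end with System.exit(r.getExitCode()).
    System.out.println(r + " -> " + r.getExitCode());
  }
}
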
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
index 3d1c4e6946..820a4edd57 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
@@ -34,6 +34,10 @@
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.io.IOUtils;
@@ -90,14 +94,16 @@ public String getBlockpoolID() {
return blockpoolID;
}
- /** @return the namenode proxy. */
- public NamenodeProtocol getNamenode() {
- return namenode;
+ /** @return blocks with locations. */
+ public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size)
+ throws IOException {
+ return namenode.getBlocks(datanode, size);
}
- /** @return the client proxy. */
- public ClientProtocol getClient() {
- return client;
+ /** @return live datanode storage reports. */
+ public DatanodeStorageReport[] getLiveDatanodeStorageReport()
+ throws IOException {
+ return client.getDatanodeStorageReport(DatanodeReportType.LIVE);
}
/** @return the key manager */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 69b2b69541..709f060d23 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -135,7 +135,10 @@ public class DatanodeManager {
/** The number of stale DataNodes */
private volatile int numStaleNodes;
-
+
+ /** The number of stale storages */
+ private volatile int numStaleStorages;
+
/**
* Whether or not this cluster has ever consisted of more than 1 rack,
* according to the NetworkTopology.
@@ -1142,6 +1145,22 @@ public int getNumStaleNodes() {
return this.numStaleNodes;
}
+ /**
+ * Get the number of content stale storages.
+ */
+ public int getNumStaleStorages() {
+ return numStaleStorages;
+ }
+
+ /**
+ * Set the number of content stale storages.
+ *
+ * @param numStaleStorages The number of content stale storages.
+ */
+ void setNumStaleStorages(int numStaleStorages) {
+ this.numStaleStorages = numStaleStorages;
+ }
+
/** Fetch live and dead datanodes. */
public void fetchDatanodes(final List<DatanodeDescriptor> live,
final List<DatanodeDescriptor> dead, final boolean removeDecommissionNode) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
index 901f7e3653..a4f839afd3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
@@ -256,6 +256,7 @@ void heartbeatCheck() {
DatanodeID dead = null;
// check the number of stale nodes
int numOfStaleNodes = 0;
+ int numOfStaleStorages = 0;
synchronized(this) {
for (DatanodeDescriptor d : datanodes) {
if (dead == null && dm.isDatanodeDead(d)) {
@@ -265,10 +266,17 @@ void heartbeatCheck() {
if (d.isStale(dm.getStaleInterval())) {
numOfStaleNodes++;
}
+ DatanodeStorageInfo[] storageInfos = d.getStorageInfos();
+ for(DatanodeStorageInfo storageInfo : storageInfos) {
+ if (storageInfo.areBlockContentsStale()) {
+ numOfStaleStorages++;
+ }
+ }
}
// Set the number of stale nodes in the DatanodeManager
dm.setNumStaleNodes(numOfStaleNodes);
+ dm.setNumStaleStorages(numOfStaleStorages);
}
allAlive = dead == null;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
index 39e842ccfd..822c03d8c3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
@@ -601,7 +601,7 @@ boolean processCommandFromActor(DatanodeCommand cmd,
LOG.info("DatanodeCommand action : DNA_REGISTER from " + actor.nnAddr
+ " with " + actor.state + " state");
actor.reRegister();
- return true;
+ return false;
}
writeLock();
try {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index 83237e60cc..59ca11a540 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -222,7 +222,19 @@ private void connectToNNAndHandshake() throws IOException {
// Second phase of the handshake with the NN.
register();
}
-
+
+ // This is useful to make sure NN gets Heartbeat before Blockreport
+ // upon NN restart while DN keeps retrying. Otherwise:
+ // 1. NN restarts.
+ // 2. Heartbeat RPC will retry and succeed. NN asks DN to reregister.
+ // 3. After reregistration completes, DN will send Blockreport first.
+ // 4. Because NN only clears DatanodeStorageInfo#blockContentsStale when a
+ // Blockreport arrives after a Heartbeat, the flag stays set until the
+ // next Blockreport.
+ void scheduleHeartbeat() {
+ lastHeartbeat = 0;
+ }
+
/**
* This methods arranges for the data node to send the block report at
* the next heartbeat.
@@ -902,6 +914,7 @@ void reRegister() throws IOException {
retrieveNamespaceInfo();
// and re-register
register();
+ scheduleHeartbeat();
}
}
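
Editorial aside, not part of the patch itself: scheduleHeartbeat() works by zeroing the last-heartbeat timestamp, so the actor's next loop pass sends a heartbeat immediately after re-registration and the NN sees it before the block report. A minimal sketch of that timing trick:

class HeartbeatScheduleSketch {
  private final long heartbeatIntervalMillis;
  private volatile long lastHeartbeat;

  HeartbeatScheduleSketch(long heartbeatIntervalMillis) {
    this.heartbeatIntervalMillis = heartbeatIntervalMillis;
    this.lastHeartbeat = System.currentTimeMillis();
  }

  // Zeroing the timestamp forces the next loop pass to heartbeat right away.
  void scheduleHeartbeat() {
    lastHeartbeat = 0;
  }

  // One pass of the service loop: heartbeat only when the interval has elapsed.
  boolean loopOnce() {
    long now = System.currentTimeMillis();
    if (now - lastHeartbeat >= heartbeatIntervalMillis) {
      lastHeartbeat = now;
      return true; // a real actor would send the heartbeat RPC here
    }
    return false;
  }

  public static void main(String[] args) {
    HeartbeatScheduleSketch actor = new HeartbeatScheduleSketch(3000);
    System.out.println(actor.loopOnce()); // false: interval not yet elapsed
    actor.scheduleHeartbeat();
    System.out.println(actor.loopOnce()); // true: forced immediately
  }
}
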
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
index d065b5736e..8e65dd0b54 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
@@ -36,8 +36,10 @@
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
+import java.util.HashSet;
import java.util.Iterator;
import java.util.Properties;
+import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
@@ -106,13 +108,22 @@ private BlockPoolSliceStorage() {
void recoverTransitionRead(DataNode datanode, NamespaceInfo nsInfo,
Collection<File> dataDirs, StartupOption startOpt) throws IOException {
LOG.info("Analyzing storage directories for bpid " + nsInfo.getBlockPoolID());
+ Set<String> existingStorageDirs = new HashSet<String>();
+ for (int i = 0; i < getNumStorageDirs(); i++) {
+ existingStorageDirs.add(getStorageDir(i).getRoot().getAbsolutePath());
+ }
+
// 1. For each BP data directory analyze the state and
// check whether all is consistent before transitioning.
- this.storageDirs = new ArrayList(dataDirs.size());
ArrayList<StorageState> dataDirStates = new ArrayList<StorageState>(
dataDirs.size());
for (Iterator<File> it = dataDirs.iterator(); it.hasNext();) {
File dataDir = it.next();
+ if (existingStorageDirs.contains(dataDir.getAbsolutePath())) {
+ LOG.info("Storage directory " + dataDir + " has already been used.");
+ it.remove();
+ continue;
+ }
StorageDirectory sd = new StorageDirectory(dataDir, null, true);
StorageState curState;
try {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
index 5a55d094e1..4b9656eb8e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
@@ -55,6 +55,7 @@
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
+import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
@@ -172,43 +173,99 @@ public String getTrashDirectoryForBlockFile(String bpid, File blockFile) {
}
/**
- * Analyze storage directories.
- * Recover from previous transitions if required.
- * Perform fs state transition if necessary depending on the namespace info.
- * Read storage info.
- *
- * This method should be synchronized between multiple DN threads. Only the
- * first DN thread does DN level storage dir recoverTransitionRead.
- *
+ * {{@inheritDoc org.apache.hadoop.hdfs.server.common.Storage#writeAll()}}
+ */
+ private void writeAll(Collection<StorageDirectory> dirs) throws IOException {
+ this.layoutVersion = getServiceLayoutVersion();
+ for (StorageDirectory dir : dirs) {
+ writeProperties(dir);
+ }
+ }
+
+ /**
+ * Add a list of volumes to be managed by DataStorage. If the volume is empty,
+ * format it, otherwise recover it from previous transitions if required.
+ *
+ * @param datanode the reference to DataNode.
* @param nsInfo namespace information
* @param dataDirs array of data storage directories
* @param startOpt startup option
* @throws IOException
*/
- synchronized void recoverTransitionRead(DataNode datanode,
+ synchronized void addStorageLocations(DataNode datanode,
+ NamespaceInfo nsInfo, Collection<StorageLocation> dataDirs,
StartupOption startOpt)
throws IOException {
- if (initialized) {
- // DN storage has been initialized, no need to do anything
- return;
+ // Similar to recoverTransitionRead, it first ensures the datanode level
+ // format is completed.
+ List<StorageLocation> tmpDataDirs =
+ new ArrayList<StorageLocation>(dataDirs);
+ addStorageLocations(datanode, nsInfo, tmpDataDirs, startOpt, false, true);
+
+ Collection<File> bpDataDirs = new ArrayList<File>();
+ String bpid = nsInfo.getBlockPoolID();
+ for (StorageLocation dir : dataDirs) {
+ File dnRoot = dir.getFile();
+ File bpRoot = BlockPoolSliceStorage.getBpRoot(bpid, new File(dnRoot,
+ STORAGE_DIR_CURRENT));
+ bpDataDirs.add(bpRoot);
}
- LOG.info("Data-node version: " + HdfsConstants.DATANODE_LAYOUT_VERSION
- + " and name-node layout version: " + nsInfo.getLayoutVersion());
-
- // 1. For each data directory calculate its state and
- // check whether all is consistent before transitioning.
- // Format and recover.
- this.storageDirs = new ArrayList(dataDirs.size());
- ArrayList dataDirStates = new ArrayList(dataDirs.size());
+ // mkdir for the list of BlockPoolStorage
+ makeBlockPoolDataDir(bpDataDirs, null);
+ BlockPoolSliceStorage bpStorage = this.bpStorageMap.get(bpid);
+ if (bpStorage == null) {
+ bpStorage = new BlockPoolSliceStorage(
+ nsInfo.getNamespaceID(), bpid, nsInfo.getCTime(),
+ nsInfo.getClusterID());
+ }
+
+ bpStorage.recoverTransitionRead(datanode, nsInfo, bpDataDirs, startOpt);
+ addBlockPoolStorage(bpid, bpStorage);
+ }
+
+ /**
+ * Add a list of volumes to be managed by this DataStorage. If the volume is
+ * empty, it formats the volume, otherwise it recovers it from previous
+ * transitions if required.
+ *
+ * If isInitialize is false, only the directories that have finished the
+ * doTransition() process will be added into DataStorage.
+ *
+ * @param datanode the reference to DataNode.
+ * @param nsInfo namespace information
+ * @param dataDirs array of data storage directories
+ * @param startOpt startup option
+ * @param isInitialize whether it is called when DataNode starts up.
+ * @throws IOException
+ */
+ private synchronized void addStorageLocations(DataNode datanode,
+ NamespaceInfo nsInfo, Collection<StorageLocation> dataDirs,
+ StartupOption startOpt, boolean isInitialize, boolean ignoreExistingDirs)
+ throws IOException {
+ Set<String> existingStorageDirs = new HashSet<String>();
+ for (int i = 0; i < getNumStorageDirs(); i++) {
+ existingStorageDirs.add(getStorageDir(i).getRoot().getAbsolutePath());
+ }
+
+ // 1. For each data directory calculate its state and check whether all is
+ // consistent before transitioning. Format and recover.
+ ArrayList<StorageState> dataDirStates =
+ new ArrayList<StorageState>(dataDirs.size());
+ List<StorageDirectory> addedStorageDirectories =
+ new ArrayList<StorageDirectory>();
for(Iterator<StorageLocation> it = dataDirs.iterator(); it.hasNext();) {
File dataDir = it.next().getFile();
+ if (existingStorageDirs.contains(dataDir.getAbsolutePath())) {
+ LOG.info("Storage directory " + dataDir + " has already been used.");
+ it.remove();
+ continue;
+ }
StorageDirectory sd = new StorageDirectory(dataDir);
StorageState curState;
try {
curState = sd.analyzeStorage(startOpt, this);
// sd is locked but not opened
- switch(curState) {
+ switch (curState) {
case NORMAL:
break;
case NON_EXISTENT:
@@ -217,7 +274,8 @@ synchronized void recoverTransitionRead(DataNode datanode,
it.remove();
continue;
case NOT_FORMATTED: // format
- LOG.info("Storage directory " + dataDir + " is not formatted");
+ LOG.info("Storage directory " + dataDir + " is not formatted for "
+ + nsInfo.getBlockPoolID());
LOG.info("Formatting ...");
format(sd, nsInfo, datanode.getDatanodeUuid());
break;
@@ -231,33 +289,82 @@ synchronized void recoverTransitionRead(DataNode datanode,
//continue with other good dirs
continue;
}
- // add to the storage list
- addStorageDir(sd);
+ if (isInitialize) {
+ addStorageDir(sd);
+ }
+ addedStorageDirectories.add(sd);
dataDirStates.add(curState);
}
- if (dataDirs.size() == 0 || dataDirStates.size() == 0) // none of the data dirs exist
+ if (dataDirs.size() == 0 || dataDirStates.size() == 0) {
+ // none of the data dirs exist
+ if (ignoreExistingDirs) {
+ return;
+ }
throw new IOException(
"All specified directories are not accessible or do not exist.");
+ }
// 2. Do transitions
// Each storage directory is treated individually.
- // During startup some of them can upgrade or rollback
- // while others could be uptodate for the regular startup.
- try {
- for (int idx = 0; idx < getNumStorageDirs(); idx++) {
- doTransition(datanode, getStorageDir(idx), nsInfo, startOpt);
- createStorageID(getStorageDir(idx));
+ // During startup some of them can upgrade or rollback
+ // while others could be up-to-date for the regular startup.
+ for (Iterator<StorageDirectory> it = addedStorageDirectories.iterator();
+ it.hasNext(); ) {
+ StorageDirectory sd = it.next();
+ try {
+ doTransition(datanode, sd, nsInfo, startOpt);
+ createStorageID(sd);
+ } catch (IOException e) {
+ if (!isInitialize) {
+ sd.unlock();
+ it.remove();
+ continue;
+ }
+ unlockAll();
+ throw e;
}
- } catch (IOException e) {
- unlockAll();
- throw e;
}
- // 3. Update all storages. Some of them might have just been formatted.
- this.writeAll();
+ // 3. Update all successfully loaded storages. Some of them might have just
+ // been formatted.
+ this.writeAll(addedStorageDirectories);
+
+ // 4. Make newly loaded storage directories visible for service.
+ if (!isInitialize) {
+ this.storageDirs.addAll(addedStorageDirectories);
+ }
+ }
+
+ /**
+ * Analyze storage directories.
+ * Recover from previous transitions if required.
+ * Perform fs state transition if necessary depending on the namespace info.
+ * Read storage info.
+ *
+ * This method is synchronized across multiple DN threads; only the first
+ * thread performs the DataNode-level storage directory recoverTransitionRead.
+ *
+ * @param nsInfo namespace information
+ * @param dataDirs array of data storage directories
+ * @param startOpt startup option
+ * @throws IOException
+ */
+ synchronized void recoverTransitionRead(DataNode datanode,
+ NamespaceInfo nsInfo, Collection<StorageLocation> dataDirs,
+ StartupOption startOpt)
+ throws IOException {
+ if (initialized) {
+ // DN storage has been initialized, no need to do anything
+ return;
+ }
+ LOG.info("DataNode version: " + HdfsConstants.DATANODE_LAYOUT_VERSION
+ + " and NameNode layout version: " + nsInfo.getLayoutVersion());
+
+ this.storageDirs = new ArrayList<StorageDirectory>(dataDirs.size());
+ addStorageLocations(datanode, nsInfo, dataDirs, startOpt, true, false);
- // 4. mark DN storage is initialized
+ // mark DN storage as initialized
this.initialized = true;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java
index f85fcb115e..feb5ac9ac2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StorageLocation.java
@@ -78,7 +78,7 @@ public File getFile() {
* @return A StorageLocation object if successfully parsed, null otherwise.
* Does not throw any exceptions.
*/
- static StorageLocation parse(String rawLocation)
+ public static StorageLocation parse(String rawLocation)
throws IOException, SecurityException {
Matcher matcher = regex.matcher(rawLocation);
StorageType storageType = StorageType.DEFAULT;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
index 5e4f55e733..a64f9c0d58 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
@@ -22,6 +22,7 @@
import java.io.FileDescriptor;
import java.io.IOException;
import java.io.InputStream;
+import java.util.Collection;
import java.util.List;
import java.util.Map;
@@ -39,6 +40,7 @@
import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
import org.apache.hadoop.hdfs.server.datanode.Replica;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface;
+import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetFactory;
import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
@@ -91,6 +93,10 @@ public RollingLogs createRollingLogs(String bpid, String prefix
/** @return a list of volumes. */
public List<V> getVolumes();
+ /** Add a collection of StorageLocations to FsDataset. */
+ public void addVolumes(Collection<StorageLocation> volumes)
+ throws IOException;
+
/** @return a storage with the given storage ID */
public DatanodeStorage getStorage(final String storageUuid);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java
index b76cee48ed..539e97be4a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java
@@ -61,6 +61,7 @@ class FsDatasetAsyncDiskService {
private static final long THREADS_KEEP_ALIVE_SECONDS = 60;
private final DataNode datanode;
+ private final ThreadGroup threadGroup;
private Map<File, ThreadPoolExecutor> executors
= new HashMap<File, ThreadPoolExecutor>();
@@ -70,42 +71,52 @@ class FsDatasetAsyncDiskService {
*
* The AsyncDiskServices uses one ThreadPool per volume to do the async
* disk operations.
- *
- * @param volumes The roots of the data volumes.
*/
- FsDatasetAsyncDiskService(DataNode datanode, File[] volumes) {
+ FsDatasetAsyncDiskService(DataNode datanode) {
this.datanode = datanode;
+ this.threadGroup = new ThreadGroup(getClass().getSimpleName());
+ }
- final ThreadGroup threadGroup = new ThreadGroup(getClass().getSimpleName());
- // Create one ThreadPool per volume
- for (int v = 0 ; v < volumes.length; v++) {
- final File vol = volumes[v];
- ThreadFactory threadFactory = new ThreadFactory() {
- int counter = 0;
+ private void addExecutorForVolume(final File volume) {
+ ThreadFactory threadFactory = new ThreadFactory() {
+ int counter = 0;
- @Override
- public Thread newThread(Runnable r) {
- int thisIndex;
- synchronized (this) {
- thisIndex = counter++;
- }
- Thread t = new Thread(threadGroup, r);
- t.setName("Async disk worker #" + thisIndex +
- " for volume " + vol);
- return t;
- }
- };
+ @Override
+ public Thread newThread(Runnable r) {
+ int thisIndex;
+ synchronized (this) {
+ thisIndex = counter++;
+ }
+ Thread t = new Thread(threadGroup, r);
+ t.setName("Async disk worker #" + thisIndex +
+ " for volume " + volume);
+ return t;
+ }
+ };
- ThreadPoolExecutor executor = new ThreadPoolExecutor(
- CORE_THREADS_PER_VOLUME, MAXIMUM_THREADS_PER_VOLUME,
- THREADS_KEEP_ALIVE_SECONDS, TimeUnit.SECONDS,
- new LinkedBlockingQueue<Runnable>(), threadFactory);
+ ThreadPoolExecutor executor = new ThreadPoolExecutor(
+ CORE_THREADS_PER_VOLUME, MAXIMUM_THREADS_PER_VOLUME,
+ THREADS_KEEP_ALIVE_SECONDS, TimeUnit.SECONDS,
+ new LinkedBlockingQueue<Runnable>(), threadFactory);
- // This can reduce the number of running threads
- executor.allowCoreThreadTimeOut(true);
- executors.put(vol, executor);
+ // This can reduce the number of running threads
+ executor.allowCoreThreadTimeOut(true);
+ executors.put(volume, executor);
+ }
+
+ /**
+ * Starts AsyncDiskService for a new volume
+ * @param volume the root of the new data volume.
+ */
+ synchronized void addVolume(File volume) {
+ if (executors == null) {
+ throw new RuntimeException("AsyncDiskService is already shutdown");
}
-
+ ThreadPoolExecutor executor = executors.get(volume);
+ if (executor != null) {
+ throw new RuntimeException("Volume " + volume + " already exists.");
+ }
+ addExecutorForVolume(volume);
}
synchronized long countPendingDeletions() {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index a43ef84920..148055c6f9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -202,6 +202,7 @@ public LengthInputStream getMetaDataInputStream(ExtendedBlock b)
final Map<String, DatanodeStorage> storageMap;
final FsDatasetAsyncDiskService asyncDiskService;
final FsDatasetCache cacheManager;
+ private final Configuration conf;
private final int validVolsRequired;
final ReplicaMap volumeMap;
@@ -216,6 +217,7 @@ public LengthInputStream getMetaDataInputStream(ExtendedBlock b)
) throws IOException {
this.datanode = datanode;
this.dataStorage = storage;
+ this.conf = conf;
// The number of volumes required for operation is the total number
// of volumes minus the number of failed volumes we can tolerate.
final int volFailuresTolerated =
@@ -242,38 +244,76 @@ public LengthInputStream getMetaDataInputStream(ExtendedBlock b)
}
storageMap = new HashMap<String, DatanodeStorage>();
- final List<FsVolumeImpl> volArray = new ArrayList<FsVolumeImpl>(
- storage.getNumStorageDirs());
- for (int idx = 0; idx < storage.getNumStorageDirs(); idx++) {
- Storage.StorageDirectory sd = storage.getStorageDir(idx);
- final File dir = sd.getCurrentDir();
- final StorageType storageType = getStorageTypeFromLocations(dataLocations, sd.getRoot());
- volArray.add(new FsVolumeImpl(this, sd.getStorageUuid(), dir, conf,
- storageType));
- LOG.info("Added volume - " + dir + ", StorageType: " + storageType);
- storageMap.put(sd.getStorageUuid(),
- new DatanodeStorage(sd.getStorageUuid(), DatanodeStorage.State.NORMAL, storageType));
- }
volumeMap = new ReplicaMap(this);
-
@SuppressWarnings("unchecked")
final VolumeChoosingPolicy<FsVolumeImpl> blockChooserImpl =
ReflectionUtils.newInstance(conf.getClass(
DFSConfigKeys.DFS_DATANODE_FSDATASET_VOLUME_CHOOSING_POLICY_KEY,
RoundRobinVolumeChoosingPolicy.class,
VolumeChoosingPolicy.class), conf);
- volumes = new FsVolumeList(volArray, volsFailed, blockChooserImpl);
- volumes.initializeReplicaMaps(volumeMap);
+ volumes = new FsVolumeList(volsFailed, blockChooserImpl);
+ asyncDiskService = new FsDatasetAsyncDiskService(datanode);
- File[] roots = new File[storage.getNumStorageDirs()];
for (int idx = 0; idx < storage.getNumStorageDirs(); idx++) {
- roots[idx] = storage.getStorageDir(idx).getCurrentDir();
+ addVolume(dataLocations, storage.getStorageDir(idx));
}
- asyncDiskService = new FsDatasetAsyncDiskService(datanode, roots);
+
cacheManager = new FsDatasetCache(this);
registerMBean(datanode.getDatanodeUuid());
}
+ private void addVolume(Collection<StorageLocation> dataLocations,
+ Storage.StorageDirectory sd) throws IOException {
+ final File dir = sd.getCurrentDir();
+ final StorageType storageType =
+ getStorageTypeFromLocations(dataLocations, sd.getRoot());
+
+ // If IOException raises from FsVolumeImpl() or getVolumeMap(), there is
+ // nothing needed to be rolled back to make various data structures, e.g.,
+ // storageMap and asyncDiskService, consistent.
+ FsVolumeImpl fsVolume = new FsVolumeImpl(
+ this, sd.getStorageUuid(), dir, this.conf, storageType);
+ fsVolume.getVolumeMap(volumeMap);
+
+ volumes.addVolume(fsVolume);
+ storageMap.put(sd.getStorageUuid(),
+ new DatanodeStorage(sd.getStorageUuid(),
+ DatanodeStorage.State.NORMAL,
+ storageType));
+ asyncDiskService.addVolume(sd.getCurrentDir());
+
+ LOG.info("Added volume - " + dir + ", StorageType: " + storageType);
+ }
+
+ /**
+ * Add a collection of StorageLocations to FsDataset.
+ *
+ * @pre dataStorage must already manage these volumes.
+ * @param volumes the storage locations to add
+ * @throws IOException
+ */
+ @Override
+ public synchronized void addVolumes(Collection<StorageLocation> volumes)
+ throws IOException {
+ final Collection<StorageLocation> dataLocations =
+ DataNode.getStorageLocations(this.conf);
+ Map<String, Storage.StorageDirectory> allStorageDirs =
+ new HashMap<String, Storage.StorageDirectory>();
+ for (int idx = 0; idx < dataStorage.getNumStorageDirs(); idx++) {
+ Storage.StorageDirectory sd = dataStorage.getStorageDir(idx);
+ allStorageDirs.put(sd.getRoot().getAbsolutePath(), sd);
+ }
+
+ for (StorageLocation vol : volumes) {
+ String key = vol.getFile().getAbsolutePath();
+ if (!allStorageDirs.containsKey(key)) {
+ LOG.warn("Attempt to add an invalid volume: " + vol.getFile());
+ } else {
+ addVolume(dataLocations, allStorageDirs.get(key));
+ }
+ }
+ }
+
private StorageType getStorageTypeFromLocations(
Collection<StorageLocation> dataLocations, File dir) {
for (StorageLocation dataLocation : dataLocations) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
index 59a5c9021c..d4f8adc011 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
@@ -40,9 +40,8 @@ class FsVolumeList {
private final VolumeChoosingPolicy<FsVolumeImpl> blockChooser;
private volatile int numFailedVolumes;
- FsVolumeList(List<FsVolumeImpl> volumes, int failedVols,
+ FsVolumeList(int failedVols,
VolumeChoosingPolicy<FsVolumeImpl> blockChooser) {
- this.volumes = Collections.unmodifiableList(volumes);
this.blockChooser = blockChooser;
this.numFailedVolumes = failedVols;
}
@@ -101,12 +100,6 @@ long getRemaining() throws IOException {
}
return remaining;
}
-
- void initializeReplicaMaps(ReplicaMap globalReplicaMap) throws IOException {
- for (FsVolumeImpl v : volumes) {
- v.getVolumeMap(globalReplicaMap);
- }
- }
void getAllVolumesMap(final String bpid, final ReplicaMap volumeMap) throws IOException {
long totalStartTime = Time.monotonicNow();
@@ -205,6 +198,19 @@ public String toString() {
return volumes.toString();
}
+ /**
+ * Dynamically adds a new volume to the set of volumes this DN manages.
+ * @param newVolume the new FsVolumeImpl instance.
+ */
+ synchronized void addVolume(FsVolumeImpl newVolume) {
+ // Make a copy of the current volume list and add the new volume to it.
+ final List<FsVolumeImpl> volumeList = volumes == null ?
+ new ArrayList<FsVolumeImpl>() :
+ new ArrayList<FsVolumeImpl>(volumes);
+ volumeList.add(newVolume);
+ volumes = Collections.unmodifiableList(volumeList);
+ FsDatasetImpl.LOG.info("Added new volume: " + newVolume.toString());
+ }
void addBlockPool(final String bpid, final Configuration conf) throws IOException {
long totalStartTime = Time.monotonicNow();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 1c99276506..15210b9ed1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -786,8 +786,6 @@ private static void validateRenameSource(String src, INodesInPath srcIIP)
checkSnapshot(srcInode, null);
}
-
-
private class RenameOperation {
private final INodesInPath srcIIP;
private final INodesInPath dstIIP;
@@ -820,7 +818,7 @@ private RenameOperation(String src, String dst, INodesInPath srcIIP, INodesInPat
// snapshot is taken on the dst tree, changes will be recorded in the latest
// snapshot of the src tree.
if (isSrcInSnapshot) {
- srcChild = srcChild.recordModification(srcIIP.getLatestSnapshotId());
+ srcChild.recordModification(srcIIP.getLatestSnapshotId());
}
// check srcChild for reference
@@ -950,8 +948,7 @@ Block[] unprotectedSetReplication(String src, short replication,
updateCount(iip, 0, dsDelta, true);
}
- file = file.setFileReplication(replication, iip.getLatestSnapshotId(),
- inodeMap);
+ file.setFileReplication(replication, iip.getLatestSnapshotId());
final short newBR = file.getBlockReplication();
// check newBR < oldBR case.
@@ -1234,8 +1231,7 @@ long unprotectedDelete(INodesInPath iip, BlocksMapUpdateInfo collectedBlocks,
// record modification
final int latestSnapshot = iip.getLatestSnapshotId();
- targetNode = targetNode.recordModification(latestSnapshot);
- iip.setLastINode(targetNode);
+ targetNode.recordModification(latestSnapshot);
// Remove the node from the namespace
long removed = removeLastINode(iip);
@@ -2161,7 +2157,7 @@ INodeDirectory unprotectedSetQuota(String src, long nsQuota, long dsQuota)
}
final int latest = iip.getLatestSnapshotId();
- dirNode = dirNode.recordModification(latest);
+ dirNode.recordModification(latest);
dirNode.setQuota(nsQuota, dsQuota);
return dirNode;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 6de30adae7..bb58e2e7ec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -2733,7 +2733,7 @@ LocatedBlock prepareFileForWrite(String src, INodeFile file,
boolean writeToEditLog,
int latestSnapshot, boolean logRetryCache)
throws IOException {
- file = file.recordModification(latestSnapshot);
+ file.recordModification(latestSnapshot);
final INodeFile cons = file.toUnderConstruction(leaseHolder, clientMachine);
leaseManager.addLease(cons.getFileUnderConstructionFeature()
@@ -4441,7 +4441,7 @@ private void finalizeINodeFileUnderConstruction(String src,
Preconditions.checkArgument(uc != null);
leaseManager.removeLease(uc.getClientName(), src);
- pendingFile = pendingFile.recordModification(latestSnapshot);
+ pendingFile.recordModification(latestSnapshot);
// The file is no longer pending.
// Create permanent INode, update blocks. No need to replace the inode here
@@ -6341,7 +6341,6 @@ void shutdown() {
blockManager.shutdown();
}
}
-
@Override // FSNamesystemMBean
public int getNumLiveDataNodes() {
@@ -6388,6 +6387,15 @@ public int getNumStaleDataNodes() {
return getBlockManager().getDatanodeManager().getNumStaleNodes();
}
+ /**
+ * Storages are marked as "content stale" after NN restart or fails over and
+ * before NN receives the first Heartbeat followed by the first Blockreport.
+ */
+ @Override // FSNamesystemMBean
+ public int getNumStaleStorages() {
+ return getBlockManager().getDatanodeManager().getNumStaleStorages();
+ }
+
/**
* Sets the current generation stamp for legacy blocks
*/
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index b1e4982165..c346be9c68 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -97,9 +97,9 @@ public final String getUserName() {
/** Set user */
final INode setUser(String user, int latestSnapshotId)
throws QuotaExceededException {
- final INode nodeToUpdate = recordModification(latestSnapshotId);
- nodeToUpdate.setUser(user);
- return nodeToUpdate;
+ recordModification(latestSnapshotId);
+ setUser(user);
+ return this;
}
/**
* @param snapshotId
@@ -122,9 +122,9 @@ public final String getGroupName() {
/** Set group */
final INode setGroup(String group, int latestSnapshotId)
throws QuotaExceededException {
- final INode nodeToUpdate = recordModification(latestSnapshotId);
- nodeToUpdate.setGroup(group);
- return nodeToUpdate;
+ recordModification(latestSnapshotId);
+ setGroup(group);
+ return this;
}
/**
@@ -148,9 +148,9 @@ public final FsPermission getFsPermission() {
/** Set the {@link FsPermission} of this {@link INode} */
INode setPermission(FsPermission permission, int latestSnapshotId)
throws QuotaExceededException {
- final INode nodeToUpdate = recordModification(latestSnapshotId);
- nodeToUpdate.setPermission(permission);
- return nodeToUpdate;
+ recordModification(latestSnapshotId);
+ setPermission(permission);
+ return this;
}
abstract AclFeature getAclFeature(int snapshotId);
@@ -164,18 +164,18 @@ public final AclFeature getAclFeature() {
final INode addAclFeature(AclFeature aclFeature, int latestSnapshotId)
throws QuotaExceededException {
- final INode nodeToUpdate = recordModification(latestSnapshotId);
- nodeToUpdate.addAclFeature(aclFeature);
- return nodeToUpdate;
+ recordModification(latestSnapshotId);
+ addAclFeature(aclFeature);
+ return this;
}
abstract void removeAclFeature();
final INode removeAclFeature(int latestSnapshotId)
throws QuotaExceededException {
- final INode nodeToUpdate = recordModification(latestSnapshotId);
- nodeToUpdate.removeAclFeature();
- return nodeToUpdate;
+ recordModification(latestSnapshotId);
+ removeAclFeature();
+ return this;
}
/**
@@ -199,9 +199,9 @@ public final XAttrFeature getXAttrFeature() {
final INode addXAttrFeature(XAttrFeature xAttrFeature, int latestSnapshotId)
throws QuotaExceededException {
- final INode nodeToUpdate = recordModification(latestSnapshotId);
- nodeToUpdate.addXAttrFeature(xAttrFeature);
- return nodeToUpdate;
+ recordModification(latestSnapshotId);
+ addXAttrFeature(xAttrFeature);
+ return this;
}
/**
@@ -211,9 +211,9 @@ final INode addXAttrFeature(XAttrFeature xAttrFeature, int latestSnapshotId)
final INode removeXAttrFeature(int lastestSnapshotId)
throws QuotaExceededException {
- final INode nodeToUpdate = recordModification(lastestSnapshotId);
- nodeToUpdate.removeXAttrFeature();
- return nodeToUpdate;
+ recordModification(lastestSnapshotId);
+ removeXAttrFeature();
+ return this;
}
/**
@@ -298,11 +298,8 @@ public final boolean shouldRecordInSrcSnapshot(final int latestInDst) {
* @param latestSnapshotId The id of the latest snapshot that has been taken.
* Note that it is {@link Snapshot#CURRENT_STATE_ID}
* if no snapshots have been taken.
- * @return The current inode, which usually is the same object of this inode.
- * However, in some cases, this inode may be replaced with a new inode
- * for maintaining snapshots. The current inode is then the new inode.
*/
- abstract INode recordModification(final int latestSnapshotId)
+ abstract void recordModification(final int latestSnapshotId)
throws QuotaExceededException;
/** Check whether it's a reference. */
@@ -652,9 +649,9 @@ public abstract INode updateModificationTime(long mtime, int latestSnapshotId)
/** Set the last modification time of inode. */
public final INode setModificationTime(long modificationTime,
int latestSnapshotId) throws QuotaExceededException {
- final INode nodeToUpdate = recordModification(latestSnapshotId);
- nodeToUpdate.setModificationTime(modificationTime);
- return nodeToUpdate;
+ recordModification(latestSnapshotId);
+ setModificationTime(modificationTime);
+ return this;
}
/**
@@ -682,9 +679,9 @@ public final long getAccessTime() {
*/
public final INode setAccessTime(long accessTime, int latestSnapshotId)
throws QuotaExceededException {
- final INode nodeToUpdate = recordModification(latestSnapshotId);
- nodeToUpdate.setAccessTime(accessTime);
- return nodeToUpdate;
+ recordModification(latestSnapshotId);
+ setAccessTime(accessTime);
+ return this;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
index 1d77347051..5e6a4b4f78 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -318,7 +318,7 @@ INodeReference.WithName replaceChild4ReferenceWithName(INode oldChild,
}
@Override
- public INodeDirectory recordModification(int latestSnapshotId)
+ public void recordModification(int latestSnapshotId)
throws QuotaExceededException {
if (isInLatestSnapshot(latestSnapshotId)
&& !shouldRecordInSrcSnapshot(latestSnapshotId)) {
@@ -330,7 +330,6 @@ public INodeDirectory recordModification(int latestSnapshotId)
// record self in the diff list if necessary
sf.getDiffs().saveSelf2Snapshot(latestSnapshotId, this, null);
}
- return this;
}
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index 730746013a..94fa686709 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -284,7 +284,7 @@ public INodeFileAttributes getSnapshotINode(final int snapshotId) {
}
@Override
- public INodeFile recordModification(final int latestSnapshotId)
+ public void recordModification(final int latestSnapshotId)
throws QuotaExceededException {
if (isInLatestSnapshot(latestSnapshotId)
&& !shouldRecordInSrcSnapshot(latestSnapshotId)) {
@@ -296,7 +296,6 @@ public INodeFile recordModification(final int latestSnapshotId)
// record self in the diff list if necessary
sf.getDiffs().saveSelf2Snapshot(latestSnapshotId, this, null);
}
- return this;
}
public FileDiffList getDiffs() {
@@ -344,11 +343,10 @@ public final void setFileReplication(short replication) {
/** Set the replication factor of this file. */
public final INodeFile setFileReplication(short replication,
- int latestSnapshotId, final INodeMap inodeMap)
- throws QuotaExceededException {
- final INodeFile nodeToUpdate = recordModification(latestSnapshotId);
- nodeToUpdate.setFileReplication(replication);
- return nodeToUpdate;
+ int latestSnapshotId) throws QuotaExceededException {
+ recordModification(latestSnapshotId);
+ setFileReplication(replication);
+ return this;
}
/** @return preferred block size (in bytes) of the file. */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java
index bd0355b661..02c0815c55 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java
@@ -93,9 +93,8 @@ public INode get(long id) {
"", "", new FsPermission((short) 0)), 0, 0) {
@Override
- INode recordModification(int latestSnapshotId)
+ void recordModification(int latestSnapshotId)
throws QuotaExceededException {
- return null;
}
@Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
index ac0f19d032..05e144d21b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
@@ -287,11 +287,9 @@ public final void setAccessTime(long accessTime) {
}
@Override
- final INode recordModification(int latestSnapshotId)
+ final void recordModification(int latestSnapshotId)
throws QuotaExceededException {
referred.recordModification(latestSnapshotId);
- // reference is never replaced
- return this;
}
@Override // used by WithCount
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
index deb3ada16e..6729cd256c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
@@ -47,12 +47,11 @@ public class INodeSymlink extends INodeWithAdditionalFields {
}
@Override
- INode recordModification(int latestSnapshotId) throws QuotaExceededException {
+ void recordModification(int latestSnapshotId) throws QuotaExceededException {
if (isInLatestSnapshot(latestSnapshotId)) {
INodeDirectory parent = getParent();
parent.saveChild2Snapshot(this, latestSnapshotId, new INodeSymlink(this));
}
- return this;
}
/** @return true unconditionally. */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java
index f02eb84693..587746df15 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java
@@ -151,4 +151,11 @@ public interface FSNamesystemMBean {
* @return number of blocks pending deletion
*/
long getPendingDeletionBlocks();
+
+ /**
+ * Number of content stale storages.
+ * @return number of content stale storages
+ */
+ public int getNumStaleStorages();
+
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RegisterCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RegisterCommand.java
index a6cd4498fc..a102c8291b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RegisterCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RegisterCommand.java
@@ -22,6 +22,9 @@
/**
* A BlockCommand is an instruction to a datanode to register with the namenode.
+ * This command can't be combined with other commands in the same response.
+ * This is because after the datanode processes RegisterCommand, it will skip
+ * the rest of the DatanodeCommands in the same HeartbeatResponse.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/CMakeLists.txt
index dd3f1e62ab..8a8ea5b2b3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/CMakeLists.txt
@@ -37,6 +37,10 @@ ELSE (${CMAKE_SYSTEM_NAME} MATCHES "Linux")
ENDIF (${CMAKE_SYSTEM_NAME} MATCHES "Linux")
IF(FUSE_FOUND)
+ add_library(posix_util
+ ../util/posix_util.c
+ )
+
add_executable(fuse_dfs
fuse_dfs.c
fuse_options.c
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/common/htable.c b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/common/htable.c
new file mode 100644
index 0000000000..8d3d3c548c
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/common/htable.c
@@ -0,0 +1,271 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "common/htable.h"
+
+#include <errno.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+struct htable_pair {
+ void *key;
+ void *val;
+};
+
+/**
+ * A hash table which uses linear probing.
+ */
+struct htable {
+ uint32_t capacity;
+ uint32_t used;
+ htable_hash_fn_t hash_fun;
+ htable_eq_fn_t eq_fun;
+ struct htable_pair *elem;
+};
+
+/**
+ * An internal function for inserting a value into the hash table.
+ *
+ * Note: this function assumes that you have made enough space in the table.
+ *
+ * @param nelem The new element to insert.
+ * @param capacity The capacity of the hash table.
+ * @param hash_fun The hash function to use.
+ * @param key The key to insert.
+ * @param val The value to insert.
+ */
+static void htable_insert_internal(struct htable_pair *nelem,
+ uint32_t capacity, htable_hash_fn_t hash_fun, void *key,
+ void *val)
+{
+ uint32_t i;
+
+ i = hash_fun(key, capacity);
+ while (1) {
+ if (!nelem[i].key) {
+ nelem[i].key = key;
+ nelem[i].val = val;
+ return;
+ }
+ i++;
+ if (i == capacity) {
+ i = 0;
+ }
+ }
+}
+
+static int htable_realloc(struct htable *htable, uint32_t new_capacity)
+{
+ struct htable_pair *nelem;
+ uint32_t i, old_capacity = htable->capacity;
+ htable_hash_fn_t hash_fun = htable->hash_fun;
+
+ nelem = calloc(new_capacity, sizeof(struct htable_pair));
+ if (!nelem) {
+ return ENOMEM;
+ }
+ for (i = 0; i < old_capacity; i++) {
+ struct htable_pair *pair = htable->elem + i;
+ htable_insert_internal(nelem, new_capacity, hash_fun,
+ pair->key, pair->val);
+ }
+ free(htable->elem);
+ htable->elem = nelem;
+ htable->capacity = new_capacity;
+ return 0;
+}
+
+struct htable *htable_alloc(uint32_t size,
+ htable_hash_fn_t hash_fun, htable_eq_fn_t eq_fun)
+{
+ struct htable *htable;
+
+ htable = calloc(1, sizeof(*htable));
+ if (!htable) {
+ return NULL;
+ }
+ size = (size + 1) >> 1;
+ size = size << 1;
+ if (size < HTABLE_MIN_SIZE) {
+ size = HTABLE_MIN_SIZE;
+ }
+ htable->hash_fun = hash_fun;
+ htable->eq_fun = eq_fun;
+ htable->used = 0;
+ if (htable_realloc(htable, size)) {
+ free(htable);
+ return NULL;
+ }
+ return htable;
+}
+
+void htable_visit(struct htable *htable, visitor_fn_t fun, void *ctx)
+{
+ uint32_t i;
+
+ for (i = 0; i != htable->capacity; ++i) {
+ struct htable_pair *elem = htable->elem + i;
+ if (elem->key) {
+ fun(ctx, elem->key, elem->val);
+ }
+ }
+}
+
+void htable_free(struct htable *htable)
+{
+ if (htable) {
+ free(htable->elem);
+ free(htable);
+ }
+}
+
+int htable_put(struct htable *htable, void *key, void *val)
+{
+ int ret;
+ uint32_t nused;
+
+ // NULL is not a valid key value.
+ // This helps us implement htable_get_internal efficiently, since we know
+ // that we can stop when we encounter the first NULL key.
+ if (!key) {
+ return EINVAL;
+ }
+ // NULL is not a valid value. Otherwise the results of htable_get would
+ // be confusing (does a NULL return mean entry not found, or that the
+ // entry was found and was NULL?)
+ if (!val) {
+ return EINVAL;
+ }
+ // Re-hash if we have used more than half of the hash table
+ nused = htable->used + 1;
+ if (nused >= (htable->capacity / 2)) {
+ ret = htable_realloc(htable, htable->capacity * 2);
+ if (ret)
+ return ret;
+ }
+ htable_insert_internal(htable->elem, htable->capacity,
+ htable->hash_fun, key, val);
+ htable->used++;
+ return 0;
+}
+
+static int htable_get_internal(const struct htable *htable,
+ const void *key, uint32_t *out)
+{
+ uint32_t start_idx, idx;
+
+ start_idx = htable->hash_fun(key, htable->capacity);
+ idx = start_idx;
+ while (1) {
+ struct htable_pair *pair = htable->elem + idx;
+ if (!pair->key) {
+ // We always maintain the invariant that the entries corresponding
+ // to a given key are stored in a contiguous block, not separated
+ // by any NULLs. So if we encounter a NULL, our search is over.
+ return ENOENT;
+ } else if (htable->eq_fun(pair->key, key)) {
+ *out = idx;
+ return 0;
+ }
+ idx++;
+ if (idx == htable->capacity) {
+ idx = 0;
+ }
+ if (idx == start_idx) {
+ return ENOENT;
+ }
+ }
+}
+
+void *htable_get(const struct htable *htable, const void *key)
+{
+ uint32_t idx;
+
+ if (htable_get_internal(htable, key, &idx)) {
+ return NULL;
+ }
+ return htable->elem[idx].val;
+}
+
+void htable_pop(struct htable *htable, const void *key,
+ void **found_key, void **found_val)
+{
+ uint32_t hole, i;
+ const void *nkey;
+
+ if (htable_get_internal(htable, key, &hole)) {
+ *found_key = NULL;
+ *found_val = NULL;
+ return;
+ }
+ i = hole;
+ htable->used--;
+ // We need to maintain the compactness invariant used in
+ // htable_get_internal. This invariant specifies that the entries for any
+ // given key are never separated by NULLs (although they may be separated
+ // by entries for other keys.)
+ while (1) {
+ i++;
+ if (i == htable->capacity) {
+ i = 0;
+ }
+ nkey = htable->elem[i].key;
+ if (!nkey) {
+ *found_key = htable->elem[hole].key;
+ *found_val = htable->elem[hole].val;
+ htable->elem[hole].key = NULL;
+ htable->elem[hole].val = NULL;
+ return;
+ } else if (htable->eq_fun(key, nkey)) {
+ htable->elem[hole].key = htable->elem[i].key;
+ htable->elem[hole].val = htable->elem[i].val;
+ hole = i;
+ }
+ }
+}
+
+uint32_t htable_used(const struct htable *htable)
+{
+ return htable->used;
+}
+
+uint32_t htable_capacity(const struct htable *htable)
+{
+ return htable->capacity;
+}
+
+uint32_t ht_hash_string(const void *str, uint32_t max)
+{
+ const char *s = str;
+ uint32_t hash = 0;
+
+ while (*s) {
+ hash = (hash * 31) + *s;
+ s++;
+ }
+ return hash % max;
+}
+
+int ht_compare_string(const void *a, const void *b)
+{
+ return strcmp(a, b) == 0;
+}
+
+// vim: ts=4:sw=4:tw=79:et
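
The implementation above grows by doubling: htable_alloc rounds the requested size up to an even value of at least HTABLE_MIN_SIZE, and htable_put rehashes into a table of twice the capacity as soon as an insertion would fill half the slots, which keeps linear-probe chains short. The following is an illustrative stand-alone program (not part of the patch) exercising that policy; it assumes common/htable.h is on the include path and is linked against this file.

    #include <inttypes.h>
    #include <stdio.h>

    #include "common/htable.h"

    int main(void)
    {
        char keys[8][4];
        uint32_t i;
        /* Requested capacity 16: the table doubles to 32 while inserting
         * the entry that would make it half full. */
        struct htable *ht = htable_alloc(16, ht_hash_string,
                                         ht_compare_string);
        if (!ht)
            return 1;
        for (i = 0; i < 8; i++) {
            snprintf(keys[i], sizeof(keys[i]), "k%" PRIu32, i);
            if (htable_put(ht, keys[i], keys[i]))
                return 1;
            printf("used=%" PRIu32 ", capacity=%" PRIu32 "\n",
                   htable_used(ht), htable_capacity(ht));
        }
        htable_free(ht);
        return 0;
    }

Running it prints capacity 16 for the first seven insertions and 32 for the eighth, matching the rehash threshold in htable_put.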
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/common/htable.h b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/common/htable.h
new file mode 100644
index 0000000000..33f1229051
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/common/htable.h
@@ -0,0 +1,161 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HADOOP_CORE_COMMON_HASH_TABLE
+#define HADOOP_CORE_COMMON_HASH_TABLE
+
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdint.h>
+
+#define HTABLE_MIN_SIZE 4
+
+struct htable;
+
+/**
+ * An HTable hash function.
+ *
+ * @param key The key.
+ * @param capacity The total capacity.
+ *
+ * @return The hash slot. Must be less than the capacity.
+ */
+typedef uint32_t (*htable_hash_fn_t)(const void *key, uint32_t capacity);
+
+/**
+ * An HTable equality function. Compares two keys.
+ *
+ * @param a First key.
+ * @param b Second key.
+ *
+ * @return nonzero if the keys are equal.
+ */
+typedef int (*htable_eq_fn_t)(const void *a, const void *b);
+
+/**
+ * Allocate a new hash table.
+ *
+ * @param capacity The minimum suggested starting capacity.
+ * @param hash_fun The hash function to use in this hash table.
+ * @param eq_fun The equals function to use in this hash table.
+ *
+ * @return The new hash table on success; NULL on OOM.
+ */
+struct htable *htable_alloc(uint32_t capacity, htable_hash_fn_t hash_fun,
+ htable_eq_fn_t eq_fun);
+
+typedef void (*visitor_fn_t)(void *ctx, void *key, void *val);
+
+/**
+ * Visit all of the entries in the hash table.
+ *
+ * @param htable The hash table.
+ * @param fun The callback function to invoke on each key and value.
+ * @param ctx Context pointer to pass to the callback.
+ */
+void htable_visit(struct htable *htable, visitor_fn_t fun, void *ctx);
+
+/**
+ * Free the hash table.
+ *
+ * It is up to the calling code to ensure that the keys and values inside the
+ * table are de-allocated, if that is necessary.
+ *
+ * @param htable The hash table.
+ */
+void htable_free(struct htable *htable);
+
+/**
+ * Add an entry to the hash table.
+ *
+ * @param htable The hash table.
+ * @param key The key to add. This cannot be NULL.
+ * @param val The value to add. This cannot be NULL.
+ *
+ * @return 0 on success;
+ * EEXIST if the value already exists in the table;
+ * ENOMEM if there is not enough memory to add the element.
+ * EFBIG if the hash table has too many entries to fit in 32
+ * bits.
+ */
+int htable_put(struct htable *htable, void *key, void *val);
+
+/**
+ * Get an entry from the hash table.
+ *
+ * @param htable The hash table.
+ * @param key The key to find.
+ *
+ * @return NULL if there is no such entry; the entry otherwise.
+ */
+void *htable_get(const struct htable *htable, const void *key);
+
+/**
+ * Get an entry from the hash table and remove it.
+ *
+ * @param htable The hash table.
+ * @param key The key of the entry to find and remove.
+ * @param found_key (out param) NULL if the entry was not found; the found key
+ * otherwise.
+ * @param found_val (out param) NULL if the entry was not found; the found
+ * value otherwise.
+ */
+void htable_pop(struct htable *htable, const void *key,
+ void **found_key, void **found_val);
+
+/**
+ * Get the number of entries used in the hash table.
+ *
+ * @param htable The hash table.
+ *
+ * @return The number of entries used in the hash table.
+ */
+uint32_t htable_used(const struct htable *htable);
+
+/**
+ * Get the capacity of the hash table.
+ *
+ * @param htable The hash table.
+ *
+ * @return The capacity of the hash table.
+ */
+uint32_t htable_capacity(const struct htable *htable);
+
+/**
+ * Hash a string.
+ *
+ * @param str The string.
+ * @param max Maximum hash value
+ *
+ * @return A number less than max.
+ */
+uint32_t ht_hash_string(const void *str, uint32_t max);
+
+/**
+ * Compare two strings.
+ *
+ * @param a The first string.
+ * @param b The second string.
+ *
+ * @return 1 if the strings are identical; 0 otherwise.
+ */
+int ht_compare_string(const void *a, const void *b);
+
+#endif
+
+// vim: ts=4:sw=4:tw=79:et
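
For reference, here is a short usage sketch of the API declared above. It is an illustrative program, not part of the patch; it assumes common/htable.h is on the include path, and the keys, values, and the print_pair visitor are hypothetical names chosen for the example. Keys and values are opaque, caller-owned pointers.

    #include <inttypes.h>
    #include <stdio.h>

    #include "common/htable.h"

    static void print_pair(void *ctx, void *key, void *val)
    {
        (void)ctx;
        printf("%s -> %s\n", (const char *)key, (const char *)val);
    }

    int main(void)
    {
        char k1[] = "dfs.replication", v1[] = "3";
        char k2[] = "dfs.blocksize",   v2[] = "134217728";
        void *old_key, *old_val;

        struct htable *ht = htable_alloc(4, ht_hash_string,
                                         ht_compare_string);
        if (!ht)
            return 1;
        /* NULL keys and values are rejected: NULL marks empty slots. */
        if (htable_put(ht, k1, v1) || htable_put(ht, k2, v2))
            return 1;

        printf("get: %s\n", (const char *)htable_get(ht, "dfs.blocksize"));
        htable_visit(ht, print_pair, NULL);

        /* htable_pop hands back the stored pointers so the caller can
         * free them if they were heap-allocated. */
        htable_pop(ht, "dfs.replication", &old_key, &old_val);
        printf("popped %s=%s, %" PRIu32 " entries left\n",
               (const char *)old_key, (const char *)old_val,
               htable_used(ht));

        htable_free(ht);
        return 0;
    }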
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/exception.c b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/exception.c
index d7e1720845..2373aa7802 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/exception.c
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/exception.c
@@ -19,8 +19,8 @@
#include "exception.h"
#include "hdfs.h"
#include "jni_helper.h"
+#include "platform.h"
-#include
#include
#include
#include
@@ -35,54 +35,54 @@ struct ExceptionInfo {
static const struct ExceptionInfo gExceptionInfo[] = {
{
- .name = "java.io.FileNotFoundException",
- .noPrintFlag = NOPRINT_EXC_FILE_NOT_FOUND,
- .excErrno = ENOENT,
+ "java.io.FileNotFoundException",
+ NOPRINT_EXC_FILE_NOT_FOUND,
+ ENOENT,
},
{
- .name = "org.apache.hadoop.security.AccessControlException",
- .noPrintFlag = NOPRINT_EXC_ACCESS_CONTROL,
- .excErrno = EACCES,
+ "org.apache.hadoop.security.AccessControlException",
+ NOPRINT_EXC_ACCESS_CONTROL,
+ EACCES,
},
{
- .name = "org.apache.hadoop.fs.UnresolvedLinkException",
- .noPrintFlag = NOPRINT_EXC_UNRESOLVED_LINK,
- .excErrno = ENOLINK,
+ "org.apache.hadoop.fs.UnresolvedLinkException",
+ NOPRINT_EXC_UNRESOLVED_LINK,
+ ENOLINK,
},
{
- .name = "org.apache.hadoop.fs.ParentNotDirectoryException",
- .noPrintFlag = NOPRINT_EXC_PARENT_NOT_DIRECTORY,
- .excErrno = ENOTDIR,
+ "org.apache.hadoop.fs.ParentNotDirectoryException",
+ NOPRINT_EXC_PARENT_NOT_DIRECTORY,
+ ENOTDIR,
},
{
- .name = "java.lang.IllegalArgumentException",
- .noPrintFlag = NOPRINT_EXC_ILLEGAL_ARGUMENT,
- .excErrno = EINVAL,
+ "java.lang.IllegalArgumentException",
+ NOPRINT_EXC_ILLEGAL_ARGUMENT,
+ EINVAL,
},
{
- .name = "java.lang.OutOfMemoryError",
- .noPrintFlag = 0,
- .excErrno = ENOMEM,
+ "java.lang.OutOfMemoryError",
+ 0,
+ ENOMEM,
},
{
- .name = "org.apache.hadoop.hdfs.server.namenode.SafeModeException",
- .noPrintFlag = 0,
- .excErrno = EROFS,
+ "org.apache.hadoop.hdfs.server.namenode.SafeModeException",
+ 0,
+ EROFS,
},
{
- .name = "org.apache.hadoop.fs.FileAlreadyExistsException",
- .noPrintFlag = 0,
- .excErrno = EEXIST,
+ "org.apache.hadoop.fs.FileAlreadyExistsException",
+ 0,
+ EEXIST,
},
{
- .name = "org.apache.hadoop.hdfs.protocol.QuotaExceededException",
- .noPrintFlag = 0,
- .excErrno = EDQUOT,
+ "org.apache.hadoop.hdfs.protocol.QuotaExceededException",
+ 0,
+ EDQUOT,
},
{
- .name = "org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException",
- .noPrintFlag = 0,
- .excErrno = ESTALE,
+ "org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException",
+ 0,
+ ESTALE,
},
};
@@ -113,6 +113,7 @@ int printExceptionAndFreeV(JNIEnv *env, jthrowable exc, int noPrintFlags,
jstring jStr = NULL;
jvalue jVal;
jthrowable jthr;
+ const char *stackTrace;
jthr = classNameOfObject(exc, env, &className);
if (jthr) {
@@ -148,7 +149,7 @@ int printExceptionAndFreeV(JNIEnv *env, jthrowable exc, int noPrintFlags,
destroyLocalReference(env, jthr);
} else {
jStr = jVal.l;
- const char *stackTrace = (*env)->GetStringUTFChars(env, jStr, NULL);
+ stackTrace = (*env)->GetStringUTFChars(env, jStr, NULL);
if (!stackTrace) {
fprintf(stderr, "(unable to get stack trace for %s exception: "
"GetStringUTFChars error.)\n", className);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/exception.h b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/exception.h
index e36151314e..5fa7fa6ebd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/exception.h
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/exception.h
@@ -34,13 +34,14 @@
* usually not what you want.)
*/
+#include "platform.h"
+
#include
#include
#include
#include
#include
-#include
#include
/**
@@ -109,7 +110,7 @@ int printExceptionAndFreeV(JNIEnv *env, jthrowable exc, int noPrintFlags,
* object.
*/
int printExceptionAndFree(JNIEnv *env, jthrowable exc, int noPrintFlags,
- const char *fmt, ...) __attribute__((format(printf, 4, 5)));
+ const char *fmt, ...) TYPE_CHECKED_PRINTF_FORMAT(4, 5);
/**
* Print out information about the pending exception and free it.
@@ -124,7 +125,7 @@ int printExceptionAndFree(JNIEnv *env, jthrowable exc, int noPrintFlags,
* object.
*/
int printPendingExceptionAndFree(JNIEnv *env, int noPrintFlags,
- const char *fmt, ...) __attribute__((format(printf, 3, 4)));
+ const char *fmt, ...) TYPE_CHECKED_PRINTF_FORMAT(3, 4);
/**
* Get a local reference to the pending exception and clear it.
@@ -150,6 +151,7 @@ jthrowable getPendingExceptionAndClear(JNIEnv *env);
* @return A local reference to a RuntimeError
*/
jthrowable newRuntimeError(JNIEnv *env, const char *fmt, ...)
- __attribute__((format(printf, 2, 3)));
+ TYPE_CHECKED_PRINTF_FORMAT(2, 3);
+#undef TYPE_CHECKED_PRINTF_FORMAT
#endif
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/expect.c b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/expect.c
index 6b80ea90a2..576e9ef363 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/expect.c
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/expect.c
@@ -49,18 +49,18 @@ int expectFileStats(hdfsFile file,
stats->totalShortCircuitBytesRead,
stats->totalZeroCopyBytesRead);
if (expectedTotalBytesRead != UINT64_MAX) {
- EXPECT_INT64_EQ(expectedTotalBytesRead, stats->totalBytesRead);
+ EXPECT_UINT64_EQ(expectedTotalBytesRead, stats->totalBytesRead);
}
if (expectedTotalLocalBytesRead != UINT64_MAX) {
- EXPECT_INT64_EQ(expectedTotalLocalBytesRead,
+ EXPECT_UINT64_EQ(expectedTotalLocalBytesRead,
stats->totalLocalBytesRead);
}
if (expectedTotalShortCircuitBytesRead != UINT64_MAX) {
- EXPECT_INT64_EQ(expectedTotalShortCircuitBytesRead,
+ EXPECT_UINT64_EQ(expectedTotalShortCircuitBytesRead,
stats->totalShortCircuitBytesRead);
}
if (expectedTotalZeroCopyBytesRead != UINT64_MAX) {
- EXPECT_INT64_EQ(expectedTotalZeroCopyBytesRead,
+ EXPECT_UINT64_EQ(expectedTotalZeroCopyBytesRead,
stats->totalZeroCopyBytesRead);
}
hdfsFileFreeReadStatistics(stats);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/expect.h b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/expect.h
index 15fa510630..e64b108e20 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/expect.h
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/expect.h
@@ -126,6 +126,18 @@ struct hdfsFile_internal;
} \
} while (0);
+#define EXPECT_UINT64_EQ(x, y) \
+ do { \
+ uint64_t __my_ret__ = y; \
+ int __my_errno__ = errno; \
+ if (__my_ret__ != (x)) { \
+ fprintf(stderr, "TEST_ERROR: failed on %s:%d with return " \
+ "value %"PRIu64" (errno: %d): expected %"PRIu64"\n", \
+ __FILE__, __LINE__, __my_ret__, __my_errno__, (x)); \
+ return -1; \
+ } \
+ } while (0);
+
#define RETRY_ON_EINTR_GET_ERRNO(ret, expr) do { \
ret = expr; \
if (!ret) \
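
The new EXPECT_UINT64_EQ macro mirrors the existing EXPECT_INT64_EQ but compares as unsigned 64-bit and prints with PRIu64, so large byte counters and UINT64_MAX sentinels are not reported as negative values. A hypothetical helper using it is sketched below; checkTotalBytesRead is an invented name, and the sketch assumes expect.h from the libhdfs test sources is on the include path.

    #include <errno.h>
    #include <inttypes.h>
    #include <stdio.h>
    #include <stdint.h>

    #include "expect.h"

    /* Returns 0 on success; on mismatch the macro prints a TEST_ERROR line
     * and returns -1 from this function. */
    static int checkTotalBytesRead(uint64_t expected, uint64_t actual)
    {
        EXPECT_UINT64_EQ(expected, actual);
        return 0;
    }

    int main(void)
    {
        return checkTotalBytesRead(134217728, 134217728);
    }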
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c
index 07088d09c4..c382b9a34e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/hdfs.c
@@ -19,7 +19,9 @@
#include "exception.h"
#include "hdfs.h"
#include "jni_helper.h"
+#include "platform.h"
+#include
#include
#include
#include
@@ -63,9 +65,9 @@ static void hdfsFreeFileInfoEntry(hdfsFileInfo *hdfsFileInfo);
*/
enum hdfsStreamType
{
- UNINITIALIZED = 0,
- INPUT = 1,
- OUTPUT = 2,
+ HDFS_STREAM_UNINITIALIZED = 0,
+ HDFS_STREAM_INPUT = 1,
+ HDFS_STREAM_OUTPUT = 2,
};
/**
@@ -79,7 +81,7 @@ struct hdfsFile_internal {
int hdfsFileIsOpenForRead(hdfsFile file)
{
- return (file->type == INPUT);
+ return (file->type == HDFS_STREAM_INPUT);
}
int hdfsFileGetReadStatistics(hdfsFile file,
@@ -96,7 +98,7 @@ int hdfsFileGetReadStatistics(hdfsFile file,
errno = EINTERNAL;
return -1;
}
- if (file->type != INPUT) {
+ if (file->type != HDFS_STREAM_INPUT) {
ret = EINVAL;
goto done;
}
@@ -180,7 +182,7 @@ void hdfsFileFreeReadStatistics(struct hdfsReadStatistics *stats)
int hdfsFileIsOpenForWrite(hdfsFile file)
{
- return (file->type == OUTPUT);
+ return (file->type == HDFS_STREAM_OUTPUT);
}
int hdfsFileUsesDirectRead(hdfsFile file)
@@ -441,7 +443,7 @@ void hdfsBuilderSetKerbTicketCachePath(struct hdfsBuilder *bld,
bld->kerbTicketCachePath = kerbTicketCachePath;
}
-hdfsFS hdfsConnect(const char* host, tPort port)
+hdfsFS hdfsConnect(const char *host, tPort port)
{
struct hdfsBuilder *bld = hdfsNewBuilder();
if (!bld)
@@ -452,7 +454,7 @@ hdfsFS hdfsConnect(const char* host, tPort port)
}
/** Always return a new FileSystem handle */
-hdfsFS hdfsConnectNewInstance(const char* host, tPort port)
+hdfsFS hdfsConnectNewInstance(const char *host, tPort port)
{
struct hdfsBuilder *bld = hdfsNewBuilder();
if (!bld)
@@ -463,7 +465,7 @@ hdfsFS hdfsConnectNewInstance(const char* host, tPort port)
return hdfsBuilderConnect(bld);
}
-hdfsFS hdfsConnectAsUser(const char* host, tPort port, const char *user)
+hdfsFS hdfsConnectAsUser(const char *host, tPort port, const char *user)
{
struct hdfsBuilder *bld = hdfsNewBuilder();
if (!bld)
@@ -475,7 +477,7 @@ hdfsFS hdfsConnectAsUser(const char* host, tPort port, const char *user)
}
/** Always return a new FileSystem handle */
-hdfsFS hdfsConnectAsUserNewInstance(const char* host, tPort port,
+hdfsFS hdfsConnectAsUserNewInstance(const char *host, tPort port,
const char *user)
{
struct hdfsBuilder *bld = hdfsNewBuilder();
@@ -518,7 +520,7 @@ static int calcEffectiveURI(struct hdfsBuilder *bld, char ** uri)
if (bld->port == 0) {
suffix[0] = '\0';
} else {
- lastColon = rindex(bld->nn, ':');
+ lastColon = strrchr(bld->nn, ':');
if (lastColon && (strspn(lastColon + 1, "0123456789") ==
strlen(lastColon + 1))) {
fprintf(stderr, "port %d was given, but URI '%s' already "
@@ -737,6 +739,8 @@ int hdfsDisconnect(hdfsFS fs)
//Get the JNIEnv* corresponding to current thread
JNIEnv* env = getJNIEnv();
int ret;
+ jobject jFS;
+ jthrowable jthr;
if (env == NULL) {
errno = EINTERNAL;
@@ -744,7 +748,7 @@ int hdfsDisconnect(hdfsFS fs)
}
//Parameters
- jobject jFS = (jobject)fs;
+ jFS = (jobject)fs;
//Sanity check
if (fs == NULL) {
@@ -752,7 +756,7 @@ int hdfsDisconnect(hdfsFS fs)
return -1;
}
- jthrowable jthr = invokeMethod(env, NULL, INSTANCE, jFS, HADOOP_FS,
+ jthr = invokeMethod(env, NULL, INSTANCE, jFS, HADOOP_FS,
"close", "()V");
if (jthr) {
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -792,7 +796,7 @@ static jthrowable getDefaultBlockSize(JNIEnv *env, jobject jFS,
return NULL;
}
-hdfsFile hdfsOpenFile(hdfsFS fs, const char* path, int flags,
+hdfsFile hdfsOpenFile(hdfsFS fs, const char *path, int flags,
int bufferSize, short replication, tSize blockSize)
{
/*
@@ -801,15 +805,7 @@ hdfsFile hdfsOpenFile(hdfsFS fs, const char* path, int flags,
FSData{Input|Output}Stream f{is|os} = fs.create(f);
return f{is|os};
*/
- /* Get the JNIEnv* corresponding to current thread */
- JNIEnv* env = getJNIEnv();
int accmode = flags & O_ACCMODE;
-
- if (env == NULL) {
- errno = EINTERNAL;
- return NULL;
- }
-
jstring jStrBufferSize = NULL, jStrReplication = NULL;
jobject jConfiguration = NULL, jPath = NULL, jFile = NULL;
jobject jFS = (jobject)fs;
@@ -817,6 +813,20 @@ hdfsFile hdfsOpenFile(hdfsFS fs, const char* path, int flags,
jvalue jVal;
hdfsFile file = NULL;
int ret;
+ jint jBufferSize = bufferSize;
+ jshort jReplication = replication;
+
+ /* The hadoop java api/signature */
+ const char *method = NULL;
+ const char *signature = NULL;
+
+ /* Get the JNIEnv* corresponding to current thread */
+ JNIEnv* env = getJNIEnv();
+ if (env == NULL) {
+ errno = EINTERNAL;
+ return NULL;
+ }
+
if (accmode == O_RDONLY || accmode == O_WRONLY) {
/* yay */
@@ -834,10 +844,6 @@ hdfsFile hdfsOpenFile(hdfsFS fs, const char* path, int flags,
fprintf(stderr, "WARN: hdfs does not truly support O_CREATE && O_EXCL\n");
}
- /* The hadoop java api/signature */
- const char* method = NULL;
- const char* signature = NULL;
-
if (accmode == O_RDONLY) {
method = "open";
signature = JMETHOD2(JPARAM(HADOOP_PATH), "I", JPARAM(HADOOP_ISTRM));
@@ -867,8 +873,6 @@ hdfsFile hdfsOpenFile(hdfsFS fs, const char* path, int flags,
}
jConfiguration = jVal.l;
- jint jBufferSize = bufferSize;
- jshort jReplication = replication;
jStrBufferSize = (*env)->NewStringUTF(env, "io.file.buffer.size");
if (!jStrBufferSize) {
ret = printPendingExceptionAndFree(env, PRINT_EXC_ALL, "OOM");
@@ -905,7 +909,7 @@ hdfsFile hdfsOpenFile(hdfsFS fs, const char* path, int flags,
path);
goto done;
}
- jReplication = jVal.i;
+ jReplication = (jshort)jVal.i;
}
}
@@ -955,7 +959,8 @@ hdfsFile hdfsOpenFile(hdfsFS fs, const char* path, int flags,
"hdfsOpenFile(%s): NewGlobalRef", path);
goto done;
}
- file->type = (((flags & O_WRONLY) == 0) ? INPUT : OUTPUT);
+ file->type = (((flags & O_WRONLY) == 0) ? HDFS_STREAM_INPUT :
+ HDFS_STREAM_OUTPUT);
file->flags = 0;
if ((flags & O_WRONLY) == 0) {
@@ -998,31 +1003,33 @@ int hdfsCloseFile(hdfsFS fs, hdfsFile file)
// JAVA EQUIVALENT:
// file.close
+ //The interface whose 'close' method is to be called
+ const char *interface;
+ const char *interfaceShortName;
+
+ //Caught exception
+ jthrowable jthr;
+
//Get the JNIEnv* corresponding to current thread
JNIEnv* env = getJNIEnv();
-
if (env == NULL) {
errno = EINTERNAL;
return -1;
}
- //Caught exception
- jthrowable jthr;
-
//Sanity check
- if (!file || file->type == UNINITIALIZED) {
+ if (!file || file->type == HDFS_STREAM_UNINITIALIZED) {
errno = EBADF;
return -1;
}
- //The interface whose 'close' method to be called
- const char* interface = (file->type == INPUT) ?
+ interface = (file->type == HDFS_STREAM_INPUT) ?
HADOOP_ISTRM : HADOOP_OSTRM;
jthr = invokeMethod(env, NULL, INSTANCE, file->file, interface,
"close", "()V");
if (jthr) {
- const char *interfaceShortName = (file->type == INPUT) ?
+ interfaceShortName = (file->type == HDFS_STREAM_INPUT) ?
"FSDataInputStream" : "FSDataOutputStream";
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"%s#close", interfaceShortName);
@@ -1044,15 +1051,15 @@ int hdfsCloseFile(hdfsFS fs, hdfsFile file)
int hdfsExists(hdfsFS fs, const char *path)
{
JNIEnv *env = getJNIEnv();
- if (env == NULL) {
- errno = EINTERNAL;
- return -1;
- }
-
jobject jPath;
jvalue jVal;
jobject jFS = (jobject)fs;
jthrowable jthr;
+
+ if (env == NULL) {
+ errno = EINTERNAL;
+ return -1;
+ }
if (path == NULL) {
errno = EINVAL;
@@ -1088,13 +1095,13 @@ static int readPrepare(JNIEnv* env, hdfsFS fs, hdfsFile f,
*jInputStream = (jobject)(f ? f->file : NULL);
//Sanity check
- if (!f || f->type == UNINITIALIZED) {
+ if (!f || f->type == HDFS_STREAM_UNINITIALIZED) {
errno = EBADF;
return -1;
}
//Error checking... make sure that this file is 'readable'
- if (f->type != INPUT) {
+ if (f->type != HDFS_STREAM_INPUT) {
fprintf(stderr, "Cannot read from a non-InputStream object!\n");
errno = EINVAL;
return -1;
@@ -1105,6 +1112,13 @@ static int readPrepare(JNIEnv* env, hdfsFS fs, hdfsFile f,
tSize hdfsRead(hdfsFS fs, hdfsFile f, void* buffer, tSize length)
{
+ jobject jInputStream;
+ jbyteArray jbRarray;
+ jint noReadBytes = length;
+ jvalue jVal;
+ jthrowable jthr;
+ JNIEnv* env;
+
if (length == 0) {
return 0;
} else if (length < 0) {
@@ -1120,23 +1134,17 @@ tSize hdfsRead(hdfsFS fs, hdfsFile f, void* buffer, tSize length)
// fis.read(bR);
//Get the JNIEnv* corresponding to current thread
- JNIEnv* env = getJNIEnv();
+ env = getJNIEnv();
if (env == NULL) {
errno = EINTERNAL;
return -1;
}
//Parameters
- jobject jInputStream;
if (readPrepare(env, fs, f, &jInputStream) == -1) {
return -1;
}
- jbyteArray jbRarray;
- jint noReadBytes = length;
- jvalue jVal;
- jthrowable jthr;
-
//Read the requisite bytes
jbRarray = (*env)->NewByteArray(env, length);
if (!jbRarray) {
@@ -1179,6 +1187,11 @@ tSize readDirect(hdfsFS fs, hdfsFile f, void* buffer, tSize length)
// ByteBuffer bbuffer = ByteBuffer.allocateDirect(length) // wraps C buffer
// fis.read(bbuffer);
+ jobject jInputStream;
+ jvalue jVal;
+ jthrowable jthr;
+ jobject bb;
+
//Get the JNIEnv* corresponding to current thread
JNIEnv* env = getJNIEnv();
if (env == NULL) {
@@ -1186,16 +1199,12 @@ tSize readDirect(hdfsFS fs, hdfsFile f, void* buffer, tSize length)
return -1;
}
- jobject jInputStream;
if (readPrepare(env, fs, f, &jInputStream) == -1) {
return -1;
}
- jvalue jVal;
- jthrowable jthr;
-
//Read the requisite bytes
- jobject bb = (*env)->NewDirectByteBuffer(env, buffer, length);
+ bb = (*env)->NewDirectByteBuffer(env, buffer, length);
if (bb == NULL) {
errno = printPendingExceptionAndFree(env, PRINT_EXC_ALL,
"readDirect: NewDirectByteBuffer");
@@ -1227,7 +1236,7 @@ tSize hdfsPread(hdfsFS fs, hdfsFile f, tOffset position,
errno = EINVAL;
return -1;
}
- if (!f || f->type == UNINITIALIZED) {
+ if (!f || f->type == HDFS_STREAM_UNINITIALIZED) {
errno = EBADF;
return -1;
}
@@ -1239,7 +1248,7 @@ tSize hdfsPread(hdfsFS fs, hdfsFile f, tOffset position,
}
//Error checking... make sure that this file is 'readable'
- if (f->type != INPUT) {
+ if (f->type != HDFS_STREAM_INPUT) {
fprintf(stderr, "Cannot read from a non-InputStream object!\n");
errno = EINVAL;
return -1;
@@ -1287,6 +1296,10 @@ tSize hdfsWrite(hdfsFS fs, hdfsFile f, const void* buffer, tSize length)
// byte b[] = str.getBytes();
// fso.write(b);
+ jobject jOutputStream;
+ jbyteArray jbWarray;
+ jthrowable jthr;
+
//Get the JNIEnv* corresponding to current thread
JNIEnv* env = getJNIEnv();
if (env == NULL) {
@@ -1295,14 +1308,12 @@ tSize hdfsWrite(hdfsFS fs, hdfsFile f, const void* buffer, tSize length)
}
//Sanity check
- if (!f || f->type == UNINITIALIZED) {
+ if (!f || f->type == HDFS_STREAM_UNINITIALIZED) {
errno = EBADF;
return -1;
}
- jobject jOutputStream = f->file;
- jbyteArray jbWarray;
- jthrowable jthr;
+ jOutputStream = f->file;
if (length < 0) {
errno = EINVAL;
@@ -1310,7 +1321,7 @@ tSize hdfsWrite(hdfsFS fs, hdfsFile f, const void* buffer, tSize length)
}
//Error checking... make sure that this file is 'writable'
- if (f->type != OUTPUT) {
+ if (f->type != HDFS_STREAM_OUTPUT) {
fprintf(stderr, "Cannot write into a non-OutputStream object!\n");
errno = EINVAL;
return -1;
@@ -1355,6 +1366,9 @@ int hdfsSeek(hdfsFS fs, hdfsFile f, tOffset desiredPos)
// JAVA EQUIVALENT
// fis.seek(pos);
+ jobject jInputStream;
+ jthrowable jthr;
+
//Get the JNIEnv* corresponding to current thread
JNIEnv* env = getJNIEnv();
if (env == NULL) {
@@ -1363,13 +1377,13 @@ int hdfsSeek(hdfsFS fs, hdfsFile f, tOffset desiredPos)
}
//Sanity check
- if (!f || f->type != INPUT) {
+ if (!f || f->type != HDFS_STREAM_INPUT) {
errno = EBADF;
return -1;
}
- jobject jInputStream = f->file;
- jthrowable jthr = invokeMethod(env, NULL, INSTANCE, jInputStream,
+ jInputStream = f->file;
+ jthr = invokeMethod(env, NULL, INSTANCE, jInputStream,
HADOOP_ISTRM, "seek", "(J)V", desiredPos);
if (jthr) {
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -1387,6 +1401,11 @@ tOffset hdfsTell(hdfsFS fs, hdfsFile f)
// JAVA EQUIVALENT
// pos = f.getPos();
+ jobject jStream;
+ const char *interface;
+ jvalue jVal;
+ jthrowable jthr;
+
//Get the JNIEnv* corresponding to current thread
JNIEnv* env = getJNIEnv();
if (env == NULL) {
@@ -1395,22 +1414,21 @@ tOffset hdfsTell(hdfsFS fs, hdfsFile f)
}
//Sanity check
- if (!f || f->type == UNINITIALIZED) {
+ if (!f || f->type == HDFS_STREAM_UNINITIALIZED) {
errno = EBADF;
return -1;
}
//Parameters
- jobject jStream = f->file;
- const char* interface = (f->type == INPUT) ?
+ jStream = f->file;
+ interface = (f->type == HDFS_STREAM_INPUT) ?
HADOOP_ISTRM : HADOOP_OSTRM;
- jvalue jVal;
- jthrowable jthr = invokeMethod(env, &jVal, INSTANCE, jStream,
+ jthr = invokeMethod(env, &jVal, INSTANCE, jStream,
interface, "getPos", "()J");
if (jthr) {
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hdfsTell: %s#getPos",
- ((f->type == INPUT) ? "FSDataInputStream" :
+ ((f->type == HDFS_STREAM_INPUT) ? "FSDataInputStream" :
"FSDataOutputStream"));
return -1;
}
@@ -1422,6 +1440,8 @@ int hdfsFlush(hdfsFS fs, hdfsFile f)
// JAVA EQUIVALENT
// fos.flush();
+ jthrowable jthr;
+
//Get the JNIEnv* corresponding to current thread
JNIEnv* env = getJNIEnv();
if (env == NULL) {
@@ -1430,11 +1450,11 @@ int hdfsFlush(hdfsFS fs, hdfsFile f)
}
//Sanity check
- if (!f || f->type != OUTPUT) {
+ if (!f || f->type != HDFS_STREAM_OUTPUT) {
errno = EBADF;
return -1;
}
- jthrowable jthr = invokeMethod(env, NULL, INSTANCE, f->file,
+ jthr = invokeMethod(env, NULL, INSTANCE, f->file,
HADOOP_OSTRM, "flush", "()V");
if (jthr) {
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -1446,6 +1466,9 @@ int hdfsFlush(hdfsFS fs, hdfsFile f)
int hdfsHFlush(hdfsFS fs, hdfsFile f)
{
+ jobject jOutputStream;
+ jthrowable jthr;
+
//Get the JNIEnv* corresponding to current thread
JNIEnv* env = getJNIEnv();
if (env == NULL) {
@@ -1454,13 +1477,13 @@ int hdfsHFlush(hdfsFS fs, hdfsFile f)
}
//Sanity check
- if (!f || f->type != OUTPUT) {
+ if (!f || f->type != HDFS_STREAM_OUTPUT) {
errno = EBADF;
return -1;
}
- jobject jOutputStream = f->file;
- jthrowable jthr = invokeMethod(env, NULL, INSTANCE, jOutputStream,
+ jOutputStream = f->file;
+ jthr = invokeMethod(env, NULL, INSTANCE, jOutputStream,
HADOOP_OSTRM, "hflush", "()V");
if (jthr) {
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -1472,6 +1495,9 @@ int hdfsHFlush(hdfsFS fs, hdfsFile f)
int hdfsHSync(hdfsFS fs, hdfsFile f)
{
+ jobject jOutputStream;
+ jthrowable jthr;
+
//Get the JNIEnv* corresponding to current thread
JNIEnv* env = getJNIEnv();
if (env == NULL) {
@@ -1480,13 +1506,13 @@ int hdfsHSync(hdfsFS fs, hdfsFile f)
}
//Sanity check
- if (!f || f->type != OUTPUT) {
+ if (!f || f->type != HDFS_STREAM_OUTPUT) {
errno = EBADF;
return -1;
}
- jobject jOutputStream = f->file;
- jthrowable jthr = invokeMethod(env, NULL, INSTANCE, jOutputStream,
+ jOutputStream = f->file;
+ jthr = invokeMethod(env, NULL, INSTANCE, jOutputStream,
HADOOP_OSTRM, "hsync", "()V");
if (jthr) {
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -1501,6 +1527,10 @@ int hdfsAvailable(hdfsFS fs, hdfsFile f)
// JAVA EQUIVALENT
// fis.available();
+ jobject jInputStream;
+ jvalue jVal;
+ jthrowable jthr;
+
//Get the JNIEnv* corresponding to current thread
JNIEnv* env = getJNIEnv();
if (env == NULL) {
@@ -1509,15 +1539,14 @@ int hdfsAvailable(hdfsFS fs, hdfsFile f)
}
//Sanity check
- if (!f || f->type != INPUT) {
+ if (!f || f->type != HDFS_STREAM_INPUT) {
errno = EBADF;
return -1;
}
//Parameters
- jobject jInputStream = f->file;
- jvalue jVal;
- jthrowable jthr = invokeMethod(env, &jVal, INSTANCE, jInputStream,
+ jInputStream = f->file;
+ jthr = invokeMethod(env, &jVal, INSTANCE, jInputStream,
HADOOP_ISTRM, "available", "()I");
if (jthr) {
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -1527,20 +1556,13 @@ int hdfsAvailable(hdfsFS fs, hdfsFile f)
return jVal.i;
}
-static int hdfsCopyImpl(hdfsFS srcFS, const char* src, hdfsFS dstFS,
- const char* dst, jboolean deleteSource)
+static int hdfsCopyImpl(hdfsFS srcFS, const char *src, hdfsFS dstFS,
+ const char *dst, jboolean deleteSource)
{
//JAVA EQUIVALENT
// FileUtil#copy(srcFS, srcPath, dstFS, dstPath,
// deleteSource = false, conf)
- //Get the JNIEnv* corresponding to current thread
- JNIEnv* env = getJNIEnv();
- if (env == NULL) {
- errno = EINTERNAL;
- return -1;
- }
-
//Parameters
jobject jSrcFS = (jobject)srcFS;
jobject jDstFS = (jobject)dstFS;
@@ -1549,6 +1571,13 @@ static int hdfsCopyImpl(hdfsFS srcFS, const char* src, hdfsFS dstFS,
jvalue jVal;
int ret;
+ //Get the JNIEnv* corresponding to current thread
+ JNIEnv* env = getJNIEnv();
+ if (env == NULL) {
+ errno = EINTERNAL;
+ return -1;
+ }
+
jthr = constructNewObjectOfPath(env, src, &jSrcPath);
if (jthr) {
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -1603,22 +1632,28 @@ done:
return 0;
}
-int hdfsCopy(hdfsFS srcFS, const char* src, hdfsFS dstFS, const char* dst)
+int hdfsCopy(hdfsFS srcFS, const char *src, hdfsFS dstFS, const char *dst)
{
return hdfsCopyImpl(srcFS, src, dstFS, dst, 0);
}
-int hdfsMove(hdfsFS srcFS, const char* src, hdfsFS dstFS, const char* dst)
+int hdfsMove(hdfsFS srcFS, const char *src, hdfsFS dstFS, const char *dst)
{
return hdfsCopyImpl(srcFS, src, dstFS, dst, 1);
}
-int hdfsDelete(hdfsFS fs, const char* path, int recursive)
+int hdfsDelete(hdfsFS fs, const char *path, int recursive)
{
// JAVA EQUIVALENT:
// Path p = new Path(path);
// bool retval = fs.delete(p, recursive);
+ jobject jFS = (jobject)fs;
+ jthrowable jthr;
+ jobject jPath;
+ jvalue jVal;
+ jboolean jRecursive;
+
//Get the JNIEnv* corresponding to current thread
JNIEnv* env = getJNIEnv();
if (env == NULL) {
@@ -1626,18 +1661,13 @@ int hdfsDelete(hdfsFS fs, const char* path, int recursive)
return -1;
}
- jobject jFS = (jobject)fs;
- jthrowable jthr;
- jobject jPath;
- jvalue jVal;
-
jthr = constructNewObjectOfPath(env, path, &jPath);
if (jthr) {
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hdfsDelete(path=%s): constructNewObjectOfPath", path);
return -1;
}
- jboolean jRecursive = recursive ? JNI_TRUE : JNI_FALSE;
+ jRecursive = recursive ? JNI_TRUE : JNI_FALSE;
jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
"delete", "(Lorg/apache/hadoop/fs/Path;Z)Z",
jPath, jRecursive);
@@ -1657,13 +1687,19 @@ int hdfsDelete(hdfsFS fs, const char* path, int recursive)
-int hdfsRename(hdfsFS fs, const char* oldPath, const char* newPath)
+int hdfsRename(hdfsFS fs, const char *oldPath, const char *newPath)
{
// JAVA EQUIVALENT:
// Path old = new Path(oldPath);
// Path new = new Path(newPath);
// fs.rename(old, new);
+ jobject jFS = (jobject)fs;
+ jthrowable jthr;
+ jobject jOldPath = NULL, jNewPath = NULL;
+ int ret = -1;
+ jvalue jVal;
+
//Get the JNIEnv* corresponding to current thread
JNIEnv* env = getJNIEnv();
if (env == NULL) {
@@ -1671,12 +1707,6 @@ int hdfsRename(hdfsFS fs, const char* oldPath, const char* newPath)
return -1;
}
- jobject jFS = (jobject)fs;
- jthrowable jthr;
- jobject jOldPath = NULL, jNewPath = NULL;
- int ret = -1;
- jvalue jVal;
-
jthr = constructNewObjectOfPath(env, oldPath, &jOldPath );
if (jthr) {
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -1721,13 +1751,6 @@ char* hdfsGetWorkingDirectory(hdfsFS fs, char* buffer, size_t bufferSize)
// Path p = fs.getWorkingDirectory();
// return p.toString()
- //Get the JNIEnv* corresponding to current thread
- JNIEnv* env = getJNIEnv();
- if (env == NULL) {
- errno = EINTERNAL;
- return NULL;
- }
-
jobject jPath = NULL;
jstring jPathString = NULL;
jobject jFS = (jobject)fs;
@@ -1736,6 +1759,13 @@ char* hdfsGetWorkingDirectory(hdfsFS fs, char* buffer, size_t bufferSize)
int ret;
const char *jPathChars = NULL;
+ //Get the JNIEnv* corresponding to current thread
+ JNIEnv* env = getJNIEnv();
+ if (env == NULL) {
+ errno = EINTERNAL;
+ return NULL;
+ }
+
//FileSystem#getWorkingDirectory()
jthr = invokeMethod(env, &jVal, INSTANCE, jFS,
HADOOP_FS, "getWorkingDirectory",
@@ -1794,11 +1824,15 @@ done:
-int hdfsSetWorkingDirectory(hdfsFS fs, const char* path)
+int hdfsSetWorkingDirectory(hdfsFS fs, const char *path)
{
// JAVA EQUIVALENT:
// fs.setWorkingDirectory(Path(path));
+ jobject jFS = (jobject)fs;
+ jthrowable jthr;
+ jobject jPath;
+
//Get the JNIEnv* corresponding to current thread
JNIEnv* env = getJNIEnv();
if (env == NULL) {
@@ -1806,10 +1840,6 @@ int hdfsSetWorkingDirectory(hdfsFS fs, const char* path)
return -1;
}
- jobject jFS = (jobject)fs;
- jthrowable jthr;
- jobject jPath;
-
//Create an object of org.apache.hadoop.fs.Path
jthr = constructNewObjectOfPath(env, path, &jPath);
if (jthr) {
@@ -1835,11 +1865,16 @@ int hdfsSetWorkingDirectory(hdfsFS fs, const char* path)
-int hdfsCreateDirectory(hdfsFS fs, const char* path)
+int hdfsCreateDirectory(hdfsFS fs, const char *path)
{
// JAVA EQUIVALENT:
// fs.mkdirs(new Path(path));
+ jobject jFS = (jobject)fs;
+ jobject jPath;
+ jthrowable jthr;
+ jvalue jVal;
+
//Get the JNIEnv* corresponding to current thread
JNIEnv* env = getJNIEnv();
if (env == NULL) {
@@ -1847,10 +1882,6 @@ int hdfsCreateDirectory(hdfsFS fs, const char* path)
return -1;
}
- jobject jFS = (jobject)fs;
- jobject jPath;
- jthrowable jthr;
-
//Create an object of org.apache.hadoop.fs.Path
jthr = constructNewObjectOfPath(env, path, &jPath);
if (jthr) {
@@ -1860,7 +1891,6 @@ int hdfsCreateDirectory(hdfsFS fs, const char* path)
}
//Create the directory
- jvalue jVal;
jVal.z = 0;
jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
"mkdirs", "(Lorg/apache/hadoop/fs/Path;)Z",
@@ -1886,11 +1916,16 @@ int hdfsCreateDirectory(hdfsFS fs, const char* path)
}
-int hdfsSetReplication(hdfsFS fs, const char* path, int16_t replication)
+int hdfsSetReplication(hdfsFS fs, const char *path, int16_t replication)
{
// JAVA EQUIVALENT:
// fs.setReplication(new Path(path), replication);
+ jobject jFS = (jobject)fs;
+ jthrowable jthr;
+ jobject jPath;
+ jvalue jVal;
+
//Get the JNIEnv* corresponding to current thread
JNIEnv* env = getJNIEnv();
if (env == NULL) {
@@ -1898,11 +1933,7 @@ int hdfsSetReplication(hdfsFS fs, const char* path, int16_t replication)
return -1;
}
- jobject jFS = (jobject)fs;
- jthrowable jthr;
-
//Create an object of org.apache.hadoop.fs.Path
- jobject jPath;
jthr = constructNewObjectOfPath(env, path, &jPath);
if (jthr) {
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -1911,7 +1942,6 @@ int hdfsSetReplication(hdfsFS fs, const char* path, int16_t replication)
}
//Create the directory
- jvalue jVal;
jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
"setReplication", "(Lorg/apache/hadoop/fs/Path;S)Z",
jPath, replication);
@@ -1932,11 +1962,17 @@ int hdfsSetReplication(hdfsFS fs, const char* path, int16_t replication)
return 0;
}
-int hdfsChown(hdfsFS fs, const char* path, const char *owner, const char *group)
+int hdfsChown(hdfsFS fs, const char *path, const char *owner, const char *group)
{
// JAVA EQUIVALENT:
// fs.setOwner(path, owner, group)
+ jobject jFS = (jobject)fs;
+ jobject jPath = NULL;
+ jstring jOwner = NULL, jGroup = NULL;
+ jthrowable jthr;
+ int ret;
+
//Get the JNIEnv* corresponding to current thread
JNIEnv* env = getJNIEnv();
if (env == NULL) {
@@ -1948,12 +1984,6 @@ int hdfsChown(hdfsFS fs, const char* path, const char *owner, const char *group)
return 0;
}
- jobject jFS = (jobject)fs;
- jobject jPath = NULL;
- jstring jOwner = NULL, jGroup = NULL;
- jthrowable jthr;
- int ret;
-
jthr = constructNewObjectOfPath(env, path, &jPath);
if (jthr) {
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -2001,12 +2031,17 @@ done:
return 0;
}
-int hdfsChmod(hdfsFS fs, const char* path, short mode)
+int hdfsChmod(hdfsFS fs, const char *path, short mode)
{
int ret;
// JAVA EQUIVALENT:
// fs.setPermission(path, FsPermission)
+ jthrowable jthr;
+ jobject jPath = NULL, jPermObj = NULL;
+ jobject jFS = (jobject)fs;
+ jshort jmode = mode;
+
//Get the JNIEnv* corresponding to current thread
JNIEnv* env = getJNIEnv();
if (env == NULL) {
@@ -2014,12 +2049,7 @@ int hdfsChmod(hdfsFS fs, const char* path, short mode)
return -1;
}
- jthrowable jthr;
- jobject jPath = NULL, jPermObj = NULL;
- jobject jFS = (jobject)fs;
-
// construct jPerm = FsPermission.createImmutable(short mode);
- jshort jmode = mode;
jthr = constructNewObjectOfClass(env, &jPermObj,
HADOOP_FSPERM,"(S)V",jmode);
if (jthr) {
@@ -2061,11 +2091,16 @@ done:
return 0;
}
-int hdfsUtime(hdfsFS fs, const char* path, tTime mtime, tTime atime)
+int hdfsUtime(hdfsFS fs, const char *path, tTime mtime, tTime atime)
{
// JAVA EQUIVALENT:
// fs.setTimes(src, mtime, atime)
+
jthrowable jthr;
+ jobject jFS = (jobject)fs;
+ jobject jPath;
+ static const tTime NO_CHANGE = -1;
+ jlong jmtime, jatime;
//Get the JNIEnv* corresponding to current thread
JNIEnv* env = getJNIEnv();
@@ -2074,10 +2109,7 @@ int hdfsUtime(hdfsFS fs, const char* path, tTime mtime, tTime atime)
return -1;
}
- jobject jFS = (jobject)fs;
-
//Create an object of org.apache.hadoop.fs.Path
- jobject jPath;
jthr = constructNewObjectOfPath(env, path, &jPath);
if (jthr) {
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
@@ -2085,9 +2117,8 @@ int hdfsUtime(hdfsFS fs, const char* path, tTime mtime, tTime atime)
return -1;
}
- const tTime NO_CHANGE = -1;
- jlong jmtime = (mtime == NO_CHANGE) ? -1 : (mtime * (jlong)1000);
- jlong jatime = (atime == NO_CHANGE) ? -1 : (atime * (jlong)1000);
+ jmtime = (mtime == NO_CHANGE) ? -1 : (mtime * (jlong)1000);
+ jatime = (atime == NO_CHANGE) ? -1 : (atime * (jlong)1000);
jthr = invokeMethod(env, NULL, INSTANCE, jFS, HADOOP_FS,
"setTimes", JMETHOD3(JPARAM(HADOOP_PATH), "J", "J", JAVA_VOID),
@@ -2397,7 +2428,7 @@ struct hadoopRzBuffer* hadoopReadZero(hdfsFile file,
errno = EINTERNAL;
return NULL;
}
- if (file->type != INPUT) {
+ if (file->type != HDFS_STREAM_INPUT) {
fputs("Cannot read from a non-InputStream object!\n", stderr);
ret = EINVAL;
goto done;
@@ -2495,10 +2526,12 @@ void hadoopRzBufferFree(hdfsFile file, struct hadoopRzBuffer *buffer)
}
char***
-hdfsGetHosts(hdfsFS fs, const char* path, tOffset start, tOffset length)
+hdfsGetHosts(hdfsFS fs, const char *path, tOffset start, tOffset length)
{
// JAVA EQUIVALENT:
// fs.getFileBlockLoctions(new Path(path), start, length);
+
+ jobject jFS = (jobject)fs;
jthrowable jthr;
jobject jPath = NULL;
jobject jFileStatus = NULL;
@@ -2508,6 +2541,9 @@ hdfsGetHosts(hdfsFS fs, const char* path, tOffset start, tOffset length)
char*** blockHosts = NULL;
int i, j, ret;
jsize jNumFileBlocks = 0;
+ jobject jFileBlock;
+ jsize jNumBlockHosts;
+ const char *hostName;
//Get the JNIEnv* corresponding to current thread
JNIEnv* env = getJNIEnv();
@@ -2516,8 +2552,6 @@ hdfsGetHosts(hdfsFS fs, const char* path, tOffset start, tOffset length)
return NULL;
}
- jobject jFS = (jobject)fs;
-
//Create an object of org.apache.hadoop.fs.Path
jthr = constructNewObjectOfPath(env, path, &jPath);
if (jthr) {
@@ -2567,7 +2601,7 @@ hdfsGetHosts(hdfsFS fs, const char* path, tOffset start, tOffset length)
//Now parse each block to get hostnames
for (i = 0; i < jNumFileBlocks; ++i) {
- jobject jFileBlock =
+ jFileBlock =
(*env)->GetObjectArrayElement(env, jBlockLocations, i);
if (!jFileBlock) {
ret = printPendingExceptionAndFree(env, PRINT_EXC_ALL,
@@ -2593,7 +2627,7 @@ hdfsGetHosts(hdfsFS fs, const char* path, tOffset start, tOffset length)
goto done;
}
//Figure out no of hosts in jFileBlockHosts, and allocate the memory
- jsize jNumBlockHosts = (*env)->GetArrayLength(env, jFileBlockHosts);
+ jNumBlockHosts = (*env)->GetArrayLength(env, jFileBlockHosts);
blockHosts[i] = calloc(jNumBlockHosts + 1, sizeof(char*));
if (!blockHosts[i]) {
ret = ENOMEM;
@@ -2601,7 +2635,6 @@ hdfsGetHosts(hdfsFS fs, const char* path, tOffset start, tOffset length)
}
//Now parse each hostname
- const char *hostName;
for (j = 0; j < jNumBlockHosts; ++j) {
jHost = (*env)->GetObjectArrayElement(env, jFileBlockHosts, j);
if (!jHost) {
@@ -2669,6 +2702,10 @@ tOffset hdfsGetDefaultBlockSize(hdfsFS fs)
// JAVA EQUIVALENT:
// fs.getDefaultBlockSize();
+ jobject jFS = (jobject)fs;
+ jvalue jVal;
+ jthrowable jthr;
+
//Get the JNIEnv* corresponding to current thread
JNIEnv* env = getJNIEnv();
if (env == NULL) {
@@ -2676,11 +2713,7 @@ tOffset hdfsGetDefaultBlockSize(hdfsFS fs)
return -1;
}
- jobject jFS = (jobject)fs;
-
//FileSystem#getDefaultBlockSize()
- jvalue jVal;
- jthrowable jthr;
jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
"getDefaultBlockSize", "()J");
if (jthr) {
@@ -2732,6 +2765,11 @@ tOffset hdfsGetCapacity(hdfsFS fs)
// FsStatus fss = fs.getStatus();
// return Fss.getCapacity();
+ jobject jFS = (jobject)fs;
+ jvalue jVal;
+ jthrowable jthr;
+ jobject fss;
+
//Get the JNIEnv* corresponding to current thread
JNIEnv* env = getJNIEnv();
if (env == NULL) {
@@ -2739,11 +2777,7 @@ tOffset hdfsGetCapacity(hdfsFS fs)
return -1;
}
- jobject jFS = (jobject)fs;
-
//FileSystem#getStatus
- jvalue jVal;
- jthrowable jthr;
jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
"getStatus", "()Lorg/apache/hadoop/fs/FsStatus;");
if (jthr) {
@@ -2751,7 +2785,7 @@ tOffset hdfsGetCapacity(hdfsFS fs)
"hdfsGetCapacity: FileSystem#getStatus");
return -1;
}
- jobject fss = (jobject)jVal.l;
+ fss = (jobject)jVal.l;
jthr = invokeMethod(env, &jVal, INSTANCE, fss, HADOOP_FSSTATUS,
"getCapacity", "()J");
destroyLocalReference(env, fss);
@@ -2771,6 +2805,11 @@ tOffset hdfsGetUsed(hdfsFS fs)
// FsStatus fss = fs.getStatus();
// return Fss.getUsed();
+ jobject jFS = (jobject)fs;
+ jvalue jVal;
+ jthrowable jthr;
+ jobject fss;
+
//Get the JNIEnv* corresponding to current thread
JNIEnv* env = getJNIEnv();
if (env == NULL) {
@@ -2778,11 +2817,7 @@ tOffset hdfsGetUsed(hdfsFS fs)
return -1;
}
- jobject jFS = (jobject)fs;
-
//FileSystem#getStatus
- jvalue jVal;
- jthrowable jthr;
jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
"getStatus", "()Lorg/apache/hadoop/fs/FsStatus;");
if (jthr) {
@@ -2790,7 +2825,7 @@ tOffset hdfsGetUsed(hdfsFS fs)
"hdfsGetUsed: FileSystem#getStatus");
return -1;
}
- jobject fss = (jobject)jVal.l;
+ fss = (jobject)jVal.l;
jthr = invokeMethod(env, &jVal, INSTANCE, fss, HADOOP_FSSTATUS,
"getUsed", "()J");
destroyLocalReference(env, fss);
@@ -2814,6 +2849,9 @@ getFileInfoFromStat(JNIEnv *env, jobject jStat, hdfsFileInfo *fileInfo)
jstring jUserName = NULL;
jstring jGroupName = NULL;
jobject jPermission = NULL;
+ const char *cPathName;
+ const char *cUserName;
+ const char *cGroupName;
jthr = invokeMethod(env, &jVal, INSTANCE, jStat,
HADOOP_STAT, "isDir", "()Z");
@@ -2869,7 +2907,7 @@ getFileInfoFromStat(JNIEnv *env, jobject jStat, hdfsFileInfo *fileInfo)
if (jthr)
goto done;
jPathName = jVal.l;
- const char *cPathName =
+ cPathName =
(const char*) ((*env)->GetStringUTFChars(env, jPathName, NULL));
if (!cPathName) {
jthr = getPendingExceptionAndClear(env);
@@ -2882,7 +2920,7 @@ getFileInfoFromStat(JNIEnv *env, jobject jStat, hdfsFileInfo *fileInfo)
if (jthr)
goto done;
jUserName = jVal.l;
- const char* cUserName =
+ cUserName =
(const char*) ((*env)->GetStringUTFChars(env, jUserName, NULL));
if (!cUserName) {
jthr = getPendingExceptionAndClear(env);
@@ -2891,7 +2929,6 @@ getFileInfoFromStat(JNIEnv *env, jobject jStat, hdfsFileInfo *fileInfo)
fileInfo->mOwner = strdup(cUserName);
(*env)->ReleaseStringUTFChars(env, jUserName, cUserName);
- const char* cGroupName;
jthr = invokeMethod(env, &jVal, INSTANCE, jStat, HADOOP_STAT,
"getGroup", "()Ljava/lang/String;");
if (jthr)
@@ -2978,13 +3015,15 @@ getFileInfo(JNIEnv *env, jobject jFS, jobject jPath, hdfsFileInfo **fileInfo)
-hdfsFileInfo* hdfsListDirectory(hdfsFS fs, const char* path, int *numEntries)
+hdfsFileInfo* hdfsListDirectory(hdfsFS fs, const char *path, int *numEntries)
{
// JAVA EQUIVALENT:
// Path p(path);
// Path []pathList = fs.listPaths(p)
// foreach path in pathList
// getFileInfo(path)
+
+ jobject jFS = (jobject)fs;
jthrowable jthr;
jobject jPath = NULL;
hdfsFileInfo *pathList = NULL;
@@ -2992,6 +3031,8 @@ hdfsFileInfo* hdfsListDirectory(hdfsFS fs, const char* path, int *numEntries)
jvalue jVal;
jsize jPathListSize = 0;
int ret;
+ jsize i;
+ jobject tmpStat;
//Get the JNIEnv* corresponding to current thread
JNIEnv* env = getJNIEnv();
@@ -3000,8 +3041,6 @@ hdfsFileInfo* hdfsListDirectory(hdfsFS fs, const char* path, int *numEntries)
return NULL;
}
- jobject jFS = (jobject)fs;
-
//Create an object of org.apache.hadoop.fs.Path
jthr = constructNewObjectOfPath(env, path, &jPath);
if (jthr) {
@@ -3037,8 +3076,6 @@ hdfsFileInfo* hdfsListDirectory(hdfsFS fs, const char* path, int *numEntries)
}
//Save path information in pathList
- jsize i;
- jobject tmpStat;
for (i=0; i < jPathListSize; ++i) {
tmpStat = (*env)->GetObjectArrayElement(env, jPathList, i);
if (!tmpStat) {
@@ -3073,7 +3110,7 @@ done:
-hdfsFileInfo *hdfsGetPathInfo(hdfsFS fs, const char* path)
+hdfsFileInfo *hdfsGetPathInfo(hdfsFS fs, const char *path)
{
// JAVA EQUIVALENT:
// File f(path);
@@ -3082,6 +3119,11 @@ hdfsFileInfo *hdfsGetPathInfo(hdfsFS fs, const char* path)
// fs.getLength(f)
// f.getPath()
+ jobject jFS = (jobject)fs;
+ jobject jPath;
+ jthrowable jthr;
+ hdfsFileInfo *fileInfo;
+
//Get the JNIEnv* corresponding to current thread
JNIEnv* env = getJNIEnv();
if (env == NULL) {
@@ -3089,17 +3131,13 @@ hdfsFileInfo *hdfsGetPathInfo(hdfsFS fs, const char* path)
return NULL;
}
- jobject jFS = (jobject)fs;
-
//Create an object of org.apache.hadoop.fs.Path
- jobject jPath;
- jthrowable jthr = constructNewObjectOfPath(env, path, &jPath);
+ jthr = constructNewObjectOfPath(env, path, &jPath);
if (jthr) {
errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
"hdfsGetPathInfo(%s): constructNewObjectOfPath", path);
return NULL;
}
- hdfsFileInfo *fileInfo;
jthr = getFileInfo(env, jFS, jPath, &fileInfo);
destroyLocalReference(env, jPath);
if (jthr) {
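
The hdfs.c hunks above apply one pattern throughout: every local variable is hoisted above the first statement of its function, and the bare INPUT/OUTPUT/UNINITIALIZED stream constants are replaced by the prefixed HDFS_STREAM_* names, presumably so the file also builds with C89-only compilers such as MSVC's C compiler and so the enum names cannot collide with other headers. A minimal sketch of that declaration-hoisting style, using made-up names (streamKind, describeStream) rather than anything from the patch:

    #include <stdio.h>

    enum streamKind { STREAM_UNINITIALIZED, STREAM_INPUT, STREAM_OUTPUT };

    /* C89 style: all declarations precede the first statement. */
    static int describeStream(enum streamKind kind)
    {
        const char *name;
        int ret;

        if (kind == STREAM_UNINITIALIZED) {
            return -1;
        }
        name = (kind == STREAM_INPUT) ? "input" : "output";
        ret = printf("stream kind: %s\n", name);
        return (ret < 0) ? -1 : 0;
    }

    int main(void)
    {
        return (describeStream(STREAM_INPUT) == 0) ? 0 : 1;
    }
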
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.c b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.c
index 878289f96d..50d968169c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.c
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.c
@@ -19,20 +19,18 @@
#include "config.h"
#include "exception.h"
#include "jni_helper.h"
+#include "platform.h"
+#include "common/htable.h"
+#include "os/mutexes.h"
+#include "os/thread_local_storage.h"
#include <stdio.h>
#include <string.h>
-static pthread_mutex_t hdfsHashMutex = PTHREAD_MUTEX_INITIALIZER;
-static pthread_mutex_t jvmMutex = PTHREAD_MUTEX_INITIALIZER;
-static volatile int hashTableInited = 0;
-
-#define LOCK_HASH_TABLE() pthread_mutex_lock(&hdfsHashMutex)
-#define UNLOCK_HASH_TABLE() pthread_mutex_unlock(&hdfsHashMutex)
-
+static struct htable *gClassRefHTable = NULL;
/** The Native return types that methods could return */
-#define VOID 'V'
+#define JVOID 'V'
#define JOBJECT 'L'
#define JARRAYOBJECT '['
#define JBOOLEAN 'Z'
@@ -51,40 +49,10 @@ static volatile int hashTableInited = 0;
*/
#define MAX_HASH_TABLE_ELEM 4096
-/** Key that allows us to retrieve thread-local storage */
-static pthread_key_t gTlsKey;
-
-/** nonzero if we succeeded in initializing gTlsKey. Protected by the jvmMutex */
-static int gTlsKeyInitialized = 0;
-
-/** Pthreads thread-local storage for each library thread. */
-struct hdfsTls {
- JNIEnv *env;
-};
-
/**
- * The function that is called whenever a thread with libhdfs thread local data
- * is destroyed.
- *
- * @param v The thread-local data
+ * Length of buffer for retrieving created JVMs. (We only ever create one.)
*/
-static void hdfsThreadDestructor(void *v)
-{
- struct hdfsTls *tls = v;
- JavaVM *vm;
- JNIEnv *env = tls->env;
- jint ret;
-
- ret = (*env)->GetJavaVM(env, &vm);
- if (ret) {
- fprintf(stderr, "hdfsThreadDestructor: GetJavaVM failed with "
- "error %d\n", ret);
- (*env)->ExceptionDescribe(env);
- } else {
- (*vm)->DetachCurrentThread(vm);
- }
- free(tls);
-}
+#define VM_BUF_LENGTH 1
void destroyLocalReference(JNIEnv *env, jobject jObject)
{
@@ -138,67 +106,6 @@ jthrowable newCStr(JNIEnv *env, jstring jstr, char **out)
return NULL;
}
-static int hashTableInit(void)
-{
- if (!hashTableInited) {
- LOCK_HASH_TABLE();
- if (!hashTableInited) {
- if (hcreate(MAX_HASH_TABLE_ELEM) == 0) {
- fprintf(stderr, "error creating hashtable, <%d>: %s\n",
- errno, strerror(errno));
- UNLOCK_HASH_TABLE();
- return 0;
- }
- hashTableInited = 1;
- }
- UNLOCK_HASH_TABLE();
- }
- return 1;
-}
-
-
-static int insertEntryIntoTable(const char *key, void *data)
-{
- ENTRY e, *ep;
- if (key == NULL || data == NULL) {
- return 0;
- }
- if (! hashTableInit()) {
- return -1;
- }
- e.data = data;
- e.key = (char*)key;
- LOCK_HASH_TABLE();
- ep = hsearch(e, ENTER);
- UNLOCK_HASH_TABLE();
- if (ep == NULL) {
- fprintf(stderr, "warn adding key (%s) to hash table, <%d>: %s\n",
- key, errno, strerror(errno));
- }
- return 0;
-}
-
-
-
-static void* searchEntryFromTable(const char *key)
-{
- ENTRY e,*ep;
- if (key == NULL) {
- return NULL;
- }
- hashTableInit();
- e.key = (char*)key;
- LOCK_HASH_TABLE();
- ep = hsearch(e, FIND);
- UNLOCK_HASH_TABLE();
- if (ep != NULL) {
- return ep->data;
- }
- return NULL;
-}
-
-
-
jthrowable invokeMethod(JNIEnv *env, jvalue *retval, MethType methType,
jobject instObj, const char *className,
const char *methName, const char *methSignature, ...)
@@ -235,7 +142,7 @@ jthrowable invokeMethod(JNIEnv *env, jvalue *retval, MethType methType,
}
retval->l = jobj;
}
- else if (returnType == VOID) {
+ else if (returnType == JVOID) {
if (methType == STATIC) {
(*env)->CallStaticVoidMethodV(env, cls, mid, args);
}
@@ -325,11 +232,11 @@ jthrowable methodIdFromClass(const char *className, const char *methName,
{
jclass cls;
jthrowable jthr;
+ jmethodID mid = 0;
jthr = globalClassReference(className, env, &cls);
if (jthr)
return jthr;
- jmethodID mid = 0;
jthr = validateMethodType(env, methType);
if (jthr)
return jthr;
@@ -350,25 +257,50 @@ jthrowable methodIdFromClass(const char *className, const char *methName,
jthrowable globalClassReference(const char *className, JNIEnv *env, jclass *out)
{
- jclass clsLocalRef;
- jclass cls = searchEntryFromTable(className);
- if (cls) {
- *out = cls;
- return NULL;
+ jthrowable jthr = NULL;
+ jclass local_clazz = NULL;
+ jclass clazz = NULL;
+ int ret;
+
+ mutexLock(&hdfsHashMutex);
+ if (!gClassRefHTable) {
+ gClassRefHTable = htable_alloc(MAX_HASH_TABLE_ELEM, ht_hash_string,
+ ht_compare_string);
+ if (!gClassRefHTable) {
+ jthr = newRuntimeError(env, "htable_alloc failed\n");
+ goto done;
+ }
}
- clsLocalRef = (*env)->FindClass(env,className);
- if (clsLocalRef == NULL) {
- return getPendingExceptionAndClear(env);
+ clazz = htable_get(gClassRefHTable, className);
+ if (clazz) {
+ *out = clazz;
+ goto done;
}
- cls = (*env)->NewGlobalRef(env, clsLocalRef);
- if (cls == NULL) {
- (*env)->DeleteLocalRef(env, clsLocalRef);
- return getPendingExceptionAndClear(env);
+ local_clazz = (*env)->FindClass(env,className);
+ if (!local_clazz) {
+ jthr = getPendingExceptionAndClear(env);
+ goto done;
}
- (*env)->DeleteLocalRef(env, clsLocalRef);
- insertEntryIntoTable(className, cls);
- *out = cls;
- return NULL;
+ clazz = (*env)->NewGlobalRef(env, local_clazz);
+ if (!clazz) {
+ jthr = getPendingExceptionAndClear(env);
+ goto done;
+ }
+ ret = htable_put(gClassRefHTable, (void*)className, clazz);
+ if (ret) {
+ jthr = newRuntimeError(env, "htable_put failed with error "
+ "code %d\n", ret);
+ goto done;
+ }
+ *out = clazz;
+ jthr = NULL;
+done:
+ mutexUnlock(&hdfsHashMutex);
+ (*env)->DeleteLocalRef(env, local_clazz);
+ if (jthr && clazz) {
+ (*env)->DeleteGlobalRef(env, clazz);
+ }
+ return jthr;
}
jthrowable classNameOfObject(jobject jobj, JNIEnv *env, char **name)
@@ -436,14 +368,24 @@ done:
*/
static JNIEnv* getGlobalJNIEnv(void)
{
- const jsize vmBufLength = 1;
- JavaVM* vmBuf[vmBufLength];
+ JavaVM* vmBuf[VM_BUF_LENGTH];
JNIEnv *env;
jint rv = 0;
jint noVMs = 0;
jthrowable jthr;
+ char *hadoopClassPath;
+ const char *hadoopClassPathVMArg = "-Djava.class.path=";
+ size_t optHadoopClassPathLen;
+ char *optHadoopClassPath;
+ int noArgs = 1;
+ char *hadoopJvmArgs;
+ char jvmArgDelims[] = " ";
+ char *str, *token, *savePtr;
+ JavaVMInitArgs vm_args;
+ JavaVM *vm;
+ JavaVMOption *options;
- rv = JNI_GetCreatedJavaVMs(&(vmBuf[0]), vmBufLength, &noVMs);
+ rv = JNI_GetCreatedJavaVMs(&(vmBuf[0]), VM_BUF_LENGTH, &noVMs);
if (rv != 0) {
fprintf(stderr, "JNI_GetCreatedJavaVMs failed with error: %d\n", rv);
return NULL;
@@ -451,23 +393,19 @@ static JNIEnv* getGlobalJNIEnv(void)
if (noVMs == 0) {
//Get the environment variables for initializing the JVM
- char *hadoopClassPath = getenv("CLASSPATH");
+ hadoopClassPath = getenv("CLASSPATH");
if (hadoopClassPath == NULL) {
fprintf(stderr, "Environment variable CLASSPATH not set!\n");
return NULL;
}
- char *hadoopClassPathVMArg = "-Djava.class.path=";
- size_t optHadoopClassPathLen = strlen(hadoopClassPath) +
+ optHadoopClassPathLen = strlen(hadoopClassPath) +
strlen(hadoopClassPathVMArg) + 1;
- char *optHadoopClassPath = malloc(sizeof(char)*optHadoopClassPathLen);
+ optHadoopClassPath = malloc(sizeof(char)*optHadoopClassPathLen);
snprintf(optHadoopClassPath, optHadoopClassPathLen,
"%s%s", hadoopClassPathVMArg, hadoopClassPath);
// Determine the # of LIBHDFS_OPTS args
- int noArgs = 1;
- char *hadoopJvmArgs = getenv("LIBHDFS_OPTS");
- char jvmArgDelims[] = " ";
- char *str, *token, *savePtr;
+ hadoopJvmArgs = getenv("LIBHDFS_OPTS");
if (hadoopJvmArgs != NULL) {
hadoopJvmArgs = strdup(hadoopJvmArgs);
for (noArgs = 1, str = hadoopJvmArgs; ; noArgs++, str = NULL) {
@@ -480,7 +418,12 @@ static JNIEnv* getGlobalJNIEnv(void)
}
// Now that we know the # args, populate the options array
- JavaVMOption options[noArgs];
+ options = calloc(noArgs, sizeof(JavaVMOption));
+ if (!options) {
+ fputs("Call to calloc failed\n", stderr);
+ free(optHadoopClassPath);
+ return NULL;
+ }
options[0].optionString = optHadoopClassPath;
hadoopJvmArgs = getenv("LIBHDFS_OPTS");
if (hadoopJvmArgs != NULL) {
@@ -495,8 +438,6 @@ static JNIEnv* getGlobalJNIEnv(void)
}
//Create the VM
- JavaVMInitArgs vm_args;
- JavaVM *vm;
vm_args.version = JNI_VERSION_1_2;
vm_args.options = options;
vm_args.nOptions = noArgs;
@@ -508,6 +449,7 @@ static JNIEnv* getGlobalJNIEnv(void)
free(hadoopJvmArgs);
}
free(optHadoopClassPath);
+ free(options);
if (rv != 0) {
fprintf(stderr, "Call to JNI_CreateJavaVM failed "
@@ -523,7 +465,7 @@ static JNIEnv* getGlobalJNIEnv(void)
}
else {
//Attach this thread to the VM
- JavaVM* vm = vmBuf[0];
+ vm = vmBuf[0];
rv = (*vm)->AttachCurrentThread(vm, (void*)&env, 0);
if (rv != 0) {
fprintf(stderr, "Call to AttachCurrentThread "
@@ -557,54 +499,27 @@ static JNIEnv* getGlobalJNIEnv(void)
JNIEnv* getJNIEnv(void)
{
JNIEnv *env;
- struct hdfsTls *tls;
- int ret;
-
-#ifdef HAVE_BETTER_TLS
- static __thread struct hdfsTls *quickTls = NULL;
- if (quickTls)
- return quickTls->env;
-#endif
- pthread_mutex_lock(&jvmMutex);
- if (!gTlsKeyInitialized) {
- ret = pthread_key_create(&gTlsKey, hdfsThreadDestructor);
- if (ret) {
- pthread_mutex_unlock(&jvmMutex);
- fprintf(stderr, "getJNIEnv: pthread_key_create failed with "
- "error %d\n", ret);
- return NULL;
- }
- gTlsKeyInitialized = 1;
+ THREAD_LOCAL_STORAGE_GET_QUICK();
+ mutexLock(&jvmMutex);
+ if (threadLocalStorageGet(&env)) {
+ mutexUnlock(&jvmMutex);
+ return NULL;
}
- tls = pthread_getspecific(gTlsKey);
- if (tls) {
- pthread_mutex_unlock(&jvmMutex);
- return tls->env;
+ if (env) {
+ mutexUnlock(&jvmMutex);
+ return env;
}
env = getGlobalJNIEnv();
- pthread_mutex_unlock(&jvmMutex);
+ mutexUnlock(&jvmMutex);
if (!env) {
- fprintf(stderr, "getJNIEnv: getGlobalJNIEnv failed\n");
- return NULL;
+ fprintf(stderr, "getJNIEnv: getGlobalJNIEnv failed\n");
+ return NULL;
}
- tls = calloc(1, sizeof(struct hdfsTls));
- if (!tls) {
- fprintf(stderr, "getJNIEnv: OOM allocating %zd bytes\n",
- sizeof(struct hdfsTls));
- return NULL;
+ if (threadLocalStorageSet(env)) {
+ return NULL;
}
- tls->env = env;
- ret = pthread_setspecific(gTlsKey, tls);
- if (ret) {
- fprintf(stderr, "getJNIEnv: pthread_setspecific failed with "
- "error code %d\n", ret);
- hdfsThreadDestructor(tls);
- return NULL;
- }
-#ifdef HAVE_BETTER_TLS
- quickTls = tls;
-#endif
+ THREAD_LOCAL_STORAGE_SET_QUICK(env);
return env;
}
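
Besides swapping the hsearch()-based class cache for the new htable and routing all locking through the os/mutexes abstraction, the jni_helper.c hunks replace the C99 variable-length array of JavaVMOption with calloc/free, which is needed on compilers without VLA support. A small stand-alone sketch of that allocation pattern, using a hypothetical vmOption struct instead of the real JNI types:

    #include <stdio.h>
    #include <stdlib.h>

    struct vmOption { const char *optionString; };

    /* Build a dynamically sized option array without a VLA: allocate with
     * calloc, populate, and free it before returning. */
    static int buildOptions(int noArgs)
    {
        int i;
        struct vmOption *options = calloc(noArgs, sizeof(struct vmOption));

        if (!options) {
            fputs("Call to calloc failed\n", stderr);
            return -1;
        }
        for (i = 0; i < noArgs; i++) {
            options[i].optionString = "-Dexample.flag=true"; /* placeholder */
        }
        printf("populated %d options\n", noArgs);
        free(options);
        return 0;
    }

    int main(void)
    {
        return (buildOptions(3) == 0) ? 0 : 1;
    }
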
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.h b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.h
index c09f6a38cb..90accc7c9c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.h
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.h
@@ -24,8 +24,6 @@
#include
#include
-#include
-#include
#include
#define PATH_SEPARATOR ':'
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c
index 77e2f0766d..2c42fa5f20 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c
@@ -21,6 +21,7 @@
#include "hdfs_test.h"
#include "jni_helper.h"
#include "native_mini_dfs.h"
+#include "platform.h"
#include
#include
@@ -347,10 +348,11 @@ error_dlr_nn:
int nmdConfigureHdfsBuilder(struct NativeMiniDfsCluster *cl,
struct hdfsBuilder *bld)
{
- int port, ret;
+ int ret;
+ tPort port;
hdfsBuilderSetNameNode(bld, "localhost");
- port = nmdGetNameNodePort(cl);
+ port = (tPort)nmdGetNameNodePort(cl);
if (port < 0) {
fprintf(stderr, "nmdGetNameNodePort failed with error %d\n", -port);
return EIO;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/mutexes.h b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/mutexes.h
new file mode 100644
index 0000000000..da30bf4974
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/mutexes.h
@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LIBHDFS_MUTEXES_H
+#define LIBHDFS_MUTEXES_H
+
+/*
+ * Defines abstraction over platform-specific mutexes. libhdfs has no formal
+ * initialization function that users would call from a single-threaded context
+ * to initialize the library. This creates a challenge for bootstrapping the
+ * mutexes. To address this, all required mutexes are pre-defined here with
+ * external storage. Platform-specific implementations must guarantee that the
+ * mutexes are initialized via static initialization.
+ */
+
+#include "platform.h"
+
+/** Mutex protecting the class reference hash table. */
+extern mutex hdfsHashMutex;
+
+/** Mutex protecting singleton JVM instance. */
+extern mutex jvmMutex;
+
+/**
+ * Locks a mutex.
+ *
+ * @param m mutex
+ * @return 0 if successful, non-zero otherwise
+ */
+int mutexLock(mutex *m);
+
+/**
+ * Unlocks a mutex.
+ *
+ * @param m mutex
+ * @return 0 if successful, non-zero otherwise
+ */
+int mutexUnlock(mutex *m);
+
+#endif
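
A short usage sketch for this interface (not part of the patch): the same lock/check/unlock shape that the reworked globalClassReference() uses, applied to a hypothetical lazily created resource and reusing jvmMutex purely for illustration. It assumes the libhdfs include paths so that "os/mutexes.h" resolves.

    #include "os/mutexes.h"
    #include <stdlib.h>

    static void *gSharedThing = NULL;   /* hypothetical shared state */

    void *getSharedThing(void)
    {
        void *result = NULL;

        if (mutexLock(&jvmMutex)) {     /* non-zero return means lock failed */
            return NULL;
        }
        if (!gSharedThing) {
            gSharedThing = malloc(16);  /* lazily created under the lock */
        }
        result = gSharedThing;
        mutexUnlock(&jvmMutex);
        return result;
    }
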
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/event/RMAppAttemptUpdateSavedEvent.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/mutexes.c
similarity index 53%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/event/RMAppAttemptUpdateSavedEvent.java
rename to hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/mutexes.c
index 043f067c9b..c4c2f26213 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/event/RMAppAttemptUpdateSavedEvent.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/mutexes.c
@@ -16,23 +16,28 @@
* limitations under the License.
*/
-package org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event;
+#include "os/mutexes.h"
-import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType;
+#include <pthread.h>
+#include <stdio.h>
-public class RMAppAttemptUpdateSavedEvent extends RMAppAttemptEvent {
+mutex hdfsHashMutex = PTHREAD_MUTEX_INITIALIZER;
+mutex jvmMutex = PTHREAD_MUTEX_INITIALIZER;
- final Exception updatedException;
-
- public RMAppAttemptUpdateSavedEvent(ApplicationAttemptId appAttemptId,
- Exception updatedException) {
- super(appAttemptId, RMAppAttemptEventType.ATTEMPT_UPDATE_SAVED);
- this.updatedException = updatedException;
- }
-
- public Exception getUpdatedException() {
- return updatedException;
+int mutexLock(mutex *m) {
+ int ret = pthread_mutex_lock(m);
+ if (ret) {
+ fprintf(stderr, "mutexLock: pthread_mutex_lock failed with error %d\n",
+ ret);
}
+ return ret;
+}
+
+int mutexUnlock(mutex *m) {
+ int ret = pthread_mutex_unlock(m);
+ if (ret) {
+ fprintf(stderr, "mutexUnlock: pthread_mutex_unlock failed with error %d\n",
+ ret);
+ }
+ return ret;
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppNewSavedEvent.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/platform.h
similarity index 63%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppNewSavedEvent.java
rename to hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/platform.h
index 4d1ed14600..c63bbf9e0e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppNewSavedEvent.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/platform.h
@@ -16,21 +16,19 @@
* limitations under the License.
*/
-package org.apache.hadoop.yarn.server.resourcemanager.rmapp;
+#ifndef LIBHDFS_PLATFORM_H
+#define LIBHDFS_PLATFORM_H
-import org.apache.hadoop.yarn.api.records.ApplicationId;
+#include <pthread.h>
-public class RMAppNewSavedEvent extends RMAppEvent {
+/* Use gcc type-checked format arguments. */
+#define TYPE_CHECKED_PRINTF_FORMAT(formatArg, varArgs) \
+ __attribute__((format(printf, formatArg, varArgs)))
- private final Exception storedException;
+/*
+ * Mutex and thread data types defined by pthreads.
+ */
+typedef pthread_mutex_t mutex;
+typedef pthread_t threadId;
- public RMAppNewSavedEvent(ApplicationId appId, Exception storedException) {
- super(appId, RMAppEventType.APP_NEW_SAVED);
- this.storedException = storedException;
- }
-
- public Exception getStoredException() {
- return storedException;
- }
-
-}
\ No newline at end of file
+#endif
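
A sketch (not part of the patch) of how TYPE_CHECKED_PRINTF_FORMAT is intended to be used: attached to a printf-style wrapper so gcc/clang can check the format string against its arguments on POSIX, while the Windows platform.h expands it to nothing. The logError function is hypothetical, and the libhdfs include path is assumed so that "platform.h" resolves to the per-OS header.

    #include "platform.h"
    #include <stdarg.h>
    #include <stdio.h>

    /* Format string is argument 1, the variadic arguments start at 2. */
    static void logError(const char *fmt, ...) TYPE_CHECKED_PRINTF_FORMAT(1, 2);

    static void logError(const char *fmt, ...)
    {
        va_list ap;
        va_start(ap, fmt);
        vfprintf(stderr, fmt, ap);
        va_end(ap);
    }

    int main(void)
    {
        logError("open failed on %s with error %d\n", "/tmp/example", 5);
        /* logError("%d\n", "oops"); would be diagnosed at compile time on gcc */
        return 0;
    }
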
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/thread.c b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/thread.c
new file mode 100644
index 0000000000..af0c61f03d
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/thread.c
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "os/thread.h"
+
+#include <pthread.h>
+#include <stdio.h>
+
+/**
+ * Defines a helper function that adapts the function pointer provided by the
+ * caller to the type required by pthread_create.
+ *
+ * @param toRun thread to run
+ * @return void* result of running thread (always NULL)
+ */
+static void* runThread(void *toRun) {
+ const thread *t = toRun;
+ t->start(t->arg);
+ return NULL;
+}
+
+int threadCreate(thread *t) {
+ int ret;
+ ret = pthread_create(&t->id, NULL, runThread, t);
+ if (ret) {
+ fprintf(stderr, "threadCreate: pthread_create failed with error %d\n", ret);
+ }
+ return ret;
+}
+
+int threadJoin(const thread *t) {
+ int ret = pthread_join(t->id, NULL);
+ if (ret) {
+ fprintf(stderr, "threadJoin: pthread_join failed with error %d\n", ret);
+ }
+ return ret;
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/thread_local_storage.c b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/thread_local_storage.c
new file mode 100644
index 0000000000..2f70e2cb1d
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/posix/thread_local_storage.c
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "os/thread_local_storage.h"
+
+#include <jni.h>
+#include <pthread.h>
+#include <stdio.h>
+
+/** Key that allows us to retrieve thread-local storage */
+static pthread_key_t gTlsKey;
+
+/** nonzero if we succeeded in initializing gTlsKey. Protected by the jvmMutex */
+static int gTlsKeyInitialized = 0;
+
+/**
+ * The function that is called whenever a thread with libhdfs thread local data
+ * is destroyed.
+ *
+ * @param v The thread-local data
+ */
+static void hdfsThreadDestructor(void *v)
+{
+ JavaVM *vm;
+ JNIEnv *env = v;
+ jint ret;
+
+ ret = (*env)->GetJavaVM(env, &vm);
+ if (ret) {
+ fprintf(stderr, "hdfsThreadDestructor: GetJavaVM failed with error %d\n",
+ ret);
+ (*env)->ExceptionDescribe(env);
+ } else {
+ (*vm)->DetachCurrentThread(vm);
+ }
+}
+
+int threadLocalStorageGet(JNIEnv **env)
+{
+ int ret = 0;
+ if (!gTlsKeyInitialized) {
+ ret = pthread_key_create(&gTlsKey, hdfsThreadDestructor);
+ if (ret) {
+ fprintf(stderr,
+ "threadLocalStorageGet: pthread_key_create failed with error %d\n",
+ ret);
+ return ret;
+ }
+ gTlsKeyInitialized = 1;
+ }
+ *env = pthread_getspecific(gTlsKey);
+ return ret;
+}
+
+int threadLocalStorageSet(JNIEnv *env)
+{
+ int ret = pthread_setspecific(gTlsKey, env);
+ if (ret) {
+ fprintf(stderr,
+ "threadLocalStorageSet: pthread_setspecific failed with error %d\n",
+ ret);
+ hdfsThreadDestructor(env);
+ }
+ return ret;
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/thread.h b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/thread.h
new file mode 100644
index 0000000000..ae425d3564
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/thread.h
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LIBHDFS_THREAD_H
+#define LIBHDFS_THREAD_H
+
+/*
+ * Defines abstraction over platform-specific threads.
+ */
+
+#include "platform.h"
+
+/** Pointer to function to run in thread. */
+typedef void (*threadProcedure)(void *);
+
+/** Structure containing a thread's ID, starting address and argument. */
+typedef struct {
+ threadId id;
+ threadProcedure start;
+ void *arg;
+} thread;
+
+/**
+ * Creates and immediately starts a new thread.
+ *
+ * @param t thread to create
+ * @return 0 if successful, non-zero otherwise
+ */
+int threadCreate(thread *t);
+
+/**
+ * Joins to the given thread, blocking if necessary.
+ *
+ * @param t thread to join
+ * @return 0 if successful, non-zero otherwise
+ */
+int threadJoin(const thread *t);
+
+#endif
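
Usage sketch for this wrapper (not in the patch): fill in start and arg, call threadCreate, then threadJoin, checking the zero/non-zero return convention described above. printGreeting is a made-up worker function, and the libhdfs include path is assumed.

    #include "os/thread.h"
    #include <stdio.h>

    static void printGreeting(void *arg)
    {
        printf("hello from thread, arg=%s\n", (const char *)arg);
    }

    int main(void)
    {
        thread t;

        t.start = printGreeting;
        t.arg = "worker-1";
        if (threadCreate(&t)) {       /* non-zero means creation failed */
            return 1;
        }
        return threadJoin(&t) ? 1 : 0;
    }
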
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/thread_local_storage.h b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/thread_local_storage.h
new file mode 100644
index 0000000000..a40d5671b9
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/thread_local_storage.h
@@ -0,0 +1,75 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LIBHDFS_THREAD_LOCAL_STORAGE_H
+#define LIBHDFS_THREAD_LOCAL_STORAGE_H
+
+/*
+ * Defines abstraction over platform-specific thread-local storage. libhdfs
+ * currently only needs thread-local storage for a single piece of data: the
+ * thread's JNIEnv. For simplicity, this interface is defined in terms of
+ * JNIEnv, not general-purpose thread-local storage of any arbitrary data.
+ */
+
+#include <jni.h>
+
+/*
+ * Most operating systems support the more efficient __thread construct, which
+ * is initialized by the linker. The following macros use this technique on the
+ * operating systems that support it.
+ */
+#ifdef HAVE_BETTER_TLS
+ #define THREAD_LOCAL_STORAGE_GET_QUICK() \
+ static __thread JNIEnv *quickTlsEnv = NULL; \
+ { \
+ if (quickTlsEnv) { \
+ return quickTlsEnv; \
+ } \
+ }
+
+ #define THREAD_LOCAL_STORAGE_SET_QUICK(env) \
+ { \
+ quickTlsEnv = (env); \
+ }
+#else
+ #define THREAD_LOCAL_STORAGE_GET_QUICK()
+ #define THREAD_LOCAL_STORAGE_SET_QUICK(env)
+#endif
+
+/**
+ * Gets the JNIEnv in thread-local storage for the current thread. If the call
+ * succeeds, and there is a JNIEnv associated with this thread, then returns 0
+ * and populates env. If the call succeeds, but there is no JNIEnv associated
+ * with this thread, then returns 0 and sets JNIEnv to NULL. If the call fails,
+ * then returns non-zero. Only one thread at a time may execute this function.
+ * The caller is responsible for enforcing mutual exclusion.
+ *
+ * @param env JNIEnv out parameter
+ * @return 0 if successful, non-zero otherwise
+ */
+int threadLocalStorageGet(JNIEnv **env);
+
+/**
+ * Sets the JNIEnv in thread-local storage for the current thread.
+ *
+ * @param env JNIEnv to set
+ * @return 0 if successful, non-zero otherwise
+ */
+int threadLocalStorageSet(JNIEnv *env);
+
+#endif
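
A caller-side sketch (not in the patch): the contract above says the caller serializes threadLocalStorageGet/Set, so the canonical shape is lock, get, create and set on a miss, unlock, with the QUICK macros as a lock-free fast path where __thread is available. createEnvSomehow is a hypothetical stand-in for attaching to or creating the JVM, and the libhdfs include paths are assumed.

    #include "os/mutexes.h"
    #include "os/thread_local_storage.h"

    /* Hypothetical: in libhdfs this is where the JVM would be created or the
     * thread attached; here it simply reports failure. */
    static JNIEnv *createEnvSomehow(void)
    {
        return NULL;
    }

    JNIEnv *getCachedEnv(void)
    {
        JNIEnv *env;
        THREAD_LOCAL_STORAGE_GET_QUICK();          /* may return immediately */
        if (mutexLock(&jvmMutex)) {
            return NULL;
        }
        if (threadLocalStorageGet(&env)) {         /* TLS key creation failed */
            mutexUnlock(&jvmMutex);
            return NULL;
        }
        if (!env) {
            env = createEnvSomehow();
            if (env && threadLocalStorageSet(env)) {
                env = NULL;
            }
        }
        mutexUnlock(&jvmMutex);
        if (env) {
            THREAD_LOCAL_STORAGE_SET_QUICK(env);
        }
        return env;
    }
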
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppUpdateSavedEvent.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/inttypes.h
similarity index 62%
rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppUpdateSavedEvent.java
rename to hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/inttypes.h
index 42072f8cf6..a520d15a24 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppUpdateSavedEvent.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/inttypes.h
@@ -16,21 +16,13 @@
* limitations under the License.
*/
-package org.apache.hadoop.yarn.server.resourcemanager.rmapp;
+#ifndef LIBHDFS_INTTYPES_H
+#define LIBHDFS_INTTYPES_H
-import org.apache.hadoop.yarn.api.records.ApplicationId;
+/* On Windows, inttypes.h does not exist, so manually define what we need. */
-public class RMAppUpdateSavedEvent extends RMAppEvent {
+#define PRId64 "I64d"
+#define PRIu64 "I64u"
+typedef unsigned __int64 uint64_t;
- private final Exception updatedException;
-
- public RMAppUpdateSavedEvent(ApplicationId appId, Exception updatedException) {
- super(appId, RMAppEventType.APP_UPDATE_SAVED);
- this.updatedException = updatedException;
- }
-
- public Exception getUpdatedException() {
- return updatedException;
- }
-
-}
+#endif
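
A tiny sketch (not in the patch) of what the shim buys: the same fprintf line compiles whether PRIu64 comes from a real <inttypes.h> or from this Windows fallback, where it expands to MSVC's "%I64u". It assumes the os/windows directory is on the include path of Windows builds so this file stands in for the missing system header.

    #include <stdio.h>
    #include <inttypes.h>   /* on Windows builds, the shim above is found */

    int main(void)
    {
        uint64_t bytesRead = 1234567890123ULL;
        fprintf(stderr, "bytesRead=%" PRIu64 "\n", bytesRead);
        return 0;
    }
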
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/mutexes.c b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/mutexes.c
new file mode 100644
index 0000000000..875f03386a
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/mutexes.c
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "os/mutexes.h"
+
+#include <windows.h>
+
+mutex hdfsHashMutex;
+mutex jvmMutex;
+
+/**
+ * Unfortunately, there is no simple static initializer for a critical section.
+ * Instead, the API requires calling InitializeCriticalSection. Since libhdfs
+ * lacks an explicit initialization function, there is no obvious existing place
+ * for the InitializeCriticalSection calls. To work around this, we define an
+ * initialization function and instruct the linker to set a pointer to that
+ * function as a user-defined global initializer. See discussion of CRT
+ * Initialization:
+ * http://msdn.microsoft.com/en-us/library/bb918180.aspx
+ */
+static void __cdecl initializeMutexes(void) {
+ InitializeCriticalSection(&hdfsHashMutex);
+ InitializeCriticalSection(&jvmMutex);
+}
+#pragma section(".CRT$XCU", read)
+__declspec(allocate(".CRT$XCU"))
+const void (__cdecl *pInitialize)(void) = initializeMutexes;
+
+int mutexLock(mutex *m) {
+ EnterCriticalSection(m);
+ return 0;
+}
+
+int mutexUnlock(mutex *m) {
+ LeaveCriticalSection(m);
+ return 0;
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/platform.h b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/platform.h
new file mode 100644
index 0000000000..9eedfdecda
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/platform.h
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LIBHDFS_PLATFORM_H
+#define LIBHDFS_PLATFORM_H
+
+#include <stdio.h>
+#include <windows.h>
+#include <winsock.h>
+
+/*
+ * O_ACCMODE defined to match Linux definition.
+ */
+#ifndef O_ACCMODE
+#define O_ACCMODE 0x0003
+#endif
+
+/*
+ * Windows has a different name for its maximum path length constant.
+ */
+#ifndef PATH_MAX
+#define PATH_MAX MAX_PATH
+#endif
+
+/*
+ * Windows does not define EDQUOT and ESTALE in errno.h. The closest equivalents
+ * are these constants from winsock.h.
+ */
+#ifndef EDQUOT
+#define EDQUOT WSAEDQUOT
+#endif
+
+#ifndef ESTALE
+#define ESTALE WSAESTALE
+#endif
+
+/*
+ * gcc-style type-checked format arguments are not supported on Windows, so just
+ * stub this macro.
+ */
+#define TYPE_CHECKED_PRINTF_FORMAT(formatArg, varArgs)
+
+/*
+ * Define macros for various string formatting functions not defined on Windows.
+ * Where possible, we reroute to one of the secure CRT variants. On Windows,
+ * the preprocessor does support variadic macros, even though they weren't
+ * defined until C99.
+ */
+#define snprintf(str, size, format, ...) \
+ _snprintf_s((str), (size), _TRUNCATE, (format), __VA_ARGS__)
+#define strncpy(dest, src, n) \
+ strncpy_s((dest), (n), (src), _TRUNCATE)
+#define strtok_r(str, delim, saveptr) \
+ strtok_s((str), (delim), (saveptr))
+#define vsnprintf(str, size, format, ...) \
+ vsnprintf_s((str), (size), _TRUNCATE, (format), __VA_ARGS__)
+
+/*
+ * Mutex data type defined as Windows CRITICAL_SECTION. A critical section (not
+ * Windows mutex) is used, because libhdfs only needs synchronization of multiple
+ * threads within a single process, not synchronization across process
+ * boundaries.
+ */
+typedef CRITICAL_SECTION mutex;
+
+/*
+ * Thread data type defined as HANDLE to a Windows thread.
+ */
+typedef HANDLE threadId;
+
+#endif
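To show how the shims above are meant to be consumed (a hedged sketch, not part of the patch; it assumes the build adds the per-OS directory, os/windows or os/posix, to the include path so that "platform.h" resolves to the matching variant): the same C source then compiles on both platforms, with the macros expanding to the secure CRT calls on Windows.

#include <stdio.h>
#include <string.h>

#include "platform.h"

int formatAndTokenize(void) {
  char path[64];
  char csv[] = "alpha,beta,gamma";
  char *state = NULL;
  char *tok;

  /* _snprintf_s(path, sizeof(path), _TRUNCATE, ...) on Windows. */
  snprintf(path, sizeof(path), "/tmp/%s-%d", "block", 42);

  /* strtok_s on Windows, the real strtok_r on POSIX. */
  for (tok = strtok_r(csv, ",", &state); tok != NULL;
       tok = strtok_r(NULL, ",", &state)) {
    fprintf(stderr, "%s: token %s\n", path, tok);
  }
  return 0;
}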
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/thread.c b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/thread.c
new file mode 100644
index 0000000000..90450d8473
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/thread.c
@@ -0,0 +1,66 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "os/thread.h"
+
+#include <stdio.h>
+#include <windows.h>
+
+/**
+ * Defines a helper function that adapts function pointer provided by caller to
+ * the type required by CreateThread.
+ *
+ * @param toRun thread to run
+ * @return DWORD result of running thread (always 0)
+ */
+static DWORD runThread(LPVOID toRun) {
+ const thread *t = toRun;
+ t->start(t->arg);
+ return 0;
+}
+
+int threadCreate(thread *t) {
+ DWORD ret = 0;
+ HANDLE h;
+ h = CreateThread(NULL, 0, runThread, t, 0, NULL);
+ if (h) {
+ t->id = h;
+ } else {
+ ret = GetLastError();
+ fprintf(stderr, "threadCreate: CreateThread failed with error %d\n", ret);
+ }
+ return ret;
+}
+
+int threadJoin(const thread *t) {
+ DWORD ret = WaitForSingleObject(t->id, INFINITE);
+ switch (ret) {
+ case WAIT_OBJECT_0:
+ break;
+ case WAIT_FAILED:
+ ret = GetLastError();
+ fprintf(stderr, "threadJoin: WaitForSingleObject failed with error %d\n",
+ ret);
+ break;
+ default:
+ fprintf(stderr, "threadJoin: WaitForSingleObject unexpected error %d\n",
+ ret);
+ break;
+ }
+ return ret;
+}
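A hedged usage sketch for the threadCreate/threadJoin wrapper above (not part of the patch): the layout of struct thread is inferred from this file, so the start, arg and id members are assumed to be what os/thread.h declares.

#include <stdio.h>

#include "os/thread.h"

static void printGreeting(void *arg) {
  fprintf(stderr, "hello from worker thread: %s\n", (const char *)arg);
}

int runOneWorker(void) {
  thread t;
  int ret;

  t.start = printGreeting;        /* routine the new thread executes */
  t.arg = (void *)"libhdfs";      /* opaque argument handed to start */

  ret = threadCreate(&t);         /* CreateThread on Windows */
  if (ret) {
    return ret;                   /* GetLastError() value on failure */
  }
  return threadJoin(&t);          /* WaitForSingleObject(..., INFINITE) */
}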
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/thread_local_storage.c b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/thread_local_storage.c
new file mode 100644
index 0000000000..70ad152407
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/thread_local_storage.c
@@ -0,0 +1,164 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "os/thread_local_storage.h"
+
+#include <jni.h>
+#include <stdio.h>
+#include <windows.h>
+
+/** Key that allows us to retrieve thread-local storage */
+static DWORD gTlsIndex = TLS_OUT_OF_INDEXES;
+
+/**
+ * If the current thread has a JNIEnv in thread-local storage, then detaches the
+ * current thread from the JVM.
+ */
+static void detachCurrentThreadFromJvm()
+{
+ JNIEnv *env = NULL;
+ JavaVM *vm;
+ jint ret;
+ if (threadLocalStorageGet(&env) || !env) {
+ return;
+ }
+ ret = (*env)->GetJavaVM(env, &vm);
+ if (ret) {
+ fprintf(stderr,
+ "detachCurrentThreadFromJvm: GetJavaVM failed with error %d\n",
+ ret);
+ (*env)->ExceptionDescribe(env);
+ } else {
+ (*vm)->DetachCurrentThread(vm);
+ }
+}
+
+/**
+ * Unlike pthreads, the Windows API does not seem to provide a convenient way to
+ * hook a callback onto thread shutdown. However, the Windows portable
+ * executable format does define a concept of thread-local storage callbacks.
+ * Here, we define a function and instruct the linker to set a pointer to that
+ * function in the segment for thread-local storage callbacks. See page 85 of
+ * Microsoft Portable Executable and Common Object File Format Specification:
+ * http://msdn.microsoft.com/en-us/gg463119.aspx
+ * This technique only works for implicit linking (OS loads DLL on demand), not
+ * for explicit linking (user code calls LoadLibrary directly). This effectively
+ * means that we have a known limitation: libhdfs may not work correctly if a
+ * Windows application attempts to use it via explicit linking.
+ *
+ * @param h module handle
+ * @param reason the reason for calling the callback
+ * @param pv reserved, unused
+ */
+static void NTAPI tlsCallback(PVOID h, DWORD reason, PVOID pv)
+{
+ DWORD tlsIndex;
+ switch (reason) {
+ case DLL_THREAD_DETACH:
+ detachCurrentThreadFromJvm();
+ break;
+ case DLL_PROCESS_DETACH:
+ detachCurrentThreadFromJvm();
+ tlsIndex = gTlsIndex;
+ gTlsIndex = TLS_OUT_OF_INDEXES;
+ if (!TlsFree(tlsIndex)) {
+ fprintf(stderr, "tlsCallback: TlsFree failed with error %d\n",
+ GetLastError());
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * A variable named _tls_used contains the TLS directory, which contains a list
+ * of pointers to callback functions. Normally, the linker won't retain this
+ * variable unless the executable has implicit thread-local variables, defined
+ * using the __declspec(thread) extended storage-class modifier. libhdfs
+ * doesn't use __declspec(thread), and we have no guarantee that the executable
+ * linked to libhdfs will use __declspec(thread). By forcing the linker to
+ * reference _tls_used, we guarantee that the binary retains the TLS directory.
+ * See Microsoft Visual Studio 10.0/VC/crt/src/tlssup.c .
+ */
+#pragma comment(linker, "/INCLUDE:_tls_used")
+
+/*
+ * We must retain a pointer to the callback function. Force the linker to keep
+ * this symbol, even though it appears that nothing in our source code uses it.
+ */
+#pragma comment(linker, "/INCLUDE:pTlsCallback")
+
+/*
+ * Define constant pointer to our callback, and tell the linker to pin it into
+ * the TLS directory so that it receives thread callbacks. Use external linkage
+ * to protect against the linker discarding the seemingly unused symbol.
+ */
+#pragma const_seg(".CRT$XLB")
+extern const PIMAGE_TLS_CALLBACK pTlsCallback;
+const PIMAGE_TLS_CALLBACK pTlsCallback = tlsCallback;
+#pragma const_seg()
+
+int threadLocalStorageGet(JNIEnv **env)
+{
+ LPVOID tls;
+ DWORD ret;
+ if (TLS_OUT_OF_INDEXES == gTlsIndex) {
+ gTlsIndex = TlsAlloc();
+ if (TLS_OUT_OF_INDEXES == gTlsIndex) {
+ fprintf(stderr,
+ "threadLocalStorageGet: TlsAlloc failed with error %d\n",
+ TLS_OUT_OF_INDEXES);
+ return TLS_OUT_OF_INDEXES;
+ }
+ }
+ tls = TlsGetValue(gTlsIndex);
+ if (tls) {
+ *env = tls;
+ return 0;
+ } else {
+ ret = GetLastError();
+ if (ERROR_SUCCESS == ret) {
+ /* Thread-local storage contains NULL, because we haven't set it yet. */
+ *env = NULL;
+ return 0;
+ } else {
+ /*
+ * The API call failed. According to documentation, TlsGetValue cannot
+ * fail as long as the index is a valid index from a successful TlsAlloc
+ * call. This error handling is purely defensive.
+ */
+ fprintf(stderr,
+ "threadLocalStorageGet: TlsGetValue failed with error %d\n", ret);
+ return ret;
+ }
+ }
+}
+
+int threadLocalStorageSet(JNIEnv *env)
+{
+ DWORD ret = 0;
+ if (!TlsSetValue(gTlsIndex, (LPVOID)env)) {
+ ret = GetLastError();
+ fprintf(stderr,
+ "threadLocalStorageSet: TlsSetValue failed with error %d\n",
+ ret);
+ detachCurrentThreadFromJvm(env);
+ }
+ return ret;
+}
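For contrast, a hedged sketch of the pthreads idiom that the TLS callback above emulates (illustrative names, not taken from libhdfs): a destructor registered with pthread_key_create runs automatically when a thread exits with a non-NULL value stored under the key, which is where a JNI-based library would call DetachCurrentThread.

#include <pthread.h>
#include <stdio.h>

static pthread_key_t gKey;
static pthread_once_t gKeyOnce = PTHREAD_ONCE_INIT;

/* Invoked by the pthreads runtime at thread exit, much like DLL_THREAD_DETACH. */
static void onThreadExit(void *value) {
  fprintf(stderr, "thread exiting with stored value %p\n", value);
}

static void makeKey(void) {
  pthread_key_create(&gKey, onThreadExit);
}

int storePerThreadValue(void *value) {
  pthread_once(&gKeyOnce, makeKey);
  return pthread_setspecific(gKey, value);
}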
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/unistd.h b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/unistd.h
new file mode 100644
index 0000000000..b82ce48968
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/os/windows/unistd.h
@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LIBHDFS_UNISTD_H
+#define LIBHDFS_UNISTD_H
+
+/* On Windows, unistd.h does not exist, so manually define what we need. */
+
+#include <process.h> /* Declares getpid(). */
+#include <windows.h>
+
+/* Re-route sleep to Sleep, converting units from seconds to milliseconds. */
+#define sleep(seconds) Sleep((seconds) * 1000)
+#endif
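A tiny, hedged usage sketch for the sleep() shim above (not part of the patch): code written against POSIX sleep(seconds) keeps compiling unchanged, with the macro turning the call into Sleep(milliseconds) on Windows.

#include <unistd.h>   /* resolves to this shim when os/windows is on the include path */

static void backOff(int attemptSeconds) {
  /* sleep(n) on POSIX; expands to Sleep(n * 1000) on Windows. */
  sleep(attemptSeconds);
}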
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_ops.c b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_ops.c
index c2a0cbd218..a6e1a13abb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_ops.c
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_ops.c
@@ -18,6 +18,7 @@
#include "hdfs.h"
#include "hdfs_test.h"
+#include "platform.h"
#include
#include
@@ -28,12 +29,13 @@
#include
void permission_disp(short permissions, char *rtr) {
- rtr[9] = '\0';
int i;
+ short permissionsId;
+ char* perm;
+ rtr[9] = '\0';
for(i=2;i>=0;i--)
{
- short permissionsId = permissions >> (i * 3) & (short)7;
- char* perm;
+ permissionsId = permissions >> (i * 3) & (short)7;
switch(permissionsId) {
case 7:
perm = "rwx"; break;
@@ -60,35 +62,56 @@ void permission_disp(short permissions, char *rtr) {
}
int main(int argc, char **argv) {
- char buffer[32];
- tSize num_written_bytes;
+ const char *writePath = "/tmp/testfile.txt";
+ const char *fileContents = "Hello, World!";
+ const char *readPath = "/tmp/testfile.txt";
+ const char *srcPath = "/tmp/testfile.txt";
+ const char *dstPath = "/tmp/testfile2.txt";
+ const char *slashTmp = "/tmp";
+ const char *newDirectory = "/tmp/newdir";
+ const char *newOwner = "root";
+ const char *tuser = "nobody";
+ const char *appendPath = "/tmp/appends";
+ const char *userPath = "/tmp/usertestfile.txt";
- hdfsFS fs = hdfsConnectNewInstance("default", 0);
+ char buffer[32], buffer2[256], rdbuffer[32];
+ tSize num_written_bytes, num_read_bytes;
+ hdfsFS fs, lfs;
+ hdfsFile writeFile, readFile, localFile, appendFile, userFile;
+ tOffset currentPos, seekPos;
+ int exists, totalResult, result, numEntries, i, j;
+ const char *resp;
+ hdfsFileInfo *fileInfo, *fileList, *finfo;
+ char *buffer3;
+ char permissions[10];
+ char ***hosts;
+ short newPerm = 0666;
+ tTime newMtime, newAtime;
+
+ fs = hdfsConnectNewInstance("default", 0);
if(!fs) {
fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
exit(-1);
}
- hdfsFS lfs = hdfsConnectNewInstance(NULL, 0);
+ lfs = hdfsConnectNewInstance(NULL, 0);
if(!lfs) {
fprintf(stderr, "Oops! Failed to connect to 'local' hdfs!\n");
exit(-1);
}
- const char* writePath = "/tmp/testfile.txt";
- const char* fileContents = "Hello, World!";
-
{
//Write tests
- hdfsFile writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
+ writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
if(!writeFile) {
fprintf(stderr, "Failed to open %s for writing!\n", writePath);
exit(-1);
}
fprintf(stderr, "Opened %s for writing successfully...\n", writePath);
num_written_bytes =
- hdfsWrite(fs, writeFile, (void*)fileContents, strlen(fileContents)+1);
+ hdfsWrite(fs, writeFile, (void*)fileContents,
+ (tSize)(strlen(fileContents)+1));
if (num_written_bytes != strlen(fileContents) + 1) {
fprintf(stderr, "Failed to write correct number of bytes - expected %d, got %d\n",
(int)(strlen(fileContents) + 1), (int)num_written_bytes);
@@ -96,7 +119,7 @@ int main(int argc, char **argv) {
}
fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
- tOffset currentPos = -1;
+ currentPos = -1;
if ((currentPos = hdfsTell(fs, writeFile)) == -1) {
fprintf(stderr,
"Failed to get current file position correctly! Got %ld!\n",
@@ -123,15 +146,14 @@ int main(int argc, char **argv) {
{
//Read tests
- const char* readPath = "/tmp/testfile.txt";
- int exists = hdfsExists(fs, readPath);
+ exists = hdfsExists(fs, readPath);
if (exists) {
fprintf(stderr, "Failed to validate existence of %s\n", readPath);
exit(-1);
}
- hdfsFile readFile = hdfsOpenFile(fs, readPath, O_RDONLY, 0, 0, 0);
+ readFile = hdfsOpenFile(fs, readPath, O_RDONLY, 0, 0, 0);
if (!readFile) {
fprintf(stderr, "Failed to open %s for reading!\n", readPath);
exit(-1);
@@ -146,13 +168,13 @@ int main(int argc, char **argv) {
fprintf(stderr, "hdfsAvailable: %d\n", hdfsAvailable(fs, readFile));
- tOffset seekPos = 1;
+ seekPos = 1;
if(hdfsSeek(fs, readFile, seekPos)) {
fprintf(stderr, "Failed to seek %s for reading!\n", readPath);
exit(-1);
}
- tOffset currentPos = -1;
+ currentPos = -1;
if((currentPos = hdfsTell(fs, readFile)) != seekPos) {
fprintf(stderr,
"Failed to get current file position correctly! Got %ld!\n",
@@ -175,7 +197,7 @@ int main(int argc, char **argv) {
exit(-1);
}
memset(buffer, 0, sizeof(buffer));
- tSize num_read_bytes = hdfsRead(fs, readFile, (void*)buffer,
+ num_read_bytes = hdfsRead(fs, readFile, (void*)buffer,
sizeof(buffer));
if (strncmp(fileContents, buffer, strlen(fileContents)) != 0) {
fprintf(stderr, "Failed to read (direct). Expected %s but got %s (%d bytes)\n",
@@ -208,14 +230,14 @@ int main(int argc, char **argv) {
hdfsCloseFile(fs, readFile);
// Test correct behaviour for unsupported filesystems
- hdfsFile localFile = hdfsOpenFile(lfs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
+ localFile = hdfsOpenFile(lfs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
if(!localFile) {
fprintf(stderr, "Failed to open %s for writing!\n", writePath);
exit(-1);
}
num_written_bytes = hdfsWrite(lfs, localFile, (void*)fileContents,
- strlen(fileContents) + 1);
+ (tSize)(strlen(fileContents) + 1));
hdfsCloseFile(lfs, localFile);
localFile = hdfsOpenFile(lfs, writePath, O_RDONLY, 0, 0, 0);
@@ -229,50 +251,43 @@ int main(int argc, char **argv) {
hdfsCloseFile(lfs, localFile);
}
- int totalResult = 0;
- int result = 0;
+ totalResult = 0;
+ result = 0;
{
//Generic file-system operations
- const char* srcPath = "/tmp/testfile.txt";
- const char* dstPath = "/tmp/testfile2.txt";
-
- fprintf(stderr, "hdfsCopy(remote-local): %s\n", ((result = hdfsCopy(fs, srcPath, lfs, srcPath)) ? "Failed!" : "Success!"));
+ fprintf(stderr, "hdfsCopy(remote-local): %s\n", ((result = hdfsCopy(fs, srcPath, lfs, srcPath)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
- fprintf(stderr, "hdfsCopy(remote-remote): %s\n", ((result = hdfsCopy(fs, srcPath, fs, dstPath)) ? "Failed!" : "Success!"));
+ fprintf(stderr, "hdfsCopy(remote-remote): %s\n", ((result = hdfsCopy(fs, srcPath, fs, dstPath)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
- fprintf(stderr, "hdfsMove(local-local): %s\n", ((result = hdfsMove(lfs, srcPath, lfs, dstPath)) ? "Failed!" : "Success!"));
+ fprintf(stderr, "hdfsMove(local-local): %s\n", ((result = hdfsMove(lfs, srcPath, lfs, dstPath)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
- fprintf(stderr, "hdfsMove(remote-local): %s\n", ((result = hdfsMove(fs, srcPath, lfs, srcPath)) ? "Failed!" : "Success!"));
+ fprintf(stderr, "hdfsMove(remote-local): %s\n", ((result = hdfsMove(fs, srcPath, lfs, srcPath)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
- fprintf(stderr, "hdfsRename: %s\n", ((result = hdfsRename(fs, dstPath, srcPath)) ? "Failed!" : "Success!"));
+ fprintf(stderr, "hdfsRename: %s\n", ((result = hdfsRename(fs, dstPath, srcPath)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
- fprintf(stderr, "hdfsCopy(remote-remote): %s\n", ((result = hdfsCopy(fs, srcPath, fs, dstPath)) ? "Failed!" : "Success!"));
+ fprintf(stderr, "hdfsCopy(remote-remote): %s\n", ((result = hdfsCopy(fs, srcPath, fs, dstPath)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
- const char* slashTmp = "/tmp";
- const char* newDirectory = "/tmp/newdir";
- fprintf(stderr, "hdfsCreateDirectory: %s\n", ((result = hdfsCreateDirectory(fs, newDirectory)) ? "Failed!" : "Success!"));
+ fprintf(stderr, "hdfsCreateDirectory: %s\n", ((result = hdfsCreateDirectory(fs, newDirectory)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
- fprintf(stderr, "hdfsSetReplication: %s\n", ((result = hdfsSetReplication(fs, srcPath, 2)) ? "Failed!" : "Success!"));
+ fprintf(stderr, "hdfsSetReplication: %s\n", ((result = hdfsSetReplication(fs, srcPath, 2)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
- char buffer[256];
- const char *resp;
- fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((resp = hdfsGetWorkingDirectory(fs, buffer, sizeof(buffer))) ? buffer : "Failed!"));
+ fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((resp = hdfsGetWorkingDirectory(fs, buffer2, sizeof(buffer2))) != 0 ? buffer2 : "Failed!"));
totalResult += (resp ? 0 : 1);
- fprintf(stderr, "hdfsSetWorkingDirectory: %s\n", ((result = hdfsSetWorkingDirectory(fs, slashTmp)) ? "Failed!" : "Success!"));
+ fprintf(stderr, "hdfsSetWorkingDirectory: %s\n", ((result = hdfsSetWorkingDirectory(fs, slashTmp)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
- fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((resp = hdfsGetWorkingDirectory(fs, buffer, sizeof(buffer))) ? buffer : "Failed!"));
+ fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((resp = hdfsGetWorkingDirectory(fs, buffer2, sizeof(buffer2))) != 0 ? buffer2 : "Failed!"));
totalResult += (resp ? 0 : 1);
fprintf(stderr, "hdfsGetDefaultBlockSize: %ld\n", hdfsGetDefaultBlockSize(fs));
fprintf(stderr, "hdfsGetCapacity: %ld\n", hdfsGetCapacity(fs));
fprintf(stderr, "hdfsGetUsed: %ld\n", hdfsGetUsed(fs));
- hdfsFileInfo *fileInfo = NULL;
+ fileInfo = NULL;
if((fileInfo = hdfsGetPathInfo(fs, slashTmp)) != NULL) {
fprintf(stderr, "hdfsGetPathInfo - SUCCESS!\n");
fprintf(stderr, "Name: %s, ", fileInfo->mName);
@@ -283,7 +298,6 @@ int main(int argc, char **argv) {
fprintf(stderr, "LastMod: %s", ctime(&fileInfo->mLastMod));
fprintf(stderr, "Owner: %s, ", fileInfo->mOwner);
fprintf(stderr, "Group: %s, ", fileInfo->mGroup);
- char permissions[10];
permission_disp(fileInfo->mPermissions, permissions);
fprintf(stderr, "Permissions: %d (%s)\n", fileInfo->mPermissions, permissions);
hdfsFreeFileInfo(fileInfo, 1);
@@ -292,10 +306,8 @@ int main(int argc, char **argv) {
fprintf(stderr, "waah! hdfsGetPathInfo for %s - FAILED!\n", slashTmp);
}
- hdfsFileInfo *fileList = 0;
- int numEntries = 0;
+ fileList = 0;
if((fileList = hdfsListDirectory(fs, slashTmp, &numEntries)) != NULL) {
- int i = 0;
for(i=0; i < numEntries; ++i) {
fprintf(stderr, "Name: %s, ", fileList[i].mName);
fprintf(stderr, "Type: %c, ", (char)fileList[i].mKind);
@@ -305,7 +317,6 @@ int main(int argc, char **argv) {
fprintf(stderr, "LastMod: %s", ctime(&fileList[i].mLastMod));
fprintf(stderr, "Owner: %s, ", fileList[i].mOwner);
fprintf(stderr, "Group: %s, ", fileList[i].mGroup);
- char permissions[10];
permission_disp(fileList[i].mPermissions, permissions);
fprintf(stderr, "Permissions: %d (%s)\n", fileList[i].mPermissions, permissions);
}
@@ -319,12 +330,12 @@ int main(int argc, char **argv) {
}
}
- char*** hosts = hdfsGetHosts(fs, srcPath, 0, 1);
+ hosts = hdfsGetHosts(fs, srcPath, 0, 1);
if(hosts) {
fprintf(stderr, "hdfsGetHosts - SUCCESS! ... \n");
- int i=0;
+ i=0;
while(hosts[i]) {
- int j = 0;
+ j = 0;
while(hosts[i][j]) {
fprintf(stderr,
"\thosts[%d][%d] - %s\n", i, j, hosts[i][j]);
@@ -337,131 +348,129 @@ int main(int argc, char **argv) {
fprintf(stderr, "waah! hdfsGetHosts - FAILED!\n");
}
- char *newOwner = "root";
// setting tmp dir to 777 so later when connectAsUser nobody, we can write to it
- short newPerm = 0666;
// chown write
- fprintf(stderr, "hdfsChown: %s\n", ((result = hdfsChown(fs, writePath, NULL, "users")) ? "Failed!" : "Success!"));
+ fprintf(stderr, "hdfsChown: %s\n", ((result = hdfsChown(fs, writePath, NULL, "users")) != 0 ? "Failed!" : "Success!"));
totalResult += result;
- fprintf(stderr, "hdfsChown: %s\n", ((result = hdfsChown(fs, writePath, newOwner, NULL)) ? "Failed!" : "Success!"));
+ fprintf(stderr, "hdfsChown: %s\n", ((result = hdfsChown(fs, writePath, newOwner, NULL)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
// chmod write
- fprintf(stderr, "hdfsChmod: %s\n", ((result = hdfsChmod(fs, writePath, newPerm)) ? "Failed!" : "Success!"));
+ fprintf(stderr, "hdfsChmod: %s\n", ((result = hdfsChmod(fs, writePath, newPerm)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
sleep(2);
- tTime newMtime = time(NULL);
- tTime newAtime = time(NULL);
+ newMtime = time(NULL);
+ newAtime = time(NULL);
// utime write
- fprintf(stderr, "hdfsUtime: %s\n", ((result = hdfsUtime(fs, writePath, newMtime, newAtime)) ? "Failed!" : "Success!"));
+ fprintf(stderr, "hdfsUtime: %s\n", ((result = hdfsUtime(fs, writePath, newMtime, newAtime)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
// chown/chmod/utime read
- hdfsFileInfo *finfo = hdfsGetPathInfo(fs, writePath);
+ finfo = hdfsGetPathInfo(fs, writePath);
- fprintf(stderr, "hdfsChown read: %s\n", ((result = (strcmp(finfo->mOwner, newOwner) != 0)) ? "Failed!" : "Success!"));
+ fprintf(stderr, "hdfsChown read: %s\n", ((result = (strcmp(finfo->mOwner, newOwner))) != 0 ? "Failed!" : "Success!"));
totalResult += result;
- fprintf(stderr, "hdfsChmod read: %s\n", ((result = (finfo->mPermissions != newPerm)) ? "Failed!" : "Success!"));
+ fprintf(stderr, "hdfsChmod read: %s\n", ((result = (finfo->mPermissions != newPerm)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
// will later use /tmp/ as a different user so enable it
- fprintf(stderr, "hdfsChmod: %s\n", ((result = hdfsChmod(fs, "/tmp/", 0777)) ? "Failed!" : "Success!"));
+ fprintf(stderr, "hdfsChmod: %s\n", ((result = hdfsChmod(fs, "/tmp/", 0777)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
fprintf(stderr,"newMTime=%ld\n",newMtime);
fprintf(stderr,"curMTime=%ld\n",finfo->mLastMod);
- fprintf(stderr, "hdfsUtime read (mtime): %s\n", ((result = (finfo->mLastMod != newMtime)) ? "Failed!" : "Success!"));
+ fprintf(stderr, "hdfsUtime read (mtime): %s\n", ((result = (finfo->mLastMod != newMtime)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
// No easy way to turn on access times from hdfs_test right now
- // fprintf(stderr, "hdfsUtime read (atime): %s\n", ((result = (finfo->mLastAccess != newAtime)) ? "Failed!" : "Success!"));
+ // fprintf(stderr, "hdfsUtime read (atime): %s\n", ((result = (finfo->mLastAccess != newAtime)) != 0 ? "Failed!" : "Success!"));
// totalResult += result;
hdfsFreeFileInfo(finfo, 1);
// Clean up
- fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs, newDirectory, 1)) ? "Failed!" : "Success!"));
+ fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs, newDirectory, 1)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
- fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs, srcPath, 1)) ? "Failed!" : "Success!"));
+ fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs, srcPath, 1)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
- fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(lfs, srcPath, 1)) ? "Failed!" : "Success!"));
+ fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(lfs, srcPath, 1)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
- fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(lfs, dstPath, 1)) ? "Failed!" : "Success!"));
+ fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(lfs, dstPath, 1)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
- fprintf(stderr, "hdfsExists: %s\n", ((result = hdfsExists(fs, newDirectory)) ? "Success!" : "Failed!"));
+ fprintf(stderr, "hdfsExists: %s\n", ((result = hdfsExists(fs, newDirectory)) != 0 ? "Success!" : "Failed!"));
totalResult += (result ? 0 : 1);
}
{
// TEST APPENDS
- const char *writePath = "/tmp/appends";
// CREATE
- hdfsFile writeFile = hdfsOpenFile(fs, writePath, O_WRONLY, 0, 0, 0);
- if(!writeFile) {
- fprintf(stderr, "Failed to open %s for writing!\n", writePath);
+ appendFile = hdfsOpenFile(fs, appendPath, O_WRONLY, 0, 0, 0);
+ if(!appendFile) {
+ fprintf(stderr, "Failed to open %s for writing!\n", appendPath);
exit(-1);
}
- fprintf(stderr, "Opened %s for writing successfully...\n", writePath);
+ fprintf(stderr, "Opened %s for writing successfully...\n", appendPath);
- char* buffer = "Hello,";
- tSize num_written_bytes = hdfsWrite(fs, writeFile, (void*)buffer, strlen(buffer));
+ buffer3 = "Hello,";
+ num_written_bytes = hdfsWrite(fs, appendFile, (void*)buffer3,
+ (tSize)strlen(buffer3));
fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
- if (hdfsFlush(fs, writeFile)) {
- fprintf(stderr, "Failed to 'flush' %s\n", writePath);
+ if (hdfsFlush(fs, appendFile)) {
+ fprintf(stderr, "Failed to 'flush' %s\n", appendPath);
exit(-1);
}
- fprintf(stderr, "Flushed %s successfully!\n", writePath);
+ fprintf(stderr, "Flushed %s successfully!\n", appendPath);
- hdfsCloseFile(fs, writeFile);
+ hdfsCloseFile(fs, appendFile);
// RE-OPEN
- writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_APPEND, 0, 0, 0);
- if(!writeFile) {
- fprintf(stderr, "Failed to open %s for writing!\n", writePath);
+ appendFile = hdfsOpenFile(fs, appendPath, O_WRONLY|O_APPEND, 0, 0, 0);
+ if(!appendFile) {
+ fprintf(stderr, "Failed to open %s for writing!\n", appendPath);
exit(-1);
}
- fprintf(stderr, "Opened %s for writing successfully...\n", writePath);
+ fprintf(stderr, "Opened %s for writing successfully...\n", appendPath);
- buffer = " World";
- num_written_bytes = hdfsWrite(fs, writeFile, (void*)buffer, strlen(buffer) + 1);
+ buffer3 = " World";
+ num_written_bytes = hdfsWrite(fs, appendFile, (void*)buffer3,
+ (tSize)(strlen(buffer3) + 1));
fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
- if (hdfsFlush(fs, writeFile)) {
- fprintf(stderr, "Failed to 'flush' %s\n", writePath);
+ if (hdfsFlush(fs, appendFile)) {
+ fprintf(stderr, "Failed to 'flush' %s\n", appendPath);
exit(-1);
}
- fprintf(stderr, "Flushed %s successfully!\n", writePath);
+ fprintf(stderr, "Flushed %s successfully!\n", appendPath);
- hdfsCloseFile(fs, writeFile);
+ hdfsCloseFile(fs, appendFile);
// CHECK size
- hdfsFileInfo *finfo = hdfsGetPathInfo(fs, writePath);
- fprintf(stderr, "fileinfo->mSize: == total %s\n", ((result = (finfo->mSize == strlen("Hello, World") + 1)) ? "Success!" : "Failed!"));
+ finfo = hdfsGetPathInfo(fs, appendPath);
+ fprintf(stderr, "fileinfo->mSize: == total %s\n", ((result = (finfo->mSize == (tOffset)(strlen("Hello, World") + 1))) == 1 ? "Success!" : "Failed!"));
totalResult += (result ? 0 : 1);
// READ and check data
- hdfsFile readFile = hdfsOpenFile(fs, writePath, O_RDONLY, 0, 0, 0);
+ readFile = hdfsOpenFile(fs, appendPath, O_RDONLY, 0, 0, 0);
if (!readFile) {
- fprintf(stderr, "Failed to open %s for reading!\n", writePath);
+ fprintf(stderr, "Failed to open %s for reading!\n", appendPath);
exit(-1);
}
- char rdbuffer[32];
- tSize num_read_bytes = hdfsRead(fs, readFile, (void*)rdbuffer, sizeof(rdbuffer));
+ num_read_bytes = hdfsRead(fs, readFile, (void*)rdbuffer, sizeof(rdbuffer));
fprintf(stderr, "Read following %d bytes:\n%s\n",
num_read_bytes, rdbuffer);
- fprintf(stderr, "read == Hello, World %s\n", (result = (strcmp(rdbuffer, "Hello, World") == 0)) ? "Success!" : "Failed!");
+ fprintf(stderr, "read == Hello, World %s\n", ((result = (strcmp(rdbuffer, "Hello, World"))) == 0 ? "Success!" : "Failed!"));
hdfsCloseFile(fs, readFile);
@@ -478,36 +487,33 @@ int main(int argc, char **argv) {
// the actual fs user capabilities. Thus just create a file and read
// the owner is correct.
- const char *tuser = "nobody";
- const char* writePath = "/tmp/usertestfile.txt";
-
fs = hdfsConnectAsUserNewInstance("default", 0, tuser);
if(!fs) {
fprintf(stderr, "Oops! Failed to connect to hdfs as user %s!\n",tuser);
exit(-1);
}
- hdfsFile writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
- if(!writeFile) {
- fprintf(stderr, "Failed to open %s for writing!\n", writePath);
+ userFile = hdfsOpenFile(fs, userPath, O_WRONLY|O_CREAT, 0, 0, 0);
+ if(!userFile) {
+ fprintf(stderr, "Failed to open %s for writing!\n", userPath);
exit(-1);
}
- fprintf(stderr, "Opened %s for writing successfully...\n", writePath);
+ fprintf(stderr, "Opened %s for writing successfully...\n", userPath);
- char* buffer = "Hello, World!";
- tSize num_written_bytes = hdfsWrite(fs, writeFile, (void*)buffer, strlen(buffer)+1);
+ num_written_bytes = hdfsWrite(fs, userFile, (void*)fileContents,
+ (tSize)(strlen(fileContents)+1));
fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
- if (hdfsFlush(fs, writeFile)) {
- fprintf(stderr, "Failed to 'flush' %s\n", writePath);
+ if (hdfsFlush(fs, userFile)) {
+ fprintf(stderr, "Failed to 'flush' %s\n", userPath);
exit(-1);
}
- fprintf(stderr, "Flushed %s successfully!\n", writePath);
+ fprintf(stderr, "Flushed %s successfully!\n", userPath);
- hdfsCloseFile(fs, writeFile);
+ hdfsCloseFile(fs, userFile);
- hdfsFileInfo *finfo = hdfsGetPathInfo(fs, writePath);
- fprintf(stderr, "hdfs new file user is correct: %s\n", ((result = (strcmp(finfo->mOwner, tuser) != 0)) ? "Failed!" : "Success!"));
+ finfo = hdfsGetPathInfo(fs, userPath);
+ fprintf(stderr, "hdfs new file user is correct: %s\n", ((result = (strcmp(finfo->mOwner, tuser))) != 0 ? "Failed!" : "Success!"));
totalResult += result;
}
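Most of the churn in this test is the same mechanical pattern: local declarations hoisted to the top of each block and explicit (tSize)/(tOffset) casts added. A hedged before/after sketch of that pattern follows (illustrative function, not from the patch), since the MSVC C compiler targeted here accepts only C89-style declaration placement and warns on implicit size_t narrowing.

#include <stdio.h>
#include <string.h>

typedef int tSize;                 /* stand-in for the libhdfs typedef */

/* C99-style body rejected by cl in C mode:
 *
 *   fprintf(stderr, "checking %s\n", path);
 *   tSize len = strlen(path);     // declaration after a statement
 */
static tSize pathLength(const char *path) {
  tSize len;                       /* C89: declarations first ... */

  fprintf(stderr, "checking %s\n", path);
  len = (tSize)strlen(path);       /* ... and an explicit narrowing cast */
  return len;
}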
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_read.c b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_read.c
index 464a4d142f..6e44741c3b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_read.c
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_read.c
@@ -22,35 +22,38 @@
#include
int main(int argc, char **argv) {
+ hdfsFS fs;
+ const char *rfile = argv[1];
+ tSize bufferSize = strtoul(argv[3], NULL, 10);
+ hdfsFile readFile;
+ char* buffer;
+ tSize curSize;
if (argc != 4) {
fprintf(stderr, "Usage: hdfs_read \n");
exit(-1);
}
- hdfsFS fs = hdfsConnect("default", 0);
+ fs = hdfsConnect("default", 0);
if (!fs) {
fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
exit(-1);
}
-
- const char* rfile = argv[1];
- tSize bufferSize = strtoul(argv[3], NULL, 10);
-
- hdfsFile readFile = hdfsOpenFile(fs, rfile, O_RDONLY, bufferSize, 0, 0);
+
+ readFile = hdfsOpenFile(fs, rfile, O_RDONLY, bufferSize, 0, 0);
if (!readFile) {
fprintf(stderr, "Failed to open %s for writing!\n", rfile);
exit(-2);
}
// data to be written to the file
- char* buffer = malloc(sizeof(char) * bufferSize);
+ buffer = malloc(sizeof(char) * bufferSize);
if(buffer == NULL) {
return -2;
}
// read from the file
- tSize curSize = bufferSize;
+ curSize = bufferSize;
for (; curSize == bufferSize;) {
curSize = hdfsRead(fs, readFile, (void*)buffer, curSize);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_write.c b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_write.c
index b0f320c525..42b3df75f6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_write.c
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_write.c
@@ -21,23 +21,31 @@
#include
#include
#include
+#include
int main(int argc, char **argv) {
+ hdfsFS fs;
+ const char *writeFileName = argv[1];
+ off_t fileTotalSize = strtoul(argv[2], NULL, 10);
+ long long tmpBufferSize = strtoul(argv[3], NULL, 10);
+ tSize bufferSize;
+ hdfsFile writeFile;
+ char* buffer;
+ int i;
+ off_t nrRemaining;
+ tSize curSize;
+ tSize written;
if (argc != 4) {
fprintf(stderr, "Usage: hdfs_write \n");
exit(-1);
}
- hdfsFS fs = hdfsConnect("default", 0);
+ fs = hdfsConnect("default", 0);
if (!fs) {
fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
exit(-1);
}
-
- const char* writeFileName = argv[1];
- off_t fileTotalSize = strtoul(argv[2], NULL, 10);
- long long tmpBufferSize = strtoul(argv[3], NULL, 10);
// sanity check
if(fileTotalSize == ULONG_MAX && errno == ERANGE) {
@@ -51,30 +59,27 @@ int main(int argc, char **argv) {
exit(-3);
}
- tSize bufferSize = tmpBufferSize;
+ bufferSize = (tSize)tmpBufferSize;
- hdfsFile writeFile = hdfsOpenFile(fs, writeFileName, O_WRONLY, bufferSize, 0, 0);
+ writeFile = hdfsOpenFile(fs, writeFileName, O_WRONLY, bufferSize, 0, 0);
if (!writeFile) {
fprintf(stderr, "Failed to open %s for writing!\n", writeFileName);
exit(-2);
}
// data to be written to the file
- char* buffer = malloc(sizeof(char) * bufferSize);
+ buffer = malloc(sizeof(char) * bufferSize);
if(buffer == NULL) {
fprintf(stderr, "Could not allocate buffer of size %d\n", bufferSize);
return -2;
}
- int i = 0;
for (i=0; i < bufferSize; ++i) {
buffer[i] = 'a' + (i%26);
}
// write to the file
- off_t nrRemaining;
for (nrRemaining = fileTotalSize; nrRemaining > 0; nrRemaining -= bufferSize ) {
- tSize curSize = ( bufferSize < nrRemaining ) ? bufferSize : (tSize)nrRemaining;
- tSize written;
+ curSize = ( bufferSize < nrRemaining ) ? bufferSize : (tSize)nrRemaining;
if ((written = hdfsWrite(fs, writeFile, (void*)buffer, curSize)) != curSize) {
fprintf(stderr, "ERROR: hdfsWrite returned an error on write: %d\n", written);
exit(-3);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_zerocopy.c b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_zerocopy.c
index f9ee331a1a..3774417c83 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_zerocopy.c
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test/test_libhdfs_zerocopy.c
@@ -19,12 +19,12 @@
#include "expect.h"
#include "hdfs.h"
#include "native_mini_dfs.h"
+#include "platform.h"
#include
#include
-#include
-#include
#include
+#include
#include
#include
#include
@@ -53,7 +53,7 @@ static uint8_t *getZeroCopyBlockData(int blockIdx)
exit(1);
}
for (i = 0; i < TEST_ZEROCOPY_FULL_BLOCK_SIZE; i++) {
- buf[i] = blockIdx + (i % 17);
+ buf[i] = (uint8_t)(blockIdx + (i % 17));
}
return buf;
}
@@ -69,18 +69,6 @@ static int getZeroCopyBlockLen(int blockIdx)
}
}
-static void printBuf(const uint8_t *buf, size_t len) __attribute__((unused));
-
-static void printBuf(const uint8_t *buf, size_t len)
-{
- size_t i;
-
- for (i = 0; i < len; i++) {
- fprintf(stderr, "%02x", buf[i]);
- }
- fprintf(stderr, "\n");
-}
-
static int doTestZeroCopyReads(hdfsFS fs, const char *fileName)
{
hdfsFile file = NULL;
@@ -127,8 +115,9 @@ static int doTestZeroCopyReads(hdfsFS fs, const char *fileName)
EXPECT_NONNULL(block);
EXPECT_ZERO(memcmp(block, hadoopRzBufferGet(buffer), SMALL_READ_LEN));
hadoopRzBufferFree(file, buffer);
- EXPECT_INT_EQ(TEST_ZEROCOPY_FULL_BLOCK_SIZE + SMALL_READ_LEN,
- hdfsTell(fs, file));
+ EXPECT_INT64_EQ(
+ (int64_t)TEST_ZEROCOPY_FULL_BLOCK_SIZE + (int64_t)SMALL_READ_LEN,
+ hdfsTell(fs, file));
EXPECT_ZERO(expectFileStats(file,
TEST_ZEROCOPY_FULL_BLOCK_SIZE + SMALL_READ_LEN,
TEST_ZEROCOPY_FULL_BLOCK_SIZE + SMALL_READ_LEN,
@@ -165,7 +154,7 @@ static int doTestZeroCopyReads(hdfsFS fs, const char *fileName)
free(block);
block = getZeroCopyBlockData(2);
EXPECT_NONNULL(block);
- EXPECT_ZERO(memcmp(block, hadoopRzBufferGet(buffer) +
+ EXPECT_ZERO(memcmp(block, (uint8_t*)hadoopRzBufferGet(buffer) +
(TEST_ZEROCOPY_FULL_BLOCK_SIZE - SMALL_READ_LEN), SMALL_READ_LEN));
hadoopRzBufferFree(file, buffer);
@@ -219,8 +208,10 @@ int main(void)
{
int port;
struct NativeMiniDfsConf conf = {
- .doFormat = 1,
- .configureShortCircuit = 1,
+ 1, /* doFormat */
+ 0, /* webhdfsEnabled */
+ 0, /* namenodeHttpPort */
+ 1, /* configureShortCircuit */
};
char testFileName[TEST_FILE_NAME_LENGTH];
hdfsFS fs;
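The last hunk above swaps C99 designated initializers for positional ones because the MSVC C compiler used for the Windows build does not accept them. A hedged side-by-side sketch (the struct here is a stand-in mirroring the field order shown in the comments above, not the real native_mini_dfs.h declaration):

struct DemoConf {
  int doFormat;
  int webhdfsEnabled;
  int namenodeHttpPort;
  int configureShortCircuit;
};

/* C99 designated initializers: order-independent, omitted fields become 0.
 * Not accepted by the MSVC C compiler targeted by this patch. */
struct DemoConf c99Conf = {
  .doFormat = 1,
  .configureShortCircuit = 1,
};

/* C89 positional initializers: fields must appear in declaration order, hence
 * the explicit zeros and the per-field comments in the patch. */
struct DemoConf c89Conf = {
  1, /* doFormat */
  0, /* webhdfsEnabled */
  0, /* namenodeHttpPort */
  1, /* configureShortCircuit */
};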
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c
index c4ea060ec7..cf605e3e2d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/test_libhdfs_threaded.c
@@ -19,11 +19,11 @@
#include "expect.h"
#include "hdfs.h"
#include "native_mini_dfs.h"
+#include "os/thread.h"
#include
#include
-#include
-#include
+#include
#include