Merge branch 'trunk' into HDFS-6581
commit 7f0422be76
@@ -554,6 +554,8 @@ Release 2.6.0 - UNRELEASED
     HADOOP-10954. Adding site documents of hadoop-tools (Masatake Iwasaki
     via aw)
 
+    HADOOP-11153. Make number of KMS threads configurable. (wang)
+
   OPTIMIZATIONS
 
     HADOOP-10838. Byte array native checksumming. (James Thomas via todd)
@@ -761,6 +763,8 @@ Release 2.6.0 - UNRELEASED
     HADOOP-11110. JavaKeystoreProvider should not report a key as created if it
     was not flushed to the backing file.
 
+    HADOOP-11130. NFS updateMaps OS check is reversed (brandonli)
+
   BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
 
     HADOOP-10734. Implement high-performance secure random number sources.
@@ -673,27 +673,11 @@ protected boolean isSameParents(Node node1, Node node2) {
     return node1.getParent()==node2.getParent();
   }
 
-  private static final ThreadLocal<Random> r = new ThreadLocal<Random>();
-
-  /**
-   * Getter for thread-local Random, which provides better performance than
-   * a shared Random (even though Random is thread-safe).
-   *
-   * @return Thread-local Random.
-   */
-  protected Random getRandom() {
-    Random rand = r.get();
-    if (rand == null) {
-      rand = new Random();
-      r.set(rand);
-    }
-    return rand;
-  }
+  private static final Random r = new Random();
 
   @VisibleForTesting
   void setRandomSeed(long seed) {
-    Random rand = getRandom();
-    rand.setSeed(seed);
+    r.setSeed(seed);
   }
 
   /** randomly choose one node from <i>scope</i>
@@ -745,7 +729,7 @@ private Node chooseRandom(String scope, String excludedScope){
         "Failed to find datanode (scope=\"" + String.valueOf(scope) +
         "\" excludedScope=\"" + String.valueOf(excludedScope) + "\").");
     }
-    int leaveIndex = getRandom().nextInt(numOfDatanodes);
+    int leaveIndex = r.nextInt(numOfDatanodes);
     return innerNode.getLeaf(leaveIndex, node);
   }
 
@@ -918,11 +902,10 @@ public void sortByDistance(Node reader, Node[] nodes, int activeLen) {
       list.add(node);
     }
 
-    Random rand = getRandom();
     int idx = 0;
     for (List<Node> list: tree.values()) {
       if (list != null) {
-        Collections.shuffle(list, rand);
+        Collections.shuffle(list, r);
         for (Node n: list) {
           nodes[idx] = n;
           idx++;
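The hunks above replace a ThreadLocal<Random> with one shared Random (HDFS-7122). A concrete symptom of the old design: setRandomSeed() only reached the Random belonging to the calling thread, so a seed set by a test had no effect on placement decisions made by other threads. A minimal standalone sketch of that pitfall (ThreadLocalSeedDemo is a hypothetical demo class, not Hadoop code):

    import java.util.Random;

    public class ThreadLocalSeedDemo {
      // Per-thread Random: setSeed() only reaches the calling thread's copy.
      private static final ThreadLocal<Random> TL =
          ThreadLocal.withInitial(Random::new);
      // Shared Random: setSeed() is visible to every thread.
      private static final Random SHARED = new Random();

      public static void main(String[] args) throws InterruptedException {
        TL.get().setSeed(42);  // seeds only the main thread's instance
        SHARED.setSeed(42);    // seeds the single shared instance

        Thread t = new Thread(() -> {
          // This thread's ThreadLocal Random is brand new and unseeded, so
          // the value below ignores the seed set on the main thread.
          System.out.println("thread-local: " + TL.get().nextInt(100));
          // The shared Random honors the seed no matter which thread asks.
          System.out.println("shared:       " + SHARED.nextInt(100));
        });
        t.start();
        t.join();
      }
    }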
@@ -36,6 +36,10 @@
 #
 # export KMS_ADMIN_PORT=`expr ${KMS_HTTP_PORT} + 1`
 
+# The maximum number of Tomcat handler threads
+#
+# export KMS_MAX_THREADS=1000
+
 # The location of the SSL keystore if using SSL
 #
 # export KMS_SSL_KEYSTORE_FILE=${HOME}/.keystore
@@ -136,6 +136,13 @@ else
   print "Using KMS_ADMIN_PORT: ${KMS_ADMIN_PORT}"
 fi
 
+if [ "${KMS_MAX_THREADS}" = "" ]; then
+  export KMS_MAX_THREADS=1000
+  print "Setting KMS_MAX_THREADS: ${KMS_MAX_THREADS}"
+else
+  print "Using KMS_MAX_THREADS: ${KMS_MAX_THREADS}"
+fi
+
 if [ "${KMS_SSL_KEYSTORE_FILE}" = "" ]; then
   export KMS_SSL_KEYSTORE_FILE=${HOME}/.keystore
   print "Setting KMS_SSL_KEYSTORE_FILE: ${KMS_SSL_KEYSTORE_FILE}"
@@ -44,6 +44,7 @@ catalina_opts="${catalina_opts} -Dkms.log.dir=${KMS_LOG}";
 catalina_opts="${catalina_opts} -Dkms.temp.dir=${KMS_TEMP}";
 catalina_opts="${catalina_opts} -Dkms.admin.port=${KMS_ADMIN_PORT}";
 catalina_opts="${catalina_opts} -Dkms.http.port=${KMS_HTTP_PORT}";
+catalina_opts="${catalina_opts} -Dkms.max.threads=${KMS_MAX_THREADS}";
 catalina_opts="${catalina_opts} -Dkms.ssl.keystore.file=${KMS_SSL_KEYSTORE_FILE}";
 catalina_opts="${catalina_opts} -Dkms.ssl.keystore.pass=${KMS_SSL_KEYSTORE_PASS}";
 
@@ -73,6 +73,7 @@
        Define a non-SSL HTTP/1.1 Connector on port ${kms.http.port}
     -->
     <Connector port="${kms.http.port}" protocol="HTTP/1.1"
+               maxThreads="${kms.max.threads}"
                connectionTimeout="20000"
                redirectPort="8443"/>
     <!-- A "Connector" using the shared thread pool-->
@@ -69,7 +69,7 @@
          connector should be using the OpenSSL style configuration
          described in the APR documentation -->
     <Connector port="${kms.http.port}" protocol="HTTP/1.1" SSLEnabled="true"
-               maxThreads="150" scheme="https" secure="true"
+               maxThreads="${kms.max.threads}" scheme="https" secure="true"
                clientAuth="false" sslProtocol="TLS"
                keystoreFile="${kms.ssl.keystore.file}"
                keystorePass="${kms.ssl.keystore.pass}"/>
@@ -152,6 +152,8 @@ hadoop-${project.version} $ sbin/kms.sh start
 
   * KMS_ADMIN_PORT
 
+  * KMS_MAX_THREADS
+
   * KMS_LOG
 
   NOTE: You need to restart the KMS for the configuration changes to take
@@ -45,8 +45,8 @@ public class IdUserGroup {
   private final static String OS = System.getProperty("os.name");
 
   /** Shell commands to get users and groups */
-  static final String LINUX_GET_ALL_USERS_CMD = "getent passwd | cut -d: -f1,3";
-  static final String LINUX_GET_ALL_GROUPS_CMD = "getent group | cut -d: -f1,3";
+  static final String GET_ALL_USERS_CMD = "getent passwd | cut -d: -f1,3";
+  static final String GET_ALL_GROUPS_CMD = "getent group | cut -d: -f1,3";
   static final String MAC_GET_ALL_USERS_CMD = "dscl . -list /Users UniqueID";
   static final String MAC_GET_ALL_GROUPS_CMD = "dscl . -list /Groups PrimaryGroupID";
 
@@ -223,17 +223,16 @@ synchronized public void updateMaps() throws IOException {
           + "' does not exist.");
     }
 
-    if (OS.startsWith("Linux")) {
-      updateMapInternal(uMap, "user", LINUX_GET_ALL_USERS_CMD, ":",
-          staticMapping.uidMapping);
-      updateMapInternal(gMap, "group", LINUX_GET_ALL_GROUPS_CMD, ":",
-          staticMapping.gidMapping);
-    } else {
-      // Mac
+    if (OS.startsWith("Mac")) {
       updateMapInternal(uMap, "user", MAC_GET_ALL_USERS_CMD, "\\s+",
           staticMapping.uidMapping);
       updateMapInternal(gMap, "group", MAC_GET_ALL_GROUPS_CMD, "\\s+",
           staticMapping.gidMapping);
+    } else {
+      updateMapInternal(uMap, "user", GET_ALL_USERS_CMD, ":",
+          staticMapping.uidMapping);
+      updateMapInternal(gMap, "group", GET_ALL_GROUPS_CMD, ":",
+          staticMapping.gidMapping);
     }
 
     uidNameMap = uMap;
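The old check above was effectively reversed (HADOOP-11130): only Linux hit the getent branch, so any other getent-based Unix fell into the Mac dscl branch. After the fix, Mac is the special case and getent is the default. The split regex also differs per command because getent output is colon-delimited while dscl output is whitespace-aligned; a small standalone sketch, with sample lines invented for illustration:

    public class IdParseDemo {
      public static void main(String[] args) {
        // "getent passwd | cut -d: -f1,3" yields colon-separated name:id pairs.
        String getentLine = "alice:1001";
        // "dscl . -list /Users UniqueID" yields whitespace-aligned columns.
        String dsclLine = "alice      501";

        String[] fromGetent = getentLine.split(":");   // ["alice", "1001"]
        String[] fromDscl = dsclLine.split("\\s+");    // ["alice", "501"]

        System.out.println(fromGetent[0] + " -> " + fromGetent[1]);
        System.out.println(fromDscl[0] + " -> " + fromDscl[1]);
      }
    }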
@@ -520,6 +520,8 @@ Release 2.6.0 - UNRELEASED
 
     HDFS-7104. Fix and clarify INodeInPath getter functions. (Zhe Zhang via wang)
 
+    HDFS-7124. Remove EncryptionZoneManager.NULL_EZ. (clamb via wang)
+
   OPTIMIZATIONS
 
     HDFS-6690. Deduplicate xattr names in memory. (wang)
@@ -530,6 +532,9 @@ Release 2.6.0 - UNRELEASED
     HDFS-6865. Byte array native checksumming on client side
     (James Thomas via todd)
 
+    HDFS-7122. Use of ThreadLocal<Random> results in poor block placement.
+    (wang)
+
   BUG FIXES
 
     HDFS-6823. dfs.web.authentication.kerberos.principal shows up in logs for
@@ -2940,8 +2940,7 @@ public EncryptionZone getEZForPath(String src)
       throws IOException {
     checkOpen();
     try {
-      final EncryptionZone ez = namenode.getEZForPath(src);
-      return (ez.getId() < 0) ? null : ez;
+      return namenode.getEZForPath(src);
     } catch (RemoteException re) {
       throw re.unwrapRemoteException(AccessControlException.class,
                                      UnresolvedPathException.class);
@@ -1340,7 +1340,9 @@ public GetEZForPathResponseProto getEZForPath(
       GetEZForPathResponseProto.Builder builder =
           GetEZForPathResponseProto.newBuilder();
       final EncryptionZone ret = server.getEZForPath(req.getSrc());
-      builder.setZone(PBHelper.convert(ret));
+      if (ret != null) {
+        builder.setZone(PBHelper.convert(ret));
+      }
       return builder.build();
     } catch (IOException e) {
       throw new ServiceException(e);
@@ -1347,7 +1347,11 @@ public EncryptionZone getEZForPath(String src)
     try {
       final EncryptionZonesProtos.GetEZForPathResponseProto response =
           rpcProxy.getEZForPath(null, req);
-      return PBHelper.convert(response.getZone());
+      if (response.hasZone()) {
+        return PBHelper.convert(response.getZone());
+      } else {
+        return null;
+      }
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
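Taken together, the three hunks above move the "no encryption zone" signal from a sentinel object (NULL_EZ, decoded client-side via ez.getId() < 0) to an absent optional field: the server only sets the zone when one exists, and the client checks hasZone() and returns null otherwise. A plain-Java model of that round trip, with Optional and a hypothetical ZoneProto record standing in for the generated protobuf accessors (no Hadoop or protobuf dependency):

    import java.util.Optional;

    public class EZRoundTripDemo {
      // Stand-in for the wire-level EncryptionZoneProto message.
      record ZoneProto(long id, String path) {}

      // Server side: set the field only when there is a zone to report.
      static Optional<ZoneProto> buildResponse(ZoneProto zoneOrNull) {
        return Optional.ofNullable(zoneOrNull);
      }

      // Client side: the hasZone()-style presence check, then convert or null.
      static ZoneProto decode(Optional<ZoneProto> response) {
        return response.orElse(null);
      }

      public static void main(String[] args) {
        System.out.println(decode(buildResponse(null)));                     // null
        System.out.println(decode(buildResponse(new ZoneProto(7, "/enc")))); // ZoneProto[id=7, path=/enc]
      }
    }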
@@ -57,10 +57,6 @@ public class EncryptionZoneManager {
   public static Logger LOG = LoggerFactory.getLogger(EncryptionZoneManager
       .class);
 
-  public static final EncryptionZone NULL_EZ =
-      new EncryptionZone(-1, "", CipherSuite.UNKNOWN,
-          CryptoProtocolVersion.UNKNOWN, "");
-
   /**
    * EncryptionZoneInt is the internal representation of an encryption zone. The
    * external representation of an EZ is embodied in an EncryptionZone and
@@ -226,7 +222,7 @@ private EncryptionZoneInt getEncryptionZoneForPath(INodesInPath iip) {
   EncryptionZone getEZINodeForPath(INodesInPath iip) {
     final EncryptionZoneInt ezi = getEncryptionZoneForPath(iip);
     if (ezi == null) {
-      return NULL_EZ;
+      return null;
     } else {
       return new EncryptionZone(ezi.getINodeId(), getFullPathName(ezi),
           ezi.getSuite(), ezi.getVersion(), ezi.getKeyName());
@@ -2863,8 +2863,7 @@ FileEncryptionInfo getFileEncryptionInfo(INode inode, int snapshotId,
       iip = getINodesInPath(inode.getFullPathName(), true);
     }
     EncryptionZone encryptionZone = getEZForPath(iip);
-    if (encryptionZone == null ||
-        encryptionZone.equals(EncryptionZoneManager.NULL_EZ)) {
+    if (encryptionZone == null) {
       // not an encrypted file
       return null;
     } else if(encryptionZone.getPath() == null
@@ -63,5 +63,5 @@ message GetEZForPathRequestProto {
 }
 
 message GetEZForPathResponseProto {
-  required EncryptionZoneProto zone = 1;
+  optional EncryptionZoneProto zone = 1;
 }
@@ -465,6 +465,9 @@ Release 2.6.0 - UNRELEASED
     YARN-2608. FairScheduler: Potential deadlocks in loading alloc files and
     clock access. (Wei Yan via kasha)
 
+    YARN-2606. Application History Server tries to access hdfs before doing
+    secure login (Mit Desai via jeagles)
+
 Release 2.5.1 - 2014-09-05
 
   INCOMPATIBLE CHANGES
@@ -115,7 +115,8 @@ protected FileSystem getFileSystem(Path path, Configuration conf) throws Exception {
   }
 
   @Override
-  public void serviceInit(Configuration conf) throws Exception {
+  public void serviceStart() throws Exception {
+    Configuration conf = getConfig();
     Path fsWorkingPath =
         new Path(conf.get(YarnConfiguration.FS_APPLICATION_HISTORY_STORE_URI,
             conf.get("hadoop.tmp.dir") + "/yarn/timeline/generic-history"));
@@ -132,7 +133,7 @@ public void serviceInit(Configuration conf) throws Exception {
       LOG.error("Error when initializing FileSystemHistoryStorage", e);
       throw e;
     }
-    super.serviceInit(conf);
+    super.serviceStart();
   }
 
   @Override
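Moving the filesystem access from serviceInit to serviceStart matters because of lifecycle ordering: per the YARN-2606 entry above, the Application History Server was touching HDFS before its secure login. A minimal lifecycle sketch in plain Java (no Hadoop types; the two-phase boot order with login in between is the assumption being illustrated):

    import java.util.List;

    public class LifecycleDemo {
      interface Component { void init(); void start(); }

      static class HistoryStore implements Component {
        public void init()  { System.out.println("init: no credentials yet, avoid HDFS"); }
        public void start() { System.out.println("start: logged in, safe to open HDFS paths"); }
      }

      public static void main(String[] args) {
        List<Component> components = List.of(new HistoryStore());
        components.forEach(Component::init);   // phase 1: init everything
        System.out.println("secure login");    // login happens between the phases
        components.forEach(Component::start);  // phase 2: start everything
      }
    }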
@@ -61,10 +61,10 @@ public class TestFileSystemApplicationHistoryStore extends
   @Before
   public void setup() throws Exception {
     fs = new RawLocalFileSystem();
-    initStore(fs);
+    initAndStartStore(fs);
   }
 
-  private void initStore(final FileSystem fs) throws IOException,
+  private void initAndStartStore(final FileSystem fs) throws IOException,
       URISyntaxException {
     Configuration conf = new Configuration();
     fs.initialize(new URI("/"), conf);
@@ -272,7 +272,7 @@ public void testInitExistingWorkingDirectoryInSafeMode() throws Exception {
     doReturn(true).when(fs).isDirectory(any(Path.class));
 
     try {
-      initStore(fs);
+      initAndStartStore(fs);
     } catch (Exception e) {
       Assert.fail("Exception should not be thrown: " + e);
     }
@@ -293,7 +293,7 @@ public void testInitNonExistingWorkingDirectoryInSafeMode() throws Exception {
     doThrow(new IOException()).when(fs).mkdirs(any(Path.class));
 
     try {
-      initStore(fs);
+      initAndStartStore(fs);
       Assert.fail("Exception should have been thrown");
     } catch (Exception e) {
       // Expected failure
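For reference, the doReturn/doThrow stubbing in the tests above is standard Mockito; a self-contained sketch of the same pattern, with a hypothetical FsStub class in place of the mocked RawLocalFileSystem so it carries no Hadoop dependency:

    import static org.mockito.ArgumentMatchers.anyString;
    import static org.mockito.Mockito.doReturn;
    import static org.mockito.Mockito.doThrow;
    import static org.mockito.Mockito.spy;

    import java.io.IOException;

    public class SafeModeStubDemo {
      static class FsStub {
        boolean isDirectory(String p) { return false; }
        boolean mkdirs(String p) throws IOException { return true; }
      }

      public static void main(String[] args) {
        FsStub fs = spy(new FsStub());
        // Pretend the working directory already exists...
        doReturn(true).when(fs).isDirectory(anyString());
        // ...but creating anything new fails, as it would in HDFS safe mode.
        try {
          doThrow(new IOException("safe mode")).when(fs).mkdirs(anyString());
          System.out.println(fs.isDirectory("/work")); // true
          fs.mkdirs("/work/new");
        } catch (IOException e) {
          System.out.println("expected: " + e.getMessage()); // expected: safe mode
        }
      }
    }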