diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/XAttrCommands.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/XAttrCommands.java index 4505aa90cd..6301776119 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/XAttrCommands.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/XAttrCommands.java @@ -20,12 +20,9 @@ import java.io.IOException; import java.util.Iterator; import java.util.LinkedList; -import java.util.Locale; import java.util.Map; import java.util.Map.Entry; -import com.google.common.base.Enums; -import com.google.common.base.Function; import com.google.common.base.Preconditions; import org.apache.hadoop.HadoopIllegalArgumentException; @@ -66,8 +63,6 @@ public static class GetfattrCommand extends FsCommand { " and values encoded as hexadecimal and base64 are prefixed with " + "0x and 0s, respectively.\n" + ": The file or directory.\n"; - private final static Function enValueOfFunc = - Enums.stringConverter(XAttrCodec.class); private String name = null; private boolean dump = false; @@ -79,7 +74,7 @@ protected void processOptions(LinkedList args) throws IOException { String en = StringUtils.popOptionWithArgument("-e", args); if (en != null) { try { - encoding = enValueOfFunc.apply(StringUtils.toUpperCase(en)); + encoding = XAttrCodec.valueOf(StringUtils.toUpperCase(en)); } catch (IllegalArgumentException e) { throw new IllegalArgumentException( "Invalid/unsupported encoding option specified: " + en); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java index 835643bd9e..9a7108124f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java @@ -65,7 +65,6 @@ import com.google.common.collect.ComparisonChain; import com.google.common.collect.Lists; import com.google.common.collect.Maps; -import com.google.common.util.concurrent.Futures; /** * Data storage information file. @@ -1109,7 +1108,14 @@ public Void call() throws IOException { } linkWorkers.shutdown(); for (Future f : futures) { - Futures.getChecked(f, IOException.class); + try { + f.get(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new IOException(e); + } catch (ExecutionException e) { + throw new IOException(e); + } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclTransformation.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclTransformation.java index 3e4a319d56..4402e263a2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclTransformation.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclTransformation.java @@ -28,7 +28,6 @@ import java.util.Iterator; import java.util.List; -import com.google.common.base.MoreObjects; import com.google.common.collect.ComparisonChain; import com.google.common.collect.Lists; import com.google.common.collect.Maps; @@ -366,8 +365,10 @@ private static void calculateMasks(List aclBuilder, for (AclEntry entry: aclBuilder) { scopeFound.add(entry.getScope()); if (entry.getType() == GROUP || entry.getName() != null) { - FsAction scopeUnionPerms = MoreObjects.firstNonNull( - unionPerms.get(entry.getScope()), FsAction.NONE); + FsAction scopeUnionPerms = unionPerms.get(entry.getScope()); + if (scopeUnionPerms == null) { + scopeUnionPerms = FsAction.NONE; + } unionPerms.put(entry.getScope(), scopeUnionPerms.or(entry.getPermission())); } diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java index db77d31be2..e7f2adb5bd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java @@ -40,7 +40,6 @@ import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; import com.google.common.base.Preconditions; -import com.google.common.collect.ComparisonChain; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableListMultimap; import com.google.common.collect.Lists; @@ -57,28 +56,18 @@ public class JournalSet implements JournalManager { static final Log LOG = LogFactory.getLog(FSEditLog.class); + // we want local logs to be ordered earlier in the collection, and true + // is considered larger than false, so reverse the comparator private static final Comparator - LOCAL_LOG_PREFERENCE_COMPARATOR = new Comparator() { - @Override - public int compare(EditLogInputStream elis1, EditLogInputStream elis2) { - // we want local logs to be ordered earlier in the collection, and true - // is considered larger than false, so we want to invert the booleans here - return ComparisonChain.start().compareFalseFirst(!elis1.isLocalLog(), - !elis2.isLocalLog()).result(); - } - }; - - static final public Comparator - EDIT_LOG_INPUT_STREAM_COMPARATOR = new Comparator() { - @Override - public int compare(EditLogInputStream a, EditLogInputStream b) { - return ComparisonChain.start(). - compare(a.getFirstTxId(), b.getFirstTxId()). - compare(b.getLastTxId(), a.getLastTxId()). 
- result(); - } - }; - + LOCAL_LOG_PREFERENCE_COMPARATOR = Comparator + .comparing(EditLogInputStream::isLocalLog) + .reversed(); + + public static final Comparator<EditLogInputStream> + EDIT_LOG_INPUT_STREAM_COMPARATOR = Comparator + .comparing(EditLogInputStream::getFirstTxId) + .thenComparing(EditLogInputStream::getLastTxId, Comparator.reverseOrder()); + /** * Container for a JournalManager paired with its currently * active stream. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/DirectExecutorService.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/DirectExecutorService.java new file mode 100644 index 0000000000..15d2a13d32 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/DirectExecutorService.java @@ -0,0 +1,154 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.hadoop.hdfs.qjournal.client; + +import java.util.Collection; +import java.util.List; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; +import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +/** + * A very basic ExecutorService for running submitted Callables serially. + * Many bits of functionality are not implemented. + */ +public class DirectExecutorService implements ExecutorService { + + private static class DirectFuture<V> implements Future<V> { + private V result = null; + private Exception ex = null; + + DirectFuture(Callable<V> c) { + try { + result = c.call(); + } catch (Exception e) { + ex = e; + } + } + + @Override + public boolean cancel(boolean mayInterruptIfRunning) { + return false; + } + + @Override + public boolean isCancelled() { + return false; + } + + @Override + public boolean isDone() { + return true; + } + + @Override + public V get() throws InterruptedException, ExecutionException { + if (ex != null) { + throw new ExecutionException(ex); + } + return result; + } + + @Override + public V get(long timeout, TimeUnit unit) + throws InterruptedException, ExecutionException, TimeoutException { + return get(); + } + } + + private boolean isShutdown = false; + + @Override + synchronized public void shutdown() { + isShutdown = true; + } + + @Override + public List<Runnable> shutdownNow() { + throw new UnsupportedOperationException(); + } + + @Override + public boolean isShutdown() { + return isShutdown; + } + + @Override + synchronized public boolean isTerminated() { + return isShutdown; + } + + @Override + public boolean awaitTermination(long timeout, TimeUnit unit) + throws InterruptedException { + throw new UnsupportedOperationException(); + } + + @Override + synchronized public <T> Future<T> submit(Callable<T> task) { + if (isShutdown) { + 
throw new RejectedExecutionException("ExecutorService was shutdown"); + } + return new DirectFuture<>(task); + } + + @Override + public <T> Future<T> submit(Runnable task, T result) { + throw new UnsupportedOperationException(); + } + + @Override + public Future<?> submit(Runnable task) { + throw new UnsupportedOperationException(); + } + + @Override + public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks) + throws InterruptedException { + throw new UnsupportedOperationException(); + } + + @Override + public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks, + long timeout, TimeUnit unit) throws InterruptedException { + throw new UnsupportedOperationException(); + } + + @Override + public <T> T invokeAny(Collection<? extends Callable<T>> tasks) + throws InterruptedException, ExecutionException { + throw new UnsupportedOperationException(); + } + + @Override + public <T> T invokeAny(Collection<? extends Callable<T>> tasks, long timeout, + TimeUnit unit) + throws InterruptedException, ExecutionException, TimeoutException { + throw new UnsupportedOperationException(); + } + + @Override + synchronized public void execute(Runnable command) { + command.run(); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java index c752f239e5..9ada40f6fd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java @@ -65,7 +65,6 @@ import com.google.common.base.Preconditions; import com.google.common.collect.Maps; import com.google.common.collect.Sets; -import com.google.common.util.concurrent.MoreExecutors; public class TestQJMWithFaults { @@ -402,7 +401,7 @@ public void afterCall(InvocationOnMock invocation, boolean succeeded) { @Override protected ExecutorService createSingleThreadExecutor() { - return
MoreExecutors.newDirectExecutorService(); + return new DirectExecutorService(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java index 9aada1d155..8d92666630 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java @@ -66,7 +66,6 @@ import org.mockito.stubbing.Stubber; import com.google.common.collect.Lists; -import com.google.common.util.concurrent.MoreExecutors; /** * Functional tests for QuorumJournalManager. @@ -946,7 +945,7 @@ public AsyncLogger createLogger(Configuration conf, NamespaceInfo nsInfo, protected ExecutorService createSingleThreadExecutor() { // Don't parallelize calls to the quorum in the tests. // This makes the tests more deterministic. 
- return MoreExecutors.newDirectExecutorService(); + return new DirectExecutorService(); } }; diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index c9b6522e03..4097a0ae03 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -90,7 +90,7 @@ 3.0.0 3.1.0-RC1 - 21.0 + 11.0.2 4.0 2.9.4 diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java index 0148d0e417..6e46eb6c76 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java @@ -51,7 +51,6 @@ import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto; import org.apache.hadoop.yarn.proto.YarnProtos.ResourceRequestProto; -import com.google.common.base.CharMatcher; import com.google.protobuf.TextFormat; @Private @@ -286,7 +285,7 @@ private void checkTags(Set tags) { "maximum allowed length of a tag is " + YarnConfiguration.APPLICATION_MAX_TAG_LENGTH); } - if (!CharMatcher.ascii().matchesAllOf(tag)) { + if (!org.apache.commons.lang3.StringUtils.isAsciiPrintable(tag)) { throw new IllegalArgumentException("A tag can only have ASCII " + "characters! 
Invalid tag - " + tag); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApp.java index 300bf3ee45..fad6fe2946 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApp.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApp.java @@ -26,6 +26,7 @@ import java.util.List; import java.util.Map; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.http.HttpServer2; @@ -33,7 +34,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.base.CharMatcher; import com.google.common.base.Splitter; import com.google.common.collect.Lists; import com.google.inject.Provides; @@ -275,7 +275,7 @@ static List parseRoute(String pathSpec) { static String getPrefix(String pathSpec) { int start = 0; - while (CharMatcher.whitespace().matches(pathSpec.charAt(start))) { + while (StringUtils.isAnyBlank(Character.toString(pathSpec.charAt(start)))) { ++start; } if (pathSpec.charAt(start) != '/') { @@ -291,7 +291,7 @@ static String getPrefix(String pathSpec) { char c; do { c = pathSpec.charAt(--ci); - } while (c == '/' || CharMatcher.whitespace().matches(c)); + } while (c == '/' || StringUtils.isAnyBlank(Character.toString(c))); return pathSpec.substring(start, ci + 1); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java index e49c3edf76..368832a931 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java @@ -21,6 +21,8 @@ import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -68,7 +70,6 @@ import org.apache.hadoop.yarn.server.utils.BuilderUtils; import com.google.common.annotations.VisibleForTesting; -import com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.SettableFuture; /** @@ -210,6 +211,17 @@ public void logApplicationSummary(ApplicationId appId) { ApplicationSummary.logAppSummary(rmContext.getRMApps().get(appId)); } + private static V getChecked(Future future) throws YarnException { + try { + return future.get(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new YarnException(e); + } catch (ExecutionException e) { + throw new YarnException(e); + } + } + protected synchronized int getCompletedAppsListSize() { return this.completedApps.size(); } @@ -641,7 +653,7 @@ public void updateApplicationTimeout(RMApp app, this.rmContext.getStateStore() .updateApplicationStateSynchronously(appState, false, future); - Futures.getChecked(future, YarnException.class); + getChecked(future); // update in-memory ((RMAppImpl) app).updateApplicationTimeout(newExpireTime); @@ -678,7 +690,7 @@ public void updateApplicationPriority(UserGroupInformation callerUGI, return; } - Futures.getChecked(future, YarnException.class); + getChecked(future); // update in-memory ((RMAppImpl) app).setApplicationPriority(appPriority); @@ -761,7 +773,7 @@ private void updateAppDataToStateStore(String queue, 
RMApp app, false, future); try { - Futures.getChecked(future, YarnException.class); + getChecked(future); } catch (YarnException ex) { if (!toSuppressException) { throw ex; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java index d29d34e094..bc204cbc8c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java @@ -58,7 +58,6 @@ import org.w3c.dom.Text; import org.xml.sax.SAXException; -import com.google.common.base.CharMatcher; import com.google.common.annotations.VisibleForTesting; @Public @@ -465,7 +464,7 @@ private void loadQueue(String parentName, Element element, Set reservableQueues, Set nonPreemptableQueues) throws AllocationConfigurationException { - String queueName = CharMatcher.whitespace().trimFrom( + String queueName = FairSchedulerUtilities.trimQueueName( element.getAttribute("name")); if (queueName.contains(".")) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerUtilities.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerUtilities.java new file mode 100644 index 0000000000..f394a9387a --- /dev/null 
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerUtilities.java @@ -0,0 +1,69 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair; + +/** + * Utility class for the Fair Scheduler. + */ +public final class FairSchedulerUtilities { + + /** + * Table copied from Google Guava v19: + * com/google/common/base/CharMatcher.java + *
<p>
+ * Licensed under the Apache License Version 2.0. + */ + static final String WHITESPACE_TABLE = + "\u2002\u3000\r\u0085\u200A\u2005\u2000\u3000" + + "\u2029\u000B\u3000\u2008\u2003\u205F\u3000\u1680" + + "\u0009\u0020\u2006\u2001\u202F\u00A0\u000C\u2009" + + "\u3000\u2004\u3000\u3000\u2028\n\u2007\u3000"; + + private FairSchedulerUtilities() { + // private constructor because this is a utility class. + } + + private static boolean isWhitespace(char c) { + for (int i = 0; i < WHITESPACE_TABLE.length(); i++) { + if (WHITESPACE_TABLE.charAt(i) == c) { + return true; + } + } + return false; + } + + public static String trimQueueName(String name) { + if (name == null) { + return null; + } + int start = 0; + while (start < name.length() + && isWhitespace(name.charAt(start)) + && start < name.length()) { + start++; + } + int end = name.length() - 1; + while (end >= 0 + && isWhitespace(name.charAt(end)) + && end > start) { + end--; + } + return name.substring(start, end+1); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java index 5b006dfcf9..c08d13e9c4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java @@ -38,7 +38,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.FifoPolicy; import org.xml.sax.SAXException; -import com.google.common.base.CharMatcher; import 
com.google.common.annotations.VisibleForTesting; import java.util.Iterator; import java.util.Set; @@ -533,8 +532,9 @@ public void updateAllocationConfiguration(AllocationConfiguration queueConf) { @VisibleForTesting boolean isQueueNameValid(String node) { // use the same white space trim as in QueueMetrics() otherwise things fail - // guava uses a different definition for whitespace than java. + // This needs to trim additional Unicode whitespace characters beyond what + // the built-in JDK methods consider whitespace. See YARN-5272. return !node.isEmpty() && - node.equals(CharMatcher.whitespace().trimFrom(node)); + node.equals(FairSchedulerUtilities.trimQueueName(node)); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerUtilities.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerUtilities.java new file mode 100644 index 0000000000..37f686e79e --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerUtilities.java @@ -0,0 +1,67 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair; + +import org.junit.Test; + +import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerUtilities.trimQueueName; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +/** + * Tests for {@link FairSchedulerUtilities}. + */ +public class TestFairSchedulerUtilities { + + @Test + public void testTrimQueueNameEquals() throws Exception { + final String[] equalsStrings = { + // no spaces + "a", + // leading spaces + " a", + " \u3000a", + "\u2002\u3000\r\u0085\u200A\u2005\u2000\u3000a", + "\u2029\u000B\u3000\u2008\u2003\u205F\u3000\u1680a", + "\u0009\u0020\u2006\u2001\u202F\u00A0\u000C\u2009a", + "\u3000\u2004\u3000\u3000\u2028\n\u2007\u3000a", + // trailing spaces + "a\u200A", + "a \u0085 ", + // spaces on both sides + " a ", + " a\u00A0", + "\u0009\u0020\u2006\u2001\u202F\u00A0\u000C\u2009a" + + "\u3000\u2004\u3000\u3000\u2028\n\u2007\u3000", + }; + for (String s : equalsStrings) { + assertEquals("a", trimQueueName(s)); + } + } + + @Test + public void testTrimQueueNamesEmpty() throws Exception { + assertNull(trimQueueName(null)); + final String spaces = "\u2002\u3000\r\u0085\u200A\u2005\u2000\u3000" + + "\u2029\u000B\u3000\u2008\u2003\u205F\u3000\u1680" + + "\u0009\u0020\u2006\u2001\u202F\u00A0\u000C\u2009" + + "\u3000\u2004\u3000\u3000\u2028\n\u2007\u3000"; + assertTrue(trimQueueName(spaces).isEmpty()); + } +}