HADOOP-17970. unguava: remove Preconditions from hdfs-projects modules (#3566)

Author: Ahmed Hussein
Date:   2021-10-23 16:13:07 -05:00 (committed by GitHub)
Parent: 9cfd8d0a83
Commit: 62c86eaa0e
248 changed files with 436 additions and 274 deletions

View File

@@ -178,6 +178,38 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
<excludePackageNames>org.apache.hadoop.hdfs.protocol.proto</excludePackageNames>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-enforcer-plugin</artifactId>
<dependencies>
<dependency>
<groupId>de.skuzzle.enforcer</groupId>
<artifactId>restrict-imports-enforcer-rule</artifactId>
<version>${restrict-imports.enforcer.version}</version>
</dependency>
</dependencies>
<executions>
<execution>
<id>banned-illegal-imports</id>
<phase>process-sources</phase>
<goals>
<goal>enforce</goal>
</goals>
<configuration>
<rules>
<restrictImports implementation="de.skuzzle.enforcer.restrictimports.rule.RestrictImports">
<includeTestCode>true</includeTestCode>
<reason>Use hadoop-common provided Preconditions rather than the one provided by Guava</reason>
<bannedImports>
<bannedImport>org.apache.hadoop.thirdparty.com.google.common.base.Preconditions</bannedImport>
<bannedImport>com.google.common.base.Preconditions</bannedImport>
</bannedImports>
</restrictImports>
</rules>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>
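The rule above fails the build during the `process-sources` phase whenever a source file imports either banned `Preconditions` class; the rest of this commit migrates call sites to the `org.apache.hadoop.util.Preconditions` shim from hadoop-common. Below is a minimal sketch, not part of the patch, of what a compliant call site looks like; the class name `QuotaExample` is made up, and only the `checkNotNull`/`checkArgument` overloads that are exercised elsewhere in this diff are used.

```java
// Allowed: the hadoop-common shim.
import org.apache.hadoop.util.Preconditions;

// Banned by the enforcer rule above:
//   org.apache.hadoop.thirdparty.com.google.common.base.Preconditions
//   com.google.common.base.Preconditions

public class QuotaExample {                      // hypothetical class, for illustration
  private final String path;
  private final short replication;

  public QuotaExample(String path, short replication) {
    // checkNotNull returns its argument; checkArgument takes an optional message.
    // Both usages mirror calls visible later in this commit.
    this.path = Preconditions.checkNotNull(path);
    Preconditions.checkArgument(replication > 0,
        "replication must be positive: " + replication);
    this.replication = replication;
  }
}
```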

View File

@@ -43,7 +43,7 @@
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

View File

@@ -196,7 +196,7 @@
import org.apache.hadoop.classification.VisibleForTesting;
import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.thirdparty.com.google.common.net.InetAddresses;
/********************************************************

View File

@@ -73,7 +73,7 @@
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.Write.RECOVER_LEASE_ON_CLOSE_EXCEPTION_DEFAULT;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.Write.RECOVER_LEASE_ON_CLOSE_EXCEPTION_KEY;

View File

@@ -18,7 +18,7 @@
package org.apache.hadoop.hdfs;
import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.CreateFlag;

View File

@@ -19,7 +19,7 @@
import org.apache.hadoop.net.DomainNameResolver;
import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.thirdparty.com.google.common.collect.Maps;
import org.apache.hadoop.thirdparty.com.google.common.primitives.SignedBytes;
import java.net.URISyntaxException;
@@ -145,7 +145,10 @@ public static byte[][] bytes2byteArray(byte[] bytes) {
   */
  public static byte[][] bytes2byteArray(byte[] bytes, int len,
      byte separator) {
-    Preconditions.checkPositionIndex(len, bytes.length);
+    if (len < 0 || len > bytes.length) {
+      throw new IndexOutOfBoundsException(
+          "Incorrect index [len, size] [" + len + ", " + bytes.length + "]");
+    }
    if (len == 0) {
      return new byte[][]{null};
    }
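Guava's `checkPositionIndex` appears to have no counterpart in the hadoop-common shim, so the patch inlines the bounds check shown above. As an illustration only (this helper is not added by the commit), the inlined logic is equivalent to:

```java
// Illustrative equivalent of Guava's Preconditions.checkPositionIndex(index, size):
// a position index may be anywhere in [0, size], inclusive of size.
final class PositionChecks {
  static int checkPositionIndex(int index, int size) {
    if (index < 0 || index > size) {
      throw new IndexOutOfBoundsException(
          "Incorrect index [len, size] [" + index + ", " + size + "]");
    }
    return index;
  }
}
```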

View File

@@ -22,7 +22,7 @@
import org.apache.hadoop.ipc.RpcNoSuchMethodException;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.commons.collections.list.TreeList;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;

View File

@@ -36,7 +36,7 @@
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

View File

@@ -24,7 +24,7 @@
import java.util.Map.Entry;
import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.thirdparty.com.google.common.collect.LinkedListMultimap;
import org.apache.hadoop.classification.InterfaceAudience;

View File

@@ -17,7 +17,7 @@
 */
package org.apache.hadoop.hdfs;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.util.StripedBlockUtil;

View File

@@ -17,7 +17,7 @@
 */
package org.apache.hadoop.hdfs;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.util.StripedBlockUtil;

View File

@@ -17,7 +17,7 @@
 */
package org.apache.hadoop.hdfs;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

View File

@@ -27,7 +27,7 @@
import org.apache.hadoop.util.Lists;
import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.thirdparty.com.google.common.collect.Maps;
@InterfaceAudience.Private

View File

@@ -31,7 +31,7 @@
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
/**
 * The Hdfs implementation of {@link FSDataInputStream}.

View File

@@ -28,7 +28,7 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSOutputStream;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
/**
 * The Hdfs implementation of {@link FSDataOutputStream}.

View File

@@ -75,7 +75,7 @@
import org.apache.hadoop.util.Time;
import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

View File

@@ -18,7 +18,7 @@
package org.apache.hadoop.hdfs.client.impl;
import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.ReadOption;
import org.apache.hadoop.fs.StorageType;

View File

@@ -18,7 +18,7 @@
package org.apache.hadoop.hdfs.client.impl;
import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;

View File

@@ -25,7 +25,7 @@
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.hdfs.DFSUtilClient;
/**

View File

@@ -26,7 +26,7 @@
import org.apache.hadoop.fs.InvalidRequestException;
import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.tracing.TraceScope;
import org.apache.hadoop.tracing.Tracer;

View File

@@ -17,7 +17,7 @@
 */
package org.apache.hadoop.hdfs.protocol;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.hadoop.classification.InterfaceAudience;

View File

@@ -17,7 +17,7 @@
 */
package org.apache.hadoop.hdfs.protocol;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.hadoop.classification.InterfaceAudience;

View File

@@ -18,7 +18,7 @@
package org.apache.hadoop.hdfs.protocol;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.commons.lang3.builder.ToStringBuilder;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.ipc.RemoteException;

View File

@@ -22,7 +22,7 @@
import java.util.Comparator;
import java.util.List;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.StorageType;

View File

@@ -18,7 +18,7 @@
package org.apache.hadoop.hdfs.protocol;
import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto;

View File

@@ -20,7 +20,7 @@
import java.util.Collections;
import java.util.List;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.hdfs.DFSUtilClient;

View File

@@ -17,7 +17,7 @@
 */
package org.apache.hadoop.hdfs.protocol;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto;
/**

View File

@@ -27,7 +27,7 @@
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto;
import org.apache.hadoop.hdfs.util.ByteBufferOutputStream;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.thirdparty.com.google.common.primitives.Shorts;
import org.apache.hadoop.thirdparty.com.google.common.primitives.Ints;
import org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException;

View File

@@ -31,7 +31,7 @@
import org.apache.hadoop.util.DirectBufferPool;
import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.thirdparty.com.google.common.primitives.Ints;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

View File

@@ -28,7 +28,7 @@
import java.util.Map;
import java.util.Set;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.thirdparty.com.google.common.cache.CacheBuilder;
import org.apache.hadoop.thirdparty.com.google.common.cache.CacheLoader;
import org.apache.hadoop.thirdparty.com.google.common.cache.LoadingCache;

View File

@@ -22,7 +22,7 @@
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

View File

@@ -23,7 +23,7 @@
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;
import com.fasterxml.jackson.databind.SerializationFeature;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

View File

@@ -26,7 +26,7 @@
import org.apache.hadoop.net.unix.DomainSocket;
import org.apache.hadoop.net.unix.DomainSocketWatcher;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
/**
 * DfsClientShm is a subclass of ShortCircuitShm which is used by the

View File

@@ -44,7 +44,7 @@
import org.apache.hadoop.net.unix.DomainSocketWatcher;
import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

View File

@@ -30,7 +30,7 @@
import org.apache.hadoop.net.unix.DomainSocket;
import org.apache.hadoop.util.PerformanceAdvisory;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.thirdparty.com.google.common.cache.Cache;
import org.apache.hadoop.thirdparty.com.google.common.cache.CacheBuilder;

View File

@@ -55,7 +55,7 @@
import org.apache.hadoop.util.Waitable;
import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.slf4j.Logger;

View File

@@ -32,7 +32,7 @@
import org.apache.hadoop.util.Time;
import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

View File

@@ -39,7 +39,7 @@
import sun.misc.Unsafe;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.thirdparty.com.google.common.collect.ComparisonChain;
import org.apache.hadoop.thirdparty.com.google.common.primitives.Ints;

View File

@@ -26,7 +26,7 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.util.Time;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

View File

@@ -28,7 +28,7 @@
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.DFSStripedOutputStream;

View File

@@ -19,7 +19,7 @@
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.thirdparty.com.google.common.collect.Maps;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileChecksum;

View File

@@ -138,7 +138,7 @@
import org.apache.hadoop.classification.VisibleForTesting;
import org.apache.hadoop.thirdparty.com.google.common.base.Charsets;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs;

View File

@@ -343,6 +343,38 @@
<excludeFilterFile>${basedir}/dev-support/findbugsExcludeFile.xml</excludeFilterFile>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-enforcer-plugin</artifactId>
<dependencies>
<dependency>
<groupId>de.skuzzle.enforcer</groupId>
<artifactId>restrict-imports-enforcer-rule</artifactId>
<version>${restrict-imports.enforcer.version}</version>
</dependency>
</dependencies>
<executions>
<execution>
<id>banned-illegal-imports</id>
<phase>process-sources</phase>
<goals>
<goal>enforce</goal>
</goals>
<configuration>
<rules>
<restrictImports implementation="de.skuzzle.enforcer.restrictimports.rule.RestrictImports">
<includeTestCode>true</includeTestCode>
<reason>Use hadoop-common provided Preconditions rather than the one provided by Guava</reason>
<bannedImports>
<bannedImport>org.apache.hadoop.thirdparty.com.google.common.base.Preconditions</bannedImport>
<bannedImport>com.google.common.base.Preconditions</bannedImport>
</bannedImports>
</restrictImports>
</rules>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>

View File

@@ -73,7 +73,7 @@
import org.json.simple.parser.JSONParser;
import org.json.simple.parser.ParseException;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.thirdparty.com.google.common.collect.Maps;
import java.io.BufferedInputStream;

View File

@@ -219,6 +219,38 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-enforcer-plugin</artifactId>
<dependencies>
<dependency>
<groupId>de.skuzzle.enforcer</groupId>
<artifactId>restrict-imports-enforcer-rule</artifactId>
<version>${restrict-imports.enforcer.version}</version>
</dependency>
</dependencies>
<executions>
<execution>
<id>banned-illegal-imports</id>
<phase>process-sources</phase>
<goals>
<goal>enforce</goal>
</goals>
<configuration>
<rules>
<restrictImports implementation="de.skuzzle.enforcer.restrictimports.rule.RestrictImports">
<includeTestCode>true</includeTestCode>
<reason>Use hadoop-common provided Preconditions rather than the one provided by Guava</reason>
<bannedImports>
<bannedImport>org.apache.hadoop.thirdparty.com.google.common.base.Preconditions</bannedImport>
<bannedImport>com.google.common.base.Preconditions</bannedImport>
</bannedImports>
</restrictImports>
</rules>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</profile>

View File

@@ -29,7 +29,7 @@
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSClient;

View File

@@ -19,7 +19,7 @@
import java.util.Comparator;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
/**
 * OffsetRange is the range of read/write request. A single point (e.g.,[5,5])

View File

@@ -58,7 +58,7 @@
import org.apache.hadoop.util.Time;
import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

View File

@@ -31,7 +31,7 @@
import org.apache.hadoop.util.Time;
import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.thirdparty.com.google.common.collect.Maps;
/**

View File

@@ -30,7 +30,7 @@
import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow;
import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
/**
 * WriteCtx saves the context of one write request, such as request, channel,

View File

@@ -305,6 +305,38 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
</filesets>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-enforcer-plugin</artifactId>
<dependencies>
<dependency>
<groupId>de.skuzzle.enforcer</groupId>
<artifactId>restrict-imports-enforcer-rule</artifactId>
<version>${restrict-imports.enforcer.version}</version>
</dependency>
</dependencies>
<executions>
<execution>
<id>banned-illegal-imports</id>
<phase>process-sources</phase>
<goals>
<goal>enforce</goal>
</goals>
<configuration>
<rules>
<restrictImports implementation="de.skuzzle.enforcer.restrictimports.rule.RestrictImports">
<includeTestCode>true</includeTestCode>
<reason>Use hadoop-common provided Preconditions rather than the one provided by Guava</reason>
<bannedImports>
<bannedImport>org.apache.hadoop.thirdparty.com.google.common.base.Preconditions</bannedImport>
<bannedImport>com.google.common.base.Preconditions</bannedImport>
</bannedImports>
</restrictImports>
</rules>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
<profiles>

View File

@@ -39,7 +39,7 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
/**
 * Order the destinations based on available space. This resolver uses a

View File

@@ -30,7 +30,7 @@
import java.util.Map;
import java.util.Set;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

View File

@@ -447,6 +447,38 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
</filesets>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-enforcer-plugin</artifactId>
<dependencies>
<dependency>
<groupId>de.skuzzle.enforcer</groupId>
<artifactId>restrict-imports-enforcer-rule</artifactId>
<version>${restrict-imports.enforcer.version}</version>
</dependency>
</dependencies>
<executions>
<execution>
<id>banned-illegal-imports</id>
<phase>process-sources</phase>
<goals>
<goal>enforce</goal>
</goals>
<configuration>
<rules>
<restrictImports implementation="de.skuzzle.enforcer.restrictimports.rule.RestrictImports">
<includeTestCode>true</includeTestCode>
<reason>Use hadoop-common provided Preconditions rather than the one provided by Guava</reason>
<bannedImports>
<bannedImport>org.apache.hadoop.thirdparty.com.google.common.base.Preconditions</bannedImport>
<bannedImport>com.google.common.base.Preconditions</bannedImport>
</bannedImports>
</restrictImports>
</rules>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>

View File

@@ -110,7 +110,7 @@
import org.apache.hadoop.classification.VisibleForTesting;
import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.thirdparty.protobuf.BlockingService;
@InterfaceAudience.Private
@@ -310,7 +310,11 @@ public static String byteArray2PathString(final byte[][] components,
    // specifically not using StringBuilder to more efficiently build
    // string w/o excessive byte[] copies and charset conversions.
    final int range = offset + length;
-    Preconditions.checkPositionIndexes(offset, range, components.length);
+    if (offset < 0 || range < offset || range > components.length) {
+      throw new IndexOutOfBoundsException(
+          "Incorrect index [offset, range, size] ["
+              + offset + ", " + range + ", " + components.length + "]");
+    }
    if (length == 0) {
      return "";
    }
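The same pattern stands in for Guava's `checkPositionIndexes(start, end, size)`, which validates an ordered pair of positions within a sequence of length size; note that `range < offset` also rejects a negative `length`, since `range` is `offset + length`. A sketch of an equivalent standalone check, illustrative only and not part of the patch:

```java
// Illustrative equivalent of Guava's Preconditions.checkPositionIndexes(start, end, size):
// both positions must lie in [0, size] and start must not exceed end.
final class RangeChecks {
  static void checkPositionIndexes(int start, int end, int size) {
    if (start < 0 || end < start || end > size) {
      throw new IndexOutOfBoundsException(
          "Incorrect index [offset, range, size] ["
              + start + ", " + end + ", " + size + "]");
    }
  }
}
```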

View File

@@ -57,7 +57,7 @@
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.util.Lists;
import org.slf4j.LoggerFactory;

View File

@@ -18,7 +18,7 @@
package org.apache.hadoop.hdfs.net;
import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.StorageType;

View File

@@ -18,7 +18,7 @@
package org.apache.hadoop.hdfs.net;
import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.net.InnerNode;

View File

@@ -35,7 +35,7 @@
import org.apache.hadoop.hdfs.server.datanode.Replica;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.thirdparty.protobuf.ByteString;
import org.apache.hadoop.thirdparty.protobuf.CodedInputStream;
import org.apache.hadoop.thirdparty.protobuf.CodedOutputStream;

View File

@@ -17,8 +17,6 @@
 */
package org.apache.hadoop.hdfs.protocol;
-import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull;
import java.util.Date;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -28,7 +26,7 @@
import org.apache.hadoop.util.IntrusiveCollection;
import org.apache.hadoop.util.IntrusiveCollection.Element;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
/**
 * Namenode class that tracks state related to a cached path.
@@ -63,7 +61,7 @@ public CacheDirective(long id, String path,
      short replication, long expiryTime) {
    Preconditions.checkArgument(id > 0);
    this.id = id;
-    this.path = checkNotNull(path);
+    this.path = Preconditions.checkNotNull(path);
    Preconditions.checkArgument(replication > 0);
    this.replication = replication;
    this.expiryTime = expiryTime;

View File

@@ -61,7 +61,7 @@
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.thirdparty.protobuf.RpcController;
import org.apache.hadoop.thirdparty.protobuf.ServiceException;

View File

@@ -36,7 +36,7 @@
import org.apache.hadoop.classification.VisibleForTesting;
import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
import org.apache.hadoop.thirdparty.com.google.common.collect.Maps;
import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListenableFuture;

View File

@@ -58,7 +58,7 @@
import org.apache.hadoop.util.StopWatch;
import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.thirdparty.com.google.common.net.InetAddresses;
import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.FutureCallback;
import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Futures;

View File

@@ -30,7 +30,7 @@
import org.apache.hadoop.util.Timer;
import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.thirdparty.com.google.common.collect.Maps;
import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.FutureCallback;
import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Futures;

View File

@@ -23,7 +23,7 @@
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
/**
 * Exception thrown when too many exceptions occur while gathering

View File

@@ -61,7 +61,7 @@
import org.apache.hadoop.classification.VisibleForTesting;
import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.thirdparty.protobuf.TextFormat;
/**

View File

@@ -23,7 +23,7 @@
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto;
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.thirdparty.com.google.common.collect.ComparisonChain;
import org.apache.hadoop.thirdparty.com.google.common.primitives.Booleans;

View File

@@ -73,7 +73,7 @@
import org.apache.hadoop.classification.VisibleForTesting;
import org.apache.hadoop.thirdparty.com.google.common.base.Charsets;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
import org.apache.hadoop.thirdparty.protobuf.TextFormat;

View File

@@ -18,7 +18,7 @@
package org.apache.hadoop.hdfs.qjournal.server;
import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.thirdparty.com.google.common.base.Strings;
import org.apache.hadoop.thirdparty.com.google.common.collect.Maps;
import org.apache.hadoop.util.Lists;

View File

@@ -46,7 +46,7 @@
import org.apache.hadoop.util.Timer;
import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.thirdparty.com.google.common.collect.HashMultiset;
import org.apache.hadoop.thirdparty.com.google.common.collect.Multiset;

View File

@@ -50,7 +50,7 @@
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
import org.apache.hadoop.security.token.delegation.DelegationKey;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.thirdparty.protobuf.ByteString;
/**

View File

@@ -17,7 +17,6 @@
 */
package org.apache.hadoop.hdfs.server.balancer;
-import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkArgument;
import static org.apache.hadoop.hdfs.protocol.BlockType.CONTIGUOUS;
import java.io.IOException;
@@ -75,7 +74,7 @@
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
/** <p>The balancer is a tool that balances disk space usage on an HDFS cluster
 * when some datanodes become full or when new empty nodes join the cluster.
@@ -998,7 +997,7 @@ static BalancerParameters parse(String[] args) {
    try {
      for(int i = 0; i < args.length; i++) {
        if ("-threshold".equalsIgnoreCase(args[i])) {
-          checkArgument(++i < args.length,
+          Preconditions.checkArgument(++i < args.length,
            "Threshold value is missing: args = " + Arrays.toString(args));
          try {
            double threshold = Double.parseDouble(args[i]);
@@ -1015,7 +1014,7 @@ static BalancerParameters parse(String[] args) {
            throw e;
          }
        } else if ("-policy".equalsIgnoreCase(args[i])) {
-          checkArgument(++i < args.length,
+          Preconditions.checkArgument(++i < args.length,
            "Policy value is missing: args = " + Arrays.toString(args));
          try {
            b.setBalancingPolicy(BalancingPolicy.parse(args[i]));
@@ -1036,7 +1035,7 @@ static BalancerParameters parse(String[] args) {
          i = processHostList(args, i, "source", sourceNodes);
          b.setSourceNodes(sourceNodes);
        } else if ("-blockpools".equalsIgnoreCase(args[i])) {
-          checkArgument(
+          Preconditions.checkArgument(
            ++i < args.length,
            "blockpools value is missing: args = "
                + Arrays.toString(args));
@@ -1045,7 +1044,7 @@ static BalancerParameters parse(String[] args) {
              + blockpools.toString());
          b.setBlockpools(blockpools);
        } else if ("-idleiterations".equalsIgnoreCase(args[i])) {
-          checkArgument(++i < args.length,
+          Preconditions.checkArgument(++i < args.length,
            "idleiterations value is missing: args = " + Arrays
                .toString(args));
          int maxIdleIteration = Integer.parseInt(args[i]);
@@ -1061,7 +1060,7 @@ static BalancerParameters parse(String[] args) {
          b.setRunAsService(true);
          LOG.info("Balancer will run as a long running service");
        } else if ("-hotBlockTimeInterval".equalsIgnoreCase(args[i])) {
-          checkArgument(++i < args.length,
+          Preconditions.checkArgument(++i < args.length,
            "hotBlockTimeInterval value is missing: args = "
                + Arrays.toString(args));
          long hotBlockTimeInterval = Long.parseLong(args[i]);
@@ -1077,7 +1076,7 @@ static BalancerParameters parse(String[] args) {
              + Arrays.toString(args));
        }
      }
-      checkArgument(excludedNodes == null || includedNodes == null,
+      Preconditions.checkArgument(excludedNodes == null || includedNodes == null,
        "-exclude and -include options cannot be specified together.");
    } catch(RuntimeException e) {
      printUsage(System.err);
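With the static import of Guava's `checkArgument` removed, the parser qualifies every call on the hadoop-common shim instead, keeping the `++i < args.length` idiom that advances to a flag's value and bounds-checks it in one expression. A compact sketch of that guard, with a hypothetical `requireValue` helper that is not part of the patch:

```java
import java.util.Arrays;
import org.apache.hadoop.util.Preconditions;

// Illustrative only: the flag-parsing guard used above, factored into a
// helper. The check is qualified on the hadoop-common shim rather than
// relying on the removed static import of Guava's checkArgument.
final class FlagArgs {
  static String requireValue(String[] args, int valueIndex, String flag) {
    Preconditions.checkArgument(valueIndex < args.length,
        flag + " value is missing: args = " + Arrays.toString(args));
    return args[valueIndex];
  }
}
```

Used as, for example, `double threshold = Double.parseDouble(FlagArgs.requireValue(args, ++i, "-threshold"));`.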

View File

@@ -85,7 +85,7 @@
import org.apache.hadoop.util.Time;
import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
/** Dispatching block replica moves between datanodes. */
@InterfaceAudience.Private

View File

@@ -31,7 +31,7 @@
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.RateLimiter;
import org.apache.hadoop.ha.HAServiceProtocol;
import org.apache.hadoop.hdfs.DFSConfigKeys;

View File

@@ -29,7 +29,7 @@
import java.util.Random;
import java.util.Set;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;

View File

@@ -18,7 +18,7 @@
package org.apache.hadoop.hdfs.server.blockmanagement;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSConfigKeys;

View File

@@ -18,7 +18,7 @@
package org.apache.hadoop.hdfs.server.blockmanagement;
import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

View File

@@ -22,7 +22,7 @@
import java.util.LinkedList;
import java.util.List;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.Block;

View File

@@ -17,7 +17,7 @@
 */
package org.apache.hadoop.hdfs.server.blockmanagement;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.Block;

View File

@@ -18,7 +18,7 @@
package org.apache.hadoop.hdfs.server.blockmanagement;
import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockType;

View File

@@ -126,7 +126,7 @@
import org.apache.hadoop.util.Time;
import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

View File

@@ -36,7 +36,7 @@
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

View File

@@ -25,7 +25,7 @@
 import java.util.Map;
 import java.util.Set;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.AddBlockFlag;

View File

@@ -26,7 +26,7 @@
 import java.util.*;
 import java.util.concurrent.TimeUnit;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.AddBlockFlag;

View File

@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.util.Time;

View File

@@ -18,7 +18,7 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.StorageType;

View File

@@ -19,7 +19,7 @@
 import static org.apache.hadoop.hdfs.server.blockmanagement.CorruptReplicasMap.Reason;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.hdfs.protocol.Block;
 /**

View File

@@ -55,7 +55,7 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 /**
  * Scans the namesystem, scheduling blocks to be cached as appropriate.

View File

@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.server.namenode.INode;

View File

@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
-import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkArgument;
 import static org.apache.hadoop.util.Time.monotonicNow;
 import java.util.Queue;
@@ -30,6 +29,7 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
+import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -105,7 +105,7 @@ void activate(Configuration conf) {
         DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY,
         DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_DEFAULT,
         TimeUnit.SECONDS);
-    checkArgument(intervalSecs >= 0, "Cannot set a negative " +
+    Preconditions.checkArgument(intervalSecs >= 0, "Cannot set a negative " +
         "value for " + DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY);
     int blocksPerInterval = conf.getInt(
@@ -122,7 +122,7 @@ void activate(Configuration conf) {
          DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY);
     }
-    checkArgument(blocksPerInterval > 0,
+    Preconditions.checkArgument(blocksPerInterval > 0,
         "Must set a positive value for "
         + DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY);
@@ -130,8 +130,8 @@ void activate(Configuration conf) {
         DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES,
         DFSConfigKeys
             .DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES_DEFAULT);
-    checkArgument(maxConcurrentTrackedNodes >= 0, "Cannot set a negative " +
-        "value for "
+    Preconditions.checkArgument(maxConcurrentTrackedNodes >= 0,
+        "Cannot set a negative value for "
         + DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES);
     Class cls = null;
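The hunks above are representative of the whole commit: the Guava import is dropped in favour of org.apache.hadoop.util.Preconditions, and statically imported checkArgument calls become explicit Preconditions.checkArgument calls. A minimal sketch of the resulting call-site shape follows; the class name, method, and configuration key are illustrative assumptions and not taken from this diff, while Configuration.getLong and Preconditions.checkArgument are the real APIs used above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.Preconditions;

public class IntervalCheckSketch {
  // Illustrative key; the real code reads keys from DFSConfigKeys.
  private static final String INTERVAL_KEY = "example.decommission.interval.secs";

  static long readIntervalSecs(Configuration conf, long defaultSecs) {
    long intervalSecs = conf.getLong(INTERVAL_KEY, defaultSecs);
    // Validation goes through the hadoop-common Preconditions class
    // rather than a static import of Guava's checkArgument.
    Preconditions.checkArgument(intervalSecs >= 0,
        "Cannot set a negative value for " + INTERVAL_KEY);
    return intervalSecs;
  }
}

The same org.apache.hadoop.util.Preconditions class also backs the plain one-line import swaps in the remaining files of this diff.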

View File

@@ -24,7 +24,7 @@
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.thirdparty.com.google.common.net.InetAddresses;
 import org.apache.hadoop.fs.StorageType;

View File

@@ -18,7 +18,7 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.thirdparty.com.google.common.collect.HashMultimap;
 import org.apache.hadoop.thirdparty.com.google.common.collect.Multimap;
 import org.apache.hadoop.thirdparty.com.google.common.collect.UnmodifiableIterator;

View File

@@ -31,7 +31,7 @@
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.MetaRecoveryContext;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
 import org.apache.hadoop.util.StringUtils;

View File

@@ -54,7 +54,7 @@
 import org.apache.hadoop.util.VersionInfo;
 import org.apache.hadoop.thirdparty.com.google.common.base.Charsets;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

View File

@@ -54,7 +54,7 @@
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.hdfs.web.URLConnectionFactory;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

View File

@@ -18,7 +18,7 @@
 package org.apache.hadoop.hdfs.server.datanode;
 import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.StorageType;

View File

@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.DFSUtilClient;

View File

@@ -33,7 +33,7 @@
 import org.apache.hadoop.classification.VisibleForTesting;
 import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.thirdparty.com.google.common.collect.Maps;
 import org.slf4j.Logger;

View File

@@ -48,7 +48,7 @@
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 /**
  * Manages storage for the set of BlockPoolSlices which share a particular

View File

@@ -19,7 +19,7 @@
 import org.apache.hadoop.classification.VisibleForTesting;
 import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;

Some files were not shown because too many files have changed in this diff.