From 301641811d93ac22dc6fe1a05f18c1f266cc5e54 Mon Sep 17 00:00:00 2001 From: Wei Yan Date: Wed, 29 Nov 2017 09:43:03 -0800 Subject: [PATCH] HDFS-12835. Fix the javadoc errors in Router-based federation. --- .../resolver/ActiveNamenodeResolver.java | 7 +++--- .../resolver/MembershipNamenodeResolver.java | 3 +-- .../resolver/MountTableResolver.java | 5 +++-- .../resolver/NamenodeStatusReport.java | 8 +++---- .../federation/router/ConnectionManager.java | 4 +--- .../federation/router/ConnectionPool.java | 4 ++-- .../federation/router/FederationUtil.java | 2 +- .../router/NamenodeHeartbeatService.java | 6 ++--- .../hdfs/server/federation/router/Router.java | 6 ++--- .../federation/router/RouterRpcClient.java | 22 +++++++++---------- .../federation/router/RouterRpcServer.java | 4 ++-- .../federation/store/CachedRecordStore.java | 7 ++---- .../server/federation/store/RecordStore.java | 2 +- .../store/driver/StateStoreDriver.java | 9 ++++---- .../driver/impl/StateStoreFileBaseImpl.java | 6 ++--- .../driver/impl/StateStoreFileSystemImpl.java | 4 ++-- .../driver/impl/StateStoreZooKeeperImpl.java | 5 +++-- .../store/impl/MembershipStoreImpl.java | 5 ++--- 18 files changed, 53 insertions(+), 56 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/ActiveNamenodeResolver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/ActiveNamenodeResolver.java index 477053d5d4..1773b34925 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/ActiveNamenodeResolver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/ActiveNamenodeResolver.java @@ -94,7 +94,7 @@ void updateActiveNamenode( * @return True if the node was registered and successfully committed to the * data store. * @throws IOException Throws exception if the namenode could not be - * registered. + * registered. */ boolean registerNamenode(NamenodeStatusReport report) throws IOException; @@ -103,7 +103,8 @@ void updateActiveNamenode( * federation. * * @return List of name spaces in the federation - * @throws Throws exception if the namespace list is not available. + * @throws IOException Throws exception if the namespace list is not + * available. */ Set getNamespaces() throws IOException; @@ -111,7 +112,7 @@ void updateActiveNamenode( * Assign a unique identifier for the parent router service. * Required to report the status to the namenode resolver. * - * @param router Unique string identifier for the router. + * @param routerId Unique string identifier for the router. */ void setRouterId(String routerId); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MembershipNamenodeResolver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MembershipNamenodeResolver.java index 0950cde28d..98ddd22940 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MembershipNamenodeResolver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MembershipNamenodeResolver.java @@ -270,8 +270,7 @@ public Set getNamespaces() throws IOException { * (if showStandby) 3) Most recently updated UNAVAILABLE registration (if * showUnavailable). EXPIRED registrations are ignored. * - * @param query The select query for NN registrations. 
- * @param excludes List of NNs to exclude from matching results. + * @param request The select query for NN registrations. * @param addUnavailable include UNAVAILABLE registrations. * @param addExpired include EXPIRED registrations. * @return List of memberships or null if no registrations that diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java index 13e3db39e4..4457cba099 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MountTableResolver.java @@ -207,7 +207,7 @@ public void removeEntry(final String srcPath) { /** * Invalidates all cache entries below this path. It requires the write lock. * - * @param src Source path. + * @param path Source path. */ private void invalidateLocationCache(final String path) { if (locationCache.isEmpty()) { @@ -449,7 +449,8 @@ public String toString() { /** * Build a location for this result beneath the discovered mount point. * - * @param result Tree node search result. + * @param path Path to build for. + * @param entry Mount table entry. * @return PathLocation containing the namespace, local path. */ private static PathLocation buildLocation( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/NamenodeStatusReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/NamenodeStatusReport.java index f8759e8713..555e2eebe2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/NamenodeStatusReport.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/NamenodeStatusReport.java @@ -289,9 +289,9 @@ public int getNumDecomDeadDatanodes() { * @param numFiles Number of files. * @param numBlocks Total number of blocks. * @param numBlocksMissing Number of missing blocks. - * @param numOfBlocksPendingReplication Number of blocks pending replication. - * @param numOfBlocksUnderReplicated Number of blocks under replication. - * @param numOfBlocksPendingDeletion Number of blocks pending deletion. + * @param numBlocksPendingReplication Number of blocks pending replication. + * @param numBlocksUnderReplicated Number of blocks under replication. + * @param numBlocksPendingDeletion Number of blocks pending deletion. 
*/ public void setNamesystemInfo(long available, long total, long numFiles, long numBlocks, long numBlocksMissing, @@ -385,4 +385,4 @@ public String toString() { return String.format("%s-%s:%s", nameserviceId, namenodeId, serviceAddress); } -} \ No newline at end of file +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java index 543d964c24..2e45280b97 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionManager.java @@ -93,8 +93,6 @@ public class ConnectionManager { * Creates a proxy client connection pool manager. * * @param config Configuration for the connections. - * @param minPoolSize Min size of the connection pool. - * @param maxPoolSize Max size of the connection pool. */ public ConnectionManager(Configuration config) { this.conf = config; @@ -428,4 +426,4 @@ public void shutdown() { this.interrupt(); } } -} \ No newline at end of file +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java index ca113efa78..5c77c5910e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/ConnectionPool.java @@ -171,7 +171,7 @@ protected ConnectionContext getConnection() { /** * Add a connection to the current pool. It uses a Copy-On-Write approach. * - * @param conns New connections to add to the pool. + * @param conn New connection to add to the pool. */ public synchronized void addConnection(ConnectionContext conn) { List tmpConnections = new ArrayList<>(this.connections); @@ -334,4 +334,4 @@ protected static ConnectionContext newConnection(Configuration conf, ConnectionContext connection = new ConnectionContext(clientProxy); return connection; } -} \ No newline at end of file +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java index 99af2d83db..224cac17b0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java @@ -155,7 +155,7 @@ public static FileSubclusterResolver newFileSubclusterResolver( * Creates an instance of an ActiveNamenodeResolver from the configuration. * * @param conf Configuration that defines the namenode resolver class. - * @param obj Context object passed to class constructor. + * @param stateStore State store passed to class constructor. * @return New active namenode resolver. 
*/ public static ActiveNamenodeResolver newActiveNamenodeResolver( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java index 38f63e5367..7d69a26cfa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java @@ -89,8 +89,8 @@ public class NamenodeHeartbeatService extends PeriodicService { /** * Create a new Namenode status updater. * @param resolver Namenode resolver service to handle NN registration. - * @param nameserviceId Identifier of the nameservice. - * @param namenodeId Identifier of the namenode in HA. + * @param nsId Identifier of the nameservice. + * @param nnId Identifier of the namenode in HA. */ public NamenodeHeartbeatService( ActiveNamenodeResolver resolver, String nsId, String nnId) { @@ -320,7 +320,7 @@ public String getNamenodeDesc() { /** * Get the parameters for a Namenode from JMX and add them to the report. - * @param webAddress Web interface of the Namenode to monitor. + * @param address Web interface of the Namenode to monitor. * @param report Namenode status report to update with JMX data. */ private void updateJMXParameters( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java index 443b9a7d10..413566ed05 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java @@ -267,7 +267,7 @@ public RouterRpcServer getRpcServer() { /** * Set the current RPC socket for the router. * - * @param rpcAddress RPC address. + * @param address RPC address. */ protected void setRpcServerAddress(InetSocketAddress address) { this.rpcAddress = address; @@ -310,7 +310,7 @@ protected RouterAdminServer createAdminServer() throws IOException { /** * Set the current Admin socket for the router. * - * @param adminAddress Admin RPC address. + * @param address Admin RPC address. */ protected void setAdminServerAddress(InetSocketAddress address) { this.adminAddress = address; @@ -513,7 +513,7 @@ public String getRouterId() { /** * Sets a unique ID for this router. * - * @param router Identifier of the Router. + * @param id Identifier of the Router. */ public void setRouterId(String id) { this.routerId = id; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java index 932295e8df..cac37132ad 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java @@ -264,7 +264,7 @@ private static IOException toIOException(Exception e) { /** * If we should retry the RPC call. * - * @param ex Exception reported. + * @param ioe IOException reported. * @param retryCount Number of retries. 
* @return Retry decision. * @throws IOException Original exception if the retry policy generates one. @@ -675,8 +675,8 @@ public Object invokeSequential( /** * Checks if a result matches the required result class. * - * @param expectedResultClass Required result class, null to skip the check. - * @param result The result to check. + * @param expectedClass Required result class, null to skip the check. + * @param clazz The result to check. * @return True if the result is an instance of the required class or if the * expected class is null. */ @@ -693,8 +693,8 @@ private static boolean isExpectedClass(Class expectedClass, Object clazz) { /** * Checks if a result matches the expected value. * - * @param expectedResultValue The expected value, null to skip the check. - * @param result The result to check. + * @param expectedValue The expected value, null to skip the check. + * @param value The result to check. * @return True if the result is equals to the expected value or if the * expected value is null. */ @@ -717,7 +717,7 @@ private static boolean isExpectedValue(Object expectedValue, Object value) { * * @param The type of the remote location. * @param locations List of remote locations to call concurrently. - * @param remoteMethod The remote method and parameters to invoke. + * @param method The remote method and parameters to invoke. * @param requireResponse If true an exception will be thrown if all calls do * not complete. If false exceptions are ignored and all data results * successfully received are returned. @@ -740,12 +740,12 @@ public Map invokeConcurrent( * RemoteException or IOException. * * @param locations List of remote locations to call concurrently. - * @param remoteMethod The remote method and parameters to invoke. + * @param method The remote method and parameters to invoke. * @param requireResponse If true an exception will be thrown if all calls do * not complete. If false exceptions are ignored and all data results * successfully received are returned. * @param standby If the requests should go to the standby namenodes too. - * @param timeoutMs Timeout for each individual call. + * @param timeOutMs Timeout for each individual call. * @return Result of invoking the method per subcluster: nsId -> result. * @throws IOException If requiredResponse=true and any of the calls throw an * exception. @@ -877,7 +877,7 @@ public Object call() throws Exception { * Get a prioritized list of NNs that share the same nameservice ID (in the * same namespace). NNs that are reported as ACTIVE will be first in the list. * - * @param nameserviceId The nameservice ID for the namespace. + * @param nsId The nameservice ID for the namespace. * @return A prioritized list of NNs to use for communication. * @throws IOException If a NN cannot be located for the nameservice ID. */ @@ -898,7 +898,7 @@ private List getNamenodesForNameservice( * Get a prioritized list of NNs that share the same block pool ID (in the * same namespace). NNs that are reported as ACTIVE will be first in the list. * - * @param blockPoolId The blockpool ID for the namespace. + * @param bpId The blockpool ID for the namespace. * @return A prioritized list of NNs to use for communication. * @throws IOException If a NN cannot be located for the block pool ID. 
*/ @@ -929,4 +929,4 @@ private String getNameserviceForBlockPoolId(final String bpId) FederationNamenodeContext namenode = namenodes.get(0); return namenode.getNameserviceId(); } -} \ No newline at end of file +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java index ed91931bf9..3bb5ca47fe 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java @@ -1390,7 +1390,7 @@ public ContentSummary getContentSummary(String path) throws IOException { /** * Aggregate content summaries for each subcluster. * - * @param results Collection of individual summaries. + * @param summaries Collection of individual summaries. * @return Aggregated content summary. */ private ContentSummary aggregateContentSummary( @@ -1999,7 +1999,7 @@ private Map getMountPointDates(String path) { * * @param name Name of the mount point. * @param childrenNum Number of children. - * @param dates Map with the dates. + * @param date Map with the dates. * @return New HDFS file status representing a mount point. */ private HdfsFileStatus getMountPointStatus( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/CachedRecordStore.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/CachedRecordStore.java index fbece88fe6..cdd4449090 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/CachedRecordStore.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/CachedRecordStore.java @@ -86,7 +86,7 @@ protected CachedRecordStore(Class clazz, StateStoreDriver driver) { * * @param clazz Class of the record to store. * @param driver State Store driver. - * @param override If the entries should be override if they expire + * @param over If the entries should be override if they expire */ protected CachedRecordStore( Class clazz, StateStoreDriver driver, boolean over) { @@ -167,7 +167,6 @@ private boolean isUpdateTime() { * expired state. * * @param query RecordQueryResult containing the data to be inspected. - * @param clazz Type of objects contained in the query. * @throws IOException */ public void overrideExpiredRecords(QueryResult query) throws IOException { @@ -194,9 +193,7 @@ public void overrideExpiredRecords(QueryResult query) throws IOException { * Updates the state store with any record overrides we detected, such as an * expired state. * - * @param driver State store driver for the data store. * @param record Record record to be updated. - * @param clazz Type of data record. 
* @throws IOException */ public void overrideExpiredRecord(R record) throws IOException { @@ -242,4 +239,4 @@ protected QueryResult getCachedRecordsAndTimeStamp() this.readLock.unlock(); } } -} \ No newline at end of file +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/RecordStore.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/RecordStore.java index 524f432968..53a8b82e23 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/RecordStore.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/RecordStore.java @@ -80,7 +80,7 @@ protected StateStoreDriver getDriver() { /** * Build a state store API implementation interface. * - * @param interfaceClass The specific interface implementation to create + * @param clazz The specific interface implementation to create * @param driver The {@link StateStoreDriver} implementation in use. * @return An initialized instance of the specified state store API * implementation. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreDriver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreDriver.java index 3ebab0bda9..c9b1ce6900 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreDriver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreDriver.java @@ -128,9 +128,10 @@ public StateStoreMetrics getMetrics() { /** * Initialize storage for a single record class. * - * @param name String reference of the record class to initialize, used to - * construct paths and file names for the record. Determined by - * configuration settings for the specific driver. + * @param className String reference of the record class to initialize, + * used to construct paths and file names for the record. + * Determined by configuration settings for the specific + * driver. * @param clazz Record type corresponding to the provided name. * @return True if successful, false otherwise. */ @@ -199,4 +200,4 @@ private String getHostname() { } return hostname; } -} \ No newline at end of file +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileBaseImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileBaseImpl.java index d7c00ffc1d..a0cd878fee 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileBaseImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileBaseImpl.java @@ -192,7 +192,7 @@ public boolean initRecordStorage( * Read all lines from a file and deserialize into the desired record type. * * @param reader Open handle for the file. - * @param recordClass Record class to create. + * @param clazz Record class to create. * @param includeDates True if dateModified/dateCreated are serialized. * @return List of records. * @throws IOException @@ -250,7 +250,7 @@ public QueryResult get(Class clazz, String sub) /** * Overwrite the existing data with a new data set. 
* - * @param list List of records to write. + * @param records List of records to write. * @param writer BufferedWriter stream to write to. * @return If the records were succesfully written. */ @@ -426,4 +426,4 @@ public boolean removeAll(Class clazz) boolean status = writeAll(emptyList, clazz); return status; } -} \ No newline at end of file +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileSystemImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileSystemImpl.java index 59684215ab..d9ef280565 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileSystemImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileSystemImpl.java @@ -98,7 +98,7 @@ public void close() throws Exception { /** * Get the folder path for the record class' data. * - * @param cls Data record class. + * @param clazz Data record class. * @return Path of the folder containing the record class' data files. */ private Path getPathForClass(Class clazz) { @@ -175,4 +175,4 @@ protected BufferedWriter getWriter( return null; } } -} \ No newline at end of file +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreZooKeeperImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreZooKeeperImpl.java index 97c821e0e0..1c3f756d41 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreZooKeeperImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreZooKeeperImpl.java @@ -309,7 +309,8 @@ private String getZNodeForClass(Class clazz) { /** * Creates a record from a string returned by ZooKeeper. * - * @param source Object from ZooKeeper. + * @param data The data to write. + * @param stat Stat of the data record to create. * @param clazz The data record type to create. * @return The created record. 
* @throws IOException @@ -321,4 +322,4 @@ private T createRecord( record.setDateModified(stat.getMtime()); return record; } -} \ No newline at end of file +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/impl/MembershipStoreImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/impl/MembershipStoreImpl.java index c28131f659..57b7b618b0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/impl/MembershipStoreImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/impl/MembershipStoreImpl.java @@ -267,8 +267,7 @@ public UpdateNamenodeRegistrationResponse updateNamenodeRegistration( * majority value 2) Otherwise the first sorted entry in the set of all * entries * - * @param entries - Collection of state store record objects of the same type - * @param fieldName - Field name for the value to compare + * @param records - Collection of state store record objects of the same type * @return record that is most representative of the field name */ private MembershipState getRepresentativeQuorum( @@ -308,4 +307,4 @@ private MembershipState getRepresentativeQuorum( return null; } } -} \ No newline at end of file +}
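
Note on the convention these hunks enforce (illustrative, not part of the patch itself): with doclint enabled, the javadoc tool flags a @param tag whose name does not match an actual parameter, and a @throws tag that is not followed by an exception type, as errors; that is what HDFS-12835 cleans up across these files. A minimal sketch in plain Java (the class, method, and parameter names below are invented for illustration and do not come from the Hadoop sources):

import java.io.IOException;
import java.util.List;

/** Illustrative example of the javadoc rules applied throughout this patch. */
public class JavadocExample {

  /**
   * Look up the registrations stored for a nameservice.
   *
   * @param <T> Type of the returned records; generic type parameters get
   *            their own @param tag.
   * @param nsId Identifier of the nameservice; the tag name must match the
   *             parameter name exactly, not a descriptive alias.
   * @param clazz Record type to return.
   * @return Records for the nameservice, most recent first.
   * @throws IOException If the records cannot be fetched; the exception
   *                     type must follow @throws.
   */
  public <T> List<T> getRegistrations(String nsId, Class<T> clazz)
      throws IOException {
    // Placeholder body; the example only demonstrates the javadoc shape.
    throw new IOException("not implemented in this sketch");
  }
}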