HDDS-843. [JDK11] Fix Javadoc errors in hadoop-hdds-server-scm module. Contributed by Dinesh Chitlangia.

This commit is contained in:
Giovanni Matteo Fumarola 2018-11-15 14:54:41 -08:00
parent 57866b366f
commit d8ec017a8d
4 changed files with 25 additions and 27 deletions

View File

@@ -1,18 +1,19 @@
/** /**
* Licensed to the Apache Software Foundation (ASF) under one or more * Licensed to the Apache Software Foundation (ASF) under one
* contributor license agreements. See the NOTICE file distributed with this * or more contributor license agreements. See the NOTICE file
* work for additional information regarding copyright ownership. The ASF * distributed with this work for additional information
* licenses this file to you under the Apache License, Version 2.0 (the * regarding copyright ownership. The ASF licenses this file
* "License"); you may not use this file except in compliance with the License. * to you under the Apache License, Version 2.0 (the
* You may obtain a copy of the License at * "License"); you may not use this file except in compliance
* <p/> * with the License. You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0 *
* <p/> * http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software * Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * distributed under the License is distributed on an "AS IS" BASIS,
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* License for the specific language governing permissions and limitations under * See the License for the specific language governing permissions and
* the License. * limitations under the License.
*/ */
package org.apache.hadoop.hdds.scm.exceptions; package org.apache.hadoop.hdds.scm.exceptions;
// Exceptions thrown by SCM. // Exceptions thrown by SCM.

View File

@@ -23,7 +23,6 @@
import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
import org.apache.hadoop.hdds.scm.exceptions.SCMException;
import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
@@ -40,17 +39,17 @@
/** /**
* A node manager supports a simple interface for managing a datanode. * A node manager supports a simple interface for managing a datanode.
* <p/> * <p>
* 1. A datanode registers with the NodeManager. * 1. A datanode registers with the NodeManager.
* <p/> * <p>
* 2. If the node is allowed to register, we add that to the nodes that we need * 2. If the node is allowed to register, we add that to the nodes that we need
* to keep track of. * to keep track of.
* <p/> * <p>
* 3. A heartbeat is made by the node at a fixed frequency. * 3. A heartbeat is made by the node at a fixed frequency.
* <p/> * <p>
* 4. A node can be in any of these 4 states: {HEALTHY, STALE, DEAD, * 4. A node can be in any of these 4 states: {HEALTHY, STALE, DEAD,
* DECOMMISSIONED} * DECOMMISSIONED}
* <p/> * <p>
* HEALTHY - It is a datanode that is regularly heartbeating us. * HEALTHY - It is a datanode that is regularly heartbeating us.
* *
* STALE - A datanode for which we have missed few heart beats. * STALE - A datanode for which we have missed few heart beats.
@@ -135,8 +134,8 @@ public interface NodeManager extends StorageContainerNodeProtocol,
* Remaps datanode to containers mapping to the new set of containers. * Remaps datanode to containers mapping to the new set of containers.
* @param datanodeDetails - DatanodeDetails * @param datanodeDetails - DatanodeDetails
* @param containerIds - Set of containerIDs * @param containerIds - Set of containerIDs
* @throws SCMException - if datanode is not known. For new datanode use * @throws NodeNotFoundException - if datanode is not known. For new datanode
* addDatanodeInContainerMap call. * use addDatanodeInContainerMap call.
*/ */
void setContainers(DatanodeDetails datanodeDetails, void setContainers(DatanodeDetails datanodeDetails,
Set<ContainerID> containerIds) throws NodeNotFoundException; Set<ContainerID> containerIds) throws NodeNotFoundException;

View File

@@ -24,7 +24,6 @@
import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
import org.apache.hadoop.hdds.scm.exceptions.SCMException;
import org.apache.hadoop.hdds.scm.node.states.NodeAlreadyExistsException; import org.apache.hadoop.hdds.scm.node.states.NodeAlreadyExistsException;
import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
@@ -281,7 +280,6 @@ public RegisteredCommand register(
* *
* @param datanodeDetails - DatanodeDetailsProto. * @param datanodeDetails - DatanodeDetailsProto.
* @return SCMheartbeat response. * @return SCMheartbeat response.
* @throws IOException
*/ */
@Override @Override
public List<SCMCommand> processHeartbeat(DatanodeDetails datanodeDetails) { public List<SCMCommand> processHeartbeat(DatanodeDetails datanodeDetails) {
@@ -396,8 +394,8 @@ public void removePipeline(Pipeline pipeline) {
* Update set of containers available on a datanode. * Update set of containers available on a datanode.
* @param datanodeDetails - DatanodeID * @param datanodeDetails - DatanodeID
* @param containerIds - Set of containerIDs * @param containerIds - Set of containerIDs
* @throws SCMException - if datanode is not known. For new datanode use * @throws NodeNotFoundException - if datanode is not known. For new datanode
* addDatanodeInContainerMap call. * use addDatanodeInContainerMap call.
*/ */
@Override @Override
public void setContainers(DatanodeDetails datanodeDetails, public void setContainers(DatanodeDetails datanodeDetails,

View File

@@ -69,7 +69,7 @@ private HddsTestUtils() {
* Creates list of ContainerInfo. * Creates list of ContainerInfo.
* *
* @param numContainers number of ContainerInfo to be included in list. * @param numContainers number of ContainerInfo to be included in list.
* @return List<ContainerInfo> * @return {@literal List<ContainerInfo>}
*/ */
public static List<ContainerInfo> getContainerInfo(int numContainers) { public static List<ContainerInfo> getContainerInfo(int numContainers) {
List<ContainerInfo> containerInfoList = new ArrayList<>(); List<ContainerInfo> containerInfoList = new ArrayList<>();