HDFS-2983. Relax the build version check to permit rolling upgrades within a release. Contributed by Aaron T. Myers.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1325110 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
parent
b31d9d9d86
commit
7f427646df
@ -370,6 +370,9 @@ Release 2.0.0 - UNRELEASED
|
||||
HDFS-3179. Improve the exception message thrown by DataStreamer when
|
||||
it failed to add a datanode. (szetszwo)
|
||||
|
||||
HDFS-2983. Relax the build version check to permit rolling upgrades within
|
||||
a release. (atm)
|
||||
|
||||
OPTIMIZATIONS
|
||||
|
||||
HDFS-3024. Improve performance of stringification in addStoredBlock (todd)
|
||||
|
@ -146,6 +146,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
|
||||
public static final int DFS_NAMENODE_NUM_CHECKPOINTS_RETAINED_DEFAULT = 2;
|
||||
public static final String DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY = "dfs.namenode.num.extra.edits.retained";
|
||||
public static final int DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_DEFAULT = 1000000; //1M
|
||||
public static final String DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_KEY = "dfs.namenode.min.supported.datanode.version";
|
||||
public static final String DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_DEFAULT = "3.0.0";
|
||||
|
||||
public static final String DFS_NAMENODE_EDITS_DIR_MINIMUM_KEY = "dfs.namenode.edits.dir.minimum";
|
||||
public static final int DFS_NAMENODE_EDITS_DIR_MINIMUM_DEFAULT = 1;
|
||||
@ -262,6 +264,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
|
||||
public static final String DFS_DATANODE_IPC_ADDRESS_KEY = "dfs.datanode.ipc.address";
|
||||
public static final int DFS_DATANODE_IPC_DEFAULT_PORT = 50020;
|
||||
public static final String DFS_DATANODE_IPC_ADDRESS_DEFAULT = "0.0.0.0" + DFS_DATANODE_IPC_DEFAULT_PORT;
|
||||
public static final String DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY = "dfs.datanode.min.supported.namenode.version";
|
||||
public static final String DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_DEFAULT = "3.0.0";
|
||||
|
||||
public static final String DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY = "dfs.block.access.token.enable";
|
||||
public static final boolean DFS_BLOCK_ACCESS_TOKEN_ENABLE_DEFAULT = false;
|
||||
|
@ -386,7 +386,7 @@ public static NamespaceInfo convert(NamespaceInfoProto info) {
|
||||
StorageInfoProto storage = info.getStorageInfo();
|
||||
return new NamespaceInfo(storage.getNamespceID(), storage.getClusterID(),
|
||||
info.getBlockPoolID(), storage.getCTime(), info.getDistUpgradeVersion(),
|
||||
info.getBuildVersion());
|
||||
info.getBuildVersion(), info.getSoftwareVersion());
|
||||
}
|
||||
|
||||
public static NamenodeCommand convert(NamenodeCommandProto cmd) {
|
||||
@ -612,13 +612,14 @@ public static DatanodeRegistrationProto convert(
|
||||
.newBuilder();
|
||||
return builder.setDatanodeID(PBHelper.convert((DatanodeID) registration))
|
||||
.setStorageInfo(PBHelper.convert(registration.getStorageInfo()))
|
||||
.setKeys(PBHelper.convert(registration.getExportedKeys())).build();
|
||||
.setKeys(PBHelper.convert(registration.getExportedKeys()))
|
||||
.setSoftwareVersion(registration.getSoftwareVersion()).build();
|
||||
}
|
||||
|
||||
public static DatanodeRegistration convert(DatanodeRegistrationProto proto) {
|
||||
return new DatanodeRegistration(PBHelper.convert(proto.getDatanodeID()),
|
||||
PBHelper.convert(proto.getStorageInfo()), PBHelper.convert(proto
|
||||
.getKeys()));
|
||||
.getKeys()), proto.getSoftwareVersion());
|
||||
}
|
||||
|
||||
public static DatanodeCommand convert(DatanodeCommandProto proto) {
|
||||
@ -894,7 +895,8 @@ public static NamespaceInfoProto convert(NamespaceInfo info) {
|
||||
.setBlockPoolID(info.getBlockPoolID())
|
||||
.setBuildVersion(info.getBuildVersion())
|
||||
.setDistUpgradeVersion(info.getDistributedUpgradeVersion())
|
||||
.setStorageInfo(PBHelper.convert((StorageInfo)info)).build();
|
||||
.setStorageInfo(PBHelper.convert((StorageInfo)info))
|
||||
.setSoftwareVersion(info.getSoftwareVersion()).build();
|
||||
}
|
||||
|
||||
// Located Block Arrays and Lists
|
||||
|
@ -32,7 +32,19 @@
|
||||
@InterfaceStability.Evolving
|
||||
public class IncorrectVersionException extends IOException {
|
||||
private static final long serialVersionUID = 1L;
|
||||
|
||||
public IncorrectVersionException(String message) {
|
||||
super(message);
|
||||
}
|
||||
|
||||
public IncorrectVersionException(String minimumVersion, String reportedVersion,
|
||||
String remoteDaemon, String thisDaemon) {
|
||||
this("The reported " + remoteDaemon + " version is too low to communicate" +
|
||||
" with this " + thisDaemon + ". " + remoteDaemon + " version: '" +
|
||||
reportedVersion + "' Minimum " + remoteDaemon + " version: '" +
|
||||
minimumVersion + "'");
|
||||
}
|
||||
|
||||
public IncorrectVersionException(int versionReported, String ofWhat) {
|
||||
this(versionReported, ofWhat, HdfsConstants.LAYOUT_VERSION);
|
||||
}
|
||||
@ -40,16 +52,9 @@ public IncorrectVersionException(int versionReported, String ofWhat) {
|
||||
public IncorrectVersionException(int versionReported,
|
||||
String ofWhat,
|
||||
int versionExpected) {
|
||||
super("Unexpected version "
|
||||
+ (ofWhat==null ? "" : "of " + ofWhat) + ". Reported: "
|
||||
+ versionReported + ". Expecting = " + versionExpected + ".");
|
||||
this("Unexpected version "
|
||||
+ (ofWhat==null ? "" : "of " + ofWhat) + ". Reported: "
|
||||
+ versionReported + ". Expecting = " + versionExpected + ".");
|
||||
}
|
||||
|
||||
public IncorrectVersionException(String versionReported,
|
||||
String ofWhat,
|
||||
String versionExpected) {
|
||||
super("Unexpected version "
|
||||
+ (ofWhat==null ? "" : "of " + ofWhat) + ". Reported: "
|
||||
+ versionReported + ". Expecting = " + versionExpected + ".");
|
||||
}
|
||||
}
|
||||
|
@ -37,7 +37,6 @@
|
||||
import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
|
||||
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
|
||||
import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
|
||||
import org.apache.hadoop.hdfs.server.common.Storage;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
|
||||
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
|
||||
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
|
||||
@ -49,9 +48,11 @@
|
||||
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
|
||||
import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
|
||||
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
|
||||
import org.apache.hadoop.hdfs.util.VersionUtil;
|
||||
import org.apache.hadoop.io.IOUtils;
|
||||
import org.apache.hadoop.ipc.RemoteException;
|
||||
import org.apache.hadoop.util.StringUtils;
|
||||
import org.apache.hadoop.util.VersionInfo;
|
||||
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
import com.google.common.collect.Maps;
|
||||
@ -178,17 +179,23 @@ NamespaceInfo retrieveNamespaceInfo() throws IOException {
|
||||
private void checkNNVersion(NamespaceInfo nsInfo)
|
||||
throws IncorrectVersionException {
|
||||
// build and layout versions should match
|
||||
String nsBuildVer = nsInfo.getBuildVersion();
|
||||
String stBuildVer = Storage.getBuildVersion();
|
||||
if (!nsBuildVer.equals(stBuildVer)) {
|
||||
LOG.warn("Data-node and name-node Build versions must be the same. " +
|
||||
"Namenode build version: " + nsBuildVer + "Datanode " +
|
||||
"build version: " + stBuildVer);
|
||||
throw new IncorrectVersionException(nsBuildVer, "namenode", stBuildVer);
|
||||
String nnVersion = nsInfo.getSoftwareVersion();
|
||||
String minimumNameNodeVersion = dnConf.getMinimumNameNodeVersion();
|
||||
if (VersionUtil.compareVersions(nnVersion, minimumNameNodeVersion) < 0) {
|
||||
IncorrectVersionException ive = new IncorrectVersionException(
|
||||
minimumNameNodeVersion, nnVersion, "NameNode", "DataNode");
|
||||
LOG.warn(ive.getMessage());
|
||||
throw ive;
|
||||
}
|
||||
String dnVersion = VersionInfo.getVersion();
|
||||
if (!nnVersion.equals(dnVersion)) {
|
||||
LOG.info("Reported NameNode version '" + nnVersion + "' does not match " +
|
||||
"DataNode version '" + dnVersion + "' but is within acceptable " +
|
||||
"limits. Note: This is normal during a rolling upgrade.");
|
||||
}
|
||||
|
||||
if (HdfsConstants.LAYOUT_VERSION != nsInfo.getLayoutVersion()) {
|
||||
LOG.warn("Data-node and name-node layout versions must be the same." +
|
||||
LOG.warn("DataNode and NameNode layout versions must be the same." +
|
||||
" Expected: "+ HdfsConstants.LAYOUT_VERSION +
|
||||
" actual "+ nsInfo.getLayoutVersion());
|
||||
throw new IncorrectVersionException(
|
||||
|
@ -31,6 +31,8 @@
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_TRANSFERTO_ALLOWED_KEY;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY;
|
||||
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_DEFAULT;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||
@ -58,6 +60,8 @@ class DNConf {
|
||||
final long deleteReportInterval;
|
||||
final long initialBlockReportDelay;
|
||||
final int writePacketSize;
|
||||
|
||||
final String minimumNameNodeVersion;
|
||||
|
||||
public DNConf(Configuration conf) {
|
||||
socketTimeout = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY,
|
||||
@ -111,5 +115,12 @@ public DNConf(Configuration conf) {
|
||||
this.syncOnClose = conf.getBoolean(DFS_DATANODE_SYNCONCLOSE_KEY,
|
||||
DFS_DATANODE_SYNCONCLOSE_DEFAULT);
|
||||
|
||||
this.minimumNameNodeVersion = conf.get(DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY,
|
||||
DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_DEFAULT);
|
||||
}
|
||||
|
||||
// We get minimumNameNodeVersion via a method so it can be mocked out in tests.
|
||||
String getMinimumNameNodeVersion() {
|
||||
return this.minimumNameNodeVersion;
|
||||
}
|
||||
}
|
||||
|
@ -673,6 +673,7 @@ DatanodeRegistration createBPRegistration(NamespaceInfo nsInfo) {
|
||||
bpRegistration.setIpcPort(getIpcPort());
|
||||
bpRegistration.setHostName(hostName);
|
||||
bpRegistration.setStorageID(getStorageId());
|
||||
bpRegistration.setSoftwareVersion(VersionInfo.getVersion());
|
||||
|
||||
StorageInfo storageInfo = storage.getBPStorage(nsInfo.getBlockPoolID());
|
||||
if (storageInfo == null) {
|
||||
|
@ -242,7 +242,7 @@ private BackupNodeRpcServer(Configuration conf, BackupNode nn)
|
||||
*/
|
||||
private void verifyJournalRequest(JournalInfo journalInfo)
|
||||
throws IOException {
|
||||
verifyVersion(journalInfo.getLayoutVersion());
|
||||
verifyLayoutVersion(journalInfo.getLayoutVersion());
|
||||
String errorMsg = null;
|
||||
int expectedNamespaceID = namesystem.getNamespaceInfo().getNamespaceID();
|
||||
if (journalInfo.getNamespaceId() != expectedNamespaceID) {
|
||||
|
@ -47,6 +47,7 @@
|
||||
import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAServiceProtocolService;
|
||||
import org.apache.hadoop.ha.protocolPB.HAServiceProtocolPB;
|
||||
import org.apache.hadoop.ha.protocolPB.HAServiceProtocolServerSideTranslatorPB;
|
||||
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||
import org.apache.hadoop.hdfs.DFSUtil;
|
||||
import org.apache.hadoop.hdfs.HDFSPolicyProvider;
|
||||
import org.apache.hadoop.hdfs.HdfsConfiguration;
|
||||
@ -107,6 +108,7 @@
|
||||
import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
|
||||
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
|
||||
import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
|
||||
import org.apache.hadoop.hdfs.util.VersionUtil;
|
||||
import org.apache.hadoop.io.EnumSetWritable;
|
||||
import org.apache.hadoop.io.Text;
|
||||
import org.apache.hadoop.ipc.ProtobufRpcEngine;
|
||||
@ -121,6 +123,7 @@
|
||||
import org.apache.hadoop.security.authorize.ProxyUsers;
|
||||
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
|
||||
import org.apache.hadoop.security.token.Token;
|
||||
import org.apache.hadoop.util.VersionInfo;
|
||||
|
||||
import com.google.protobuf.BlockingService;
|
||||
|
||||
@ -147,6 +150,8 @@ class NameNodeRpcServer implements NamenodeProtocols {
|
||||
/** The RPC server that listens to requests from clients */
|
||||
protected final RPC.Server clientRpcServer;
|
||||
protected final InetSocketAddress clientRpcAddress;
|
||||
|
||||
private final String minimumDataNodeVersion;
|
||||
|
||||
public NameNodeRpcServer(Configuration conf, NameNode nn)
|
||||
throws IOException {
|
||||
@ -261,6 +266,10 @@ public NameNodeRpcServer(Configuration conf, NameNode nn)
|
||||
// The rpc-server port can be ephemeral... ensure we have the correct info
|
||||
this.clientRpcAddress = this.clientRpcServer.getListenerAddress();
|
||||
nn.setRpcServerAddress(conf, clientRpcAddress);
|
||||
|
||||
this.minimumDataNodeVersion = conf.get(
|
||||
DFSConfigKeys.DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_KEY,
|
||||
DFSConfigKeys.DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_DEFAULT);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -326,7 +335,7 @@ public void errorReport(NamenodeRegistration registration,
|
||||
@Override // NamenodeProtocol
|
||||
public NamenodeRegistration register(NamenodeRegistration registration)
|
||||
throws IOException {
|
||||
verifyVersion(registration.getVersion());
|
||||
verifyLayoutVersion(registration.getVersion());
|
||||
NamenodeRegistration myRegistration = nn.setRegistration();
|
||||
namesystem.registerBackupNode(registration, myRegistration);
|
||||
return myRegistration;
|
||||
@ -829,9 +838,10 @@ public String getLinkTarget(String path) throws IOException {
|
||||
|
||||
|
||||
@Override // DatanodeProtocol
|
||||
public DatanodeRegistration registerDatanode(DatanodeRegistration nodeReg
|
||||
) throws IOException {
|
||||
verifyVersion(nodeReg.getVersion());
|
||||
public DatanodeRegistration registerDatanode(DatanodeRegistration nodeReg)
|
||||
throws IOException {
|
||||
verifyLayoutVersion(nodeReg.getVersion());
|
||||
verifySoftwareVersion(nodeReg);
|
||||
namesystem.registerDatanode(nodeReg);
|
||||
return nodeReg;
|
||||
}
|
||||
@ -916,7 +926,7 @@ public UpgradeCommand processUpgradeCommand(UpgradeCommand comm) throws IOExcept
|
||||
* @throws UnregisteredNodeException if the registration is invalid
|
||||
*/
|
||||
void verifyRequest(NodeRegistration nodeReg) throws IOException {
|
||||
verifyVersion(nodeReg.getVersion());
|
||||
verifyLayoutVersion(nodeReg.getVersion());
|
||||
if (!namesystem.getRegistrationID().equals(nodeReg.getRegistrationID())) {
|
||||
LOG.warn("Invalid registrationID - expected: "
|
||||
+ namesystem.getRegistrationID() + " received: "
|
||||
@ -989,10 +999,39 @@ public synchronized HAServiceStatus getServiceStatus()
|
||||
* @param version
|
||||
* @throws IOException
|
||||
*/
|
||||
void verifyVersion(int version) throws IOException {
|
||||
void verifyLayoutVersion(int version) throws IOException {
|
||||
if (version != HdfsConstants.LAYOUT_VERSION)
|
||||
throw new IncorrectVersionException(version, "data node");
|
||||
}
|
||||
|
||||
private void verifySoftwareVersion(DatanodeRegistration dnReg)
|
||||
throws IncorrectVersionException {
|
||||
String dnVersion = dnReg.getSoftwareVersion();
|
||||
if (VersionUtil.compareVersions(dnVersion, minimumDataNodeVersion) < 0) {
|
||||
IncorrectVersionException ive = new IncorrectVersionException(
|
||||
minimumDataNodeVersion, dnVersion, "DataNode", "NameNode");
|
||||
LOG.warn(ive.getMessage() + " DN: " + dnReg);
|
||||
throw ive;
|
||||
}
|
||||
String nnVersion = VersionInfo.getVersion();
|
||||
if (!dnVersion.equals(nnVersion)) {
|
||||
String messagePrefix = "Reported DataNode version '" + dnVersion +
|
||||
"' of DN " + dnReg + " does not match NameNode version '" +
|
||||
nnVersion + "'";
|
||||
long nnCTime = nn.getFSImage().getStorage().getCTime();
|
||||
long dnCTime = dnReg.getStorageInfo().getCTime();
|
||||
if (nnCTime != dnCTime) {
|
||||
IncorrectVersionException ive = new IncorrectVersionException(
|
||||
messagePrefix + " and CTime of DN ('" + dnCTime +
|
||||
"') does not match CTime of NN ('" + nnCTime + "')");
|
||||
LOG.warn(ive);
|
||||
throw ive;
|
||||
} else {
|
||||
LOG.info(messagePrefix +
|
||||
". Note: This is normal during a rolling upgrade.");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static String getClientMachine() {
|
||||
String clientMachine = NamenodeWebHdfsMethods.getRemoteAddress();
|
||||
|
@ -37,12 +37,14 @@ public class DatanodeRegistration extends DatanodeID
|
||||
|
||||
private StorageInfo storageInfo;
|
||||
private ExportedBlockKeys exportedKeys;
|
||||
private String softwareVersion;
|
||||
|
||||
public DatanodeRegistration(DatanodeID dn, StorageInfo info,
|
||||
ExportedBlockKeys keys) {
|
||||
ExportedBlockKeys keys, String softwareVersion) {
|
||||
super(dn);
|
||||
this.storageInfo = info;
|
||||
this.exportedKeys = keys;
|
||||
this.softwareVersion = softwareVersion;
|
||||
}
|
||||
|
||||
public DatanodeRegistration(String ipAddr, int xferPort) {
|
||||
@ -71,6 +73,14 @@ public void setExportedKeys(ExportedBlockKeys keys) {
|
||||
public ExportedBlockKeys getExportedKeys() {
|
||||
return exportedKeys;
|
||||
}
|
||||
|
||||
public void setSoftwareVersion(String softwareVersion) {
|
||||
this.softwareVersion = softwareVersion;
|
||||
}
|
||||
|
||||
public String getSoftwareVersion() {
|
||||
return softwareVersion;
|
||||
}
|
||||
|
||||
@Override // NodeRegistration
|
||||
public int getVersion() {
|
||||
|
@ -26,6 +26,7 @@
|
||||
import org.apache.hadoop.hdfs.server.common.Storage;
|
||||
import org.apache.hadoop.hdfs.server.common.StorageInfo;
|
||||
import org.apache.hadoop.hdfs.server.namenode.NNStorage;
|
||||
import org.apache.hadoop.util.VersionInfo;
|
||||
|
||||
/**
|
||||
* NamespaceInfo is returned by the name-node in reply
|
||||
@ -38,6 +39,7 @@ public class NamespaceInfo extends StorageInfo {
|
||||
String buildVersion;
|
||||
int distributedUpgradeVersion;
|
||||
String blockPoolID = ""; // id of the block pool
|
||||
String softwareVersion;
|
||||
|
||||
public NamespaceInfo() {
|
||||
super();
|
||||
@ -45,16 +47,18 @@ public NamespaceInfo() {
|
||||
}
|
||||
|
||||
public NamespaceInfo(int nsID, String clusterID, String bpID,
|
||||
long cT, int duVersion, String buildVersion) {
|
||||
long cT, int duVersion, String buildVersion, String softwareVersion) {
|
||||
super(HdfsConstants.LAYOUT_VERSION, nsID, clusterID, cT);
|
||||
blockPoolID = bpID;
|
||||
this.buildVersion = buildVersion;
|
||||
this.distributedUpgradeVersion = duVersion;
|
||||
this.softwareVersion = softwareVersion;
|
||||
}
|
||||
|
||||
public NamespaceInfo(int nsID, String clusterID, String bpID,
|
||||
long cT, int duVersion) {
|
||||
this(nsID, clusterID, bpID, cT, duVersion, Storage.getBuildVersion());
|
||||
this(nsID, clusterID, bpID, cT, duVersion, Storage.getBuildVersion(),
|
||||
VersionInfo.getVersion());
|
||||
}
|
||||
|
||||
public String getBuildVersion() {
|
||||
@ -68,6 +72,10 @@ public int getDistributedUpgradeVersion() {
|
||||
public String getBlockPoolID() {
|
||||
return blockPoolID;
|
||||
}
|
||||
|
||||
public String getSoftwareVersion() {
|
||||
return softwareVersion;
|
||||
}
|
||||
|
||||
public String toString(){
|
||||
return super.toString() + ";bpid=" + blockPoolID;
|
||||
|
@ -0,0 +1,101 @@
|
||||
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.util;

import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.hadoop.classification.InterfaceAudience;

/**
 * Utility for naturally-ordered comparison of software version strings,
 * e.g. used to decide whether two daemons' versions are compatible.
 */
@InterfaceAudience.Private
public abstract class VersionUtil {

  // Splits a single dot-separated version component into alternating runs of
  // digits and non-digits, e.g. "3a1" -> "3", "a", "1".
  private static final Pattern COMPONENT_GROUPS = Pattern.compile("(\\d+)|(\\D+)");

  /**
   * Splits the two versions on "." and performs a naturally-ordered
   * comparison of the resulting components. For example, the version string
   * "0.3" is considered to precede "0.20", despite the fact that lexical
   * comparison would consider "0.20" to precede "0.3". This method of
   * comparison is similar to the one used by package versioning systems like
   * deb and RPM.
   *
   * Version components are compared numerically whenever possible, however a
   * version component can contain non-numeric characters. When a non-numeric
   * group of characters is found in a version component, this group is
   * compared with the similarly-indexed group in the other version component.
   * If the other group is numeric, then the numeric group is considered to
   * precede the non-numeric group. If both groups are non-numeric, then a
   * lexical comparison is performed.
   *
   * If two versions have a different number of components, then only the
   * lower number of components are compared. If those components are
   * identical between the two versions, then the version with fewer
   * components is considered to precede the version with more components.
   *
   * @param version1 the first version to compare
   * @param version2 the second version to compare
   * @return a negative integer if version1 precedes version2, a positive
   *         integer if version2 precedes version1, and 0 if and only if the
   *         two versions are equal.
   */
  public static int compareVersions(String version1, String version2) {
    String[] components1 = version1.split("\\.");
    String[] components2 = version2.split("\\.");

    int shared = Math.min(components1.length, components2.length);
    for (int i = 0; i < shared; i++) {
      String c1 = components1[i];
      String c2 = components2[i];
      if (c1.equals(c2)) {
        continue;
      }
      // Components differ: walk their digit/non-digit groups in parallel and
      // decide on the first pair of groups that is not identical.
      Matcher groups1 = COMPONENT_GROUPS.matcher(c1);
      Matcher groups2 = COMPONENT_GROUPS.matcher(c2);
      while (groups1.find() && groups2.find()) {
        String g1 = groups1.group();
        String g2 = groups2.group();
        if (!g1.equals(g2)) {
          return compareUnequalGroups(g1, g2);
        }
      }
      // One component is a prefix of the other (group-wise); the shorter
      // component precedes the longer one.
      return c1.length() - c2.length();
    }
    // All shared components equal: fewer components precedes more components.
    return components1.length - components2.length;
  }

  /**
   * Compares two groups known to be unequal. Numeric pairs are compared by
   * value, non-numeric pairs lexically, and a numeric group precedes a
   * non-numeric one.
   */
  private static int compareUnequalGroups(String group1, String group2) {
    boolean numeric1 = isNumeric(group1);
    boolean numeric2 = isNumeric(group2);
    if (numeric1 && numeric2) {
      return Integer.parseInt(group1) - Integer.parseInt(group2);
    } else if (!numeric1 && !numeric2) {
      return group1.compareTo(group2);
    } else {
      return numeric1 ? -1 : 1;
    }
  }

  /** Returns true iff the given string parses as a (signed 32-bit) integer. */
  private static boolean isNumeric(String s) {
    try {
      Integer.parseInt(s);
      return true;
    } catch (NumberFormatException nfe) {
      return false;
    }
  }
}
|
@ -33,6 +33,7 @@ message DatanodeRegistrationProto {
|
||||
required DatanodeIDProto datanodeID = 1; // Datanode information
|
||||
required StorageInfoProto storageInfo = 2; // Node information
|
||||
required ExportedBlockKeysProto keys = 3; // Block keys
|
||||
required string softwareVersion = 4; // Software version of the DN, e.g. "2.0.0"
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -303,10 +303,11 @@ message RemoteEditLogManifestProto {
|
||||
* Namespace information that describes namespace on a namenode
|
||||
*/
|
||||
message NamespaceInfoProto {
|
||||
required string buildVersion = 1; // Software build version
|
||||
required string buildVersion = 1; // Software revision version (e.g. an svn or git revision)
|
||||
required uint32 distUpgradeVersion = 2; // Distributed upgrade version
|
||||
required string blockPoolID = 3; // block pool used by the namespace
|
||||
required StorageInfoProto storageInfo = 4;// Noe information
|
||||
required StorageInfoProto storageInfo = 4;// Node information
|
||||
required string softwareVersion = 5; // Software version number (e.g. 2.0.0)
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -17,24 +17,40 @@
|
||||
*/
|
||||
package org.apache.hadoop.hdfs;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.fail;
|
||||
import static org.mockito.Mockito.*;
|
||||
|
||||
import java.net.InetSocketAddress;
|
||||
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
|
||||
import org.apache.hadoop.hdfs.DFSClient;
|
||||
import junit.framework.TestCase;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
|
||||
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
|
||||
import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
|
||||
import org.apache.hadoop.hdfs.server.common.StorageInfo;
|
||||
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
|
||||
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
|
||||
import org.apache.hadoop.test.GenericTestUtils;
|
||||
import org.apache.hadoop.util.VersionInfo;
|
||||
import org.junit.Test;
|
||||
|
||||
/**
|
||||
* This class tests that a file need not be closed before its
|
||||
* data can be read by another client.
|
||||
*/
|
||||
public class TestDatanodeRegistration extends TestCase {
|
||||
public class TestDatanodeRegistration {
|
||||
|
||||
public static final Log LOG = LogFactory.getLog(TestDatanodeRegistration.class);
|
||||
|
||||
/**
|
||||
* Regression test for HDFS-894 ensures that, when datanodes
|
||||
* are restarted, the new IPC port is registered with the
|
||||
* namenode.
|
||||
*/
|
||||
@Test
|
||||
public void testChangeIpcPort() throws Exception {
|
||||
HdfsConfiguration conf = new HdfsConfiguration();
|
||||
MiniDFSCluster cluster = null;
|
||||
@ -74,4 +90,101 @@ public void testChangeIpcPort() throws Exception {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testRegistrationWithDifferentSoftwareVersions() throws Exception {
|
||||
Configuration conf = new HdfsConfiguration();
|
||||
conf.set(DFSConfigKeys.DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY, "3.0.0");
|
||||
MiniDFSCluster cluster = null;
|
||||
try {
|
||||
cluster = new MiniDFSCluster.Builder(conf)
|
||||
.numDataNodes(0)
|
||||
.build();
|
||||
|
||||
NamenodeProtocols rpcServer = cluster.getNameNodeRpc();
|
||||
|
||||
long nnCTime = cluster.getNamesystem().getFSImage().getStorage().getCTime();
|
||||
StorageInfo mockStorageInfo = mock(StorageInfo.class);
|
||||
doReturn(nnCTime).when(mockStorageInfo).getCTime();
|
||||
|
||||
DatanodeRegistration mockDnReg = mock(DatanodeRegistration.class);
|
||||
doReturn(HdfsConstants.LAYOUT_VERSION).when(mockDnReg).getVersion();
|
||||
doReturn("fake-storage-id").when(mockDnReg).getStorageID();
|
||||
doReturn(mockStorageInfo).when(mockDnReg).getStorageInfo();
|
||||
|
||||
// Should succeed when software versions are the same.
|
||||
doReturn("3.0.0").when(mockDnReg).getSoftwareVersion();
|
||||
rpcServer.registerDatanode(mockDnReg);
|
||||
|
||||
// Should succeed when software version of DN is above minimum required by NN.
|
||||
doReturn("4.0.0").when(mockDnReg).getSoftwareVersion();
|
||||
rpcServer.registerDatanode(mockDnReg);
|
||||
|
||||
// Should fail when software version of DN is below minimum required by NN.
|
||||
doReturn("2.0.0").when(mockDnReg).getSoftwareVersion();
|
||||
try {
|
||||
rpcServer.registerDatanode(mockDnReg);
|
||||
fail("Should not have been able to register DN with too-low version.");
|
||||
} catch (IncorrectVersionException ive) {
|
||||
GenericTestUtils.assertExceptionContains(
|
||||
"The reported DataNode version is too low", ive);
|
||||
LOG.info("Got expected exception", ive);
|
||||
}
|
||||
} finally {
|
||||
if (cluster != null) {
|
||||
cluster.shutdown();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testRegistrationWithDifferentSoftwareVersionsDuringUpgrade()
|
||||
throws Exception {
|
||||
Configuration conf = new HdfsConfiguration();
|
||||
conf.set(DFSConfigKeys.DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY, "1.0.0");
|
||||
MiniDFSCluster cluster = null;
|
||||
try {
|
||||
cluster = new MiniDFSCluster.Builder(conf)
|
||||
.numDataNodes(0)
|
||||
.build();
|
||||
|
||||
NamenodeProtocols rpcServer = cluster.getNameNodeRpc();
|
||||
|
||||
long nnCTime = cluster.getNamesystem().getFSImage().getStorage().getCTime();
|
||||
StorageInfo mockStorageInfo = mock(StorageInfo.class);
|
||||
doReturn(nnCTime).when(mockStorageInfo).getCTime();
|
||||
|
||||
DatanodeRegistration mockDnReg = mock(DatanodeRegistration.class);
|
||||
doReturn(HdfsConstants.LAYOUT_VERSION).when(mockDnReg).getVersion();
|
||||
doReturn("fake-storage-id").when(mockDnReg).getStorageID();
|
||||
doReturn(mockStorageInfo).when(mockDnReg).getStorageInfo();
|
||||
|
||||
// Should succeed when software versions are the same and CTimes are the
|
||||
// same.
|
||||
doReturn(VersionInfo.getVersion()).when(mockDnReg).getSoftwareVersion();
|
||||
rpcServer.registerDatanode(mockDnReg);
|
||||
|
||||
// Should succeed when software versions are the same and CTimes are
|
||||
// different.
|
||||
doReturn(nnCTime + 1).when(mockStorageInfo).getCTime();
|
||||
rpcServer.registerDatanode(mockDnReg);
|
||||
|
||||
// Should fail when software version of DN is different from NN and CTimes
|
||||
// are different.
|
||||
doReturn(VersionInfo.getVersion() + ".1").when(mockDnReg).getSoftwareVersion();
|
||||
try {
|
||||
rpcServer.registerDatanode(mockDnReg);
|
||||
fail("Should not have been able to register DN with different software" +
|
||||
" versions and CTimes");
|
||||
} catch (IncorrectVersionException ive) {
|
||||
GenericTestUtils.assertExceptionContains(
|
||||
"does not match CTime of NN", ive);
|
||||
LOG.info("Got expected exception", ive);
|
||||
}
|
||||
} finally {
|
||||
if (cluster != null) {
|
||||
cluster.shutdown();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -429,12 +429,13 @@ public void testConvertDatanodeRegistration() {
|
||||
ExportedBlockKeys expKeys = new ExportedBlockKeys(true, 9, 10,
|
||||
getBlockKey(1), keys);
|
||||
DatanodeRegistration reg = new DatanodeRegistration(dnId,
|
||||
new StorageInfo(), expKeys);
|
||||
new StorageInfo(), expKeys, "3.0.0");
|
||||
DatanodeRegistrationProto proto = PBHelper.convert(reg);
|
||||
DatanodeRegistration reg2 = PBHelper.convert(proto);
|
||||
compare(reg.getStorageInfo(), reg2.getStorageInfo());
|
||||
compare(reg.getExportedKeys(), reg2.getExportedKeys());
|
||||
compare((DatanodeID)reg, (DatanodeID)reg2);
|
||||
assertEquals(reg.getSoftwareVersion(), reg2.getSoftwareVersion());
|
||||
}
|
||||
|
||||
@Test
|
||||
|
@ -18,48 +18,105 @@
|
||||
|
||||
package org.apache.hadoop.hdfs.server.datanode;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.io.IOException;
import java.net.InetSocketAddress;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.VersionInfo;
import org.junit.Before;
import org.junit.Test;

/**
 * Tests the version checks performed by {@link BPServiceActor} when a DN
 * fetches namespace info from an NN during registration: software versions
 * may differ as long as the reported NN version is at or above the DN's
 * configured minimum, but layout versions must match exactly.
 */
public class TestDatanodeRegister {
  public static final Log LOG = LogFactory.getLog(TestDatanodeRegister.class);

  // Deliberately invalid address so no real NameNode is ever contacted.
  private static final InetSocketAddress INVALID_ADDR =
      new InetSocketAddress("127.0.0.1", 1);

  private BPServiceActor actor;
  NamespaceInfo fakeNsInfo;
  DNConf mockDnConf;

  /**
   * Wire up a BPServiceActor whose NameNode proxy is a mock that returns
   * {@link #fakeNsInfo}. Both software and layout versions start out
   * matching; individual tests re-stub the mocks to simulate mismatches.
   */
  @Before
  public void setUp() throws IOException {
    mockDnConf = mock(DNConf.class);
    doReturn(VersionInfo.getVersion()).when(mockDnConf).getMinimumNameNodeVersion();

    DataNode mockDN = mock(DataNode.class);
    doReturn(true).when(mockDN).shouldRun();
    doReturn(mockDnConf).when(mockDN).getDnConf();

    BPOfferService mockBPOS = mock(BPOfferService.class);
    doReturn(mockDN).when(mockBPOS).getDataNode();

    actor = new BPServiceActor(INVALID_ADDR, mockBPOS);

    fakeNsInfo = mock(NamespaceInfo.class);
    // Return a good software version.
    doReturn(VersionInfo.getVersion()).when(fakeNsInfo).getSoftwareVersion();
    // Return a good layout version for now.
    doReturn(HdfsConstants.LAYOUT_VERSION).when(fakeNsInfo).getLayoutVersion();

    DatanodeProtocolClientSideTranslatorPB fakeDnProt =
        mock(DatanodeProtocolClientSideTranslatorPB.class);
    when(fakeDnProt.versionRequest()).thenReturn(fakeNsInfo);
    actor.setNameNode(fakeDnProt);
  }

  @Test
  public void testSoftwareVersionDifferences() throws Exception {
    // We expect no exception to be thrown when the software versions match.
    assertEquals(VersionInfo.getVersion(),
        actor.retrieveNamespaceInfo().getSoftwareVersion());

    // We expect no exception to be thrown when the min NN version is below
    // the reported NN version.
    doReturn("4.0.0").when(fakeNsInfo).getSoftwareVersion();
    doReturn("3.0.0").when(mockDnConf).getMinimumNameNodeVersion();
    assertEquals("4.0.0", actor.retrieveNamespaceInfo().getSoftwareVersion());

    // When the NN reports a version that's too low, throw an exception.
    doReturn("3.0.0").when(fakeNsInfo).getSoftwareVersion();
    doReturn("4.0.0").when(mockDnConf).getMinimumNameNodeVersion();
    try {
      actor.retrieveNamespaceInfo();
      fail("Should have thrown an exception for NN with too-low version");
    } catch (IncorrectVersionException ive) {
      GenericTestUtils.assertExceptionContains(
          "The reported NameNode version is too low", ive);
      LOG.info("Got expected exception", ive);
    }
  }

  @Test
  public void testDifferentLayoutVersions() throws Exception {
    // We expect no exceptions to be thrown when the layout versions match.
    assertEquals(HdfsConstants.LAYOUT_VERSION,
        actor.retrieveNamespaceInfo().getLayoutVersion());

    // We expect an exception to be thrown when the NN reports a layout
    // version different from that of the DN.
    doReturn(HdfsConstants.LAYOUT_VERSION * 1000).when(fakeNsInfo)
        .getLayoutVersion();
    try {
      actor.retrieveNamespaceInfo();
      fail("Should have failed to retrieve NS info from DN with bad layout version");
    } catch (IncorrectVersionException ive) {
      GenericTestUtils.assertExceptionContains(
          "Unexpected version of namenode", ive);
      LOG.info("Got expected exception", ive);
    }
  }
}
|
||||
|
@ -58,6 +58,7 @@
|
||||
import org.apache.hadoop.net.NetworkTopology;
|
||||
import org.apache.hadoop.security.Groups;
|
||||
import org.apache.hadoop.util.StringUtils;
|
||||
import org.apache.hadoop.util.VersionInfo;
|
||||
import org.apache.log4j.Level;
|
||||
import org.apache.log4j.LogManager;
|
||||
|
||||
@ -783,6 +784,7 @@ private static int getNodePort(int num) throws IOException {
|
||||
String hostName = DNS.getDefaultHost("default", "default");
|
||||
dnRegistration = new DatanodeRegistration(ipAddr, getNodePort(dnIdx));
|
||||
dnRegistration.setHostName(hostName);
|
||||
dnRegistration.setSoftwareVersion(VersionInfo.getVersion());
|
||||
this.blocks = new ArrayList<Block>(blockCapacity);
|
||||
this.nrBlocks = 0;
|
||||
}
|
||||
|
@ -0,0 +1,62 @@
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.hdfs.util;
|
||||
|
||||
import static org.junit.Assert.*;
|
||||
|
||||
import org.apache.hadoop.test.GenericTestUtils;
|
||||
import org.junit.Test;
|
||||
|
||||
public class TestVersionUtil {
|
||||
|
||||
@Test
|
||||
public void testCompareVersions() {
|
||||
// Equal versions are equal.
|
||||
assertEquals(0, VersionUtil.compareVersions("2.0.0", "2.0.0"));
|
||||
assertEquals(0, VersionUtil.compareVersions("2.0.0a", "2.0.0a"));
|
||||
assertEquals(0, VersionUtil.compareVersions("1", "1"));
|
||||
|
||||
// Assert that lower versions are lower, and higher versions are higher.
|
||||
assertExpectedValues("1", "2.0.0");
|
||||
assertExpectedValues("1.0.0", "2");
|
||||
assertExpectedValues("1.0.0", "2.0.0");
|
||||
assertExpectedValues("1.0", "2.0.0");
|
||||
assertExpectedValues("1.0.0", "2.0.0");
|
||||
assertExpectedValues("1.0.0", "1.0.0a");
|
||||
assertExpectedValues("1.0.0.0", "2.0.0");
|
||||
assertExpectedValues("1.0.0", "1.0.0-dev");
|
||||
assertExpectedValues("1.0.0", "1.0.1");
|
||||
assertExpectedValues("1.0.0", "1.0.2");
|
||||
assertExpectedValues("1.0.0", "1.1.0");
|
||||
assertExpectedValues("2.0.0", "10.0.0");
|
||||
assertExpectedValues("1.0.0", "1.0.0a");
|
||||
assertExpectedValues("1.0.2a", "1.0.10");
|
||||
assertExpectedValues("1.0.2a", "1.0.2b");
|
||||
assertExpectedValues("1.0.2a", "1.0.2ab");
|
||||
assertExpectedValues("1.0.0a1", "1.0.0a2");
|
||||
assertExpectedValues("1.0.0a2", "1.0.0a10");
|
||||
assertExpectedValues("1.0", "1.a");
|
||||
assertExpectedValues("1.0", "1.a0");
|
||||
}
|
||||
|
||||
private static void assertExpectedValues(String lower, String higher) {
|
||||
assertTrue(VersionUtil.compareVersions(lower, higher) < 0);
|
||||
assertTrue(VersionUtil.compareVersions(higher, lower) > 0);
|
||||
}
|
||||
|
||||
}
|
Loading…
Reference in New Issue
Block a user