HDFS-8147. StorageGroup in Dispatcher should override equals and hashCode. Contributed by surendra singh lilhore
This commit is contained in:
parent
26971e52ae
commit
416b84354e
@ -581,6 +581,9 @@ Release 2.7.1 - UNRELEASED
|
||||
HDFS-8163. Using monotonicNow for block report scheduling causes
|
||||
test failures on recently restarted systems. (Arpit Agarwal)
|
||||
|
||||
HDFS-8147. StorageGroup in Dispatcher should override equals and hashCode.
|
||||
(surendra singh lilhore via szetszwo)
|
||||
|
||||
Release 2.7.0 - 2015-04-20
|
||||
|
||||
INCOMPATIBLE CHANGES
|
||||
|
@ -469,6 +469,25 @@ String getDisplayName() {
|
||||
public String toString() {
|
||||
return getDisplayName();
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return getStorageType().hashCode() ^ getDatanodeInfo().hashCode();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if (this == obj) {
|
||||
return true;
|
||||
} else if (obj == null || !(obj instanceof StorageGroup)) {
|
||||
return false;
|
||||
} else {
|
||||
final StorageGroup that = (StorageGroup) obj;
|
||||
return this.getStorageType() == that.getStorageType()
|
||||
&& this.getDatanodeInfo().equals(that.getDatanodeInfo());
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
final DatanodeInfo datanode;
|
||||
@ -753,6 +772,16 @@ private void dispatchBlocks() {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
public int hashCode() {
  // Explicit delegation to the superclass hash; paired with the equals
  // override below to document that this subclass adds no identity state
  // of its own.
  return super.hashCode();
}
|
||||
|
||||
@Override
public boolean equals(Object obj) {
  // Explicit delegation to the superclass equality (storage type plus
  // datanode); overridden together with hashCode above so the pair stays
  // visibly consistent.
  return super.equals(obj);
}
|
||||
}
|
||||
|
||||
public Dispatcher(NameNodeConnector nnc, Set<String> includedNodes,
|
||||
|
@ -277,4 +277,51 @@ public void testMoverCliWithFederationHA() throws Exception {
|
||||
cluster.shutdown();
|
||||
}
|
||||
}
|
||||
|
||||
@Test(timeout = 300000)
|
||||
public void testTwoReplicaSameStorageTypeShouldNotSelect() throws Exception {
|
||||
// HDFS-8147
|
||||
final Configuration conf = new HdfsConfiguration();
|
||||
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
|
||||
.numDataNodes(3)
|
||||
.storageTypes(
|
||||
new StorageType[][] { { StorageType.DISK, StorageType.ARCHIVE },
|
||||
{ StorageType.DISK, StorageType.DISK },
|
||||
{ StorageType.DISK, StorageType.ARCHIVE } }).build();
|
||||
try {
|
||||
cluster.waitActive();
|
||||
final DistributedFileSystem dfs = cluster.getFileSystem();
|
||||
final String file = "/testForTwoReplicaSameStorageTypeShouldNotSelect";
|
||||
// write to DISK
|
||||
final FSDataOutputStream out = dfs.create(new Path(file), (short) 2);
|
||||
out.writeChars("testForTwoReplicaSameStorageTypeShouldNotSelect");
|
||||
out.close();
|
||||
|
||||
// verify before movement
|
||||
LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
|
||||
StorageType[] storageTypes = lb.getStorageTypes();
|
||||
for (StorageType storageType : storageTypes) {
|
||||
Assert.assertTrue(StorageType.DISK == storageType);
|
||||
}
|
||||
// move to ARCHIVE
|
||||
dfs.setStoragePolicy(new Path(file), "COLD");
|
||||
int rc = ToolRunner.run(conf, new Mover.Cli(),
|
||||
new String[] { "-p", file.toString() });
|
||||
Assert.assertEquals("Movement to ARCHIVE should be successfull", 0, rc);
|
||||
|
||||
// Wait till namenode notified
|
||||
Thread.sleep(3000);
|
||||
lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
|
||||
storageTypes = lb.getStorageTypes();
|
||||
int archiveCount = 0;
|
||||
for (StorageType storageType : storageTypes) {
|
||||
if (StorageType.ARCHIVE == storageType) {
|
||||
archiveCount++;
|
||||
}
|
||||
}
|
||||
Assert.assertEquals(archiveCount, 2);
|
||||
} finally {
|
||||
cluster.shutdown();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user