diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b9f8787bcf..e00f67e6be 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -581,6 +581,9 @@ Release 2.7.1 - UNRELEASED
     HDFS-8163. Using monotonicNow for block report scheduling causes test
     failures on recently restarted systems. (Arpit Agarwal)
 
+    HDFS-8147. StorageGroup in Dispatcher should override equals and hashCode.
+    (surendra singh lilhore via szetszwo)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
index a3fd251c29..a7a6c4a40b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java
@@ -469,6 +469,25 @@ String getDisplayName() {
       public String toString() {
         return getDisplayName();
       }
+
+      @Override
+      public int hashCode() {
+        return getStorageType().hashCode() ^ getDatanodeInfo().hashCode();
+      }
+
+      @Override
+      public boolean equals(Object obj) {
+        if (this == obj) {
+          return true;
+        } else if (obj == null || !(obj instanceof StorageGroup)) {
+          return false;
+        } else {
+          final StorageGroup that = (StorageGroup) obj;
+          return this.getStorageType() == that.getStorageType()
+              && this.getDatanodeInfo().equals(that.getDatanodeInfo());
+        }
+      }
+
     }
 
     final DatanodeInfo datanode;
@@ -753,6 +772,16 @@ private void dispatchBlocks() {
         }
       }
     }
+
+    @Override
+    public int hashCode() {
+      return super.hashCode();
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+      return super.equals(obj);
+    }
   }
 
   public Dispatcher(NameNodeConnector nnc, Set<String> includedNodes,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
index 1de236e942..b2f9fce8c3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
@@ -277,4 +277,51 @@ public void testMoverCliWithFederationHA() throws Exception {
       cluster.shutdown();
     }
   }
+
+  @Test(timeout = 300000)
+  public void testTwoReplicaSameStorageTypeShouldNotSelect() throws Exception {
+    // HDFS-8147
+    final Configuration conf = new HdfsConfiguration();
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(3)
+        .storageTypes(
+            new StorageType[][] { { StorageType.DISK, StorageType.ARCHIVE },
+                { StorageType.DISK, StorageType.DISK },
+                { StorageType.DISK, StorageType.ARCHIVE } }).build();
+    try {
+      cluster.waitActive();
+      final DistributedFileSystem dfs = cluster.getFileSystem();
+      final String file = "/testForTwoReplicaSameStorageTypeShouldNotSelect";
+      // write to DISK
+      final FSDataOutputStream out = dfs.create(new Path(file), (short) 2);
+      out.writeChars("testForTwoReplicaSameStorageTypeShouldNotSelect");
+      out.close();
+
+      // verify before movement
+      LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
+      StorageType[] storageTypes = lb.getStorageTypes();
+      for (StorageType storageType : storageTypes) {
+        Assert.assertTrue(StorageType.DISK == storageType);
+      }
+      // move to ARCHIVE
+      dfs.setStoragePolicy(new Path(file), "COLD");
+      int rc = ToolRunner.run(conf, new Mover.Cli(),
+          new String[] { "-p", file.toString() });
+      Assert.assertEquals("Movement to ARCHIVE should be successful", 0, rc);
+
+      // wait until the namenode has been notified of the moved replicas
+      Thread.sleep(3000);
+      lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
+      storageTypes = lb.getStorageTypes();
+      int archiveCount = 0;
+      for (StorageType storageType : storageTypes) {
+        if (StorageType.ARCHIVE == storageType) {
+          archiveCount++;
+        }
+      }
+      Assert.assertEquals(2, archiveCount);
+    } finally {
+      cluster.shutdown();
+    }
+  }
 }
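
For context, the core of this patch is giving Dispatcher.StorageGroup value semantics, so that hash-based lookups treat two references to the same (datanode, storage type) pair as one entry; the new test exercises exactly that by requiring the Mover not to place two replicas on the same storage group. Below is a minimal, self-contained sketch of the same equals/hashCode pattern outside Hadoop; FakeStorageGroup and its String fields are hypothetical stand-ins for StorageGroup, DatanodeInfo, and StorageType, not the real APIs.

import java.util.HashSet;
import java.util.Set;

// Hypothetical stand-in for Dispatcher.StorageGroup: identity is the
// (datanode, storageType) pair, mirroring the overrides in the patch.
final class FakeStorageGroup {
  final String datanode;     // stands in for DatanodeInfo
  final String storageType;  // stands in for StorageType

  FakeStorageGroup(String datanode, String storageType) {
    this.datanode = datanode;
    this.storageType = storageType;
  }

  // Same shape as the patch: XOR the two fields' hash codes.
  @Override
  public int hashCode() {
    return storageType.hashCode() ^ datanode.hashCode();
  }

  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    } else if (!(obj instanceof FakeStorageGroup)) {
      return false;
    } else {
      final FakeStorageGroup that = (FakeStorageGroup) obj;
      return this.storageType.equals(that.storageType)
          && this.datanode.equals(that.datanode);
    }
  }
}

public class EqualsContractDemo {
  public static void main(String[] args) {
    final Set<FakeStorageGroup> chosen = new HashSet<>();
    chosen.add(new FakeStorageGroup("dn1", "ARCHIVE"));

    // A distinct instance describing the same pair is now found, so a
    // scheduler keyed on this set would refuse to pick it a second time.
    System.out.println(chosen.contains(
        new FakeStorageGroup("dn1", "ARCHIVE")));  // prints: true
  }
}

With the two overrides removed, the contains check falls back to Object identity and prints false, which mirrors the pre-patch behavior where duplicate selections of the same storage group could slip through.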