HDFS-10225. DataNode hot swap drives should disallow storage type changes. Contributed by Lei (Eddy) Xu.
commit 132deb4cac (parent 12aa184479)
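
For context: DataNode hot swap lets an operator edit dfs.datanode.data.dir and apply the new volume list to a running DataNode; parseChangedVolumes() computes which volumes are added, removed, or unchanged. The runtime reconfiguration is typically driven with the dfsadmin reconfig subcommand (host and IPC port below are placeholders):

    hdfs dfsadmin -reconfig datanode <dn_host:ipc_port> start
    hdfs dfsadmin -reconfig datanode <dn_host:ipc_port> status

Before this patch, a directory whose path stayed the same but whose storage type changed was treated as unchanged; the patch makes parseChangedVolumes() reject such a reconfiguration with an IOException.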
@@ -634,7 +634,7 @@ static class ChangedVolumes {
    * @param newVolumes a comma separated string that specifies the data volumes.
    * @return changed volumes.
    * @throws IOException if none of the directories are specified in the
-   *         configuration.
+   *         configuration, or the storage type of a directory is changed.
    */
   @VisibleForTesting
   ChangedVolumes parseChangedVolumes(String newVolumes) throws IOException {
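
The newVolumes string uses the same syntax as dfs.datanode.data.dir: each comma-separated entry may carry a storage-type prefix such as [SSD]/mnt/ssd0 and defaults to DISK without one. A minimal standalone sketch of that entry syntax (illustrative only, not Hadoop's StorageLocation parser; the class name DataDirEntry is made up):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class DataDirEntry {
  // Matches an optional "[TYPE]" prefix ahead of the directory path.
  private static final Pattern PREFIX = Pattern.compile("^\\[(\\w+)\\](.+)$");

  final String storageType;
  final String path;

  DataDirEntry(String entry) {
    Matcher m = PREFIX.matcher(entry.trim());
    if (m.matches()) {
      storageType = m.group(1).toUpperCase();  // e.g. SSD, ARCHIVE, RAM_DISK
      path = m.group(2);
    } else {
      storageType = "DISK";  // HDFS's default storage type
      path = entry.trim();
    }
  }

  public static void main(String[] args) {
    for (String entry : "/data/0,[SSD]/data/1".split(",")) {
      DataDirEntry d = new DataDirEntry(entry);
      System.out.println(d.storageType + " -> " + d.path);
    }
  }
}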
@@ -646,6 +646,12 @@ ChangedVolumes parseChangedVolumes(String newVolumes) throws IOException {
       throw new IOException("No directory is specified.");
     }
 
+    // Use the existing StorageLocation to detect storage type changes.
+    Map<String, StorageLocation> existingLocations = new HashMap<>();
+    for (StorageLocation loc : getStorageLocations(this.conf)) {
+      existingLocations.put(loc.getFile().getCanonicalPath(), loc);
+    }
+
     ChangedVolumes results = new ChangedVolumes();
     results.newLocations.addAll(locations);
 
@@ -659,6 +665,12 @@ ChangedVolumes parseChangedVolumes(String newVolumes) throws IOException {
         if (location.getFile().getCanonicalPath().equals(
             dir.getRoot().getCanonicalPath())) {
           sl.remove();
+          StorageLocation old = existingLocations.get(
+              location.getFile().getCanonicalPath());
+          if (old != null &&
+              old.getStorageType() != location.getStorageType()) {
+            throw new IOException("Changing storage type is not allowed.");
+          }
           results.unchangedLocations.add(location);
           found = true;
           break;
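
In isolation, the check these two hunks add amounts to: index the current volumes by canonical path, then fail if a requested volume reuses a path under a different storage type. A self-contained sketch under that reading (VolumeSpec and checkNoStorageTypeChanges are illustrative names, not Hadoop APIs):

import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class VolumeSpec {
  final String canonicalPath;
  final String storageType;
  VolumeSpec(String canonicalPath, String storageType) {
    this.canonicalPath = canonicalPath;
    this.storageType = storageType;
  }
}

class StorageTypeChangeCheck {
  // Throws if any requested volume keeps its path but changes storage type,
  // mirroring the guard added to parseChangedVolumes() above.
  static void checkNoStorageTypeChanges(List<VolumeSpec> existing,
      List<VolumeSpec> requested) throws IOException {
    Map<String, VolumeSpec> byPath = new HashMap<>();
    for (VolumeSpec v : existing) {
      byPath.put(v.canonicalPath, v);
    }
    for (VolumeSpec v : requested) {
      VolumeSpec old = byPath.get(v.canonicalPath);
      if (old != null && !old.storageType.equals(v.storageType)) {
        throw new IOException("Changing storage type is not allowed.");
      }
    }
  }

  public static void main(String[] args) throws IOException {
    List<VolumeSpec> existing = List.of(new VolumeSpec("/data/1", "DISK"));
    List<VolumeSpec> requested = List.of(new VolumeSpec("/data/1", "SSD"));
    checkNoStorageTypeChanges(existing, requested);  // throws IOException
  }
}

The sketch compares type strings with equals(); the patch itself can use != because org.apache.hadoop.fs.StorageType is an enum.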
@@ -27,6 +27,7 @@
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.BlockMissingException;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -82,7 +83,6 @@
 import static org.junit.Assert.fail;
 import static org.junit.Assume.assumeTrue;
 import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.anyBoolean;
 import static org.mockito.Matchers.anyString;
 import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.timeout;
@@ -256,6 +256,27 @@ public void testParseChangedVolumesFailures() throws IOException {
     }
   }
 
+  @Test
+  public void testParseStorageTypeChanges() throws IOException {
+    startDFSCluster(1, 1);
+    DataNode dn = cluster.getDataNodes().get(0);
+    Configuration conf = dn.getConf();
+    List<StorageLocation> oldLocations = DataNode.getStorageLocations(conf);
+
+    // Change storage type of an existing StorageLocation
+    String newLoc = String.format("[%s]%s", StorageType.SSD,
+        oldLocations.get(1).getUri());
+    String newDataDirs = oldLocations.get(0).toString() + "," + newLoc;
+
+    try {
+      dn.parseChangedVolumes(newDataDirs);
+      fail("should throw IOE because storage type changes.");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains(
+          "Changing storage type is not allowed", e);
+    }
+  }
+
   /** Add volumes to the first DataNode. */
   private void addVolumes(int numNewVolumes)
       throws ReconfigurationException, IOException {