From a17584936cc5141e3f5612ac3ecf35e27968e439 Mon Sep 17 00:00:00 2001
From: Colin Patrick Mccabe
Date: Tue, 20 Jan 2015 20:11:09 -0800
Subject: [PATCH] HDFS-7610. Fix removal of dynamically added DN volumes (Lei
 (Eddy) Xu via Colin P. McCabe)

---
 .../fsdataset/impl/FsDatasetImpl.java              | 16 ++++----
 .../datanode/fsdataset/impl/FsVolumeList.java      |  8 +++-
 .../fsdataset/impl/TestFsDatasetImpl.java          | 37 +++++++++++++++++--
 3 files changed, 49 insertions(+), 12 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 534732317b..d8cc2875f8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -342,7 +342,7 @@ public void addVolume(final StorageLocation location,
     StorageType storageType = location.getStorageType();
     final FsVolumeImpl fsVolume = new FsVolumeImpl(
-        this, sd.getStorageUuid(), dir, this.conf, storageType);
+        this, sd.getStorageUuid(), sd.getCurrentDir(), this.conf, storageType);
     final ReplicaMap tempVolumeMap = new ReplicaMap(fsVolume);
     ArrayList<IOException> exceptions = Lists.newArrayList();
@@ -385,19 +385,19 @@ public void addVolume(final StorageLocation location,
    */
   @Override
   public synchronized void removeVolumes(Collection<StorageLocation> volumes) {
-    Set<File> volumeSet = new HashSet<File>();
+    Set<String> volumeSet = new HashSet<>();
     for (StorageLocation sl : volumes) {
-      volumeSet.add(sl.getFile());
+      volumeSet.add(sl.getFile().getAbsolutePath());
     }
     for (int idx = 0; idx < dataStorage.getNumStorageDirs(); idx++) {
       Storage.StorageDirectory sd = dataStorage.getStorageDir(idx);
-      if (volumeSet.contains(sd.getRoot())) {
-        String volume = sd.getRoot().toString();
+      String volume = sd.getRoot().getAbsolutePath();
+      if (volumeSet.contains(volume)) {
         LOG.info("Removing " + volume + " from FsDataset.");
 
         // Disable the volume from the service.
         asyncDiskService.removeVolume(sd.getCurrentDir());
-        this.volumes.removeVolume(volume);
+        this.volumes.removeVolume(sd.getRoot());
 
         // Removed all replica information for the blocks on the volume. Unlike
         // updating the volumeMap in addVolume(), this operation does not scan
@@ -407,7 +407,9 @@ public synchronized void removeVolumes(Collection<StorageLocation> volumes) {
           for (Iterator<ReplicaInfo> it = volumeMap.replicas(bpid).iterator();
                it.hasNext(); ) {
            ReplicaInfo block = it.next();
-            if (block.getVolume().getBasePath().equals(volume)) {
+            String absBasePath =
+                new File(block.getVolume().getBasePath()).getAbsolutePath();
+            if (absBasePath.equals(volume)) {
              invalidate(bpid, block);
              blocks.add(block);
              it.remove();
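The FsDatasetImpl change above is the heart of the fix: removeVolumes() used to collect the File returned by each StorageLocation and test sd.getRoot() for membership, and the replica sweep compared raw path strings. File.equals() compares abstract pathnames rather than the directories they resolve to, so a volume that was added dynamically under a relative or unnormalized path was never matched and could not be removed. A minimal, self-contained sketch of that pitfall, using only java.io.File (no HDFS classes; the directory name is arbitrary):

    import java.io.File;

    public class PathMatchSketch {
      public static void main(String[] args) {
        File relative = new File("data4");           // as a caller might spell it
        File absolute = relative.getAbsoluteFile();  // as it may be recorded

        // Abstract-pathname comparison says these differ, even though they
        // name the same directory:
        System.out.println(relative.equals(absolute));              // false
        System.out.println(relative.toString()
            .equals(absolute.toString()));                          // false

        // Normalizing both sides first, as the patched code does, matches:
        System.out.println(relative.getAbsolutePath()
            .equals(absolute.getAbsolutePath()));                   // true
      }
    }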
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
index ba19897fc1..c837593385 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 
+import java.io.File;
 import java.io.IOException;
 import java.nio.channels.ClosedChannelException;
 import java.util.ArrayList;
@@ -322,13 +323,16 @@ private void removeVolume(FsVolumeImpl target) {
    * Dynamically remove volume in the list.
    * @param volume the volume to be removed.
    */
-  void removeVolume(String volume) {
+  void removeVolume(File volume) {
     // Make a copy of volumes to remove one volume.
     final FsVolumeImpl[] curVolumes = volumes.get();
     final List<FsVolumeImpl> volumeList = Lists.newArrayList(curVolumes);
     for (Iterator<FsVolumeImpl> it = volumeList.iterator(); it.hasNext(); ) {
       FsVolumeImpl fsVolume = it.next();
-      if (fsVolume.getBasePath().equals(volume)) {
+      String basePath, targetPath;
+      basePath = new File(fsVolume.getBasePath()).getAbsolutePath();
+      targetPath = volume.getAbsolutePath();
+      if (basePath.equals(targetPath)) {
         // Make sure the removed volume is the one in the curVolumes.
         removeVolume(fsVolume);
       }
     }
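FsVolumeList.removeVolume() now accepts a File and normalizes both sides of the comparison, so the lookup succeeds however the volume path was spelled when it was added. The surrounding copy-on-write discipline (snapshot the published array, mutate a private copy, then republish) can be sketched in isolation as below; VolumeListSketch and its field are invented for illustration, and the real class hands the matched FsVolumeImpl to its removeVolume(FsVolumeImpl) overload, which also handles reference counting, rather than swapping the array inline:

    import java.io.File;
    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Iterator;
    import java.util.List;
    import java.util.concurrent.atomic.AtomicReference;

    class VolumeListSketch {
      // Readers iterate the snapshot array; writers copy, mutate, republish.
      private final AtomicReference<File[]> volumes =
          new AtomicReference<>(new File[0]);

      void removeVolume(File target) {
        final File[] curVolumes = volumes.get();
        final List<File> volumeList =
            new ArrayList<>(Arrays.asList(curVolumes));
        for (Iterator<File> it = volumeList.iterator(); it.hasNext(); ) {
          // Compare normalized absolute paths, as the patched check does.
          if (it.next().getAbsolutePath().equals(target.getAbsolutePath())) {
            it.remove();
          }
        }
        volumes.set(volumeList.toArray(new File[0]));
      }
    }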
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
index 0120dfe4ef..ca936b334f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
@@ -46,6 +46,7 @@
 import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
@@ -70,6 +71,7 @@ public class TestFsDatasetImpl {
   private static final String BASE_DIR =
       new FileSystemTestHelper().getTestRootDir();
   private static final int NUM_INIT_VOLUMES = 2;
+  private static final String CLUSTER_ID = "cluster-id";
   private static final String[] BLOCK_POOL_IDS = {"bpid-0", "bpid-1"};
 
   // Use to generate storageUuid
@@ -136,10 +138,11 @@ public void testAddVolumes() throws IOException {
     Set<String> expectedVolumes = new HashSet<String>();
     List<NamespaceInfo> nsInfos = Lists.newArrayList();
     for (String bpid : BLOCK_POOL_IDS) {
-      nsInfos.add(new NamespaceInfo(0, "cluster-id", bpid, 1));
+      nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
     }
     for (int i = 0; i < numNewVolumes; i++) {
       String path = BASE_DIR + "/newData" + i;
+      expectedVolumes.add(path);
       StorageLocation loc = StorageLocation.parse(path);
       Storage.StorageDirectory sd = createStorageDirectory(new File(path));
       DataStorage.VolumeBuilder builder =
@@ -156,7 +159,8 @@ public void testAddVolumes() throws IOException {
 
     Set<String> actualVolumes = new HashSet<String>();
     for (int i = 0; i < numNewVolumes; i++) {
-      dataset.getVolumes().get(numExistingVolumes + i).getBasePath();
+      actualVolumes.add(
+          dataset.getVolumes().get(numExistingVolumes + i).getBasePath());
     }
     assertEquals(actualVolumes, expectedVolumes);
   }
@@ -210,6 +214,33 @@ public void run() {}
         .deleteBlocks(anyString(), any(Block[].class));
   }
 
+  @Test(timeout = 5000)
+  public void testRemoveNewlyAddedVolume() throws IOException {
+    final int numExistingVolumes = dataset.getVolumes().size();
+    List<NamespaceInfo> nsInfos = new ArrayList<>();
+    for (String bpid : BLOCK_POOL_IDS) {
+      nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
+    }
+    String newVolumePath = BASE_DIR + "/newVolumeToRemoveLater";
+    StorageLocation loc = StorageLocation.parse(newVolumePath);
+
+    Storage.StorageDirectory sd = createStorageDirectory(new File(newVolumePath));
+    DataStorage.VolumeBuilder builder =
+        new DataStorage.VolumeBuilder(storage, sd);
+    when(storage.prepareVolume(eq(datanode), eq(loc.getFile()),
+        anyListOf(NamespaceInfo.class)))
+        .thenReturn(builder);
+
+    dataset.addVolume(loc, nsInfos);
+    assertEquals(numExistingVolumes + 1, dataset.getVolumes().size());
+
+    when(storage.getNumStorageDirs()).thenReturn(numExistingVolumes + 1);
+    when(storage.getStorageDir(numExistingVolumes)).thenReturn(sd);
+    List<StorageLocation> volumesToRemove = Arrays.asList(loc);
+    dataset.removeVolumes(volumesToRemove);
+    assertEquals(numExistingVolumes, dataset.getVolumes().size());
+  }
+
   @Test(timeout = 5000)
   public void testChangeVolumeWithRunningCheckDirs() throws IOException {
     RoundRobinVolumeChoosingPolicy<FsVolumeImpl> blockChooser =
@@ -234,7 +265,7 @@ public void testChangeVolumeWithRunningCheckDirs() throws IOException {
       @Override
       public Object answer(InvocationOnMock invocationOnMock)
           throws Throwable {
-        volumeList.removeVolume("data4");
+        volumeList.removeVolume(new File("data4"));
         volumeList.addVolume(newVolume);
         return null;
       }
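The final hunk lives inside a Mockito doAnswer() callback in testChangeVolumeWithRunningCheckDirs(): while a mocked volume's checkDirs() is executing, the stubbed answer removes the data4 volume (now passed as a File, per the new signature) and adds another, checking that removal stays safe against a concurrent checkDirs() pass. Reduced to a standalone sketch, with the Volume interface and all names invented, assuming Mockito on the classpath and Java 8 lambdas:

    import static org.mockito.Mockito.doAnswer;
    import static org.mockito.Mockito.mock;

    import java.io.File;
    import java.util.List;
    import java.util.concurrent.CopyOnWriteArrayList;

    public class DoAnswerSketch {
      public interface Volume {
        void checkDirs();
      }

      public static void main(String[] args) {
        final List<File> volumes = new CopyOnWriteArrayList<>();
        volumes.add(new File("data4").getAbsoluteFile());

        Volume mocked = mock(Volume.class);
        // Mutate the volume list while the mocked checkDirs() runs,
        // mirroring how the test removes data4 mid-call.
        doAnswer(invocation -> {
          volumes.removeIf(v -> v.getAbsolutePath()
              .equals(new File("data4").getAbsolutePath()));
          return null;
        }).when(mocked).checkDirs();

        mocked.checkDirs();
        System.out.println(volumes.isEmpty()); // prints: true
      }
    }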