HDFS-11598. Improve -setrep for Erasure Coded files. Contributed by Yiqun Lin.
parent bc7aff7cec
commit bbd68478d5
@@ -85,11 +85,20 @@ protected void processPath(PathData item) throws IOException {
     }
 
     if (item.stat.isFile()) {
-      if (!item.fs.setReplication(item.path, newRep)) {
-        throw new IOException("Could not set replication for: " + item);
-      }
-      out.println("Replication " + newRep + " set: " + item);
-      if (waitOpt) waitList.add(item);
+      // Do the checking if the file is erasure coded since
+      // replication factor for an EC file is meaningless.
+      if (!item.stat.isErasureCoded()) {
+        if (!item.fs.setReplication(item.path, newRep)) {
+          throw new IOException("Could not set replication for: " + item);
+        }
+        out.println("Replication " + newRep + " set: " + item);
+        if (waitOpt) {
+          waitList.add(item);
+        }
+      } else {
+        out.println("Did not set replication for: " + item
+            + ", because it's an erasure coded file.");
+      }
     }
   }
 
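For readers who call the FileSystem API directly rather than going through the shell, the same guard added to processPath() above can be mirrored in client code. A minimal sketch, not part of this commit; the class name and argument handling are illustrative:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative only: mirrors the check added to processPath() above.
// Skips setReplication() when the target is erasure coded, because the
// replication factor of an EC file is meaningless.
public class SetRepUnlessErasureCoded {
  public static void main(String[] args) throws IOException {
    Path path = new Path(args[0]);            // e.g. /tmp/foo (example path)
    short newRep = Short.parseShort(args[1]); // e.g. 2

    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(conf)) {
      FileStatus status = fs.getFileStatus(path);
      if (!status.isErasureCoded()) {
        if (!fs.setReplication(path, newRep)) {
          throw new IOException("Could not set replication for: " + path);
        }
        System.out.println("Replication " + newRep + " set: " + path);
      } else {
        System.out.println("Did not set replication for: " + path
            + ", because it's an erasure coded file.");
      }
    }
  }
}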
@@ -20,7 +20,9 @@
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
+import java.io.ByteArrayOutputStream;
 import java.io.IOException;
+import java.io.PrintStream;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
@@ -28,6 +30,7 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.junit.Test;
 
@@ -102,4 +105,45 @@ public void testSetRepWithStoragePolicyOnEmptyFile() throws Exception {
       cluster.shutdown();
     }
   }
+
+  @Test
+  public void testSetRepOnECFile() throws Exception {
+    ClientProtocol client;
+    Configuration conf = new HdfsConfiguration();
+    conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
+        StripedFileTestUtil.getDefaultECPolicy().getName());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+        .build();
+    cluster.waitActive();
+    client = NameNodeProxies.createProxy(conf,
+        cluster.getFileSystem(0).getUri(), ClientProtocol.class).getProxy();
+    client.setErasureCodingPolicy("/",
+        StripedFileTestUtil.getDefaultECPolicy().getName());
+
+    FileSystem dfs = cluster.getFileSystem();
+    try {
+      Path d = new Path("/tmp");
+      dfs.mkdirs(d);
+      Path f = new Path(d, "foo");
+      dfs.createNewFile(f);
+      FileStatus file = dfs.getFileStatus(f);
+      assertTrue(file.isErasureCoded());
+
+      ByteArrayOutputStream out = new ByteArrayOutputStream();
+      System.setOut(new PrintStream(out));
+      String[] args = {"-setrep", "2", "" + f};
+      FsShell shell = new FsShell();
+      shell.setConf(conf);
+      assertEquals(0, shell.run(args));
+      assertTrue(
+          out.toString().contains("Did not set replication for: /tmp/foo"));
+
+      // verify the replication factor of the EC file
+      file = dfs.getFileStatus(f);
+      assertEquals(1, file.getReplication());
+    } finally {
+      dfs.close();
+      cluster.shutdown();
+    }
+  }
 }
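As a usage note, the behaviour the test asserts (exit code 0 plus the "Did not set replication for:" message) can also be exercised against a running cluster by driving FsShell programmatically through ToolRunner. A minimal sketch, not part of this commit; the class name and the /ecdir/foo path are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.util.ToolRunner;

// Illustrative only: issues the same "-setrep" command the test runs,
// but through ToolRunner against whatever cluster the Configuration
// points at. On an erasure coded file the shell now prints
// "Did not set replication for: ..." and still returns exit code 0.
public class SetrepShellExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    int exitCode = ToolRunner.run(conf, new FsShell(),
        new String[] {"-setrep", "2", "/ecdir/foo"});  // example path
    System.exit(exitCode);
  }
}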