HDFS-11693. Ozone: Add archive support to containers. Contributed by Anu Engineer.

Xiaoyu Yao 2017-05-15 16:29:30 -07:00 committed by Owen O'Malley
parent 162dac96ce
commit 9c9be9f7f7
3 changed files with 160 additions and 14 deletions


@@ -74,6 +74,8 @@ import "Ozone.proto";
 * 16. GetSmallFile - A single RPC that combines both getKey and ReadChunk.
 *
 * 17. CloseContainer - Closes an open container and makes it immutable.
 *
 * 18. CopyContainer - Copies a container from a remote machine.
 */

enum Type {
@@ -128,6 +130,8 @@ enum Result {
  ERROR_CONTAINER_NOT_EMPTY = 23;
  ERROR_IN_COMPACT_DB = 24;
  UNCLOSED_CONTAINER_IO = 25;
  DELETE_ON_OPEN_CONTAINER = 26;
  CLOSED_CONTAINER_RETRY = 27;
}
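
The two new codes describe distinct lifecycle errors: DELETE_ON_OPEN_CONTAINER rejects a delete issued against a container that is still open, while CLOSED_CONTAINER_RETRY tells the caller that the container closed mid-request and the operation should be retried against another open container. A minimal dispatch sketch, assuming the protobuf-generated ContainerProtos classes and a command response message that carries this Result; retryOnNewContainer() is a hypothetical helper, not part of this change:

import java.io.IOException;

public final class ResultDispatchSketch {
  // Sketch only: branch on the new result codes of a command response.
  static void checkResult(
      ContainerProtos.ContainerCommandResponseProto response)
      throws IOException {
    switch (response.getResult()) {
    case DELETE_ON_OPEN_CONTAINER:
      // Deletes are only legal once a container has been closed.
      throw new IOException("Cannot delete from an open container.");
    case CLOSED_CONTAINER_RETRY:
      // The container closed while the request was in flight; retry the
      // write against another open container.
      retryOnNewContainer();
      break;
    default:
      break;
    }
  }

  private static void retryOnNewContainer() {
    // Hypothetical placeholder; real logic would pick another open container.
  }
}
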
message ContainerCommandRequestProto {
@@ -382,3 +386,18 @@ message GetSmallFileRequestProto {
message GetSmallFileResponseProto {
  required ReadChunkResponseProto data = 1;
}

message CopyContainerRequestProto {
  required string containerName = 1;
  required uint64 readOffset = 2;
  optional uint64 len = 3;
}

message CopyContainerResponseProto {
  required string archiveName = 1;
  required uint64 readOffset = 2;
  required uint64 len = 3;
  required bool eof = 4;
  repeated bytes data = 5;
  optional int64 checksum = 6;
}
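
Together these two messages define a chunked pull protocol: the reader requests a byte range of the container archive and advances readOffset until the sender signals eof. A minimal client-side paging loop, as a sketch rather than the committed implementation; it assumes the file compiles to an outer class named ContainerProtos, and sendCopyContainer() is a hypothetical transport stub that this diff does not define:

import com.google.protobuf.ByteString;

import java.io.FileOutputStream;
import java.io.IOException;

public final class CopyContainerClientSketch {
  private static final long PAGE_SIZE = 4L * 1024 * 1024; // 4 MB per request

  public static void fetchContainer(String containerName, String destPath)
      throws IOException {
    long offset = 0;
    try (FileOutputStream out = new FileOutputStream(destPath)) {
      while (true) {
        ContainerProtos.CopyContainerRequestProto request =
            ContainerProtos.CopyContainerRequestProto.newBuilder()
                .setContainerName(containerName)
                .setReadOffset(offset)
                .setLen(PAGE_SIZE)
                .build();
        // Hypothetical transport call; the wiring is outside this diff.
        ContainerProtos.CopyContainerResponseProto response =
            sendCopyContainer(request);
        for (ByteString chunk : response.getDataList()) {
          chunk.writeTo(out);
        }
        offset += response.getLen();
        if (response.getEof()) {
          return; // the optional checksum field could be verified here
        }
      }
    }
  }

  private static ContainerProtos.CopyContainerResponseProto sendCopyContainer(
      ContainerProtos.CopyContainerRequestProto request) {
    throw new UnsupportedOperationException("transport not shown");
  }
}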


@@ -0,0 +1,106 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.scm;

import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.fs.FileUtil;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Iterator;
import java.util.Random;
import java.util.zip.Adler32;
import java.util.zip.Checksum;
/**
 * Test archive creation and unpacking.
 */
public class TestArchive {
  private static final int DIR_COUNT = 10;
  private static final int SUB_DIR_COUNT = 3;
  private static final int FILE_COUNT = 10;

  @Rule
  public TemporaryFolder folder = new TemporaryFolder();

  @Rule
  public TemporaryFolder outputFolder = new TemporaryFolder();

  // Running checksum over every byte written in setUp(); testArchive()
  // compares it against a checksum of the unpacked archive contents.
  private Checksum crc = new Adler32();

  @Before
  public void setUp() throws Exception {
    Random r = new Random();
    final int megaByte = 1024 * 1024;
    // Lay out DIR_COUNT directories, each holding SUB_DIR_COUNT
    // subdirectories of FILE_COUNT files of random alphanumeric data,
    // updating the running checksum as each file is written.
    for (int x = 0; x < DIR_COUNT; x++) {
      File subdir = folder.newFolder(String.format("dir%d", x));
      for (int y = 0; y < SUB_DIR_COUNT; y++) {
        File targetDir = new File(subdir.getPath().concat(File.separator)
            .concat(String.format("subdir%d%d", x, y)));
        if (!targetDir.mkdirs()) {
          throw new IOException("Failed to create subdirectory. " +
              targetDir.toString());
        }
        for (int z = 0; z < FILE_COUNT; z++) {
          Path temp = Paths.get(targetDir.getPath().concat(File.separator)
              .concat(String.format("File%d.txt", z)));
          byte[] buf = RandomStringUtils.randomAlphanumeric(r.nextInt(megaByte))
              .getBytes("UTF-8");
          Files.write(temp, buf);
          crc.update(buf, 0, buf.length);
        }
      }
    }
  }
  @Test
  public void testArchive() throws Exception {
    Checksum readCrc = new Adler32();
    File archiveFile = new File(outputFolder.getRoot() + File.separator
        + "test.container.zip");
    long zipCheckSum = FileUtil.zip(folder.getRoot(), archiveFile);
    Assert.assertTrue(zipCheckSum > 0);

    File decomp = new File(outputFolder.getRoot() + File.separator +
        "decompress");
    if (!decomp.exists() && !decomp.mkdirs()) {
      throw new IOException("Unable to create the destination directory. " +
          decomp.getPath());
    }
    FileUtil.unZip(archiveFile, decomp);

    // Walk the unpacked tree and verify that both the file count and the
    // checksum over the contents match what setUp() generated.
    String[] patterns = {"txt"};
    Iterator<File> iter = FileUtils.iterateFiles(decomp, patterns, true);
    int count = 0;
    while (iter.hasNext()) {
      count++;
      byte[] buf = Files.readAllBytes(iter.next().toPath());
      readCrc.update(buf, 0, buf.length);
    }
    Assert.assertEquals(DIR_COUNT * SUB_DIR_COUNT * FILE_COUNT, count);
    Assert.assertEquals(crc.getValue(), readCrc.getValue());
  }
}
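
The test above exercises only the local archive round trip. For context, a hedged sketch of how a server side might plug FileUtil.zip() into the CopyContainer messages defined earlier; the class name, the re-archiving policy, and the ContainerProtos outer class are all assumptions for illustration, not code from this commit:

import com.google.protobuf.ByteString;
import org.apache.hadoop.fs.FileUtil;

import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;

public final class CopyContainerHandlerSketch {

  /** Serves one page of the zipped container starting at readOffset. */
  public static ContainerProtos.CopyContainerResponseProto readRange(
      File containerDir, File archiveFile, long readOffset, long len)
      throws IOException {
    // Re-archiving on every call keeps the sketch short; FileUtil.zip
    // returns a checksum over the archived data (the same call the test
    // above asserts on). A real implementation would create the archive
    // once and cache both it and its checksum.
    long checksum = FileUtil.zip(containerDir, archiveFile);

    try (RandomAccessFile raf = new RandomAccessFile(archiveFile, "r")) {
      long remaining = Math.max(0, raf.length() - readOffset);
      // Callers are expected to request bounded pages, so the cast is safe.
      int toRead = (int) Math.min(len, remaining);
      byte[] buf = new byte[toRead];
      raf.seek(readOffset);
      raf.readFully(buf);
      return ContainerProtos.CopyContainerResponseProto.newBuilder()
          .setArchiveName(archiveFile.getName())
          .setReadOffset(readOffset)
          .setLen(toRead)
          .setEof(readOffset + toRead >= raf.length())
          .addData(ByteString.copyFrom(buf))
          .setChecksum(checksum)
          .build();
    }
  }
}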


@@ -0,0 +1,21 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Test cases for SCM client classes.
 */
package org.apache.hadoop.scm;