HDFS-10505. OIV's ReverseXML processor should support ACLs (Surendra Singh Lilhore via cmccabe)

Colin Patrick McCabe 2016-06-15 22:35:19 -07:00
parent b48f27e794
commit 2449db507d
3 changed files with 51 additions and 8 deletions
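
In short: the ACL bit-packing constants in FSImageFormatPBINode become public so the offline image viewer can share them, OfflineImageReconstructor gains a working aclXmlToProto() in place of the stub that threw "ACLs are not supported yet", and TestOfflineImageViewer sets ACLs on a test file so the XML/ReverseXML round trip covers the new path.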

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java

@@ -77,11 +77,11 @@ public final class FSImageFormatPBINode {
   private final static int USER_STRID_OFFSET = 40;
   private final static int GROUP_STRID_OFFSET = 16;
-  private static final int ACL_ENTRY_NAME_MASK = (1 << 24) - 1;
-  private static final int ACL_ENTRY_NAME_OFFSET = 6;
-  private static final int ACL_ENTRY_TYPE_OFFSET = 3;
-  private static final int ACL_ENTRY_SCOPE_OFFSET = 5;
-  private static final int ACL_ENTRY_PERM_MASK = 7;
+  public static final int ACL_ENTRY_NAME_MASK = (1 << 24) - 1;
+  public static final int ACL_ENTRY_NAME_OFFSET = 6;
+  public static final int ACL_ENTRY_TYPE_OFFSET = 3;
+  public static final int ACL_ENTRY_SCOPE_OFFSET = 5;
+  public static final int ACL_ENTRY_PERM_MASK = 7;
   private static final int ACL_ENTRY_TYPE_MASK = 3;
   private static final int ACL_ENTRY_SCOPE_MASK = 1;
   private static final FsAction[] FSACTION_VALUES = FsAction.values();
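
The five constants made public in this hunk define how one ACL entry is bit-packed into a single int in the fsimage: bits 6-29 hold the string-table ID of the principal name, bit 5 the scope, bits 3-4 the type, and bits 0-2 the permission. FsAction's enum order runs NONE through ALL, so its ordinal is exactly the 3-bit rwx value, which is why a plain 7 mask works. A minimal standalone sketch of the pack/unpack round trip using these constants (the name ID of 5 is a made-up placeholder for a real string-table ID):

import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;

public class AclEntryLayoutSketch {
  // Same values as FSImageFormatPBINode; public after this change.
  static final int ACL_ENTRY_NAME_MASK = (1 << 24) - 1;
  static final int ACL_ENTRY_NAME_OFFSET = 6;
  static final int ACL_ENTRY_TYPE_OFFSET = 3;
  static final int ACL_ENTRY_SCOPE_OFFSET = 5;
  static final int ACL_ENTRY_PERM_MASK = 7;
  static final int ACL_ENTRY_TYPE_MASK = 3;
  static final int ACL_ENTRY_SCOPE_MASK = 1;

  public static void main(String[] args) {
    int nameId = 5;  // placeholder for a real string-table ID
    // Pack "user:foo:rwx" (ACCESS scope) the way aclXmlToProto does.
    int v = ((nameId & ACL_ENTRY_NAME_MASK) << ACL_ENTRY_NAME_OFFSET)
        | (AclEntryType.USER.ordinal() << ACL_ENTRY_TYPE_OFFSET)
        | (AclEntryScope.ACCESS.ordinal() << ACL_ENTRY_SCOPE_OFFSET)
        | FsAction.ALL.ordinal();
    // Unpack the fields again, as the image loader does.
    int name = (v >> ACL_ENTRY_NAME_OFFSET) & ACL_ENTRY_NAME_MASK;
    AclEntryType type = AclEntryType.values()
        [(v >> ACL_ENTRY_TYPE_OFFSET) & ACL_ENTRY_TYPE_MASK];
    AclEntryScope scope = AclEntryScope.values()
        [(v >> ACL_ENTRY_SCOPE_OFFSET) & ACL_ENTRY_SCOPE_MASK];
    FsAction perm = FsAction.values()[v & ACL_ENTRY_PERM_MASK];
    // Prints: name=5 type=USER scope=ACCESS perm=ALL
    System.out.printf("name=%d type=%s scope=%s perm=%s%n",
        name, type, scope, perm);
  }
}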

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java

@@ -17,6 +17,10 @@
  */
 package org.apache.hadoop.hdfs.tools.offlineImageViewer;
 
+import static org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode.ACL_ENTRY_NAME_MASK;
+import static org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode.ACL_ENTRY_NAME_OFFSET;
+import static org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode.ACL_ENTRY_SCOPE_OFFSET;
+import static org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode.ACL_ENTRY_TYPE_OFFSET;
 import static org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode.XATTR_NAMESPACE_MASK;
 import static org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode.XATTR_NAME_OFFSET;
 import static org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode.XATTR_NAMESPACE_OFFSET;
@@ -49,10 +53,12 @@
 import com.google.common.primitives.Ints;
 import com.google.protobuf.ByteString;
 import com.google.protobuf.TextFormat;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto;
@@ -66,6 +72,7 @@
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FilesUnderConstructionSection.FileUnderConstructionEntry;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection;
+import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.AclFeatureProto;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.NameSystemSection;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.SecretManagerSection;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.SnapshotDiffSection.DiffEntry;
@@ -131,6 +138,8 @@ class OfflineImageReconstructor {
    */
   private int latestStringId = 0;
 
+  private static final String EMPTY_STRING = "";
+
   private OfflineImageReconstructor(CountingOutputStream out,
       InputStreamReader reader) throws XMLStreamException {
     this.out = out;
@@ -731,10 +740,25 @@ private void processSymlinkXml(Node node,
     // Will check remaining keys and serialize in processINodeXml
   }
 
-  private INodeSection.AclFeatureProto.Builder aclXmlToProto(Node acl)
+  private INodeSection.AclFeatureProto.Builder aclXmlToProto(Node acls)
       throws IOException {
-    // TODO: support ACLs
-    throw new IOException("ACLs are not supported yet.");
+    AclFeatureProto.Builder b = AclFeatureProto.newBuilder();
+    while (true) {
+      Node acl = acls.removeChild(INODE_SECTION_ACL);
+      if (acl == null) {
+        break;
+      }
+      String val = acl.getVal();
+      AclEntry entry = AclEntry.parseAclEntry(val, true);
+      int nameId = registerStringId(entry.getName() == null ? EMPTY_STRING
+          : entry.getName());
+      int v = ((nameId & ACL_ENTRY_NAME_MASK) << ACL_ENTRY_NAME_OFFSET)
+          | (entry.getType().ordinal() << ACL_ENTRY_TYPE_OFFSET)
+          | (entry.getScope().ordinal() << ACL_ENTRY_SCOPE_OFFSET)
+          | (entry.getPermission().ordinal());
+      b.addEntries(v);
+    }
+    return b;
   }
 
   private INodeSection.XAttrFeatureProto.Builder xattrsXmlToProto(Node xattrs)
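
For context: aclXmlToProto() consumes the <acls> element that the OIV XML processor emits for an inode, draining each child <acl> node whose text is an ACL entry string in the form AclEntry.parseAclEntry() accepts. Reconstructed from the parsing code above (a sketch, not quoted PBImageXmlWriter output), the input looks roughly like:

<acls>
  <acl>user::rwx</acl>
  <acl>user:foo:rwx</acl>
  <acl>group::r-x</acl>
  <acl>other::--x</acl>
</acls>

Unnamed entries such as group::r-x parse with a null name, which is why the code falls back to EMPTY_STRING: every packed entry must carry a valid string-table ID in its name bits.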

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java

@@ -17,6 +17,14 @@
  */
 package org.apache.hadoop.hdfs.tools.offlineImageViewer;
 
+import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
+import static org.apache.hadoop.fs.permission.AclEntryType.GROUP;
+import static org.apache.hadoop.fs.permission.AclEntryType.OTHER;
+import static org.apache.hadoop.fs.permission.AclEntryType.USER;
+import static org.apache.hadoop.fs.permission.FsAction.ALL;
+import static org.apache.hadoop.fs.permission.FsAction.EXECUTE;
+import static org.apache.hadoop.fs.permission.FsAction.READ_EXECUTE;
+import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
@@ -49,6 +57,7 @@
 import javax.xml.parsers.SAXParserFactory;
 
 import com.google.common.io.Files;
+
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.io.output.NullOutputStream;
 import org.apache.commons.logging.Log;
@@ -82,6 +91,7 @@
 import org.xml.sax.SAXException;
 import org.xml.sax.helpers.DefaultHandler;
 
+import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 
 public class TestOfflineImageViewer {
@@ -113,6 +123,7 @@ public static void createOriginalFSImage() throws IOException {
         DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
     conf.setBoolean(
         DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
     conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL,
         "RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//" + "DEFAULT");
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
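
The added conf.setBoolean() is load-bearing for the test: dfs.namenode.acls.enabled defaults to false, and a NameNode with ACLs disabled rejects setAcl() calls with an AclException, so without it the image built below would never contain an AclFeatureProto to round-trip.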
@@ -179,6 +190,14 @@ public static void createOriginalFSImage() throws IOException {
       // as UTF8
       hdfs.setXAttr(xattr, "user.a4", new byte[]{ -0x3d, 0x28 });
       writtenFiles.put(xattr.toString(), hdfs.getFileStatus(xattr));
+      // Set ACLs
+      hdfs.setAcl(
+          xattr,
+          Lists.newArrayList(aclEntry(ACCESS, USER, ALL),
+              aclEntry(ACCESS, USER, "foo", ALL),
+              aclEntry(ACCESS, GROUP, READ_EXECUTE),
+              aclEntry(ACCESS, GROUP, "bar", READ_EXECUTE),
+              aclEntry(ACCESS, OTHER, EXECUTE)));
 
       // Write results to the fsimage file
       hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
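
With ACL entries now baked into the saved image, the suite's existing XML round-trip coverage exercises the new aclXmlToProto() path instead of tripping the old "ACLs are not supported yet" IOException. The manual equivalent (paths are placeholders) is roughly:

hdfs oiv -p XML -i <fsimage> -o fsimage.xml
hdfs oiv -p ReverseXML -i fsimage.xml -o fsimage.recon

after which re-exporting fsimage.recon to XML should match the original export, ACLs included.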