HDFS-16187. SnapshotDiff behaviour with Xattrs and Acls is not consistent across NN restarts with checkpointing (#3340) (#3640)

(cherry picked from commit 356ebbbe80)

 Conflicts:
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java

Change-Id: If76fd0d77fafc90fe2f2c19ab1d0c43a58510f6b

Co-authored-by: bshashikant <shashikant@apache.org>
commit 5333e872e2 (parent c4e8826194)
Author: Wei-Chiu Chuang
Date: 2021-11-12 14:32:02 +08:00
Committed by: GitHub
4 changed files with 85 additions and 4 deletions

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFeature.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -84,6 +85,22 @@ public List<XAttr> getXAttrs() {
     }
   }
 
+  @Override
+  public boolean equals(Object o) {
+    if (o == null) {
+      return false;
+    }
+    if (getClass() != o.getClass()) {
+      return false;
+    }
+    return getXAttrs().equals(((XAttrFeature) o).getXAttrs());
+  }
+
+  @Override
+  public int hashCode() {
+    return Arrays.hashCode(getXAttrs().toArray());
+  }
+
   /**
    * Get XAttr by name with prefix.
    * @param prefixedName xAttr name with prefix
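
Why this matters: XAttrFeature previously inherited Object's reference equality, and a NameNode restart from a checkpoint rebuilds feature objects from the fsimage, so identical xattrs on a snapshot copy and on the current inode compared as different. A minimal, standalone sketch of the new value-equality contract (assuming XAttrFeature's public List constructor; the class names are real HDFS internals):

import java.util.Collections;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;

public class XAttrFeatureEqualitySketch {
  public static void main(String[] args) {
    XAttr a = new XAttr.Builder()
        .setNameSpace(XAttr.NameSpace.USER)
        .setName("a1")
        .setValue(new byte[] {0x31})
        .build();
    // Two distinct feature objects, as after an fsimage reload:
    XAttrFeature f1 = new XAttrFeature(Collections.singletonList(a));
    XAttrFeature f2 = new XAttrFeature(Collections.singletonList(a));
    System.out.println(f1 == f2);                       // false: different objects
    System.out.println(f1.equals(f2));                  // true with this patch
    System.out.println(f1.hashCode() == f2.hashCode()); // true, per the contract
  }
}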

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/Snapshot.java

@@ -24,11 +24,14 @@
 import java.util.Arrays;
 import java.util.Comparator;
 import java.util.Date;
+import java.util.Objects;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.namenode.AclFeature;
+import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryAttributes;
 import org.apache.hadoop.hdfs.server.namenode.ContentSummaryComputationContext;
 import org.apache.hadoop.hdfs.server.namenode.DirectoryWithQuotaFeature;
 import org.apache.hadoop.hdfs.server.namenode.FSImageFormat;
@@ -185,6 +188,18 @@ public ContentSummaryComputationContext computeContentSummary(
       return computeDirectoryContentSummary(summary, snapshotId);
     }
 
+    @Override
+    public boolean metadataEquals(INodeDirectoryAttributes other) {
+      return other != null && getQuotaCounts().equals(other.getQuotaCounts())
+          && getPermissionLong() == other.getPermissionLong()
+          // The Acl feature maintains a reference-counted map, so every
+          // snapshot copy should point to the same Acl object unless
+          // the acl values have changed.
+          // Reference equality is hence intentional here.
+          && getAclFeature() == other.getAclFeature()
+          && Objects.equals(getXAttrFeature(), other.getXAttrFeature());
+    }
+
     @Override
     public String getFullPathName() {
       return getSnapshotPath(getParent().getFullPathName(), getLocalName());
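
The asymmetry above is deliberate: AclFeature instances are interned through a reference-counted map, so an unchanged ACL is literally the same object in every snapshot copy and == is correct, while XAttrFeature objects are rebuilt on fsimage load and only the value-based equals added in this patch survives a restart. A hypothetical, much-simplified sketch of how metadataEquals feeds the diff report (the helper name is invented; the real call sites are in the NameNode's snapshot diff computation):

import org.apache.hadoop.hdfs.server.namenode.INodeDirectoryAttributes;

final class DirDiffSketch {
  // Hypothetical helper: a directory shows up as an 'M' (modify) entry in
  // the SnapshotDiffReport only when its snapshot copy's metadata (quota,
  // permission, ACL, xattrs) no longer matches the state it is compared to.
  static boolean reportedAsModified(INodeDirectoryAttributes snapshotCopy,
      INodeDirectoryAttributes current) {
    return !snapshotCopy.metadataEquals(current);
  }
}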

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java

@@ -43,8 +43,6 @@
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 
-import org.apache.hadoop.thirdparty.com.google.common.collect.Lists;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.CipherSuite;
 import org.apache.hadoop.crypto.CryptoInputStream;
@@ -74,9 +72,10 @@
 import org.apache.hadoop.hdfs.client.HdfsAdmin;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
@@ -98,6 +97,7 @@
 import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.DelegationTokenIssuer;
+import org.apache.hadoop.thirdparty.com.google.common.collect.Lists;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension.DelegationTokenExtension;
@@ -1184,6 +1184,30 @@ private void dTIEM(Path prefix) throws Exception {
     }
   }
 
+  @Test
+  public void testEncryptionZonesWithSnapshots() throws Exception {
+    final Path snapshottable = new Path("/zones");
+    fsWrapper.mkdir(snapshottable, FsPermission.getDirDefault(),
+        true);
+    dfsAdmin.allowSnapshot(snapshottable);
+    dfsAdmin.createEncryptionZone(snapshottable, TEST_KEY, NO_TRASH);
+    fs.createSnapshot(snapshottable, "snap1");
+    SnapshotDiffReport report =
+        fs.getSnapshotDiffReport(snapshottable, "snap1", "");
+    Assert.assertEquals(0, report.getDiffList().size());
+    report =
+        fs.getSnapshotDiffReport(snapshottable, "snap1", "");
+    System.out.println(report);
+    Assert.assertEquals(0, report.getDiffList().size());
+    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    fs.saveNamespace();
+    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    cluster.restartNameNode(true);
+    report =
+        fs.getSnapshotDiffReport(snapshottable, "snap1", "");
+    Assert.assertEquals(0, report.getDiffList().size());
+  }
+
   private class AuthorizationExceptionInjector extends EncryptionFaultInjector {
     @Override
     public void ensureKeyIsInitialized() throws IOException {
@@ -1730,7 +1754,6 @@ public void testEncryptionZonesOnRootPath() throws Exception {
         true, fs.getFileStatus(rootDir).isEncrypted());
     assertEquals("File is encrypted",
         true, fs.getFileStatus(zoneFile).isEncrypted());
-    DFSTestUtil.verifyFilesNotEqual(fs, zoneFile, rawFile, len);
   }
 
   @Test
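
Both new tests drive the same checkpoint-and-restart sequence, which is exactly what exposed the bug: without it the in-memory feature objects are still shared and the diff stays empty. A standalone sketch of the pattern (assuming a running MiniDFSCluster, as these tests have; saveNamespace requires safe mode):

import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;

final class CheckpointRestartSketch {
  // Persist the namespace to a fresh fsimage and reload it, so any state
  // that does not survive serialization becomes visible immediately.
  static void checkpointAndRestart(MiniDFSCluster cluster,
      DistributedFileSystem fs) throws Exception {
    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER); // saveNamespace needs safe mode
    fs.saveNamespace();                            // write the checkpoint
    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    cluster.restartNameNode(true);                 // reload from the new fsimage
  }
}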

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestXAttrWithSnapshot.java

@@ -36,6 +36,7 @@
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.io.IOUtils;
@@ -140,6 +141,31 @@ public void testModifyReadsCurrentState() throws Exception {
     assertEquals(xattrs.size(), 0);
   }
 
+  @Test
+  public void testXattrWithSnapshotAndNNRestart() throws Exception {
+    // Init
+    FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0700));
+    hdfs.setXAttr(path, name1, value1);
+    hdfs.allowSnapshot(path);
+    hdfs.createSnapshot(path, snapshotName);
+    SnapshotDiffReport report =
+        hdfs.getSnapshotDiffReport(path, snapshotName, "");
+    System.out.println(report);
+    Assert.assertEquals(0, report.getDiffList().size());
+    report =
+        hdfs.getSnapshotDiffReport(path, snapshotName, "");
+    System.out.println(report);
+    Assert.assertEquals(0, report.getDiffList().size());
+    hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
+    hdfs.saveNamespace();
+    hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
+    cluster.restartNameNode(true);
+    report =
+        hdfs.getSnapshotDiffReport(path, snapshotName, "");
+    System.out.println(report);
+    Assert.assertEquals(0, report.getDiffList().size());
+  }
+
   /**
    * Tests removing xattrs on a directory that has been snapshotted
    */