From a109f2b32f01164dd3c534ef1ea7bcc82cc2026d Mon Sep 17 00:00:00 2001 From: Erik Krogen Date: Fri, 1 Jun 2018 09:24:38 -0700 Subject: [PATCH] HDFS-13578. [SBN read] Add ReadOnly annotation to methods in ClientProtocol. Contributed by Chao Sun. --- .../hadoop/hdfs/protocol/ClientProtocol.java | 45 ++++++++ .../hdfs/server/namenode/ha/ReadOnly.java | 47 ++++++++ .../hadoop/hdfs/protocol/TestReadOnly.java | 101 ++++++++++++++++++ 3 files changed, 193 insertions(+) create mode 100644 hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ReadOnly.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestReadOnly.java diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java index a55a0f7d95..5b4c897e4f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java @@ -48,6 +48,7 @@ import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector; +import org.apache.hadoop.hdfs.server.namenode.ha.ReadOnly; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport; import org.apache.hadoop.io.EnumSetWritable; import org.apache.hadoop.io.Text; @@ -128,6 +129,7 @@ public interface ClientProtocol { * @throws IOException If an I/O error occurred */ @Idempotent + @ReadOnly(atimeAffected = true) LocatedBlocks getBlockLocations(String src, long offset, long length) throws IOException; @@ -137,6 +139,7 @@ LocatedBlocks getBlockLocations(String src, long offset, long length) * @throws 
IOException */ @Idempotent + @ReadOnly FsServerDefaults getServerDefaults() throws IOException; /** @@ -277,6 +280,7 @@ boolean setReplication(String src, short replication) * @return All the in-use block storage policies currently. */ @Idempotent + @ReadOnly BlockStoragePolicy[] getStoragePolicies() throws IOException; /** @@ -319,6 +323,7 @@ void setStoragePolicy(String src, String policyName) * If file/dir src is not found */ @Idempotent + @ReadOnly BlockStoragePolicy getStoragePolicy(String path) throws IOException; /** @@ -685,6 +690,7 @@ boolean mkdirs(String src, FsPermission masked, boolean createParent) * @throws IOException If an I/O error occurred */ @Idempotent + @ReadOnly DirectoryListing getListing(String src, byte[] startAfter, boolean needLocation) throws IOException; @@ -695,6 +701,7 @@ DirectoryListing getListing(String src, byte[] startAfter, * @throws IOException If an I/O error occurred */ @Idempotent + @ReadOnly SnapshottableDirectoryStatus[] getSnapshottableDirListing() throws IOException; @@ -775,6 +782,7 @@ SnapshottableDirectoryStatus[] getSnapshottableDirListing() * */ @Idempotent + @ReadOnly long[] getStats() throws IOException; /** @@ -782,6 +790,7 @@ SnapshottableDirectoryStatus[] getSnapshottableDirListing() * in the filesystem. */ @Idempotent + @ReadOnly ReplicatedBlockStats getReplicatedBlockStats() throws IOException; /** @@ -789,6 +798,7 @@ SnapshottableDirectoryStatus[] getSnapshottableDirListing() * in the filesystem. */ @Idempotent + @ReadOnly ECBlockGroupStats getECBlockGroupStats() throws IOException; /** @@ -798,6 +808,7 @@ SnapshottableDirectoryStatus[] getSnapshottableDirListing() * otherwise all datanodes if type is ALL. */ @Idempotent + @ReadOnly DatanodeInfo[] getDatanodeReport(HdfsConstants.DatanodeReportType type) throws IOException; @@ -805,6 +816,7 @@ DatanodeInfo[] getDatanodeReport(HdfsConstants.DatanodeReportType type) * Get a report on the current datanode storages. 
*/ @Idempotent + @ReadOnly DatanodeStorageReport[] getDatanodeStorageReport( HdfsConstants.DatanodeReportType type) throws IOException; @@ -817,6 +829,7 @@ DatanodeStorageReport[] getDatanodeStorageReport( * a symlink. */ @Idempotent + @ReadOnly long getPreferredBlockSize(String filename) throws IOException; @@ -971,6 +984,7 @@ RollingUpgradeInfo rollingUpgrade(RollingUpgradeAction action) * cookie returned from the previous call. */ @Idempotent + @ReadOnly CorruptFileBlocks listCorruptFileBlocks(String path, String cookie) throws IOException; @@ -1006,6 +1020,7 @@ CorruptFileBlocks listCorruptFileBlocks(String path, String cookie) * @throws IOException If an I/O error occurred */ @Idempotent + @ReadOnly HdfsFileStatus getFileInfo(String src) throws IOException; /** @@ -1020,6 +1035,7 @@ CorruptFileBlocks listCorruptFileBlocks(String path, String cookie) * @throws IOException If an I/O error occurred */ @Idempotent + @ReadOnly boolean isFileClosed(String src) throws IOException; /** @@ -1036,6 +1052,7 @@ CorruptFileBlocks listCorruptFileBlocks(String path, String cookie) * @throws IOException If an I/O error occurred */ @Idempotent + @ReadOnly HdfsFileStatus getFileLinkInfo(String src) throws IOException; /** @@ -1050,6 +1067,7 @@ CorruptFileBlocks listCorruptFileBlocks(String path, String cookie) * @throws IOException If an I/O error occurred */ @Idempotent + @ReadOnly HdfsLocatedFileStatus getLocatedFileInfo(String src, boolean needBlockToken) throws IOException; @@ -1064,6 +1082,7 @@ HdfsLocatedFileStatus getLocatedFileInfo(String src, boolean needBlockToken) * @throws IOException If an I/O error occurred */ @Idempotent + @ReadOnly ContentSummary getContentSummary(String path) throws IOException; /** @@ -1176,6 +1195,7 @@ void createSymlink(String target, String link, FsPermission dirPerm, * or an I/O error occurred */ @Idempotent + @ReadOnly String getLinkTarget(String path) throws IOException; /** @@ -1245,6 +1265,7 @@ void cancelDelegationToken(Token token) * 
@throws IOException */ @Idempotent + @ReadOnly DataEncryptionKey getDataEncryptionKey() throws IOException; /** @@ -1313,6 +1334,7 @@ void disallowSnapshot(String snapshotRoot) * @throws IOException on error */ @Idempotent + @ReadOnly SnapshotDiffReport getSnapshotDiffReport(String snapshotRoot, String fromSnapshot, String toSnapshot) throws IOException; @@ -1340,6 +1362,7 @@ SnapshotDiffReport getSnapshotDiffReport(String snapshotRoot, * @throws IOException on error */ @Idempotent + @ReadOnly SnapshotDiffReportListing getSnapshotDiffReportListing(String snapshotRoot, String fromSnapshot, String toSnapshot, byte[] startPath, int index) throws IOException; @@ -1386,6 +1409,7 @@ void modifyCacheDirective(CacheDirectiveInfo directive, * @return A batch of CacheDirectiveEntry objects. */ @Idempotent + @ReadOnly BatchedEntries listCacheDirectives( long prevId, CacheDirectiveInfo filter) throws IOException; @@ -1427,6 +1451,7 @@ BatchedEntries listCacheDirectives( * @return A batch of CachePoolEntry objects. */ @Idempotent + @ReadOnly BatchedEntries listCachePools(String prevPool) throws IOException; @@ -1473,6 +1498,7 @@ void removeAclEntries(String src, List aclSpec) * Gets the ACLs of files and directories. */ @Idempotent + @ReadOnly AclStatus getAclStatus(String src) throws IOException; /** @@ -1486,6 +1512,7 @@ void createEncryptionZone(String src, String keyName) * Get the encryption zone for a path. */ @Idempotent + @ReadOnly EncryptionZone getEZForPath(String src) throws IOException; @@ -1497,6 +1524,7 @@ EncryptionZone getEZForPath(String src) * @return Batch of encryption zones. 
*/ @Idempotent + @ReadOnly BatchedEntries listEncryptionZones( long prevId) throws IOException; @@ -1521,6 +1549,7 @@ void reencryptEncryptionZone(String zone, ReencryptAction action) * @throws IOException */ @Idempotent + @ReadOnly BatchedEntries listReencryptionStatus(long prevId) throws IOException; @@ -1554,6 +1583,7 @@ void setXAttr(String src, XAttr xAttr, EnumSet flag) * @throws IOException */ @Idempotent + @ReadOnly List getXAttrs(String src, List xAttrs) throws IOException; @@ -1569,6 +1599,7 @@ List getXAttrs(String src, List xAttrs) * @throws IOException */ @Idempotent + @ReadOnly List listXAttrs(String src) throws IOException; @@ -1603,6 +1634,7 @@ List listXAttrs(String src) * @throws IOException see specific implementation */ @Idempotent + @ReadOnly void checkAccess(String path, FsAction mode) throws IOException; /** @@ -1611,6 +1643,7 @@ List listXAttrs(String src) * the starting point for the inotify event stream. */ @Idempotent + @ReadOnly long getCurrentEditLogTxid() throws IOException; /** @@ -1618,6 +1651,7 @@ List listXAttrs(String src) * transactions for txids equal to or greater than txid. */ @Idempotent + @ReadOnly EventBatchList getEditsFromTxid(long txid) throws IOException; /** @@ -1675,6 +1709,7 @@ AddErasureCodingPolicyResponse[] addErasureCodingPolicies( * @throws IOException */ @Idempotent + @ReadOnly ErasureCodingPolicyInfo[] getErasureCodingPolicies() throws IOException; /** @@ -1683,6 +1718,7 @@ AddErasureCodingPolicyResponse[] addErasureCodingPolicies( * @throws IOException */ @Idempotent + @ReadOnly Map getErasureCodingCodecs() throws IOException; /** @@ -1693,6 +1729,7 @@ AddErasureCodingPolicyResponse[] addErasureCodingPolicies( * @throws IOException */ @Idempotent + @ReadOnly ErasureCodingPolicy getErasureCodingPolicy(String src) throws IOException; /** @@ -1704,6 +1741,11 @@ AddErasureCodingPolicyResponse[] addErasureCodingPolicies( /** * Get {@link QuotaUsage} rooted at the specified directory. 
+ * + * Note: due to HDFS-6763, standby/observer doesn't keep up-to-date info + * about quota usage, and thus even though this is ReadOnly, it can only be + * directed to the active namenode. + * * @param path The string representation of the path * * @throws AccessControlException permission denied @@ -1713,6 +1755,7 @@ AddErasureCodingPolicyResponse[] addErasureCodingPolicies( * @throws IOException If an I/O error occurred */ @Idempotent + @ReadOnly(activeOnly = true) QuotaUsage getQuotaUsage(String path) throws IOException; /** @@ -1726,6 +1769,7 @@ AddErasureCodingPolicyResponse[] addErasureCodingPolicies( */ @Idempotent @Deprecated + @ReadOnly BatchedEntries listOpenFiles(long prevId) throws IOException; /** @@ -1740,6 +1784,7 @@ AddErasureCodingPolicyResponse[] addErasureCodingPolicies( * @throws IOException */ @Idempotent + @ReadOnly BatchedEntries listOpenFiles(long prevId, EnumSet openFilesTypes, String path) throws IOException; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ReadOnly.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ReadOnly.java new file mode 100644 index 0000000000..1782dcb6d8 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ReadOnly.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode.ha; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Inherited; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * Marker interface used to annotate methods that are readonly. + */ +@Inherited +@Retention(RetentionPolicy.RUNTIME) +@Target(ElementType.METHOD) +@InterfaceStability.Evolving +public @interface ReadOnly { + /** + * @return if true, the annotated method may update the last accessed time + * while performing its read, if access time is enabled. + */ + boolean atimeAffected() default false; + + /** + * @return if true, the target method should only be invoked on the active + * namenode. This applies to operations that need to access information that + * is only available on the active namenode. + */ + boolean activeOnly() default false; +} diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestReadOnly.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestReadOnly.java new file mode 100644 index 0000000000..34e84fa489 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestReadOnly.java @@ -0,0 +1,101 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.protocol; + +import org.apache.hadoop.hdfs.server.namenode.ha.ReadOnly; +import org.junit.Test; + +import java.lang.reflect.Method; +import java.util.Arrays; +import java.util.HashSet; +import java.util.Set; + +import static org.junit.Assert.assertEquals; + +/** + * Testing class for {@link ReadOnly} annotation on {@link ClientProtocol}. 
+ */ +public class TestReadOnly { + private static final Method[] ALL_METHODS = ClientProtocol.class.getMethods(); + private static final Set<String> READONLY_METHOD_NAMES = new HashSet<>( + Arrays.asList( + "getBlockLocations", + "getServerDefaults", + "getStoragePolicies", + "getStoragePolicy", + "getListing", + "getSnapshottableDirListing", + "getPreferredBlockSize", + "listCorruptFileBlocks", + "getFileInfo", + "isFileClosed", + "getFileLinkInfo", + "getLocatedFileInfo", + "getContentSummary", + "getLinkTarget", + "getSnapshotDiffReport", + "getSnapshotDiffReportListing", + "listCacheDirectives", + "listCachePools", + "getAclStatus", + "getEZForPath", + "listEncryptionZones", + "listReencryptionStatus", + "getXAttrs", + "listXAttrs", + "checkAccess", + "getErasureCodingPolicies", + "getErasureCodingCodecs", + "getErasureCodingPolicy", + "listOpenFiles", + "getStats", + "getReplicatedBlockStats", + "getECBlockGroupStats", + "getDatanodeReport", + "getDatanodeStorageReport", + "getDataEncryptionKey", + "getCurrentEditLogTxid", + "getEditsFromTxid", + "getQuotaUsage" + ) + ); + + @Test + public void testReadOnly() { + for (Method m : ALL_METHODS) { + boolean expected = READONLY_METHOD_NAMES.contains(m.getName()); + checkIsReadOnly(m.getName(), expected); + } + } + + private void checkIsReadOnly(String methodName, boolean expected) { + for (Method m : ALL_METHODS) { + // Note here we only check the FIRST result of overloaded methods + // with the same name. The assumption is that all these methods should + // share the same annotation. + if (m.getName().equals(methodName)) { + assertEquals("Expected ReadOnly for method '" + methodName + + "' to be " + expected, + m.isAnnotationPresent(ReadOnly.class), expected); + return; + } + } + throw new IllegalArgumentException("Unknown method name: " + methodName); + } + +}