listXAttrs(Path path) throws IOException {
+ throw new NotInMountpointException(path, "listXAttrs");
+ }
+
+ @Override
+ public void removeXAttr(Path path, String name) throws IOException {
+ checkPathIsSlash(path);
+ throw readOnlyMountTable("removeXAttr", path);
+ }
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DirectBufferPool.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DirectBufferPool.java
similarity index 95%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DirectBufferPool.java
rename to hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DirectBufferPool.java
index 7332d34594..510938b7ff 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DirectBufferPool.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DirectBufferPool.java
@@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hdfs.util;
+package org.apache.hadoop.util;
import java.lang.ref.WeakReference;
import java.nio.ByteBuffer;
@@ -27,6 +27,7 @@
import org.apache.hadoop.classification.InterfaceAudience;
import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.classification.InterfaceStability;
/**
* A simple class for pooling direct ByteBuffers. This is necessary
@@ -40,7 +41,8 @@
* allocated at the same size. There is no attempt to reuse larger
* buffers to satisfy smaller allocations.
*/
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceStability.Evolving
public class DirectBufferPool {
// Essentially implement a multimap with weak values.
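The relocated pool keeps its small API. A minimal usage sketch, assuming the getBuffer(int)/returnBuffer(ByteBuffer) methods the class exposes, and noting that the LimitedPrivate/Evolving annotations added above still restrict callers to HDFS and MapReduce:

import java.nio.ByteBuffer;
import org.apache.hadoop.util.DirectBufferPool;

public class DirectBufferPoolSketch {
  public static void main(String[] args) {
    DirectBufferPool pool = new DirectBufferPool();
    ByteBuffer buf = pool.getBuffer(4096); // reuses a pooled 4 KB direct buffer if one is available
    try {
      buf.putInt(42);                      // work with the direct buffer
    } finally {
      pool.returnBuffer(buf);              // hand it back so later same-sized requests can reuse it
    }
  }
}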
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ToolRunner.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ToolRunner.java
index 49581000ca..16872d0891 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ToolRunner.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ToolRunner.java
@@ -30,7 +30,7 @@
* <code>ToolRunner</code> can be used to run classes implementing
* <code>Tool</code> interface. It works in conjunction with
* {@link GenericOptionsParser} to parse the
- *
+ *
* generic hadoop command line arguments and modifies the
* <code>Configuration</code> of the <code>Tool</code>. The
* application-specific options are passed along without being modified.
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDFVariations.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDFVariations.java
index d457c0e8a4..97dbe5e606 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDFVariations.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDFVariations.java
@@ -29,14 +29,33 @@
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Shell;
+import org.junit.After;
+import org.junit.Before;
import org.junit.Test;
+
import static org.junit.Assert.*;
public class TestDFVariations {
+ private static final String TEST_ROOT_DIR =
+ System.getProperty("test.build.data","build/test/data") + "/TestDFVariations";
+ private static File test_root = null;
+ @Before
+ public void setup() throws IOException {
+ test_root = new File(TEST_ROOT_DIR);
+ test_root.mkdirs();
+ }
+
+ @After
+ public void after() throws IOException {
+ FileUtil.setWritable(test_root, true);
+ FileUtil.fullyDelete(test_root);
+ assertTrue(!test_root.exists());
+ }
+
public static class XXDF extends DF {
public XXDF() throws IOException {
- super(new File(System.getProperty("test.build.data","/tmp")), 0L);
+ super(test_root, 0L);
}
@Override
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
index e1a440d061..a32455604c 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
@@ -773,4 +773,34 @@ public void testInternalGetAclStatus() throws IOException {
assertFalse(aclStatus.isStickyBit());
}
+ @Test(expected=AccessControlException.class)
+ public void testInternalSetXAttr() throws IOException {
+ fsView.setXAttr(new Path("/internalDir"), "xattrName", null);
+ }
+
+ @Test(expected=NotInMountpointException.class)
+ public void testInternalGetXAttr() throws IOException {
+ fsView.getXAttr(new Path("/internalDir"), "xattrName");
+ }
+
+ @Test(expected=NotInMountpointException.class)
+ public void testInternalGetXAttrs() throws IOException {
+ fsView.getXAttrs(new Path("/internalDir"));
+ }
+
+ @Test(expected=NotInMountpointException.class)
+ public void testInternalGetXAttrsWithNames() throws IOException {
+ fsView.getXAttrs(new Path("/internalDir"), new ArrayList<String>());
+ }
+
+ @Test(expected=NotInMountpointException.class)
+ public void testInternalListXAttr() throws IOException {
+ fsView.listXAttrs(new Path("/internalDir"));
+ }
+
+ @Test(expected=AccessControlException.class)
+ public void testInternalRemoveXAttr() throws IOException {
+ fsView.removeXAttr(new Path("/internalDir"), "xattrName");
+ }
+
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java
index 2813c34bef..035b280249 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java
@@ -747,4 +747,34 @@ public void testInternalGetAclStatus() throws IOException {
AclUtil.getMinimalAcl(PERMISSION_555));
assertFalse(aclStatus.isStickyBit());
}
+
+ @Test(expected=AccessControlException.class)
+ public void testInternalSetXAttr() throws IOException {
+ fcView.setXAttr(new Path("/internalDir"), "xattrName", null);
+ }
+
+ @Test(expected=NotInMountpointException.class)
+ public void testInternalGetXAttr() throws IOException {
+ fcView.getXAttr(new Path("/internalDir"), "xattrName");
+ }
+
+ @Test(expected=NotInMountpointException.class)
+ public void testInternalGetXAttrs() throws IOException {
+ fcView.getXAttrs(new Path("/internalDir"));
+ }
+
+ @Test(expected=NotInMountpointException.class)
+ public void testInternalGetXAttrsWithNames() throws IOException {
+ fcView.getXAttrs(new Path("/internalDir"), new ArrayList<String>());
+ }
+
+ @Test(expected=NotInMountpointException.class)
+ public void testInternalListXAttr() throws IOException {
+ fcView.listXAttrs(new Path("/internalDir"));
+ }
+
+ @Test(expected=AccessControlException.class)
+ public void testInternalRemoveXAttr() throws IOException {
+ fcView.removeXAttr(new Path("/internalDir"), "xattrName");
+ }
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDirectBufferPool.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDirectBufferPool.java
similarity index 95%
rename from hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDirectBufferPool.java
rename to hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDirectBufferPool.java
index 31a18fb881..c8fd754666 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDirectBufferPool.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDirectBufferPool.java
@@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hdfs.util;
+package org.apache.hadoop.util;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotSame;
@@ -29,7 +29,7 @@
import com.google.common.collect.Lists;
public class TestDirectBufferPool {
- final DirectBufferPool pool = new DirectBufferPool();
+ final org.apache.hadoop.util.DirectBufferPool pool = new org.apache.hadoop.util.DirectBufferPool();
@Test
public void testBasics() {
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java
index 9628686538..b617ae5088 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java
@@ -53,7 +53,12 @@ public static synchronized NfsExports getInstance(Configuration conf) {
long expirationPeriodNano = conf.getLong(
Nfs3Constant.NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_KEY,
Nfs3Constant.NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_DEFAULT) * 1000 * 1000;
- exports = new NfsExports(cacheSize, expirationPeriodNano, matchHosts);
+ try {
+ exports = new NfsExports(cacheSize, expirationPeriodNano, matchHosts);
+ } catch (IllegalArgumentException e) {
+ LOG.error("Invalid NFS Exports provided: ", e);
+ return exports;
+ }
}
return exports;
}
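With this change a malformed export list no longer escapes getInstance() as an exception; it is logged and the previous (possibly null) instance is returned, so callers have to tolerate null. A hedged sketch of how that surfaces, using the nfs.exports.allowed.hosts property named in the CHANGES.txt entry below and a value that is only assumed to be invalid:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.nfs.NfsExports;

public class NfsExportsSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Hypothetical malformed entry (e.g. an unexpected separator between hosts).
    conf.set("nfs.exports.allowed.hosts", "192.168.0.0/22 rw ; host1 ro");
    NfsExports exports = NfsExports.getInstance(conf);
    if (exports == null) {
      // No valid export table: the mountd/nfs3 handlers changed below now answer with access denied.
      System.err.println("Invalid NFS exports; all clients will be rejected.");
    }
  }
}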
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java
index 9fbab240f6..2814cb007e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java
@@ -104,6 +104,10 @@ public XDR nullOp(XDR out, int xid, InetAddress client) {
@Override
public XDR mnt(XDR xdr, XDR out, int xid, InetAddress client) {
+ if (hostsMatcher == null) {
+ return MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_ACCES, out, xid,
+ null);
+ }
AccessPrivilege accessPrivilege = hostsMatcher.getAccessPrivilege(client);
if (accessPrivilege == AccessPrivilege.NONE) {
return MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_ACCES, out, xid,
@@ -208,16 +212,23 @@ public void handleInternal(ChannelHandlerContext ctx, RpcInfo info) {
} else if (mntproc == MNTPROC.UMNTALL) {
umntall(out, xid, client);
} else if (mntproc == MNTPROC.EXPORT) {
- // Currently only support one NFS export
+ // Currently only support one NFS export
List<NfsExports> hostsMatchers = new ArrayList<NfsExports>();
- hostsMatchers.add(hostsMatcher);
- out = MountResponse.writeExportList(out, xid, exports, hostsMatchers);
+ if (hostsMatcher != null) {
+ hostsMatchers.add(hostsMatcher);
+ out = MountResponse.writeExportList(out, xid, exports, hostsMatchers);
+ } else {
+ // This means there are no valid exports provided.
+ RpcAcceptedReply.getInstance(xid,
+ RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone()).write(
+ out);
+ }
} else {
// Invalid procedure
RpcAcceptedReply.getInstance(xid,
RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone()).write(
out);
- }
+ }
ChannelBuffer buf = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap().buffer());
RpcResponse rsp = new RpcResponse(buf, info.remoteAddress());
RpcUtil.sendRpcResponse(ctx, rsp);
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
index f254f50709..1650b14724 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
@@ -2123,8 +2123,11 @@ private boolean checkAccessPrivilege(SocketAddress remoteAddress,
if (!doPortMonitoring(remoteAddress)) {
return false;
}
-
+
// Check export table
+ if (exports == null) {
+ return false;
+ }
InetAddress client = ((InetSocketAddress) remoteAddress).getAddress();
AccessPrivilege access = exports.getAccessPrivilege(client);
if (access == AccessPrivilege.NONE) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8c172bf03c..498454916d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -252,6 +252,9 @@ Trunk (Unreleased)
HDFS-5794. Fix the inconsistency of layout version number of
ADD_DATANODE_AND_STORAGE_UUIDS between trunk and branch-2. (jing9)
+ HDFS-6657. Remove link to 'Legacy UI' in trunk's Namenode UI.
+ (Vinayakumar B via wheat9)
+
Release 2.6.0 - UNRELEASED
INCOMPATIBLE CHANGES
@@ -357,6 +360,16 @@ Release 2.6.0 - UNRELEASED
HDFS-6731. Run "hdfs zkfc-formatZK" on a server in a non-namenode will cause
a null pointer exception. (Masatake Iwasaki via brandonli)
+ HDFS-6114. Block Scan log rolling will never happen if blocks written
+ continuously leading to huge size of dncp_block_verification.log.curr
+ (vinayakumarb via cmccabe)
+
+ HDFS-6455. NFS: Exception should be added in NFS log for invalid separator in
+ nfs.exports.allowed.hosts. (Abhiraj Butala via brandonli)
+
+ HDFS-6715. Webhdfs won't fail over when it gets java.io.IOException: Namenode
+ is in startup mode. (jing9)
+
Release 2.5.0 - UNRELEASED
INCOMPATIBLE CHANGES
@@ -892,6 +905,12 @@ Release 2.5.0 - UNRELEASED
HDFS-6703. NFS: Files can be deleted from a read-only mount
(Srikanth Upputuri via brandonli)
+ HDFS-6422. getfattr in CLI doesn't throw exception or return non-0 return code
+ when xattr doesn't exist. (Charles Lamb via umamahesh)
+
+ HDFS-6696. Name node cannot start if the path of a file under
+ construction contains ".snapshot". (wang)
+
BREAKDOWN OF HDFS-2006 SUBTASKS AND RELATED JIRAS
HDFS-6299. Protobuf for XAttr and client-side implementation. (Yi Liu via umamahesh)
@@ -970,6 +989,9 @@ Release 2.5.0 - UNRELEASED
HDFS-6622. Rename and AddBlock may race and produce invalid edits (kihwal
via cmccabe)
+ HDFS-6723. New NN webUI no longer displays decommissioned state for dead node.
+ (Ming Ma via wheat9)
+
Release 2.4.1 - 2014-06-23
INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
index bb9612a995..cd75e53b27 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
@@ -31,7 +31,7 @@
import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitReplica;
-import org.apache.hadoop.hdfs.util.DirectBufferPool;
+import org.apache.hadoop.util.DirectBufferPool;
import org.apache.hadoop.util.DataChecksum;
import com.google.common.annotations.VisibleForTesting;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java
index c68e548099..47455754d7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java
@@ -40,7 +40,7 @@
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
-import org.apache.hadoop.hdfs.util.DirectBufferPool;
+import org.apache.hadoop.util.DirectBufferPool;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.security.UserGroupInformation;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 7996717d9d..734240f06f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -1358,6 +1358,6 @@ public List<XAttr> listXAttrs(String src)
* @param xAttr <code>XAttr</code> to remove
* @throws IOException
*/
- @Idempotent
+ @AtMostOnce
public void removeXAttr(String src, XAttr xAttr) throws IOException;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java
index 3503554636..0de445c222 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java
@@ -27,7 +27,7 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.util.DirectBufferPool;
+import org.apache.hadoop.util.DirectBufferPool;
import org.apache.hadoop.io.IOUtils;
import com.google.common.base.Preconditions;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
index 1039b4fe92..bbb67fc473 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
@@ -84,6 +84,10 @@ class BlockPoolSliceScanner {
private final SortedSet<BlockScanInfo> blockInfoSet
= new TreeSet<BlockScanInfo>(BlockScanInfo.LAST_SCAN_TIME_COMPARATOR);
+
+ private final SortedSet<BlockScanInfo> newBlockInfoSet =
+ new TreeSet<BlockScanInfo>(BlockScanInfo.LAST_SCAN_TIME_COMPARATOR);
+
private final GSet<Block, BlockScanInfo> blockMap
= new LightWeightGSet<Block, BlockScanInfo>(
LightWeightGSet.computeCapacity(0.5, "BlockMap"));
@@ -195,7 +199,7 @@ public LinkedElement getNext() {
BlockScanInfo info = new BlockScanInfo( block );
info.lastScanTime = scanTime--;
//still keep 'info.lastScanType' to NONE.
- addBlockInfo(info);
+ addBlockInfo(info, false);
}
RollingLogs rollingLogs = null;
@@ -221,25 +225,42 @@ private void updateBytesToScan(long len, long lastScanTime) {
// Should we change throttler bandwidth every time bytesLeft changes?
// not really required.
}
-
- private synchronized void addBlockInfo(BlockScanInfo info) {
- boolean added = blockInfoSet.add(info);
+
+ /**
+ * Add the BlockScanInfo to sorted set of blockScanInfo
+ * @param info BlockScanInfo to be added
+ * @param isNewBlock true if the block is the new Block, false if
+ * BlockScanInfo is being updated with new scanTime
+ */
+ private synchronized void addBlockInfo(BlockScanInfo info,
+ boolean isNewBlock) {
+ boolean added = false;
+ if (isNewBlock) {
+ // check whether the block already present
+ boolean exists = blockInfoSet.contains(info);
+ added = !exists && newBlockInfoSet.add(info);
+ } else {
+ added = blockInfoSet.add(info);
+ }
blockMap.put(info);
if (added) {
updateBytesToScan(info.getNumBytes(), info.lastScanTime);
}
}
-
+
private synchronized void delBlockInfo(BlockScanInfo info) {
boolean exists = blockInfoSet.remove(info);
+ if (!exists){
+ exists = newBlockInfoSet.remove(info);
+ }
blockMap.remove(info);
if (exists) {
updateBytesToScan(-info.getNumBytes(), info.lastScanTime);
}
}
-
+
/** Update blockMap by the given LogEntry */
private synchronized void updateBlockInfo(LogEntry e) {
BlockScanInfo info = blockMap.get(new Block(e.blockId, 0, e.genStamp));
@@ -249,7 +270,7 @@ private synchronized void updateBlockInfo(LogEntry e) {
delBlockInfo(info);
info.lastScanTime = e.verificationTime;
info.lastScanType = ScanType.VERIFICATION_SCAN;
- addBlockInfo(info);
+ addBlockInfo(info, false);
}
}
@@ -275,14 +296,14 @@ synchronized void addBlock(ExtendedBlock block) {
info = new BlockScanInfo(block.getLocalBlock());
info.lastScanTime = getNewBlockScanTime();
- addBlockInfo(info);
+ addBlockInfo(info, true);
adjustThrottler();
}
/** Deletes the block from internal structures */
synchronized void deleteBlock(Block block) {
BlockScanInfo info = blockMap.get(block);
- if ( info != null ) {
+ if (info != null) {
delBlockInfo(info);
}
}
@@ -319,7 +340,7 @@ private synchronized void updateScanStatus(BlockScanInfo info,
info.lastScanType = type;
info.lastScanTime = now;
info.lastScanOk = scanOk;
- addBlockInfo(info);
+ addBlockInfo(info, false);
// Don't update meta data if the verification failed.
if (!scanOk) {
@@ -578,7 +599,7 @@ private boolean assignInitialVerificationTimes() {
delBlockInfo(info);
info.lastScanTime = lastScanTime;
lastScanTime += verifyInterval;
- addBlockInfo(info);
+ addBlockInfo(info, false);
}
}
}
@@ -674,12 +695,21 @@ private void scan() {
throw e;
} finally {
rollVerificationLogs();
+ rollNewBlocksInfo();
if (LOG.isDebugEnabled()) {
LOG.debug("Done scanning block pool: " + blockPoolId);
}
}
}
-
+
+ // add new blocks to scan in next iteration
+ private synchronized void rollNewBlocksInfo() {
+ for (BlockScanInfo newBlock : newBlockInfoSet) {
+ blockInfoSet.add(newBlock);
+ }
+ newBlockInfoSet.clear();
+ }
+
private synchronized void rollVerificationLogs() {
if (verificationLog != null) {
try {
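The essence of the scanner fix above is a staging pattern: blocks added while a scan iteration is running go into newBlockInfoSet and are merged into the throttled blockInfoSet only when the iteration ends, so continuous writes can no longer prevent the verification log from rolling. A simplified, self-contained sketch of that pattern (not the scanner itself):

import java.util.SortedSet;
import java.util.TreeSet;

public class StagedScanSetSketch {
  private final SortedSet<Long> workingSet = new TreeSet<Long>();
  private final SortedSet<Long> stagedSet = new TreeSet<Long>();

  synchronized void add(long blockId, boolean isNewBlock) {
    if (isNewBlock) {
      if (!workingSet.contains(blockId)) {
        stagedSet.add(blockId);   // defer new blocks until the current iteration finishes
      }
    } else {
      workingSet.add(blockId);    // rescheduling an already known block
    }
  }

  synchronized void rollStaged() {  // called once at the end of each scan iteration
    workingSet.addAll(stagedSet);
    stagedSet.clear();
  }
}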
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index 85cfc1c774..b2adcd455f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -1074,10 +1074,11 @@ void logSetXAttrs(String src, List<XAttr> xAttrs, boolean toLogRpcIds) {
logEdit(op);
}
- void logRemoveXAttrs(String src, List<XAttr> xAttrs) {
+ void logRemoveXAttrs(String src, List<XAttr> xAttrs, boolean toLogRpcIds) {
final RemoveXAttrOp op = RemoveXAttrOp.getInstance();
op.src = src;
op.xAttrs = xAttrs;
+ logRpcIds(op, toLogRpcIds);
logEdit(op);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index 858cd57b23..a721491948 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -821,6 +821,10 @@ private long applyEditLogOp(FSEditLogOp op, FSDirectory fsDir,
RemoveXAttrOp removeXAttrOp = (RemoveXAttrOp) op;
fsDir.unprotectedRemoveXAttrs(removeXAttrOp.src,
removeXAttrOp.xAttrs);
+ if (toAddRetryCache) {
+ fsNamesys.addCacheEntry(removeXAttrOp.rpcClientId,
+ removeXAttrOp.rpcCallId);
+ }
break;
}
default:
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
index e972799b33..5543e0cb86 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
@@ -3551,6 +3551,7 @@ void readFields(DataInputStream in, int logVersion) throws IOException {
XAttrEditLogProto p = XAttrEditLogProto.parseDelimitedFrom(in);
src = p.getSrc();
xAttrs = PBHelper.convertXAttrs(p.getXAttrsList());
+ readRpcIds(in, logVersion);
}
@Override
@@ -3561,18 +3562,22 @@ public void writeFields(DataOutputStream out) throws IOException {
}
b.addAllXAttrs(PBHelper.convertXAttrProto(xAttrs));
b.build().writeDelimitedTo(out);
+ // clientId and callId
+ writeRpcIds(rpcClientId, rpcCallId, out);
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
XMLUtils.addSaxString(contentHandler, "SRC", src);
appendXAttrsToXml(contentHandler, xAttrs);
+ appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId);
}
@Override
void fromXml(Stanza st) throws InvalidXmlException {
src = st.getValue("SRC");
xAttrs = readXAttrsFromXml(st);
+ readRpcIdsFromXml(st);
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
index 49a035cfff..5b6d269546 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
@@ -614,6 +614,16 @@ private void loadFullNameINodes(long numFiles, DataInput in, Counter counter)
INodeDirectory parentINode = fsDir.rootDir;
for (long i = 0; i < numFiles; i++) {
pathComponents = FSImageSerialization.readPathComponents(in);
+ for (int j=0; j < pathComponents.length; j++) {
+ byte[] newComponent = renameReservedComponentOnUpgrade
+ (pathComponents[j], getLayoutVersion());
+ if (!Arrays.equals(newComponent, pathComponents[j])) {
+ String oldPath = DFSUtil.byteArray2PathString(pathComponents);
+ pathComponents[j] = newComponent;
+ String newPath = DFSUtil.byteArray2PathString(pathComponents);
+ LOG.info("Renaming reserved path " + oldPath + " to " + newPath);
+ }
+ }
final INode newNode = loadINode(
pathComponents[pathComponents.length-1], false, in, counter);
@@ -926,6 +936,7 @@ LayoutVersion.Feature.ADD_INODE_ID, getLayoutVersion())) {
oldnode = namesystem.dir.getInode(cons.getId()).asFile();
inSnapshot = true;
} else {
+ path = renameReservedPathsOnUpgrade(path, getLayoutVersion());
final INodesInPath iip = fsDir.getLastINodeInPath(path);
oldnode = INodeFile.valueOf(iip.getINode(0), path);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 9807c4f5b8..514ab072d9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -8658,11 +8658,12 @@ List<XAttr> getXAttrs(String src, List<XAttr> xAttrs) throws IOException {
nnConf.checkXAttrsConfigFlag();
FSPermissionChecker pc = getPermissionChecker();
boolean getAll = xAttrs == null || xAttrs.isEmpty();
- List<XAttr> filteredXAttrs = null;
if (!getAll) {
- filteredXAttrs = XAttrPermissionFilter.filterXAttrsForApi(pc, xAttrs);
- if (filteredXAttrs.isEmpty()) {
- return filteredXAttrs;
+ try {
+ XAttrPermissionFilter.checkPermissionForApi(pc, xAttrs);
+ } catch (AccessControlException e) {
+ logAuditEvent(false, "getXAttrs", src);
+ throw e;
}
}
checkOperation(OperationCategory.READ);
@@ -8681,15 +8682,21 @@ List<XAttr> getXAttrs(String src, List<XAttr> xAttrs) throws IOException {
if (filteredAll == null || filteredAll.isEmpty()) {
return null;
}
- List<XAttr> toGet = Lists.newArrayListWithCapacity(filteredXAttrs.size());
- for (XAttr xAttr : filteredXAttrs) {
+ List<XAttr> toGet = Lists.newArrayListWithCapacity(xAttrs.size());
+ for (XAttr xAttr : xAttrs) {
+ boolean foundIt = false;
for (XAttr a : filteredAll) {
if (xAttr.getNameSpace() == a.getNameSpace()
&& xAttr.getName().equals(a.getName())) {
toGet.add(a);
+ foundIt = true;
break;
}
}
+ if (!foundIt) {
+ throw new IOException(
+ "At least one of the attributes provided was not found.");
+ }
}
return toGet;
}
@@ -8723,17 +8730,42 @@ List<XAttr> listXAttrs(String src) throws IOException {
readUnlock();
}
}
-
+
+ /**
+ * Remove an xattr for a file or directory.
+ *
+ * @param src
+ * - path to remove the xattr from
+ * @param xAttr
+ * - xAttr to remove
+ * @throws AccessControlException
+ * @throws SafeModeException
+ * @throws UnresolvedLinkException
+ * @throws IOException
+ */
void removeXAttr(String src, XAttr xAttr) throws IOException {
- nnConf.checkXAttrsConfigFlag();
- HdfsFileStatus resultingStat = null;
- FSPermissionChecker pc = getPermissionChecker();
+ CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
+ if (cacheEntry != null && cacheEntry.isSuccess()) {
+ return; // Return previous response
+ }
+ boolean success = false;
try {
- XAttrPermissionFilter.checkPermissionForApi(pc, xAttr);
+ removeXAttrInt(src, xAttr, cacheEntry != null);
+ success = true;
} catch (AccessControlException e) {
logAuditEvent(false, "removeXAttr", src);
throw e;
+ } finally {
+ RetryCache.setState(cacheEntry, success);
}
+ }
+
+ void removeXAttrInt(String src, XAttr xAttr, boolean logRetryCache)
+ throws IOException {
+ nnConf.checkXAttrsConfigFlag();
+ HdfsFileStatus resultingStat = null;
+ FSPermissionChecker pc = getPermissionChecker();
+ XAttrPermissionFilter.checkPermissionForApi(pc, xAttr);
checkOperation(OperationCategory.WRITE);
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
writeLock();
@@ -8747,12 +8779,12 @@ void removeXAttr(String src, XAttr xAttr) throws IOException {
xAttrs.add(xAttr);
List<XAttr> removedXAttrs = dir.removeXAttrs(src, xAttrs);
if (removedXAttrs != null && !removedXAttrs.isEmpty()) {
- getEditLog().logRemoveXAttrs(src, removedXAttrs);
+ getEditLog().logRemoveXAttrs(src, removedXAttrs, logRetryCache);
+ } else {
+ throw new IOException(
+ "No matching attributes found for remove operation");
}
resultingStat = getAuditFileInfo(src, false);
- } catch (AccessControlException e) {
- logAuditEvent(false, "removeXAttr", src);
- throw e;
} finally {
writeUnlock();
}
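Seen from a client, the getXAttrs change above means asking for an attribute that does not exist now fails instead of quietly returning nothing. A hedged sketch of what a FileSystem caller observes (the path and attribute name are made up):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class GetXAttrSketch {
  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    Path file = new Path("/tmp/example");                 // hypothetical path
    try {
      byte[] value = fs.getXAttr(file, "user.missing");   // attribute assumed not to exist
      System.out.println(value == null ? "null" : new String(value, "UTF-8"));
    } catch (IOException e) {
      // With this change the missing attribute is reported as an error.
      System.err.println("getXAttr failed: " + e.getMessage());
    }
  }
}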
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
index 47f29399e5..98730142fb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
@@ -26,6 +26,7 @@
import org.apache.hadoop.security.AccessControlException;
import com.google.common.collect.Lists;
+import com.google.common.base.Preconditions;
/**
* There are four types of extended attributes defined by the
@@ -60,8 +61,20 @@ static void checkPermissionForApi(FSPermissionChecker pc, XAttr xAttr)
throw new AccessControlException("User doesn't have permission for xattr: "
+ XAttrHelper.getPrefixName(xAttr));
}
-
- static List<XAttr> filterXAttrsForApi(FSPermissionChecker pc,
+
+ static void checkPermissionForApi(FSPermissionChecker pc,
+ List<XAttr> xAttrs) throws AccessControlException {
+ Preconditions.checkArgument(xAttrs != null);
+ if (xAttrs.isEmpty()) {
+ return;
+ }
+
+ for (XAttr xAttr : xAttrs) {
+ checkPermissionForApi(pc, xAttr);
+ }
+ }
+
+ static List<XAttr> filterXAttrsForApi(FSPermissionChecker pc,
List<XAttr> xAttrs) {
assert xAttrs != null : "xAttrs can not be null";
if (xAttrs == null || xAttrs.isEmpty()) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index 92a58f9822..d7235b3872 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -113,6 +113,7 @@
import org.apache.hadoop.hdfs.web.resources.XAttrSetFlagParam;
import org.apache.hadoop.hdfs.web.resources.XAttrValueParam;
import org.apache.hadoop.io.Text;
+import org.apache.hadoop.ipc.RetriableException;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.net.NetworkTopology.InvalidTopologyException;
import org.apache.hadoop.net.Node;
@@ -190,7 +191,7 @@ private static NamenodeProtocols getRPCServer(NameNode namenode)
throws IOException {
final NamenodeProtocols np = namenode.getRpcServer();
if (np == null) {
- throw new IOException("Namenode is in startup mode");
+ throw new RetriableException("Namenode is in startup mode");
}
return np;
}
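Throwing RetriableException instead of a plain IOException matters because retry and failover logic keys off the exception type: a namenode that is still starting up is a transient condition, not a hard failure. A minimal, hypothetical illustration of that distinction (the helper interface and method names are made up):

import java.io.IOException;
import org.apache.hadoop.ipc.RetriableException;

public class RetriableCallSketch {
  interface Call { void run() throws IOException; }      // stand-in for one WebHDFS request

  static void runWithRetry(Call call, int maxAttempts) throws IOException {
    for (int attempt = 1; ; attempt++) {
      try {
        call.run();
        return;
      } catch (RetriableException e) {                    // transient, e.g. namenode in startup mode
        if (attempt >= maxAttempts) {
          throw e;
        }
        // back off and retry, or fail over to another namenode
      }
    }
  }
}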
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrNameParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrNameParam.java
index 3860f916e4..8137b4494f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrNameParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrNameParam.java
@@ -25,8 +25,8 @@ public class XAttrNameParam extends StringParam {
/** Default parameter value. **/
public static final String DEFAULT = "";
- private static Domain DOMAIN = new Domain(NAME,
- Pattern.compile("^(user\\.|trusted\\.|system\\.|security\\.).+"));
+ private static Domain DOMAIN = new Domain(NAME,
+ Pattern.compile(".*"));
public XAttrNameParam(final String str) {
super(DOMAIN, str == null || str.equals(DEFAULT) ? null : str);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
index fadba07072..2589526198 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
@@ -66,7 +66,6 @@
@@ -283,7 +282,7 @@
<td>{name} ({xferaddr})</td>
<td>{lastContact}</td>
- <td>Dead{?decomissioned}, Decomissioned{/decomissioned}</td>
+ <td>Dead{?decommissioned}, Decommissioned{/decommissioned}</td>
<td>-</td>
<td>-</td>
<td>-</td>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/index.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/index.html
index 99bb13b326..aa62a37239 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/index.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/index.html
@@ -18,18 +18,7 @@
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
-
+
Hadoop Administration
-
-
-Hadoop Administration
-
-
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/index.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/index.html
index 97e0207e06..f7ef858b9e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/index.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/index.html
@@ -21,15 +21,4 @@
Hadoop Administration
-
-
-Hadoop Administration
-
-