diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java index 7bf93ada90..e26fac5ab8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java @@ -69,7 +69,7 @@ Entry getEntryToEvict() { Iterator> it = openFileMap.entrySet() .iterator(); if (LOG.isTraceEnabled()) { - LOG.trace("openFileMap size:" + openFileMap.size()); + LOG.trace("openFileMap size:" + size()); } Entry idlest = null; @@ -117,10 +117,10 @@ Entry getEntryToEvict() { boolean put(FileHandle h, OpenFileCtx context) { OpenFileCtx toEvict = null; synchronized (this) { - Preconditions.checkState(openFileMap.size() <= this.maxStreams, - "stream cache size " + openFileMap.size() - + " is larger than maximum" + this.maxStreams); - if (openFileMap.size() == this.maxStreams) { + Preconditions.checkState(size() <= this.maxStreams, + "stream cache size " + size() + " is larger than maximum " + this + .maxStreams); + if (size() == this.maxStreams) { Entry pairs = getEntryToEvict(); if (pairs ==null) { return false; @@ -149,7 +149,7 @@ void scan(long streamTimeout) { Iterator> it = openFileMap.entrySet() .iterator(); if (LOG.isTraceEnabled()) { - LOG.trace("openFileMap size:" + openFileMap.size()); + LOG.trace("openFileMap size:" + size()); } while (it.hasNext()) { @@ -168,7 +168,7 @@ void scan(long streamTimeout) { openFileMap.remove(handle); if (LOG.isDebugEnabled()) { LOG.debug("After remove stream " + handle.getFileId() + + ", the stream number:" + openFileMap.size()); + + ", the stream number:" + size()); } ctxToRemove.add(ctx2); } @@ -201,7 +201,7 @@ void cleanAll() { Iterator> it = openFileMap.entrySet() .iterator(); if (LOG.isTraceEnabled()) { - LOG.trace("openFileMap size:" + 
openFileMap.size()); + LOG.trace("openFileMap size:" + size()); } while (it.hasNext()) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java index 35468da4dc..8a097a542c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java @@ -84,12 +84,12 @@ void addToCorruptReplicasMap(Block blk, DatanodeDescriptor dn, if (!nodes.keySet().contains(dn)) { NameNode.blockStateChangeLog.debug( "BLOCK NameSystem.addToCorruptReplicasMap: {} added as corrupt on " - + "{} by {} {}", blk.getBlockName(), dn, Server.getRemoteIp(), + + "{} by {} {}", blk, dn, Server.getRemoteIp(), reasonText); } else { NameNode.blockStateChangeLog.debug( "BLOCK NameSystem.addToCorruptReplicasMap: duplicate requested for" + - " {} to add as corrupt on {} by {} {}", blk.getBlockName(), dn, + " {} to add as corrupt on {} by {} {}", blk, dn, Server.getRemoteIp(), reasonText); } // Add the node or update the reason. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index afedbb9839..b380d199fd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -666,7 +666,7 @@ NamenodeRegistration getRegistration() { NamenodeRegistration setRegistration() { nodeRegistration = new NamenodeRegistration( - NetUtils.getHostPortString(rpcServer.getRpcAddress()), + NetUtils.getHostPortString(getNameNodeAddress()), NetUtils.getHostPortString(getHttpAddress()), getFSImage().getStorage(), getRole()); return nodeRegistration; @@ -729,7 +729,7 @@ protected void initialize(Configuration conf) throws IOException { // This is expected for MiniDFSCluster. Set it now using // the RPC server's bind address. 
clientNamenodeAddress = - NetUtils.getHostPortString(rpcServer.getRpcAddress()); + NetUtils.getHostPortString(getNameNodeAddress()); LOG.info("Clients are to use " + clientNamenodeAddress + " to access" + " this namenode/service."); } @@ -816,7 +816,7 @@ private void startCommonServices(Configuration conf) throws IOException { LOG.warn("ServicePlugin " + p + " could not be started", t); } } - LOG.info(getRole() + " RPC up at: " + rpcServer.getRpcAddress()); + LOG.info(getRole() + " RPC up at: " + getNameNodeAddress()); if (rpcServer.getServiceRpcAddress() != null) { LOG.info(getRole() + " service RPC up at: " + rpcServer.getServiceRpcAddress()); @@ -1047,7 +1047,7 @@ public InetSocketAddress getNameNodeAddress() { * @return NameNode RPC address in "host:port" string form */ public String getNameNodeAddressHostPortString() { - return NetUtils.getHostPortString(rpcServer.getRpcAddress()); + return NetUtils.getHostPortString(getNameNodeAddress()); } /** @@ -1056,7 +1056,7 @@ public String getNameNodeAddressHostPortString() { */ public InetSocketAddress getServiceRpcAddress() { final InetSocketAddress serviceAddr = rpcServer.getServiceRpcAddress(); - return serviceAddr == null ? rpcServer.getRpcAddress() : serviceAddr; + return serviceAddr == null ? getNameNodeAddress() : serviceAddr; } /**