diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index 9c77299e50..022c59edbe 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -889,8 +889,11 @@ private String reconfDfsUsageParameters(String property, String newVal) String result = null; try { LOG.info("Reconfiguring {} to {}", property, newVal); + if (data == null) { + LOG.debug("FsDatasetSpi has not been initialized."); + throw new IOException("FsDatasetSpi has not been initialized"); + } if (property.equals(FS_DU_INTERVAL_KEY)) { - Preconditions.checkNotNull(data, "FsDatasetSpi has not been initialized."); long interval = (newVal == null ? FS_DU_INTERVAL_DEFAULT : Long.parseLong(newVal)); result = Long.toString(interval); @@ -902,7 +905,6 @@ private String reconfDfsUsageParameters(String property, String newVal) } } } else if (property.equals(FS_GETSPACEUSED_JITTER_KEY)) { - Preconditions.checkNotNull(data, "FsDatasetSpi has not been initialized."); long jitter = (newVal == null ? 
FS_GETSPACEUSED_JITTER_DEFAULT : Long.parseLong(newVal)); result = Long.toString(jitter); @@ -914,7 +916,6 @@ private String reconfDfsUsageParameters(String property, String newVal) } } } else if (property.equals(FS_GETSPACEUSED_CLASSNAME)) { - Preconditions.checkNotNull(data, "FsDatasetSpi has not been initialized."); Class klass; if (newVal == null) { if (Shell.WINDOWS) { @@ -1174,7 +1175,7 @@ private void refreshVolumes(String newVolumes) throws IOException { .newFixedThreadPool(changedVolumes.newLocations.size()); List> exceptions = Lists.newArrayList(); - Preconditions.checkNotNull(data, "Storage not yet initialized"); + checkStorageState("refreshVolumes"); for (final StorageLocation location : changedVolumes.newLocations) { exceptions.add(service.submit(new Callable() { @Override @@ -1274,7 +1275,7 @@ private synchronized void removeVolumes( clearFailure, Joiner.on(",").join(storageLocations))); IOException ioe = null; - Preconditions.checkNotNull(data, "Storage not yet initialized"); + checkStorageState("removeVolumes"); // Remove volumes and block infos from FsDataset. 
data.removeVolumes(storageLocations, clearFailure); @@ -2301,7 +2302,7 @@ public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block, Token token) throws IOException { checkBlockLocalPathAccess(); checkBlockToken(block, token, BlockTokenIdentifier.AccessMode.READ); - Preconditions.checkNotNull(data, "Storage not yet initialized"); + checkStorageState("getBlockLocalPathInfo"); BlockLocalPathInfo info = data.getBlockLocalPathInfo(block); if (info != null) { LOG.trace("getBlockLocalPathInfo successful " + @@ -2351,7 +2352,7 @@ FileInputStream[] requestShortCircuitFdsForRead(final ExtendedBlock blk, FileInputStream fis[] = new FileInputStream[2]; try { - Preconditions.checkNotNull(data, "Storage not yet initialized"); + checkStorageState("requestShortCircuitFdsForRead"); fis[0] = (FileInputStream)data.getBlockInputStream(blk, 0); fis[1] = DatanodeUtil.getMetaDataInputStream(blk, data); } catch (ClassCastException e) { @@ -3382,7 +3383,7 @@ public static void main(String args[]) { @Override // InterDatanodeProtocol public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock) throws IOException { - Preconditions.checkNotNull(data, "Storage not yet initialized"); + checkStorageState("initReplicaRecovery"); return data.initReplicaRecovery(rBlock); } @@ -3393,7 +3394,7 @@ public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock) public String updateReplicaUnderRecovery(final ExtendedBlock oldBlock, final long recoveryId, final long newBlockId, final long newLength) throws IOException { - Preconditions.checkNotNull(data, "Storage not yet initialized"); + checkStorageState("updateReplicaUnderRecovery"); final Replica r = data.updateReplicaUnderRecovery(oldBlock, recoveryId, newBlockId, newLength); // Notify the namenode of the updated block info. This is important @@ -3679,10 +3680,23 @@ public void deleteBlockPool(String blockPoolId, boolean force) "The block pool is still running. 
First do a refreshNamenodes to " + "shutdown the block pool service"); } - Preconditions.checkNotNull(data, "Storage not yet initialized"); + checkStorageState("deleteBlockPool"); data.deleteBlockPool(blockPoolId, force); } + /** + * Check if storage has been initialized. + * @param methodName the name of the calling method, included in the exception message + * @throws IOException if the storage (FsDatasetSpi) has not yet been initialized. + */ + private void checkStorageState(String methodName) throws IOException { + if (data == null) { + String message = "Storage not yet initialized for " + methodName; + LOG.debug(message); + throw new IOException(message); + } + } + @Override // ClientDatanodeProtocol public synchronized void shutdownDatanode(boolean forUpgrade) throws IOException { checkSuperuserPrivilege(); @@ -4131,7 +4145,7 @@ public String getSlowDisks() { @Override public List getVolumeReport() throws IOException { checkSuperuserPrivilege(); - Preconditions.checkNotNull(data, "Storage not yet initialized"); + checkStorageState("getVolumeReport"); Map volumeInfoMap = data.getVolumeInfoMap(); if (volumeInfoMap == null) { LOG.warn("DataNode volume info not available.");