HDFS-14640. [Dynamometer] Fix TestDynamometerInfra failure. Contributed by Erik Krogen.
parent fc0656dd30
commit 32925d04d9
@@ -128,6 +128,16 @@
           </excludes>
         </configuration>
       </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-surefire-plugin</artifactId>
+        <configuration>
+          <forkedProcessTimeoutInSeconds>1800</forkedProcessTimeoutInSeconds>
+          <environmentVariables>
+            <JAVA_HOME>${java.home}</JAVA_HOME>
+          </environmentVariables>
+        </configuration>
+      </plugin>
     </plugins>
   </build>
 
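Note: the new surefire block gives each forked test JVM a 30-minute kill timeout (1800 s), so a hung minicluster test fails the build instead of blocking it indefinitely, and pins JAVA_HOME for the fork to the JDK running Maven, presumably so the Hadoop shell scripts the test launches can locate Java. A rough command-line equivalent, via surefire's surefire.timeout user property (the module path below is an assumption, not part of this patch):

# Run the module's tests with the same 30-minute fork timeout
mvn test -pl hadoop-tools/hadoop-dynamometer -Dsurefire.timeout=1800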
@@ -36,7 +36,7 @@ hadoopTarTmp="$hadoopTar.temporary"
 mkdir -p "$hadoopTarTmp"
 
 tar xzf "$hadoopTar" -C "$hadoopTarTmp"
-baseDir="$(find -H "$hadoopTarTmp" -depth 1 -type d | head -n 1)" # Should only be one subdir
+baseDir="$(find -H "$hadoopTarTmp" -maxdepth 1 -mindepth 1 -type d | head -n 1)" # Should only be one subdir
 hadoopShare="$baseDir/share/hadoop"
 
 # Remove unnecessary files
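Note: this change (and the matching ones in the hunks below) is the heart of the fix. `-depth 1` meaning "exactly one level down" is BSD find syntax; GNU find, which Linux CI hosts run, only has a no-argument `-depth` flag and rejects the trailing `1`. `-maxdepth 1 -mindepth 1` is the portable spelling. A quick way to see the difference, using a hypothetical /tmp/demo directory:

# GNU find fails on the old form, with an error along the lines of:
#   find: paths must precede expression: '1'
find -H /tmp/demo -depth 1 -type d

# Portable form: match only the direct children of the start point
find -H /tmp/demo -maxdepth 1 -mindepth 1 -type d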
@@ -38,12 +38,12 @@ else
   name_dir="$(pwd)"
 fi
 
-image_file_count="$(find -H "${name_dir}" -depth 1 -name "fsimage_*$image_txid" -type f | wc -l)"
+image_file_count="$(find -H "${name_dir}" -maxdepth 1 -mindepth 1 -name "fsimage_*$image_txid" -type f | wc -l)"
 if [[ "$image_file_count" != 1 ]]; then
   echo "Error; found $image_file_count matching fsimage files."
   exit 1
 fi
-image_file="$(find -H "${name_dir}" -depth 1 -name "fsimage_*$image_txid" -type f)"
+image_file="$(find -H "${name_dir}" -maxdepth 1 -mindepth 1 -name "fsimage_*$image_txid" -type f)"
 image_file_name="$(basename "${image_file}")"
 echo "Using fsimage: $image_file_name"
 image_file_md5="${image_file}.md5"
@@ -75,7 +75,7 @@ chmod 755 "$baseDir"
 chmod 700 "$pidDir"
 
 # Set Hadoop variables for component
-hadoopHome="$(find -H "$(pwd)/hadoopBinary" -depth 1 -type d | head -n 1)"
+hadoopHome="$(find -H "$(pwd)/hadoopBinary" -maxdepth 1 -mindepth 1 -type d | head -n 1)"
 # Save real environment for later
 hadoopConfOriginal=${HADOOP_CONF_DIR:-$confDir}
 hadoopHomeOriginal=${HADOOP_HOME:-$hadoopHome}
@@ -252,11 +252,8 @@ EOF
 rm -rf "$nameDir" "$editsDir" "$checkpointDir"
 mkdir -p "$nameDir/current" "$editsDir/current" "$checkpointDir"
 chmod -R 700 "$nameDir" "$editsDir" "$checkpointDir"
-fsImageFile="$(find "$(pwd)" -depth 1 -name "fsimage_*" | tail -n 1)"
-fsImageMD5File="$(find "$(pwd)" -depth 1 -name "fsimage_*.md5" | tail -n 1)"
-ln -snf "$fsImageFile" "$nameDir/current/$(basename "$fsImageFile")"
-ln -snf "$fsImageMD5File" "$nameDir/current/$(basename "$fsImageMD5File")"
-ln -snf "$(pwd)/VERSION" "$nameDir/current/VERSION"
+# Link all of the fsimage files into the name dir
+find "$(pwd)" -maxdepth 1 -mindepth 1 \( -name "fsimage_*" -or -name "VERSION" \) -execdir ln -snf "$(pwd)/{}" "$nameDir/current/{}" \;
 chmod 700 "$nameDir"/current/*
 
 namenodeConfigs=(
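Note: the single find/-execdir pass drops the non-portable `-depth 1` and links every fsimage_* file (image and .md5 checksum alike) plus VERSION in one sweep, instead of computing each path separately. `-execdir` runs ln from the directory containing each match, and GNU find substitutes the matched name for {} even inside a larger argument, expanding it as ./name, which still forms a valid path here. A toy equivalent with hypothetical paths:

# Link every matching direct child of $src into $dst in one pass
src="$(pwd)"; dst="/tmp/name/current"
mkdir -p "$dst"
find "$src" -maxdepth 1 -mindepth 1 \( -name "fsimage_*" -or -name "VERSION" \) \
  -execdir ln -snf "$src/{}" "$dst/{}" \;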
@@ -20,6 +20,7 @@
 import com.google.common.collect.Sets;
 import java.util.Optional;
 import java.util.concurrent.TimeoutException;
+import java.util.concurrent.TimeUnit;
 import java.util.function.Supplier;
 import org.apache.hadoop.test.PlatformAssumptions;
 import org.apache.hadoop.tools.dynamometer.workloadgenerator.audit.AuditLogDirectParser;
@@ -315,16 +316,30 @@ public void testNameNodeInYARN() throws Exception {
 
     awaitApplicationStartup();
 
-    Supplier<Boolean> falseSupplier = () -> false;
+    long startTime = System.currentTimeMillis();
+    long maxWaitTimeMs = TimeUnit.MINUTES.toMillis(10);
+    Supplier<Boolean> exitCheckSupplier = () -> {
+      if (System.currentTimeMillis() - startTime > maxWaitTimeMs) {
+        // Wait at most 10 minutes for the NameNode to start and be ready
+        return true;
+      }
+      try {
+        // Exit immediately if the YARN app fails
+        return yarnClient.getApplicationReport(infraAppId)
+            .getYarnApplicationState() == YarnApplicationState.FAILED;
+      } catch (IOException | YarnException e) {
+        return true;
+      }
+    };
     Optional<Properties> namenodeProperties = DynoInfraUtils
-        .waitForAndGetNameNodeProperties(falseSupplier, localConf,
+        .waitForAndGetNameNodeProperties(exitCheckSupplier, localConf,
             client.getNameNodeInfoPath(), LOG);
     if (!namenodeProperties.isPresent()) {
       fail("Unable to fetch NameNode properties");
     }
 
     DynoInfraUtils.waitForNameNodeReadiness(namenodeProperties.get(), 3, false,
-        falseSupplier, localConf, LOG);
+        exitCheckSupplier, localConf, LOG);
 
     assertClusterIsFunctional(localConf, namenodeProperties.get());
 
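Note: the test previously handed both wait calls a supplier that always returned false, so a wedged or failed YARN application kept the test spinning until the outer JUnit timeout. The new exitCheckSupplier aborts the wait after 10 minutes, or immediately once the infra application reports FAILED, and treats a failure to fetch the report as a reason to stop as well. A shell sketch of the same polling idea, where APP_ID is a placeholder and the exact `yarn application -status` output format is an assumption that can vary across releases:

APP_ID="application_1234567890123_0001"   # placeholder, not a real app
deadline=$((SECONDS + 600))               # give up after 10 minutes
while (( SECONDS < deadline )); do
  state="$(yarn application -status "$APP_ID" 2>/dev/null \
    | awk -F' : ' '/\tState/{print $2; exit}')"
  if [[ "$state" == "FAILED" ]]; then
    echo "Application $APP_ID failed" >&2
    exit 1
  fi
  sleep 10
done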
@@ -38,12 +38,12 @@ else
   edits_dir="$(pwd)"
 fi
 
-edits_file_count="$(find -H "${edits_dir}" -depth 1 -type f -name "edits_*-*$image_txid" | wc -l)"
+edits_file_count="$(find -H "${edits_dir}" -maxdepth 1 -type f -name "edits_*-*$image_txid" | wc -l)"
 if [[ "$edits_file_count" != 1 ]]; then
   echo "Error; found $edits_file_count matching edit files."
   exit 1
 fi
-edits_file="$(find -H "${edits_dir}" -depth 1 -type f -name "edits_*-*$image_txid")"
+edits_file="$(find -H "${edits_dir}" -maxdepth 1 -type f -name "edits_*-*$image_txid")"
 
 # Shellcheck complains about the $ in the single-quote because it won't expand, but this is intentional
 # shellcheck disable=SC2016
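Note: unlike the earlier hunks, this one adds only -maxdepth 1. Since the expression already carries -type f and the starting point is a directory, the depth-0 start point can never match, so -mindepth 1 would be redundant here.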