hadoop/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/resources/nvidia-smi-output-excerpt.xml

<?xml version="1.0" ?>
<!--
  ~ Licensed to the Apache Software Foundation (ASF) under one
  ~ or more contributor license agreements. See the NOTICE file
  ~ distributed with this work for additional information
  ~ regarding copyright ownership. The ASF licenses this file
  ~ to you under the Apache License, Version 2.0 (the
  ~ "License"); you may not use this file except in compliance
  ~ with the License. You may obtain a copy of the License at
  ~
  ~     http://www.apache.org/licenses/LICENSE-2.0
  ~
  ~ Unless required by applicable law or agreed to in writing, software
  ~ distributed under the License is distributed on an "AS IS" BASIS,
  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  ~ See the License for the specific language governing permissions and
  ~ limitations under the License.
-->
<!DOCTYPE nvidia_smi_log SYSTEM "nvsmi_device_v8.dtd">
<nvidia_smi_log>
  <timestamp>Wed Sep 6 21:52:51 2017</timestamp>
  <driver_version>375.66</driver_version>
  <attached_gpus>2</attached_gpus>
  <gpu id="0000:04:00.0">
    <product_name>Tesla P100-PCIE-12GB</product_name>
    <product_brand>Tesla</product_brand>
    <uuid>GPU-28604e81-21ec-cc48-6759-bf2648b22e16</uuid>
    <minor_number>0</minor_number>
    <fb_memory_usage>
      <total>11567 MiB</total>
      <used>11400 MiB</used>
      <free>167 MiB</free>
    </fb_memory_usage>
    <utilization>
      <gpu_util>33.4 %</gpu_util>
      <memory_util>0 %</memory_util>
      <encoder_util>0 %</encoder_util>
      <decoder_util>0 %</decoder_util>
    </utilization>
    <temperature>
      <gpu_temp>31 C</gpu_temp>
      <gpu_temp_max_threshold>80 C</gpu_temp_max_threshold>
      <gpu_temp_slow_threshold>88 C</gpu_temp_slow_threshold>
    </temperature>
  </gpu>
  <gpu id="0000:82:00.0">
    <product_name>Tesla P100-PCIE-12GB_2</product_name>
    <product_brand>Tesla</product_brand>
    <uuid>GPU-46915a82-3fd2-8e11-ae26-a80b607c04f3</uuid>
    <minor_number>1</minor_number>
    <fb_memory_usage>
      <total>12290 MiB</total>
      <used>11800 MiB</used>
      <free>490 MiB</free>
    </fb_memory_usage>
    <compute_mode>Default</compute_mode>
    <utilization>
      <gpu_util>10.3 %</gpu_util>
      <memory_util>0 %</memory_util>
      <encoder_util>0 %</encoder_util>
      <decoder_util>0 %</decoder_util>
    </utilization>
    <temperature>
      <gpu_temp>34 C</gpu_temp>
      <gpu_temp_max_threshold>85 C</gpu_temp_max_threshold>
      <gpu_temp_slow_threshold>82 C</gpu_temp_slow_threshold>
    </temperature>
  </gpu>
</nvidia_smi_log>
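<!--
  Illustrative only: a minimal sketch of how the per-GPU fields in this excerpt
  could be read with the JDK's built-in DOM parser. The class name and the
  relative file path are hypothetical, and this is not the NodeManager's actual
  GPU discovery code; it is kept inside an XML comment so the fixture stays
  well-formed.

  import java.io.File;
  import javax.xml.parsers.DocumentBuilder;
  import javax.xml.parsers.DocumentBuilderFactory;
  import org.w3c.dom.Document;
  import org.w3c.dom.Element;
  import org.w3c.dom.NodeList;

  public final class NvidiaSmiExcerptReader {

    public static void main(String[] args) throws Exception {
      DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
      // Do not try to fetch the nvsmi_device_v8.dtd referenced by the DOCTYPE.
      factory.setFeature(
          "http://apache.org/xml/features/nonvalidating/load-external-dtd", false);
      DocumentBuilder builder = factory.newDocumentBuilder();
      // Hypothetical relative path to this test resource.
      Document doc = builder.parse(new File("nvidia-smi-output-excerpt.xml"));

      NodeList gpus = doc.getDocumentElement().getElementsByTagName("gpu");
      for (int i = 0; i < gpus.getLength(); i++) {
        Element gpu = (Element) gpus.item(i);
        Element fb = (Element) gpu.getElementsByTagName("fb_memory_usage").item(0);
        Element util = (Element) gpu.getElementsByTagName("utilization").item(0);
        // Values keep their units ("MiB", "%") exactly as they appear in the XML.
        System.out.printf("minor=%s pci=%s total=%s used=%s gpu_util=%s%n",
            text(gpu, "minor_number"),
            gpu.getAttribute("id"),
            text(fb, "total"),
            text(fb, "used"),
            text(util, "gpu_util"));
      }
    }

    /** Text of the first descendant element with the given tag, or null if absent. */
    private static String text(Element parent, String tag) {
      NodeList nodes = parent.getElementsByTagName(tag);
      return nodes.getLength() == 0 ? null : nodes.item(0).getTextContent().trim();
    }
  }
-->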