8 Commits

Author SHA1 Message Date
0f416b2da9 Patch cuda.cpp with cloudy's fix 2026-01-10 19:29:45 -05:00
Dr-Noob
5f619dc95a [v0.30] Add support for XCDs and matrix cores
For XCDs, we don't show them if the GPU is made of a single
XCD, as that adds little value.

For matrix cores, we assume the count can be computed as
compute_units * simds_per_cu; this seems to hold for the GPUs
I checked from CDNA3 and RDNA3. I am not sure what would happen
on older GPUs that do not have matrix cores, though.
2025-10-26 10:51:27 +01:00
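
As a quick illustration of the heuristic above, here is a minimal standalone C++ sketch (not gpufetch code). The MI300X-class example values (304 CUs, 4 SIMDs per CU) are assumptions based on public CDNA3 specs, not values queried through HSA:

#include <cstdint>
#include <cstdio>

// Heuristic from the commit above: assume one matrix core per SIMD,
// so matrix cores = compute units * SIMDs per CU.
static int32_t estimate_matrix_cores(int32_t compute_units, int32_t simds_per_cu) {
  return compute_units * simds_per_cu;
}

int main() {
  // Assumed MI300X-class (CDNA3) values; gpufetch obtains the real ones via
  // HSA_AMD_AGENT_INFO_COMPUTE_UNIT_COUNT and HSA_AMD_AGENT_INFO_NUM_SIMDS_PER_CU.
  int32_t compute_units = 304;
  int32_t simds_per_cu = 4;
  printf("Estimated matrix cores: %d\n",
         estimate_matrix_cores(compute_units, simds_per_cu)); // prints 1216
  return 0;
}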
Dr-Noob
98bb02e203 [v0.30] Allow users to select backend from build script
Before we had AMD support, CMakeLists.txt tried to enable all backends
by default. Now that we have AMD support, that no longer makes much
sense, so instead it only enables the backends explicitly specified by
the user (with the -DENABLE_XXX_BACKEND flags).

Also, before AMD support, the build.sh script was useful for simply
invoking cmake and letting it figure out the backends, but the script
became rather useless after the change to CMakeLists.txt mentioned
above.

Therefore, this commit allows users to pass an argument, like:

./build.sh cuda

to specify which backend(s) to enable, without having to manually
configure the build with the -DENABLE_XXX_BACKEND flags. Note that
multiple backends are also allowed, like:

./build.sh intel,hsa

which would enable both the Intel and HSA backends (this could make
sense, for example, on a system with an Intel iGPU and an AMD dGPU).
2025-10-24 22:29:45 +02:00
Dr-Noob
78d34e71f1 [v0.30][AMD] Add support to fetch bus width, global memory and LDS size
We can use hsa_amd_agent_iterate_memory_pools to fetch info about the
GPU's memory pools. HSA_AMD_SEGMENT_GROUP seems to be LDS, and
HSA_AMD_SEGMENT_GLOBAL seems to be global memory.

However, the latter is reported multiple times (I don't know why). The
only solution I found for this is to check for the
HSA_AMD_MEMORY_POOL_GLOBAL_FLAG_EXTENDED_SCOPE_FINE_GRAINED flag, which
seems to be reported only once.

For bus width, we simply use HSA_AMD_AGENT_INFO_MEMORY_WIDTH.
2025-10-23 21:30:02 +02:00
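
A minimal standalone sketch of this approach (not the gpufetch implementation, which appears in the HSA backend diff below). It assumes ROCm's HSA runtime is installed (headers under hsa/, linked with -lhsa-runtime64) and simply prints the segment, size and global flags of every memory pool of each GPU agent:

#include <cstdint>
#include <cstdio>
#include <hsa/hsa.h>
#include <hsa/hsa_ext_amd.h>

// Print the segment, size and (for global pools) flags of one memory pool.
static hsa_status_t pool_cb(hsa_amd_memory_pool_t pool, void*) {
  hsa_amd_segment_t segment;
  hsa_status_t err = hsa_amd_memory_pool_get_info(pool, HSA_AMD_MEMORY_POOL_INFO_SEGMENT, &segment);
  if (err != HSA_STATUS_SUCCESS) return err;

  size_t size = 0;
  err = hsa_amd_memory_pool_get_info(pool, HSA_AMD_MEMORY_POOL_INFO_SIZE, &size);
  if (err != HSA_STATUS_SUCCESS) return err;

  uint32_t flags = 0;
  if (segment == HSA_AMD_SEGMENT_GLOBAL) {
    // Global pools can show up more than once; the commit filters on the
    // EXTENDED_SCOPE_FINE_GRAINED bit of these flags to keep a single one.
    err = hsa_amd_memory_pool_get_info(pool, HSA_AMD_MEMORY_POOL_INFO_GLOBAL_FLAGS, &flags);
    if (err != HSA_STATUS_SUCCESS) return err;
  }

  printf("segment=%d size=%zu bytes global_flags=0x%x\n", (int) segment, size, flags);
  return HSA_STATUS_SUCCESS;
}

// Visit every GPU agent and iterate its memory pools.
static hsa_status_t agent_cb(hsa_agent_t agent, void*) {
  hsa_device_type_t type;
  hsa_status_t err = hsa_agent_get_info(agent, HSA_AGENT_INFO_DEVICE, &type);
  if (err != HSA_STATUS_SUCCESS) return err;
  if (type != HSA_DEVICE_TYPE_GPU) return HSA_STATUS_SUCCESS;
  return hsa_amd_agent_iterate_memory_pools(agent, pool_cb, nullptr);
}

int main() {
  if (hsa_init() != HSA_STATUS_SUCCESS) return 1;
  hsa_iterate_agents(agent_cb, nullptr);
  hsa_shut_down();
  return 0;
}

Per the commit message, the GROUP pool corresponds to the LDS, and only one of the GLOBAL pools carries the extended-scope fine-grained flag, which is why the commit keys on that flag to pick the global memory size.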
Dr-Noob
82ea16fc3d [v0.30] Fix warning in printer 2025-10-16 20:01:14 +02:00
Dr-Noob
6589de9717 [v0.30] Reorganize attributes in printer and add CUs attr for AMD 2025-10-16 19:53:48 +02:00
Dr-Noob
0950b97393 [v0.30] Build pciutils only if necessary
If only HSA is enabled, we don't need pciutils, since AMD detection
does not rely on it. Therefore, CMakeLists.txt is changed to build
pciutils only if required.

This commit has some side effects:
1. We no longer build the Intel backend by default. In other words, no
   backend is built by default; the user must specify which backends
   to use.
2. There were some issues with includes and with wrongly used defines
   and variables. This commit fixes all of that.
2025-10-16 08:26:42 +02:00
Dr-Noob
8794cd322d [v0.30] Add support for building on AMD where rocm-cmake is not installed 2025-10-16 07:24:45 +02:00
13 changed files with 414 additions and 162 deletions

View File

@@ -10,9 +10,10 @@ set(CUDA_DIR "${SRC_DIR}/cuda")
set(HSA_DIR "${SRC_DIR}/hsa") set(HSA_DIR "${SRC_DIR}/hsa")
set(INTEL_DIR "${SRC_DIR}/intel") set(INTEL_DIR "${SRC_DIR}/intel")
# Enable Intel backend by default # Make sure that at least one backend is enabled.
if(NOT DEFINED ENABLE_INTEL_BACKEND) # It does not make sense that the user has not specified any backend.
set(ENABLE_INTEL_BACKEND true) if(NOT ENABLE_INTEL_BACKEND AND NOT ENABLE_CUDA_BACKEND AND NOT ENABLE_HSA_BACKEND)
message(FATAL_ERROR "No backend was enabled! Please enable at least one backend with -DENABLE_XXX_BACKEND")
endif() endif()
if(ENABLE_CUDA_BACKEND) if(ENABLE_CUDA_BACKEND)
@@ -66,7 +67,30 @@ if(ENABLE_HSA_BACKEND)
endif() endif()
endif() endif()
set(GPUFECH_COMMON
${COMMON_DIR}/main.cpp
${COMMON_DIR}/args.cpp
${COMMON_DIR}/gpu.cpp
${COMMON_DIR}/global.cpp
${COMMON_DIR}/printer.cpp
${COMMON_DIR}/master.cpp
${COMMON_DIR}/uarch.cpp
)
set(GPUFETCH_LINK_TARGETS z)
if(NOT(ENABLE_HSA_BACKEND AND NOT ENABLE_CUDA_BACKEND AND NOT ENABLE_INTEL_BACKEND))
# Look for pciutils only if not building HSA only.
#
# This has the (intended) secondary effect that if only HSA backend is enabled
# by the user, but ROCm cannot be found, pciutils will still be compiled in
# order to show the list of GPUs available on the system, so that the user will
# get at least some feedback even if HSA is not found.
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}/cmake") list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}/cmake")
list(APPEND GPUFECH_COMMON ${COMMON_DIR}/pci.cpp ${COMMON_DIR}/sort.cpp)
list(APPEND GPUFETCH_LINK_TARGETS pci)
set(CMAKE_ENABLE_PCIUTILS ON)
find_package(PCIUTILS) find_package(PCIUTILS)
if(NOT ${PCIUTILS_FOUND}) if(NOT ${PCIUTILS_FOUND})
message(STATUS "${BoldYellow}pciutils not found, downloading and building a local copy...${ColorReset}") message(STATUS "${BoldYellow}pciutils not found, downloading and building a local copy...${ColorReset}")
@@ -89,12 +113,17 @@ else()
# Needed for linking libpci in FreeBSD # Needed for linking libpci in FreeBSD
link_directories(/usr/local/lib/) link_directories(/usr/local/lib/)
endif() endif()
endif()
add_executable(gpufetch ${COMMON_DIR}/main.cpp ${COMMON_DIR}/args.cpp ${COMMON_DIR}/gpu.cpp ${COMMON_DIR}/pci.cpp ${COMMON_DIR}/sort.cpp ${COMMON_DIR}/global.cpp ${COMMON_DIR}/printer.cpp ${COMMON_DIR}/master.cpp ${COMMON_DIR}/uarch.cpp) add_executable(gpufetch ${GPUFECH_COMMON})
set(SANITY_FLAGS -Wfloat-equal -Wshadow -Wpointer-arith -Wall -Wextra -pedantic -fstack-protector-all -pedantic) set(SANITY_FLAGS -Wfloat-equal -Wshadow -Wpointer-arith -Wall -Wextra -pedantic -fstack-protector-all -pedantic)
target_compile_features(gpufetch PRIVATE cxx_std_11) target_compile_features(gpufetch PRIVATE cxx_std_11)
target_compile_options(gpufetch PRIVATE ${SANITY_FLAGS}) target_compile_options(gpufetch PRIVATE ${SANITY_FLAGS})
if (CMAKE_ENABLE_PCIUTILS)
target_compile_definitions(gpufetch PUBLIC BACKEND_USE_PCI)
endif()
if(ENABLE_INTEL_BACKEND) if(ENABLE_INTEL_BACKEND)
target_compile_definitions(gpufetch PUBLIC BACKEND_INTEL) target_compile_definitions(gpufetch PUBLIC BACKEND_INTEL)
@@ -156,7 +185,7 @@ if(ENABLE_HSA_BACKEND)
target_link_libraries(gpufetch hsa_backend) target_link_libraries(gpufetch hsa_backend)
endif() endif()
target_link_libraries(gpufetch pci z) target_link_libraries(gpufetch ${GPUFETCH_LINK_TARGETS})
install(TARGETS gpufetch DESTINATION bin) install(TARGETS gpufetch DESTINATION bin)
if(NOT WIN32) if(NOT WIN32)

View File

@@ -1,5 +1,24 @@
#!/bin/bash #!/bin/bash
print_help() {
cat << EOF
Usage: $0 <backends> [build_type]
<backends> MANDATORY. Comma-separated list of
backends to enable.
Valid options: hsa, intel, cuda
Example: hsa,cuda
[build_type] OPTIONAL. Build type. Valid options:
debug, release (default: release)
Examples:
$0 hsa,intel debug
$0 cuda
$0 hsa,intel,cuda release
EOF
}
# gpufetch build script # gpufetch build script
set -e set -e
@@ -7,19 +26,79 @@ rm -rf build/ gpufetch
mkdir build/ mkdir build/
cd build/ cd build/
if [ "$1" == "debug" ] if [ "$1" == "--help" ]
then then
BUILD_TYPE="Debug" echo "gpufetch build script"
else echo
BUILD_TYPE="Release" print_help
exit 0
fi fi
if [[ $# -lt 1 ]]; then
echo "ERROR: At least one backend must be specified."
echo
print_help
exit 1
fi
# Determine if last argument is build type
LAST_ARG="${!#}"
if [[ "$LAST_ARG" == "debug" || "$LAST_ARG" == "release" ]]; then
BUILD_TYPE="$LAST_ARG"
BACKEND_ARG="${1}"
else
BUILD_TYPE="release"
BACKEND_ARG="${1}"
fi
# Split comma-separated backends into an array
IFS=',' read -r -a BACKENDS <<< "$BACKEND_ARG"
# Validate build type
if [[ "$BUILD_TYPE" != "debug" && "$BUILD_TYPE" != "release" ]]
then
echo "Error: Invalid build type '$BUILD_TYPE'."
echo "Valid options are: debug, release"
exit 1
fi
# From lower to upper case
CMAKE_FLAGS="-DCMAKE_BUILD_TYPE=${BUILD_TYPE^}"
# Validate backends
VALID_BACKENDS=("hsa" "intel" "cuda")
for BACKEND in "${BACKENDS[@]}"; do
case "$BACKEND" in
hsa)
CMAKE_FLAGS+=" -DENABLE_HSA_BACKEND=ON"
;;
intel)
CMAKE_FLAGS+=" -DENABLE_INTEL_BACKEND=ON"
;;
cuda)
CMAKE_FLAGS+=" -DENABLE_CUDA_BACKEND=ON"
;;
*)
echo "ERROR: Invalid backend '$BACKEND'."
echo "Valid options: ${VALID_BACKENDS[*]}"
exit 1
;;
esac
done
# You can also manually specify the compilation flags.
# If you need to, just run the cmake command directly
# instead of using this script.
#
# Here you will find some help:
#
# In case you have CUDA installed but it is not detected, # In case you have CUDA installed but it is not detected,
# - set CMAKE_CUDA_COMPILER to your nvcc binary: # - set CMAKE_CUDA_COMPILER to your nvcc binary:
# - set CMAKE_CUDA_COMPILER_TOOLKIT_ROOT to the CUDA root dir # - set CMAKE_CUDA_COMPILER_TOOLKIT_ROOT to the CUDA root dir
# for example: # for example:
# cmake -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DCMAKE_CUDA_COMPILER=/usr/local/cuda/bin/nvcc -DCMAKE_CUDA_COMPILER_TOOLKIT_ROOT=/usr/local/cuda/ .. # cmake -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DCMAKE_CUDA_COMPILER=/usr/local/cuda/bin/nvcc -DCMAKE_CUDA_COMPILER_TOOLKIT_ROOT=/usr/local/cuda/ ..
#
# In case you want to explicitely disable a backend, you can: # In case you want to explicitely disable a backend, you can:
# Disable CUDA backend: # Disable CUDA backend:
# cmake -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DENABLE_CUDA_BACKEND=OFF .. # cmake -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DENABLE_CUDA_BACKEND=OFF ..
@@ -28,7 +107,9 @@ fi
# Disable Intel backend: # Disable Intel backend:
# cmake -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DENABLE_INTEL_BACKEND=OFF .. # cmake -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DENABLE_INTEL_BACKEND=OFF ..
cmake -DCMAKE_BUILD_TYPE=$BUILD_TYPE .. echo "$0: Running cmake $CMAKE_FLAGS"
echo
cmake $CMAKE_FLAGS ..
os=$(uname) os=$(uname)
if [ "$os" == 'Linux' ]; then if [ "$os" == 'Linux' ]; then

View File

@@ -101,6 +101,17 @@ char* get_str_bus_width(struct gpu_info* gpu) {
return string; return string;
} }
char* get_str_lds_size(struct gpu_info* gpu) {
// TODO: Show XX KB (XX MB Total) like in cpufetch
uint32_t size = 3+1+3+1;
assert(strlen(STRING_UNKNOWN)+1 <= size);
char* string = (char *) ecalloc(size, sizeof(char));
sprintf(string, "%d KB", gpu->mem->lds_size / 1024);
return string;
}
char* get_str_memory_clock(struct gpu_info* gpu) { char* get_str_memory_clock(struct gpu_info* gpu) {
return get_freq_as_str_mhz(gpu->mem->freq); return get_freq_as_str_mhz(gpu->mem->freq);
} }

View File

@@ -3,8 +3,6 @@
#include <cstdint> #include <cstdint>
#include "../cuda/pci.hpp"
#define UNKNOWN_FREQ -1 #define UNKNOWN_FREQ -1
enum { enum {
@@ -48,6 +46,10 @@ struct topology_c {
// HSA topology // HSA topology
struct topology_h { struct topology_h {
int32_t compute_units; int32_t compute_units;
int32_t num_shader_engines;
int32_t simds_per_cu;
int32_t num_xcc;
int32_t matrix_cores;
}; };
// Intel topology // Intel topology
@@ -63,6 +65,7 @@ struct memory {
int32_t bus_width; int32_t bus_width;
int32_t freq; int32_t freq;
int32_t clk_mul; // clock multiplier int32_t clk_mul; // clock multiplier
int32_t lds_size; // HSA specific for now
}; };
struct gpu_info { struct gpu_info {
@@ -90,6 +93,7 @@ char* get_str_freq(struct gpu_info* gpu);
char* get_str_memory_size(struct gpu_info* gpu); char* get_str_memory_size(struct gpu_info* gpu);
char* get_str_memory_type(struct gpu_info* gpu); char* get_str_memory_type(struct gpu_info* gpu);
char* get_str_bus_width(struct gpu_info* gpu); char* get_str_bus_width(struct gpu_info* gpu);
char* get_str_lds_size(struct gpu_info* gpu);
char* get_str_memory_clock(struct gpu_info* gpu); char* get_str_memory_clock(struct gpu_info* gpu);
char* get_str_l2(struct gpu_info* gpu); char* get_str_l2(struct gpu_info* gpu);
char* get_str_peak_performance(struct gpu_info* gpu); char* get_str_peak_performance(struct gpu_info* gpu);

View File

@@ -8,6 +8,10 @@
#include "../cuda/cuda.hpp" #include "../cuda/cuda.hpp"
#include "../cuda/uarch.hpp" #include "../cuda/uarch.hpp"
#ifdef BACKEND_USE_PCI
#include "pci.hpp"
#endif
static const char* VERSION = "0.30"; static const char* VERSION = "0.30";
void print_help(char *argv[]) { void print_help(char *argv[]) {
@@ -79,8 +83,12 @@ int main(int argc, char* argv[]) {
} }
if(get_num_gpus_available(list) == 0) { if(get_num_gpus_available(list) == 0) {
#ifdef BACKEND_USE_PCI
printErr("No GPU was detected! Available GPUs are:"); printErr("No GPU was detected! Available GPUs are:");
print_gpus_list_pci(); print_gpus_list_pci();
#else
printErr("No GPU was detected!");
#endif
printf("Please, make sure that the appropiate backend is enabled:\n"); printf("Please, make sure that the appropiate backend is enabled:\n");
print_enabled_backends(); print_enabled_backends();
printf("Visit https://github.com/Dr-Noob/gpufetch#2-backends for more information\n"); printf("Visit https://github.com/Dr-Noob/gpufetch#2-backends for more information\n");

View File

@@ -1,7 +1,10 @@
#include <cstdlib> #include <cstdlib>
#include <cstdio> #include <cstdio>
#ifdef BACKEND_USE_PCI
#include "pci.hpp" #include "pci.hpp"
#endif
#include "global.hpp" #include "global.hpp"
#include "colors.hpp" #include "colors.hpp"
#include "master.hpp" #include "master.hpp"
@@ -19,7 +22,9 @@ struct gpu_list {
struct gpu_list* get_gpu_list() { struct gpu_list* get_gpu_list() {
int idx = 0; int idx = 0;
#ifdef BACKEND_USE_PCI
struct pci_dev *devices = get_pci_devices_from_pciutils(); struct pci_dev *devices = get_pci_devices_from_pciutils();
#endif
struct gpu_list* list = (struct gpu_list*) malloc(sizeof(struct gpu_list)); struct gpu_list* list = (struct gpu_list*) malloc(sizeof(struct gpu_list));
list->num_gpus = 0; list->num_gpus = 0;
list->gpus = (struct gpu_info**) malloc(sizeof(struct info*) * MAX_GPUS); list->gpus = (struct gpu_info**) malloc(sizeof(struct info*) * MAX_GPUS);
@@ -40,7 +45,7 @@ struct gpu_list* get_gpu_list() {
bool valid = true; bool valid = true;
while(valid) { while(valid) {
list->gpus[idx] = get_gpu_info_hsa(devices, idx); list->gpus[idx] = get_gpu_info_hsa(idx);
if(list->gpus[idx] != NULL) idx++; if(list->gpus[idx] != NULL) idx++;
else valid = false; else valid = false;
} }

View File

@@ -32,64 +32,60 @@
#define MAX_ATTRIBUTES 100 #define MAX_ATTRIBUTES 100
#define MAX_TERM_SIZE 1024 #define MAX_TERM_SIZE 1024
typedef struct {
int id;
const char *name;
const char *shortname;
} AttributeField;
// AttributeField IDs
// Used by
enum { enum {
ATTRIBUTE_NAME, ATTRIBUTE_NAME, // ALL
ATTRIBUTE_CHIP, ATTRIBUTE_CHIP, // ALL
ATTRIBUTE_UARCH, ATTRIBUTE_UARCH, // ALL
ATTRIBUTE_TECHNOLOGY, ATTRIBUTE_TECHNOLOGY, // ALL
ATTRIBUTE_GT, ATTRIBUTE_FREQUENCY, // ALL
ATTRIBUTE_FREQUENCY, ATTRIBUTE_PEAK, // ALL
ATTRIBUTE_STREAMINGMP, ATTRIBUTE_COMPUTE_UNITS, // HSA
ATTRIBUTE_CORESPERMP, ATTRIBUTE_MATRIX_CORES, // HSA
ATTRIBUTE_CUDA_CORES, ATTRIBUTE_XCDS, // HSA
ATTRIBUTE_TENSOR_CORES, ATTRIBUTE_LDS_SIZE, // HSA
ATTRIBUTE_EUS, ATTRIBUTE_STREAMINGMP, // CUDA
ATTRIBUTE_L2, ATTRIBUTE_CORESPERMP, // CUDA
ATTRIBUTE_MEMORY, ATTRIBUTE_CUDA_CORES, // CUDA
ATTRIBUTE_MEMORY_FREQ, ATTRIBUTE_TENSOR_CORES, // CUDA
ATTRIBUTE_BUS_WIDTH, ATTRIBUTE_L2, // CUDA
ATTRIBUTE_PEAK, ATTRIBUTE_MEMORY, // CUDA,HSA
ATTRIBUTE_PEAK_TENSOR, ATTRIBUTE_MEMORY_FREQ, // CUDA
ATTRIBUTE_BUS_WIDTH, // CUDA,HSA
ATTRIBUTE_PEAK_TENSOR, // CUDA
ATTRIBUTE_EUS, // Intel
ATTRIBUTE_GT, // Intel
}; };
static const char* ATTRIBUTE_FIELDS [] = { static const AttributeField ATTRIBUTE_INFO[] = {
"Name:", { ATTRIBUTE_NAME, "Name:", "Name:" },
"GPU processor:", { ATTRIBUTE_CHIP, "GPU processor:", "Processor:" },
"Microarchitecture:", { ATTRIBUTE_UARCH, "Microarchitecture:", "uArch:" },
"Technology:", { ATTRIBUTE_TECHNOLOGY, "Technology:", "Technology:" },
"Graphics Tier:", { ATTRIBUTE_FREQUENCY, "Max Frequency:", "Max Freq.:" },
"Max Frequency:", { ATTRIBUTE_PEAK, "Peak Performance:", "Peak Perf.:" },
"SMs:", { ATTRIBUTE_COMPUTE_UNITS, "Compute Units (CUs):", "CUs" },
"Cores/SM:", { ATTRIBUTE_MATRIX_CORES, "Matrix Cores:", "Matrix Cores:" },
"CUDA Cores:", { ATTRIBUTE_XCDS, "XCDs:", "XCDs" },
"Tensor Cores:", { ATTRIBUTE_LDS_SIZE, "LDS size:", "LDS:" },
"Execution Units:", { ATTRIBUTE_STREAMINGMP, "SMs:", "SMs:" },
"L2 Size:", { ATTRIBUTE_CORESPERMP, "Cores/SM:", "Cores/SM:" },
"Memory:", { ATTRIBUTE_CUDA_CORES, "CUDA Cores:", "CUDA Cores:" },
"Memory frequency:", { ATTRIBUTE_TENSOR_CORES, "Tensor Cores:", "Tensor Cores:" },
"Bus width:", { ATTRIBUTE_L2, "L2 Size:", "L2 Size:" },
"Peak Performance:", { ATTRIBUTE_MEMORY, "Memory:", "Memory:" },
"Peak Performance (MMA):", { ATTRIBUTE_MEMORY_FREQ, "Memory frequency:", "Memory freq.:" },
}; { ATTRIBUTE_BUS_WIDTH, "Bus width:", "Bus width:" },
{ ATTRIBUTE_PEAK_TENSOR, "Peak Performance (MMA):", "Peak Perf.(MMA):" },
static const char* ATTRIBUTE_FIELDS_SHORT [] = { { ATTRIBUTE_EUS, "Execution Units:", "EUs:" },
"Name:", { ATTRIBUTE_GT, "Graphics Tier:", "GT:" },
"Processor:",
"uArch:",
"Technology:",
"GT:",
"Max Freq.:",
"SMs:",
"Cores/SM:",
"CUDA Cores:",
"Tensor Cores:",
"EUs:",
"L2 Size:",
"Memory:",
"Memory freq.:",
"Bus width:",
"Peak Perf.:",
"Peak Perf.(MMA):",
}; };
struct terminal { struct terminal {
@@ -207,8 +203,6 @@ bool ascii_fits_screen(int termw, struct ascii_logo logo, int lf) {
void replace_bgbyfg_color(struct ascii_logo* logo) { void replace_bgbyfg_color(struct ascii_logo* logo) {
// Replace background by foreground color // Replace background by foreground color
for(int i=0; i < 2; i++) { for(int i=0; i < 2; i++) {
if(logo->color_ascii[i] == NULL) break;
if(strcmp(logo->color_ascii[i], C_BG_BLACK) == 0) strcpy(logo->color_ascii[i], C_FG_BLACK); if(strcmp(logo->color_ascii[i], C_BG_BLACK) == 0) strcpy(logo->color_ascii[i], C_FG_BLACK);
else if(strcmp(logo->color_ascii[i], C_BG_RED) == 0) strcpy(logo->color_ascii[i], C_FG_RED); else if(strcmp(logo->color_ascii[i], C_BG_RED) == 0) strcpy(logo->color_ascii[i], C_FG_RED);
else if(strcmp(logo->color_ascii[i], C_BG_GREEN) == 0) strcpy(logo->color_ascii[i], C_FG_GREEN); else if(strcmp(logo->color_ascii[i], C_BG_GREEN) == 0) strcpy(logo->color_ascii[i], C_FG_GREEN);
@@ -276,13 +270,14 @@ void choose_ascii_art(struct ascii* art, struct color** cs, struct terminal* ter
} }
} }
uint32_t longest_attribute_length(struct ascii* art, const char** attribute_fields) { uint32_t longest_attribute_length(struct ascii* art, bool use_short) {
uint32_t max = 0; uint32_t max = 0;
uint64_t len = 0; uint64_t len = 0;
for(uint32_t i=0; i < art->n_attributes_set; i++) { for(uint32_t i=0; i < art->n_attributes_set; i++) {
if(art->attributes[i]->value != NULL) { if(art->attributes[i]->value != NULL) {
len = strlen(attribute_fields[art->attributes[i]->type]); const char* str = use_short ? ATTRIBUTE_INFO[art->attributes[i]->type].shortname : ATTRIBUTE_INFO[art->attributes[i]->type].name;
len = strlen(str);
if(len > max) max = len; if(len > max) max = len;
} }
} }
@@ -306,7 +301,7 @@ uint32_t longest_field_length(struct ascii* art, int la) {
return max; return max;
} }
void print_ascii_generic(struct ascii* art, uint32_t la, int32_t text_space, const char** attribute_fields) { void print_ascii_generic(struct ascii* art, uint32_t la, int32_t text_space, bool use_short) {
struct ascii_logo* logo = art->art; struct ascii_logo* logo = art->art;
int attr_to_print = 0; int attr_to_print = 0;
int attr_type; int attr_type;
@@ -350,11 +345,13 @@ void print_ascii_generic(struct ascii* art, uint32_t la, int32_t text_space, con
attr_value = art->attributes[attr_to_print]->value; attr_value = art->attributes[attr_to_print]->value;
attr_to_print++; attr_to_print++;
space_right = 1 + (la - strlen(attribute_fields[attr_type])); const char* attr_str = use_short ? ATTRIBUTE_INFO[attr_type].shortname : ATTRIBUTE_INFO[attr_type].name;
space_right = 1 + (la - strlen(attr_str));
current_space = max(0, text_space); current_space = max(0, text_space);
printf("%s%.*s%s", logo->color_text[0], current_space, attribute_fields[attr_type], art->reset); printf("%s%.*s%s", logo->color_text[0], current_space, attr_str, art->reset);
current_space = max(0, current_space - (int) strlen(attribute_fields[attr_type])); current_space = max(0, current_space - (int) strlen(attr_str));
printf("%*s", min(current_space, space_right), ""); printf("%*s", min(current_space, space_right), "");
current_space = max(0, current_space - min(current_space, space_right)); current_space = max(0, current_space - min(current_space, space_right));
printf("%s%.*s%s", logo->color_text[1], current_space, attr_value, art->reset); printf("%s%.*s%s", logo->color_text[1], current_space, attr_value, art->reset);
@@ -388,19 +385,19 @@ bool print_gpufetch_intel(struct gpu_info* gpu, STYLE s, struct color** cs, stru
setAttribute(art, ATTRIBUTE_EUS, eus); setAttribute(art, ATTRIBUTE_EUS, eus);
setAttribute(art, ATTRIBUTE_PEAK, pp); setAttribute(art, ATTRIBUTE_PEAK, pp);
const char** attribute_fields = ATTRIBUTE_FIELDS; bool use_short = false;
uint32_t longest_attribute = longest_attribute_length(art, attribute_fields); uint32_t longest_attribute = longest_attribute_length(art, use_short);
uint32_t longest_field = longest_field_length(art, longest_attribute); uint32_t longest_field = longest_field_length(art, longest_attribute);
choose_ascii_art(art, cs, term, longest_field); choose_ascii_art(art, cs, term, longest_field);
if(!ascii_fits_screen(term->w, *art->art, longest_field)) { if(!ascii_fits_screen(term->w, *art->art, longest_field)) {
// Despite of choosing the smallest logo, the output does not fit // Despite of choosing the smallest logo, the output does not fit
// Choose the shorter field names and recalculate the longest attr // Choose the shorter field names and recalculate the longest attr
attribute_fields = ATTRIBUTE_FIELDS_SHORT; use_short = true;
longest_attribute = longest_attribute_length(art, attribute_fields); longest_attribute = longest_attribute_length(art, use_short);
} }
print_ascii_generic(art, longest_attribute, term->w - art->art->width, attribute_fields); print_ascii_generic(art, longest_attribute, term->w - art->art->width, use_short);
return true; return true;
} }
@@ -457,19 +454,19 @@ bool print_gpufetch_cuda(struct gpu_info* gpu, STYLE s, struct color** cs, struc
setAttribute(art, ATTRIBUTE_PEAK_TENSOR, pp_tensor); setAttribute(art, ATTRIBUTE_PEAK_TENSOR, pp_tensor);
} }
const char** attribute_fields = ATTRIBUTE_FIELDS; bool use_short = false;
uint32_t longest_attribute = longest_attribute_length(art, attribute_fields); uint32_t longest_attribute = longest_attribute_length(art, use_short);
uint32_t longest_field = longest_field_length(art, longest_attribute); uint32_t longest_field = longest_field_length(art, longest_attribute);
choose_ascii_art(art, cs, term, longest_field); choose_ascii_art(art, cs, term, longest_field);
if(!ascii_fits_screen(term->w, *art->art, longest_field)) { if(!ascii_fits_screen(term->w, *art->art, longest_field)) {
// Despite of choosing the smallest logo, the output does not fit // Despite of choosing the smallest logo, the output does not fit
// Choose the shorter field names and recalculate the longest attr // Choose the shorter field names and recalculate the longest attr
attribute_fields = ATTRIBUTE_FIELDS_SHORT; use_short = true;
longest_attribute = longest_attribute_length(art, attribute_fields); longest_attribute = longest_attribute_length(art, use_short);
} }
print_ascii_generic(art, longest_attribute, term->w - art->art->width, attribute_fields); print_ascii_generic(art, longest_attribute, term->w - art->art->width, use_short);
free(manufacturing_process); free(manufacturing_process);
free(max_frequency); free(max_frequency);
@@ -494,8 +491,13 @@ bool print_gpufetch_amd(struct gpu_info* gpu, STYLE s, struct color** cs, struct
char* gpu_chip = get_str_chip(gpu->arch); char* gpu_chip = get_str_chip(gpu->arch);
char* uarch = get_str_uarch_hsa(gpu->arch); char* uarch = get_str_uarch_hsa(gpu->arch);
char* manufacturing_process = get_str_process(gpu->arch); char* manufacturing_process = get_str_process(gpu->arch);
char* sms = get_str_cu(gpu); char* cus = get_str_cu(gpu);
char* matrix_cores = get_str_matrix_cores(gpu);
char* xcds = get_str_xcds(gpu);
char* max_frequency = get_str_freq(gpu); char* max_frequency = get_str_freq(gpu);
char* bus_width = get_str_bus_width(gpu);
char* mem_size = get_str_memory_size(gpu);
char* lds_size = get_str_lds_size(gpu);
setAttribute(art, ATTRIBUTE_NAME, gpu_name); setAttribute(art, ATTRIBUTE_NAME, gpu_name);
if (gpu_chip != NULL) { if (gpu_chip != NULL) {
@@ -504,21 +506,28 @@ bool print_gpufetch_amd(struct gpu_info* gpu, STYLE s, struct color** cs, struct
setAttribute(art, ATTRIBUTE_UARCH, uarch); setAttribute(art, ATTRIBUTE_UARCH, uarch);
setAttribute(art, ATTRIBUTE_TECHNOLOGY, manufacturing_process); setAttribute(art, ATTRIBUTE_TECHNOLOGY, manufacturing_process);
setAttribute(art, ATTRIBUTE_FREQUENCY, max_frequency); setAttribute(art, ATTRIBUTE_FREQUENCY, max_frequency);
setAttribute(art, ATTRIBUTE_STREAMINGMP, sms); setAttribute(art, ATTRIBUTE_COMPUTE_UNITS, cus);
setAttribute(art, ATTRIBUTE_MATRIX_CORES, matrix_cores);
if (xcds != NULL) {
setAttribute(art, ATTRIBUTE_XCDS, xcds);
}
setAttribute(art, ATTRIBUTE_LDS_SIZE, lds_size);
setAttribute(art, ATTRIBUTE_MEMORY, mem_size);
setAttribute(art, ATTRIBUTE_BUS_WIDTH, bus_width);
const char** attribute_fields = ATTRIBUTE_FIELDS; bool use_short = false;
uint32_t longest_attribute = longest_attribute_length(art, attribute_fields); uint32_t longest_attribute = longest_attribute_length(art, use_short);
uint32_t longest_field = longest_field_length(art, longest_attribute); uint32_t longest_field = longest_field_length(art, longest_attribute);
choose_ascii_art(art, cs, term, longest_field); choose_ascii_art(art, cs, term, longest_field);
if(!ascii_fits_screen(term->w, *art->art, longest_field)) { if(!ascii_fits_screen(term->w, *art->art, longest_field)) {
// Despite of choosing the smallest logo, the output does not fit // Despite of choosing the smallest logo, the output does not fit
// Choose the shorter field names and recalculate the longest attr // Choose the shorter field names and recalculate the longest attr
attribute_fields = ATTRIBUTE_FIELDS_SHORT; use_short = true;
longest_attribute = longest_attribute_length(art, attribute_fields); longest_attribute = longest_attribute_length(art, use_short);
} }
print_ascii_generic(art, longest_attribute, term->w - art->art->width, attribute_fields); print_ascii_generic(art, longest_attribute, term->w - art->art->width, use_short);
free(art->attributes); free(art->attributes);
free(art); free(art);

View File

@@ -1,3 +1,6 @@
// patched cuda.cpp for cuda13 by cloudy
#include <cuda_runtime.h> #include <cuda_runtime.h>
#include <cstring> #include <cstring>
#include <cstdlib> #include <cstdlib>
@@ -5,8 +8,8 @@
#include "cuda.hpp" #include "cuda.hpp"
#include "uarch.hpp" #include "uarch.hpp"
#include "pci.hpp"
#include "gpufetch_helper_cuda.hpp" #include "gpufetch_helper_cuda.hpp"
#include "../common/pci.hpp"
#include "../common/global.hpp" #include "../common/global.hpp"
#include "../common/uarch.hpp" #include "../common/uarch.hpp"
@@ -14,29 +17,22 @@ bool print_gpu_cuda(struct gpu_info* gpu) {
char* cc = get_str_cc(gpu->arch); char* cc = get_str_cc(gpu->arch);
printf("%s (Compute Capability %s)\n", gpu->name, cc); printf("%s (Compute Capability %s)\n", gpu->name, cc);
free(cc); free(cc);
return true; return true;
} }
struct cache* get_cache_info(cudaDeviceProp prop) { struct cache* get_cache_info(cudaDeviceProp prop) {
struct cache* cach = (struct cache*) emalloc(sizeof(struct cache)); struct cache* cach = (struct cache*) emalloc(sizeof(struct cache));
cach->L2 = (struct cach*) emalloc(sizeof(struct cach)); cach->L2 = (struct cach*) emalloc(sizeof(struct cach));
cach->L2->size = prop.l2CacheSize; cach->L2->size = prop.l2CacheSize;
cach->L2->num_caches = 1; cach->L2->num_caches = 1;
cach->L2->exists = true; cach->L2->exists = true;
return cach; return cach;
} }
int get_tensor_cores(struct uarch* arch, int sm, int major) { int get_tensor_cores(struct uarch* arch, int sm, int major) {
if(major == 7) { if(major == 7) {
// TU116 does not have tensor cores! if (is_chip_TU116(arch))
// https://www.anandtech.com/show/13973/nvidia-gtx-1660-ti-review-feat-evga-xc-gaming/2
if(arch->chip == CHIP_TU116 || arch->chip == CHIP_TU116BM ||
arch->chip == CHIP_TU116GL || arch->chip == CHIP_TU116M) {
return 0; return 0;
}
return sm * 8; return sm * 8;
} }
else if(major == 8) return sm * 4; else if(major == 8) return sm * 4;
@@ -45,57 +41,57 @@ int get_tensor_cores(struct uarch* arch, int sm, int major) {
struct topology_c* get_topology_info(struct uarch* arch, cudaDeviceProp prop) { struct topology_c* get_topology_info(struct uarch* arch, cudaDeviceProp prop) {
struct topology_c* topo = (struct topology_c*) emalloc(sizeof(struct topology_c)); struct topology_c* topo = (struct topology_c*) emalloc(sizeof(struct topology_c));
topo->streaming_mp = prop.multiProcessorCount; topo->streaming_mp = prop.multiProcessorCount;
topo->cores_per_mp = _ConvertSMVer2Cores(prop.major, prop.minor); topo->cores_per_mp = _ConvertSMVer2Cores(prop.major, prop.minor);
topo->cuda_cores = topo->streaming_mp * topo->cores_per_mp; topo->cuda_cores = topo->streaming_mp * topo->cores_per_mp;
topo->tensor_cores = get_tensor_cores(arch, topo->streaming_mp, prop.major); topo->tensor_cores = get_tensor_cores(arch, topo->streaming_mp, prop.major);
return topo; return topo;
} }
int32_t guess_clock_multipilier(struct gpu_info* gpu, struct memory* mem) { int32_t guess_clock_multipilier(struct gpu_info* gpu, struct memory* mem) {
// Guess clock multiplier
int32_t clk_mul = 1; int32_t clk_mul = 1;
int32_t clk8 = abs((mem->freq/8) - gpu->freq); int32_t clk8 = abs((mem->freq/8) - gpu->freq);
int32_t clk4 = abs((mem->freq/4) - gpu->freq); int32_t clk4 = abs((mem->freq/4) - gpu->freq);
int32_t clk2 = abs((mem->freq/2) - gpu->freq); int32_t clk2 = abs((mem->freq/2) - gpu->freq);
int32_t clk1 = abs((mem->freq/1) - gpu->freq); int32_t clk1 = abs((mem->freq/1) - gpu->freq);
int32_t min = mem->freq; int32_t min = mem->freq;
if(clkm_possible_for_uarch(8, gpu->arch) && min > clk8) { clk_mul = 8; min = clk8; } if(clkm_possible_for_uarch(8, gpu->arch) && min > clk8) { clk_mul = 8; min = clk8; }
if(clkm_possible_for_uarch(4, gpu->arch) && min > clk4) { clk_mul = 4; min = clk4; } if(clkm_possible_for_uarch(4, gpu->arch) && min > clk4) { clk_mul = 4; min = clk4; }
if(clkm_possible_for_uarch(2, gpu->arch) && min > clk2) { clk_mul = 2; min = clk2; } if(clkm_possible_for_uarch(2, gpu->arch) && min > clk2) { clk_mul = 2; min = clk2; }
if(clkm_possible_for_uarch(1, gpu->arch) && min > clk1) { clk_mul = 1; min = clk1; } if(clkm_possible_for_uarch(1, gpu->arch) && min > clk1) { clk_mul = 1; min = clk1; }
return clk_mul; return clk_mul;
} }
struct memory* get_memory_info(struct gpu_info* gpu, cudaDeviceProp prop) { struct memory* get_memory_info(struct gpu_info* gpu, cudaDeviceProp prop) {
struct memory* mem = (struct memory*) emalloc(sizeof(struct memory)); struct memory* mem = (struct memory*) emalloc(sizeof(struct memory));
int val = 0;
mem->size_bytes = (unsigned long long) prop.totalGlobalMem; mem->size_bytes = (unsigned long long) prop.totalGlobalMem;
mem->freq = prop.memoryClockRate * 0.001f;
if (cudaDeviceGetAttribute(&val, cudaDevAttrMemoryClockRate, gpu->idx) == cudaSuccess) {
if (val > 1000000)
mem->freq = (float)val / 1000000.0f;
else
mem->freq = (float)val * 0.001f;
} else {
mem->freq = 0.0f;
}
mem->bus_width = prop.memoryBusWidth; mem->bus_width = prop.memoryBusWidth;
mem->clk_mul = guess_clock_multipilier(gpu, mem); mem->clk_mul = guess_clock_multipilier(gpu, mem);
mem->type = guess_memtype_from_cmul_and_uarch(mem->clk_mul, gpu->arch); mem->type = guess_memtype_from_cmul_and_uarch(mem->clk_mul, gpu->arch);
// Fix frequency returned from CUDA to show real frequency if (mem->clk_mul > 0)
mem->freq = mem->freq / mem->clk_mul; mem->freq = mem->freq / mem->clk_mul;
return mem; return mem;
} }
// Compute peak performance when using CUDA cores
int64_t get_peak_performance_cuda(struct gpu_info* gpu) { int64_t get_peak_performance_cuda(struct gpu_info* gpu) {
return gpu->freq * 1000000 * gpu->topo_c->cuda_cores * 2; return gpu->freq * 1000000 * gpu->topo_c->cuda_cores * 2;
} }
// Compute peak performance when using tensor cores
int64_t get_peak_performance_tcu(cudaDeviceProp prop, struct gpu_info* gpu) { int64_t get_peak_performance_tcu(cudaDeviceProp prop, struct gpu_info* gpu) {
// Volta / Turing tensor cores performs 4x4x4 FP16 matrix multiplication
// Ampere tensor cores performs 8x4x8 FP16 matrix multiplicacion
if(prop.major == 7) return gpu->freq * 1000000 * 4 * 4 * 4 * 2 * gpu->topo_c->tensor_cores; if(prop.major == 7) return gpu->freq * 1000000 * 4 * 4 * 4 * 2 * gpu->topo_c->tensor_cores;
else if(prop.major == 8) return gpu->freq * 1000000 * 8 * 4 * 8 * 2 * gpu->topo_c->tensor_cores; else if(prop.major == 8) return gpu->freq * 1000000 * 8 * 4 * 8 * 2 * gpu->topo_c->tensor_cores;
else return 0; else return 0;
@@ -117,8 +113,7 @@ struct gpu_info* get_gpu_info_cuda(struct pci_dev *devices, int gpu_idx) {
} }
int num_gpus = -1; int num_gpus = -1;
cudaError_t err = cudaSuccess; cudaError_t err = cudaGetDeviceCount(&num_gpus);
err = cudaGetDeviceCount(&num_gpus);
if(gpu_idx == 0) { if(gpu_idx == 0) {
printf("\r%*c\r", (int) strlen(CUDA_DRIVER_START_WARNING), ' '); printf("\r%*c\r", (int) strlen(CUDA_DRIVER_START_WARNING), ' ');
@@ -136,7 +131,6 @@ struct gpu_info* get_gpu_info_cuda(struct pci_dev *devices, int gpu_idx) {
} }
if(gpu->idx+1 > num_gpus) { if(gpu->idx+1 > num_gpus) {
// Master is trying to query an invalid GPU
return NULL; return NULL;
} }
@@ -146,15 +140,25 @@ struct gpu_info* get_gpu_info_cuda(struct pci_dev *devices, int gpu_idx) {
return NULL; return NULL;
} }
gpu->freq = deviceProp.clockRate * 1e-3f; int core_clk = 0;
if (cudaDeviceGetAttribute(&core_clk, cudaDevAttrClockRate, gpu->idx) == cudaSuccess) {
if (core_clk > 1000000)
gpu->freq = core_clk / 1000000.0f;
else
gpu->freq = core_clk * 0.001f;
} else {
gpu->freq = 0.0f;
}
gpu->vendor = GPU_VENDOR_NVIDIA; gpu->vendor = GPU_VENDOR_NVIDIA;
gpu->name = (char *) emalloc(sizeof(char) * (strlen(deviceProp.name) + 1)); gpu->name = (char *) emalloc(strlen(deviceProp.name) + 1);
strcpy(gpu->name, deviceProp.name); strcpy(gpu->name, deviceProp.name);
if((gpu->pci = get_pci_from_pciutils(devices, PCI_VENDOR_ID_NVIDIA, gpu_idx)) == NULL) { if((gpu->pci = get_pci_from_pciutils(devices, PCI_VENDOR_ID_NVIDIA, gpu_idx)) == NULL) {
printErr("Unable to find a valid device for vendor id 0x%.4X using pciutils", PCI_VENDOR_ID_NVIDIA); printErr("Unable to find a valid device for vendor id 0x%.4X using pciutils", PCI_VENDOR_ID_NVIDIA);
return NULL; return NULL;
} }
gpu->arch = get_uarch_from_cuda(gpu); gpu->arch = get_uarch_from_cuda(gpu);
gpu->cach = get_cache_info(deviceProp); gpu->cach = get_cache_info(deviceProp);
gpu->mem = get_memory_info(gpu, deviceProp); gpu->mem = get_memory_info(gpu, deviceProp);
@@ -165,19 +169,7 @@ struct gpu_info* get_gpu_info_cuda(struct pci_dev *devices, int gpu_idx) {
return gpu; return gpu;
} }
char* get_str_sm(struct gpu_info* gpu) { char* get_str_sm(struct gpu_info* gpu) { return get_str_generic(gpu->topo_c->streaming_mp); }
return get_str_generic(gpu->topo_c->streaming_mp); char* get_str_cores_sm(struct gpu_info* gpu) { return get_str_generic(gpu->topo_c->cores_per_mp); }
} char* get_str_cuda_cores(struct gpu_info* gpu) { return get_str_generic(gpu->topo_c->cuda_cores); }
char* get_str_tensor_cores(struct gpu_info* gpu) { return get_str_generic(gpu->topo_c->tensor_cores); }
char* get_str_cores_sm(struct gpu_info* gpu) {
return get_str_generic(gpu->topo_c->cores_per_mp);
}
char* get_str_cuda_cores(struct gpu_info* gpu) {
return get_str_generic(gpu->topo_c->cuda_cores);
}
char* get_str_tensor_cores(struct gpu_info* gpu) {
return get_str_generic(gpu->topo_c->tensor_cores);
}

View File

@@ -8,6 +8,7 @@
#include "../common/uarch.hpp" #include "../common/uarch.hpp"
#include "../common/global.hpp" #include "../common/global.hpp"
#include "../common/gpu.hpp" #include "../common/gpu.hpp"
#include "pci.hpp"
#include "chips.hpp" #include "chips.hpp"
// Any clock multiplier // Any clock multiplier
@@ -361,3 +362,8 @@ void free_uarch_struct(struct uarch* arch) {
free(arch->chip_str); free(arch->chip_str);
free(arch); free(arch);
} }
bool is_chip_TU116(struct uarch* arch) {
return arch->chip == CHIP_TU116 || arch->chip == CHIP_TU116BM ||
arch->chip == CHIP_TU116GL || arch->chip == CHIP_TU116M;
}

View File

@@ -13,5 +13,6 @@ char* get_str_cc(struct uarch* arch);
char* get_str_chip(struct uarch* arch); char* get_str_chip(struct uarch* arch);
char* get_str_process(struct uarch* arch); char* get_str_process(struct uarch* arch);
void free_uarch_struct(struct uarch* arch); void free_uarch_struct(struct uarch* arch);
bool is_chip_TU116(struct uarch* arch);
#endif #endif

View File

@@ -13,7 +13,6 @@
#include "hsa.hpp" #include "hsa.hpp"
#include "uarch.hpp" #include "uarch.hpp"
#include "../common/pci.hpp"
#include "../common/global.hpp" #include "../common/global.hpp"
#include "../common/uarch.hpp" #include "../common/uarch.hpp"
@@ -23,7 +22,16 @@ struct agent_info {
char vendor_name[64]; char vendor_name[64];
char device_mkt_name[64]; char device_mkt_name[64];
uint32_t max_clock_freq; uint32_t max_clock_freq;
// Memory
uint32_t bus_width;
uint32_t lds_size;
uint64_t global_size;
// Topology
uint32_t compute_unit; uint32_t compute_unit;
uint32_t num_shader_engines;
uint32_t simds_per_cu;
uint32_t num_xcc; // Accelerator Complex Dies (XCDs)
uint32_t matrix_cores; // Cores with WMMA/MFMA capabilities
}; };
#define RET_IF_HSA_ERR(err) { \ #define RET_IF_HSA_ERR(err) { \
@@ -41,6 +49,51 @@ struct agent_info {
} \ } \
} }
hsa_status_t memory_pool_callback(hsa_amd_memory_pool_t pool, void* data) {
struct agent_info* info = reinterpret_cast<struct agent_info *>(data);
hsa_amd_segment_t segment;
hsa_status_t err = hsa_amd_memory_pool_get_info(pool, HSA_AMD_MEMORY_POOL_INFO_SEGMENT, &segment);
RET_IF_HSA_ERR(err);
if (segment == HSA_AMD_SEGMENT_GROUP) {
// LDS memory
// We want to make sure that this memory pool is not repeated.
if (info->lds_size != 0) {
printErr("Found HSA_AMD_SEGMENT_GROUP twice!");
return HSA_STATUS_ERROR;
}
uint32_t size = 0;
err = hsa_amd_memory_pool_get_info(pool, HSA_AMD_MEMORY_POOL_INFO_SIZE, &size);
RET_IF_HSA_ERR(err);
info->lds_size = size;
}
else if (segment == HSA_AMD_SEGMENT_GLOBAL) {
// Global memory
uint32_t global_flags = 0;
err = hsa_amd_memory_pool_get_info(pool, HSA_AMD_MEMORY_POOL_INFO_GLOBAL_FLAGS, &global_flags);
RET_IF_HSA_ERR(err);
if (global_flags & HSA_AMD_MEMORY_POOL_GLOBAL_FLAG_EXTENDED_SCOPE_FINE_GRAINED) {
if (info->global_size != 0) {
printErr("Found HSA_AMD_MEMORY_POOL_GLOBAL_FLAG_EXTENDED_SCOPE_FINE_GRAINED twice!");
return HSA_STATUS_ERROR;
}
uint64_t size = 0;
err = hsa_amd_memory_pool_get_info(pool, HSA_AMD_MEMORY_POOL_INFO_SIZE, &size);
RET_IF_HSA_ERR(err);
info->global_size = size;
}
}
return HSA_STATUS_SUCCESS;
}
hsa_status_t agent_callback(hsa_agent_t agent, void *data) { hsa_status_t agent_callback(hsa_agent_t agent, void *data) {
struct agent_info* info = reinterpret_cast<struct agent_info *>(data); struct agent_info* info = reinterpret_cast<struct agent_info *>(data);
@@ -63,6 +116,26 @@ hsa_status_t agent_callback(hsa_agent_t agent, void *data) {
err = hsa_agent_get_info(agent, (hsa_agent_info_t) HSA_AMD_AGENT_INFO_COMPUTE_UNIT_COUNT, &info->compute_unit); err = hsa_agent_get_info(agent, (hsa_agent_info_t) HSA_AMD_AGENT_INFO_COMPUTE_UNIT_COUNT, &info->compute_unit);
RET_IF_HSA_ERR(err); RET_IF_HSA_ERR(err);
// According to the documentation, this is deprecated. But what should I be using then?
err = hsa_agent_get_info(agent, (hsa_agent_info_t) HSA_AMD_AGENT_INFO_MEMORY_WIDTH, &info->bus_width);
RET_IF_HSA_ERR(err);
err = hsa_agent_get_info(agent, (hsa_agent_info_t) HSA_AMD_AGENT_INFO_NUM_SHADER_ENGINES, &info->num_shader_engines);
RET_IF_HSA_ERR(err);
err = hsa_agent_get_info(agent, (hsa_agent_info_t) HSA_AMD_AGENT_INFO_NUM_SIMDS_PER_CU, &info->simds_per_cu);
RET_IF_HSA_ERR(err);
err = hsa_agent_get_info(agent, (hsa_agent_info_t) HSA_AMD_AGENT_INFO_NUM_XCC, &info->num_xcc);
RET_IF_HSA_ERR(err);
// We will check against zero to see if it was set beforehand.
info->global_size = 0;
info->lds_size = 0;
// This will fill global_size and lds_size.
err = hsa_amd_agent_iterate_memory_pools(agent, memory_pool_callback, data);
RET_IF_HSA_ERR(err);
} }
return HSA_STATUS_SUCCESS; return HSA_STATUS_SUCCESS;
@@ -72,11 +145,27 @@ struct topology_h* get_topology_info(struct agent_info info) {
struct topology_h* topo = (struct topology_h*) emalloc(sizeof(struct topology_h)); struct topology_h* topo = (struct topology_h*) emalloc(sizeof(struct topology_h));
topo->compute_units = info.compute_unit; topo->compute_units = info.compute_unit;
topo->num_shader_engines = info.num_shader_engines; // not printed at the moment
topo->simds_per_cu = info.simds_per_cu; // not printed at the moment
topo->num_xcc = info.num_xcc;
// Old GPUs (GCN I guess) might not have matrix cores.
// Not sure what would happen here?
topo->matrix_cores = topo->compute_units * topo->simds_per_cu;
return topo; return topo;
} }
struct gpu_info* get_gpu_info_hsa(struct pci_dev *devices, int gpu_idx) { struct memory* get_memory_info(struct gpu_info* gpu, struct agent_info info) {
struct memory* mem = (struct memory*) emalloc(sizeof(struct memory));
mem->bus_width = info.bus_width;
mem->lds_size = info.lds_size;
mem->size_bytes = info.global_size;
return mem;
}
struct gpu_info* get_gpu_info_hsa(int gpu_idx) {
struct gpu_info* gpu = (struct gpu_info*) emalloc(sizeof(struct gpu_info)); struct gpu_info* gpu = (struct gpu_info*) emalloc(sizeof(struct gpu_info));
gpu->pci = NULL; gpu->pci = NULL;
gpu->idx = gpu_idx; gpu->idx = gpu_idx;
@@ -119,6 +208,7 @@ struct gpu_info* get_gpu_info_hsa(struct pci_dev *devices, int gpu_idx) {
gpu->name = (char *) emalloc(sizeof(char) * (strlen(info.device_mkt_name) + 1)); gpu->name = (char *) emalloc(sizeof(char) * (strlen(info.device_mkt_name) + 1));
strcpy(gpu->name, info.device_mkt_name); strcpy(gpu->name, info.device_mkt_name);
gpu->arch = get_uarch_from_hsa(gpu, info.gpu_name); gpu->arch = get_uarch_from_hsa(gpu, info.gpu_name);
gpu->mem = get_memory_info(gpu, info);
if (gpu->arch == NULL) { if (gpu->arch == NULL) {
return NULL; return NULL;
@@ -136,3 +226,17 @@ struct gpu_info* get_gpu_info_hsa(struct pci_dev *devices, int gpu_idx) {
char* get_str_cu(struct gpu_info* gpu) { char* get_str_cu(struct gpu_info* gpu) {
return get_str_generic(gpu->topo_h->compute_units); return get_str_generic(gpu->topo_h->compute_units);
} }
char* get_str_xcds(struct gpu_info* gpu) {
// If there is a single XCD, then we don't want to
// print it.
if (gpu->topo_h->num_xcc == 1) {
return NULL;
}
return get_str_generic(gpu->topo_h->num_xcc);
}
char* get_str_matrix_cores(struct gpu_info* gpu) {
// TODO: Show XX (WMMA/MFMA)
return get_str_generic(gpu->topo_h->matrix_cores);
}

View File

@@ -3,7 +3,9 @@
#include "../common/gpu.hpp" #include "../common/gpu.hpp"
struct gpu_info* get_gpu_info_hsa(struct pci_dev *devices, int gpu_idx); struct gpu_info* get_gpu_info_hsa(int gpu_idx);
char* get_str_cu(struct gpu_info* gpu); char* get_str_cu(struct gpu_info* gpu);
char* get_str_xcds(struct gpu_info* gpu);
char* get_str_matrix_cores(struct gpu_info* gpu);
#endif #endif

View File

@@ -127,7 +127,7 @@ enum {
#define CHECK_UARCH_START if (false) {} #define CHECK_UARCH_START if (false) {}
#define CHECK_UARCH(arch, chip_, str, uarch, process) \ #define CHECK_UARCH(arch, chip_, str, uarch, process) \
else if (arch->chip == chip_) fill_uarch(arch, str, uarch, process); else if (arch->chip == chip_) fill_uarch(arch, str, uarch, process);
#define CHECK_UARCH_END else { if(arch->chip != CHIP_UNKNOWN_CUDA) printBug("map_chip_to_uarch_hsa: Unknown chip id: %d", arch->chip); fill_uarch(arch, STRING_UNKNOWN, UARCH_UNKNOWN, UNK); } #define CHECK_UARCH_END else { if(arch->chip != CHIP_UNKNOWN_HSA) printBug("map_chip_to_uarch_hsa: Unknown chip id: %d", arch->chip); fill_uarch(arch, STRING_UNKNOWN, UARCH_UNKNOWN, UNK); }
void fill_uarch(struct uarch* arch, char const *str, MICROARCH u, uint32_t process) { void fill_uarch(struct uarch* arch, char const *str, MICROARCH u, uint32_t process) {
arch->chip_str = (char *) emalloc(sizeof(char) * (strlen(str)+1)); arch->chip_str = (char *) emalloc(sizeof(char) * (strlen(str)+1));