8 Commits

Author SHA1 Message Date
Dr-Noob
bc6e3b35e9 Fixes 2025-10-16 08:25:38 +02:00
Dr-Noob
8c81067577 Fix 2025-10-16 08:22:10 +02:00
Dr-Noob
462f61ce40 Fixes 2025-10-16 08:18:02 +02:00
Dr-Noob
7c361ee879 Not sure this is what we want 2025-10-16 08:14:46 +02:00
Dr-Noob
e0b0a6913c Build pciutils only if necessary 2025-10-16 07:52:58 +02:00
Dr-Noob
8794cd322d [v0.30] Add support for building on AMD where rocm-cmake is not installed 2025-10-16 07:24:45 +02:00
Dr-Noob
5df85aea2c [v0.30] Add uarch detection to AMD GPUs
As with NVIDIA and Intel GPUs, we now detect the microarchitecture,
along with the manufacturing process and the specific chip name. We
infer all of this from the gfx name (in the code we use the term
llvm_target), although it is not yet clear that this method is
completely reliable (see the comments for more details); in the future
we might want to replace it with a better approach. Once we have the
gfx name, we *should* be able to infer the specific chip, and from the
chip we can easily infer the microarchitecture.

This commit also includes some refactorings and code improvements on
the HSA backend.
2025-10-15 08:23:28 +02:00
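As a reference, below is a minimal, self-contained sketch of the inference
chain this commit describes (gfx name -> chip -> microarchitecture and
process). The struct, the function name and the reduced lookup table are
illustrative stand-ins; the real tables live in src/hsa/uarch.cpp further
down in this diff.

// Sketch only: condensed version of the gfx-name based detection.
#include <cstdio>
#include <cstring>

struct amd_uarch { const char* chip; const char* uarch; int process_nm; };

// Tiny subset of the tables used by the HSA backend (see src/hsa/uarch.cpp).
static amd_uarch infer_from_gfx(const char* gfx) {
  if (strcmp(gfx, "gfx1030") == 0) return { "Navi 21", "RDNA2", 7 };
  if (strcmp(gfx, "gfx1100") == 0) return { "Navi 31", "RDNA3", 6 };
  if (strcmp(gfx, "gfx1201") == 0) return { "Navi 48", "RDNA4", 4 };
  return { "Unknown", "Unknown", -1 };
}

int main() {
  amd_uarch u = infer_from_gfx("gfx1100");
  printf("chip=%s uarch=%s process=%dnm\n", u.chip, u.uarch, u.process_nm);
  return 0;
}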
Dr-Noob
b29b17d14f [v0.30] Add support for AMD GPUs
Adds very basic, experimental support for AMD GPUs. The only
installation requirement is ROCm. Unlike with NVIDIA, we do not need
the CUDA equivalent (HIP) to make gpufetch work, which reduces the
installation requirements quite significantly.

Major changes:

* CMakeLists:
  - Do not compile CUDA by default (since we may now want to target
    AMD only).
  - Set build flags on the gpufetch CMake target instead of using
    set(CMAKE_CXX_FLAGS ...). This fixes a warning coming from ROCm.
  - Assume that the ROCm CMake files are installed (to be fixed
    later).

* hsa folder: AMD support is implemented via HSA (Heterogeneous System
  Architecture) calls. Therefore, HSA is added as a new backend to
  gpufetch. We only print basic information for now, so more work will
  be needed in the future to fully support AMD GPUs (see the sketch
  after this commit entry for the HSA calls involved).

NOTE: This commit will probably break AUR packages since we used to
build CUDA by default, which is no longer the case. The AUR packages
should be updated to pass -DENABLE_CUDA_BACKEND or -DENABLE_HSA_BACKEND
as appropriate.
2025-10-12 12:34:56 +02:00
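For context, the new HSA backend boils down to a handful of HSA runtime
calls, the same ones used in src/hsa/hsa.cpp below. The following is a
minimal, hedged sketch assuming only that ROCm's HSA runtime is installed;
the callback name and the build line are illustrative, not part of gpufetch:

// Sketch: enumerate HSA agents and print the gfx name of each GPU agent.
// Illustrative build line: g++ probe.cpp -I/opt/rocm/include -L/opt/rocm/lib -lhsa-runtime64
#include <cstdio>
#include <hsa/hsa.h>

static hsa_status_t on_agent(hsa_agent_t agent, void*) {
  hsa_device_type_t type;
  if (hsa_agent_get_info(agent, HSA_AGENT_INFO_DEVICE, &type) != HSA_STATUS_SUCCESS)
    return HSA_STATUS_ERROR;
  if (type == HSA_DEVICE_TYPE_GPU) {
    char name[64]; // HSA_AGENT_INFO_NAME is the gfx name, e.g. "gfx1100"
    if (hsa_agent_get_info(agent, HSA_AGENT_INFO_NAME, name) == HSA_STATUS_SUCCESS)
      printf("GPU agent: %s\n", name);
  }
  return HSA_STATUS_SUCCESS;
}

int main() {
  if (hsa_init() != HSA_STATUS_SUCCESS) return 1;
  hsa_iterate_agents(on_agent, nullptr);
  hsa_shut_down();
  return 0;
}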
18 changed files with 826 additions and 52 deletions

View File

@@ -7,17 +7,19 @@ project(gpufetch CXX)
set(SRC_DIR "src")
set(COMMON_DIR "${SRC_DIR}/common")
set(CUDA_DIR "${SRC_DIR}/cuda")
set(HSA_DIR "${SRC_DIR}/hsa")
set(INTEL_DIR "${SRC_DIR}/intel")
if(NOT DEFINED ENABLE_INTEL_BACKEND)
set(ENABLE_INTEL_BACKEND true)
# Make sure that at least one backend is enabled.
# It does not make sense for the user to have every backend disabled.
if(NOT ENABLE_INTEL_BACKEND AND NOT ENABLE_CUDA_BACKEND AND NOT ENABLE_HSA_BACKEND)
message(FATAL_ERROR "No backend was enabled! Please enable at least one backend with -DENABLE_XXX_BACKEND")
endif()
if(NOT DEFINED ENABLE_CUDA_BACKEND OR ENABLE_CUDA_BACKEND)
if(ENABLE_CUDA_BACKEND)
check_language(CUDA)
if(CMAKE_CUDA_COMPILER)
enable_language(CUDA)
set(ENABLE_CUDA_BACKEND true)
# Must link_directories early so add_executable(gpufetch ...) gets the right directories
link_directories(cuda_backend ${CMAKE_CUDA_COMPILER_TOOLKIT_ROOT}/targets/x86_64-linux/lib)
else()
@@ -25,7 +27,70 @@ if(NOT DEFINED ENABLE_CUDA_BACKEND OR ENABLE_CUDA_BACKEND)
endif()
endif()
if(ENABLE_HSA_BACKEND)
find_package(ROCmCMakeBuildTools QUIET)
if (ROCmCMakeBuildTools_FOUND)
find_package(hsa-runtime64 1.0 REQUIRED)
link_directories(hsa_backend hsa-runtime64::hsa-runtime64)
# Find HSA headers
# ROCm does not seem to provide this, which is quite frustrating.
find_path(HSA_INCLUDE_DIR
NAMES hsa/hsa.h
HINTS
$ENV{ROCM_PATH}/include # allow users to override via environment variable
/opt/rocm/include # common default path
/usr/include
/usr/local/include
)
if(NOT HSA_INCLUDE_DIR)
message(STATUS "${BoldYellow}HSA not found, disabling HSA backend${ColorReset}")
set(ENABLE_HSA_BACKEND false)
endif()
else()
# rocm-cmake is not installed, try to manually find the necessary files.
message(STATUS "${BoldYellow}Could NOT find HSA automatically, running manual search...${ColorReset}")
if (NOT DEFINED ROCM_PATH)
set(ROCM_PATH "/opt/rocm" CACHE PATH "Path to ROCm")
endif()
find_path(HSA_INCLUDE_DIR hsa/hsa.h HINTS ${ROCM_PATH}/include)
find_library(HSA_LIBRARY hsa-runtime64 HINTS ${ROCM_PATH}/lib ${ROCM_PATH}/lib64)
if (HSA_INCLUDE_DIR AND HSA_LIBRARY)
message(STATUS "${BoldYellow}HSA was found manually${ColorReset}")
else()
set(ENABLE_HSA_BACKEND false)
message(STATUS "${BoldYellow}HSA was not found manually${ColorReset}")
endif()
endif()
endif()
set(GPUFECH_COMMON
${COMMON_DIR}/main.cpp
${COMMON_DIR}/args.cpp
${COMMON_DIR}/gpu.cpp
${COMMON_DIR}/global.cpp
${COMMON_DIR}/printer.cpp
${COMMON_DIR}/master.cpp
${COMMON_DIR}/uarch.cpp
)
set(GPUFETCH_LINK_TARGETS z)
if(NOT(ENABLE_HSA_BACKEND AND NOT ENABLE_CUDA_BACKEND AND NOT ENABLE_INTEL_BACKEND))
# Look for pciutils only if not building HSA only.
#
# This has the (intended) secondary effect that, if only the HSA backend is enabled
# by the user but ROCm cannot be found, pciutils will still be compiled in
# order to show the list of GPUs available on the system, so that the user
# gets at least some feedback even if HSA is not found.
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}/cmake")
list(APPEND GPUFECH_COMMON ${COMMON_DIR}/pci.cpp ${COMMON_DIR}/sort.cpp)
list(APPEND GPUFETCH_LINK_TARGETS pci)
set(CMAKE_ENABLE_PCIUTILS ON)
find_package(PCIUTILS)
if(NOT ${PCIUTILS_FOUND})
message(STATUS "${BoldYellow}pciutils not found, downloading and building a local copy...${ColorReset}")
@@ -48,10 +113,16 @@ else()
# Needed for linking libpci in FreeBSD
link_directories(/usr/local/lib/)
endif()
endif()
add_executable(gpufetch ${COMMON_DIR}/main.cpp ${COMMON_DIR}/args.cpp ${COMMON_DIR}/gpu.cpp ${COMMON_DIR}/pci.cpp ${COMMON_DIR}/sort.cpp ${COMMON_DIR}/global.cpp ${COMMON_DIR}/printer.cpp ${COMMON_DIR}/master.cpp ${COMMON_DIR}/uarch.cpp)
set(SANITY_FLAGS "-Wfloat-equal -Wshadow -Wpointer-arith")
set(CMAKE_CXX_FLAGS "${SANITY_FLAGS} -Wall -Wextra -pedantic -fstack-protector-all -pedantic -std=c++11")
add_executable(gpufetch ${GPUFECH_COMMON})
set(SANITY_FLAGS -Wfloat-equal -Wshadow -Wpointer-arith -Wall -Wextra -pedantic -fstack-protector-all)
target_compile_features(gpufetch PRIVATE cxx_std_11)
target_compile_options(gpufetch PRIVATE ${SANITY_FLAGS})
if (CMAKE_ENABLE_PCIUTILS)
target_compile_definitions(gpufetch PUBLIC BACKEND_USE_PCI)
endif()
if(ENABLE_INTEL_BACKEND)
target_compile_definitions(gpufetch PUBLIC BACKEND_INTEL)
@@ -94,7 +165,27 @@ if(ENABLE_CUDA_BACKEND)
target_link_libraries(gpufetch cuda_backend)
endif()
target_link_libraries(gpufetch pci z)
if(ENABLE_HSA_BACKEND)
target_compile_definitions(gpufetch PUBLIC BACKEND_HSA)
add_library(hsa_backend STATIC ${HSA_DIR}/hsa.cpp ${HSA_DIR}/uarch.cpp)
if(NOT ${PCIUTILS_FOUND})
add_dependencies(hsa_backend pciutils)
endif()
target_include_directories(hsa_backend PRIVATE "${HSA_INCLUDE_DIR}")
if (HSA_LIBRARY)
target_link_libraries(hsa_backend PRIVATE ${HSA_LIBRARY})
else()
target_link_libraries(hsa_backend PRIVATE hsa-runtime64::hsa-runtime64)
endif()
target_link_libraries(gpufetch hsa_backend)
endif()
target_link_libraries(gpufetch ${GPUFETCH_LINK_TARGETS})
install(TARGETS gpufetch DESTINATION bin)
if(NOT WIN32)
@@ -115,6 +206,11 @@ if(ENABLE_CUDA_BACKEND)
else()
message(STATUS "CUDA backend: ${BoldRed}OFF${ColorReset}")
endif()
if(ENABLE_HSA_BACKEND)
message(STATUS "HSA backend: ${BoldGreen}ON${ColorReset}")
else()
message(STATUS "HSA backend: ${BoldRed}OFF${ColorReset}")
endif()
if(ENABLE_INTEL_BACKEND)
message(STATUS "Intel backend: ${BoldGreen}ON${ColorReset}")
else()

View File

@@ -33,6 +33,7 @@ gpufetch is a command-line tool written in C++ that displays the GPU information
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
- [Table of contents](#table-of-contents)
- [1. Support](#1-support)
- [2. Backends](#2-backends)
- [2.1 CUDA backend is not enabled. Why?](#21-cuda-backend-is-not-enabled-why)
@@ -49,14 +50,16 @@ gpufetch is a command-line tool written in C++ that displays the GPU information
gpufetch supports the following GPUs:
- **NVIDIA** GPUs (Compute Capability >= 2.0)
- **AMD** GPUs (Experimental) (RDNA 3.0, CDNA 3.0)
- **Intel** iGPUs (Generation >= Gen6)
Only compilation under **Linux** is supported.
## 2. Backends
gpufetch is made up of two backends:
gpufetch is made up of three backends:
- CUDA backend
- HSA backend
- Intel backend
Backends are enabled and disabled at **compile time**. When compiling gpufetch, check the CMake output to see which backends are enabled.
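For reference, the configure-time summary produced by the message() calls in
CMakeLists.txt looks roughly like this (illustrative output; the exact lines
depend on which backends are enabled and found):

-- CUDA backend: OFF
-- HSA backend: ON
-- Intel backend: ON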
@@ -111,6 +114,7 @@ By default, `gpufetch` will print the GPU logo with the system color scheme. How
By specifying a name, gpufetch will use the specific colors of each manufacturer. Valid values are:
- intel
- amd
- nvidia
```

View File

@@ -23,6 +23,8 @@ fi
# In case you want to explicitly disable a backend, you can:
# Disable CUDA backend:
# cmake -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DENABLE_CUDA_BACKEND=OFF ..
# Disable HSA backend:
# cmake -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DENABLE_HSA_BACKEND=OFF ..
# Disable Intel backend:
# cmake -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DENABLE_INTEL_BACKEND=OFF ..

View File

@@ -13,12 +13,14 @@
#define NUM_COLORS 4
#define COLOR_STR_NVIDIA "nvidia"
#define COLOR_STR_AMD "amd"
#define COLOR_STR_INTEL "intel"
// +-----------------------+-----------------------+
// | Color logo | Color text |
// | Color 1 | Color 2 | Color 1 | Color 2 |
#define COLOR_DEFAULT_NVIDIA "118,185,000:255,255,255:255,255,255:118,185,000"
#define COLOR_DEFAULT_AMD "250,250,250:250,250,250:200,200,200:255,255,255"
#define COLOR_DEFAULT_INTEL "015,125,194:230,230,230:040,150,220:230,230,230"
struct args_struct {
@@ -168,6 +170,7 @@ bool parse_color(char* optarg_str, struct color*** cs) {
bool free_ptr = true;
if(strcmp(optarg_str, COLOR_STR_NVIDIA) == 0) color_to_copy = COLOR_DEFAULT_NVIDIA;
else if(strcmp(optarg_str, COLOR_STR_AMD) == 0) color_to_copy = COLOR_DEFAULT_AMD;
else if(strcmp(optarg_str, COLOR_STR_INTEL) == 0) color_to_copy = COLOR_DEFAULT_INTEL;
else {
str_to_parse = optarg_str;

View File

@@ -34,6 +34,23 @@ $C2## ## ## ## ## ## ## ## #: :# \
$C2## ## ## ## ## ## ## ## ####### \
$C2## ## ### ## ###### ## ## ## "
#define ASCII_AMD \
"$C2 '############### \
$C2 ,############# \
$C2 .#### \
$C2 #. .#### \
$C2 :##. .#### \
$C2 :###. .#### \
$C2 #########. :## \
$C2 #######. ; \
$C1 \
$C1 ### ### ### ####### \
$C1 ## ## ##### ##### ## ## \
$C1 ## ## ### #### ### ## ## \
$C1 ######### ### ## ### ## ## \
$C1## ## ### ### ## ## \
$C1## ## ### ### ####### "
#define ASCII_INTEL \
"$C1 .#################. \
$C1 .#### ####. \
@@ -68,6 +85,27 @@ $C1 olcc::; ,:ccloMMMMMMMMM \
$C1 :......oMMMMMMMMMMMMMMMMMMMMMM \
$C1 :lllMMMMMMMMMMMMMMMMMMMMMMMMMM "
#define ASCII_AMD_L \
"$C1 \
$C1 \
$C1 \
$C1 \
$C1 \
$C1 \
$C1 @@@@ @@@ @@@ @@@@@@@@ $C2 ############ \
$C1 @@@@@@ @@@@@ @@@@@ @@@ @@@ $C2 ########## \
$C1 @@@ @@@ @@@@@@@@@@@@@ @@@ @@ $C2 # ##### \
$C1 @@@ @@@ @@@ @@@ @@@ @@@ @@ $C2 ### ##### \
$C1 @@@@@@@@@@@@ @@@ @@@ @@@ @@@ $C2######### ### \
$C1 @@@ @@@ @@@ @@@ @@@@@@@@@ $C2######## ## \
$C1 \
$C1 \
$C1 \
$C1 \
$C1 \
$C1 \
$C1 "
#define ASCII_INTEL_L \
"$C1 ###############@ \
$C1 ######@ ######@ \
@@ -95,9 +133,11 @@ typedef struct ascii_logo asciiL;
// | LOGO | W | H | REPLACE | COLORS LOGO | COLORS TEXT |
// ------------------------------------------------------------------------------------------
asciiL logo_nvidia = { ASCII_NVIDIA, 45, 19, false, {C_FG_GREEN, C_FG_WHITE}, {C_FG_WHITE, C_FG_GREEN} };
asciiL logo_amd = { ASCII_AMD, 39, 15, false, {C_FG_WHITE, C_FG_GREEN}, {C_FG_WHITE, C_FG_GREEN} };
asciiL logo_intel = { ASCII_INTEL, 48, 14, false, {C_FG_CYAN}, {C_FG_CYAN, C_FG_WHITE} };
// Long variants | ---------------------------------------------------------------------------------------|
asciiL logo_nvidia_l = { ASCII_NVIDIA_L, 50, 15, false, {C_FG_GREEN, C_FG_WHITE}, {C_FG_WHITE, C_FG_GREEN} };
asciiL logo_amd_l = { ASCII_AMD_L, 62, 19, true, {C_BG_WHITE, C_BG_WHITE}, {C_FG_CYAN, C_FG_B_WHITE} };
asciiL logo_intel_l = { ASCII_INTEL_L, 62, 19, true, {C_BG_CYAN, C_BG_WHITE}, {C_FG_CYAN, C_FG_WHITE} };
asciiL logo_unknown = { NULL, 0, 0, false, {C_NONE}, {C_NONE, C_NONE} };

View File

@@ -3,12 +3,11 @@
#include <cstdint>
#include "../cuda/pci.hpp"
#define UNKNOWN_FREQ -1
enum {
GPU_VENDOR_NVIDIA,
GPU_VENDOR_AMD,
GPU_VENDOR_INTEL
};
@@ -44,6 +43,11 @@ struct topology_c {
int32_t tensor_cores;
};
// HSA topology
struct topology_h {
int32_t compute_units;
};
// Intel topology
struct topology_i {
int32_t slices;
@@ -72,6 +76,8 @@ struct gpu_info {
struct memory* mem;
struct cache* cach;
struct topology_c* topo_c;
// HSA specific
struct topology_h* topo_h;
// Intel specific
struct topology_i* topo_i;
};

View File

@@ -8,7 +8,11 @@
#include "../cuda/cuda.hpp"
#include "../cuda/uarch.hpp"
static const char* VERSION = "0.25";
#ifdef BACKEND_USE_PCI
#include "pci.hpp"
#endif
static const char* VERSION = "0.30";
void print_help(char *argv[]) {
const char **t = args_str;
@@ -79,8 +83,12 @@ int main(int argc, char* argv[]) {
}
if(get_num_gpus_available(list) == 0) {
#ifdef BACKEND_USE_PCI
printErr("No GPU was detected! Available GPUs are:");
print_gpus_list_pci();
#else
printErr("No GPU was detected!");
#endif
printf("Please, make sure that the appropiate backend is enabled:\n");
print_enabled_backends();
printf("Visit https://github.com/Dr-Noob/gpufetch#2-backends for more information\n");

View File

@@ -1,12 +1,16 @@
#include <cstdlib>
#include <cstdio>
#ifdef BACKEND_USE_PCI
#include "pci.hpp"
#endif
#include "global.hpp"
#include "colors.hpp"
#include "master.hpp"
#include "args.hpp"
#include "../cuda/cuda.hpp"
#include "../hsa/hsa.hpp"
#include "../intel/intel.hpp"
#define MAX_GPUS 1000
@@ -18,7 +22,9 @@ struct gpu_list {
struct gpu_list* get_gpu_list() {
int idx = 0;
#ifdef BACKEND_USE_PCI
struct pci_dev *devices = get_pci_devices_from_pciutils();
#endif
struct gpu_list* list = (struct gpu_list*) malloc(sizeof(struct gpu_list));
list->num_gpus = 0;
list->gpus = (struct gpu_info**) malloc(sizeof(struct info*) * MAX_GPUS);
@@ -35,6 +41,18 @@ struct gpu_list* get_gpu_list() {
list->num_gpus += idx;
#endif
#ifdef BACKEND_HSA
bool valid = true;
while(valid) {
list->gpus[idx] = get_gpu_info_hsa(idx);
if(list->gpus[idx] != NULL) idx++;
else valid = false;
}
list->num_gpus += idx;
#endif
#ifdef BACKEND_INTEL
list->gpus[idx] = get_gpu_info_intel(devices);
if(list->gpus[idx] != NULL) list->num_gpus++;
@@ -51,6 +69,11 @@ bool print_gpus_list(struct gpu_list* list) {
print_gpu_cuda(list->gpus[i]);
#endif
}
else if(list->gpus[i]->vendor == GPU_VENDOR_AMD) {
#ifdef BACKEND_AMD
print_gpu_hsa(list->gpus[i]);
#endif
}
else if(list->gpus[i]->vendor == GPU_VENDOR_INTEL) {
#ifdef BACKEND_INTEL
print_gpu_intel(list->gpus[i]);
@@ -69,6 +92,13 @@ void print_enabled_backends() {
printf("%sOFF%s\n", C_FG_RED, C_RESET);
#endif
printf("- HSA backend: ");
#ifdef BACKEND_HSA
printf("%sON%s\n", C_FG_GREEN, C_RESET);
#else
printf("%sOFF%s\n", C_FG_RED, C_RESET);
#endif
printf("- Intel backend: ");
#ifdef BACKEND_INTEL
printf("%sON%s\n", C_FG_GREEN, C_RESET);

View File

@@ -10,6 +10,8 @@
#include "../intel/uarch.hpp"
#include "../intel/intel.hpp"
#include "../hsa/hsa.hpp"
#include "../hsa/uarch.hpp"
#include "../cuda/cuda.hpp"
#include "../cuda/uarch.hpp"
@@ -233,6 +235,9 @@ void choose_ascii_art(struct ascii* art, struct color** cs, struct terminal* ter
if(art->vendor == GPU_VENDOR_NVIDIA) {
art->art = choose_ascii_art_aux(&logo_nvidia_l, &logo_nvidia, term, lf);
}
else if(art->vendor == GPU_VENDOR_AMD) {
art->art = choose_ascii_art_aux(&logo_amd_l, &logo_amd, term, lf);
}
else if(art->vendor == GPU_VENDOR_INTEL) {
art->art = choose_ascii_art_aux(&logo_intel_l, &logo_intel, term, lf);
}
@@ -478,6 +483,50 @@ bool print_gpufetch_cuda(struct gpu_info* gpu, STYLE s, struct color** cs, struc
}
#endif
#ifdef BACKEND_HSA
bool print_gpufetch_amd(struct gpu_info* gpu, STYLE s, struct color** cs, struct terminal* term) {
struct ascii* art = set_ascii(get_gpu_vendor(gpu), s);
if(art == NULL)
return false;
char* gpu_name = get_str_gpu_name(gpu);
char* gpu_chip = get_str_chip(gpu->arch);
char* uarch = get_str_uarch_hsa(gpu->arch);
char* manufacturing_process = get_str_process(gpu->arch);
char* sms = get_str_cu(gpu);
char* max_frequency = get_str_freq(gpu);
setAttribute(art, ATTRIBUTE_NAME, gpu_name);
if (gpu_chip != NULL) {
setAttribute(art, ATTRIBUTE_CHIP, gpu_chip);
}
setAttribute(art, ATTRIBUTE_UARCH, uarch);
setAttribute(art, ATTRIBUTE_TECHNOLOGY, manufacturing_process);
setAttribute(art, ATTRIBUTE_FREQUENCY, max_frequency);
setAttribute(art, ATTRIBUTE_STREAMINGMP, sms);
const char** attribute_fields = ATTRIBUTE_FIELDS;
uint32_t longest_attribute = longest_attribute_length(art, attribute_fields);
uint32_t longest_field = longest_field_length(art, longest_attribute);
choose_ascii_art(art, cs, term, longest_field);
if(!ascii_fits_screen(term->w, *art->art, longest_field)) {
// Despite choosing the smallest logo, the output does not fit.
// Choose the shorter field names and recalculate the longest attribute.
attribute_fields = ATTRIBUTE_FIELDS_SHORT;
longest_attribute = longest_attribute_length(art, attribute_fields);
}
print_ascii_generic(art, longest_attribute, term->w - art->art->width, attribute_fields);
free(art->attributes);
free(art);
return true;
}
#endif
struct terminal* get_terminal_size() {
struct terminal* term = (struct terminal*) emalloc(sizeof(struct terminal));
@@ -517,11 +566,22 @@ bool print_gpufetch(struct gpu_info* gpu, STYLE s, struct color** cs) {
return false;
#endif
}
else {
else if(gpu->vendor == GPU_VENDOR_AMD) {
#ifdef BACKEND_HSA
return print_gpufetch_amd(gpu, s, cs, term);
#else
return false;
#endif
}
else if(gpu->vendor == GPU_VENDOR_INTEL) {
#ifdef BACKEND_INTEL
return print_gpufetch_intel(gpu, s, cs, term);
#else
return false;
#endif
}
else {
printErr("Invalid GPU vendor: %d", gpu->vendor);
return false;
}
}

View File

@@ -16,6 +16,9 @@ struct uarch {
int32_t cc_minor;
int32_t compute_capability;
// HSA specific
int32_t llvm_target;
// Intel specific
int32_t gt;
int32_t eu;

View File

@@ -5,8 +5,8 @@
#include "cuda.hpp"
#include "uarch.hpp"
#include "pci.hpp"
#include "gpufetch_helper_cuda.hpp"
#include "../common/pci.hpp"
#include "../common/global.hpp"
#include "../common/uarch.hpp"
@@ -33,10 +33,8 @@ int get_tensor_cores(struct uarch* arch, int sm, int major) {
if(major == 7) {
// TU116 does not have tensor cores!
// https://www.anandtech.com/show/13973/nvidia-gtx-1660-ti-review-feat-evga-xc-gaming/2
if(arch->chip == CHIP_TU116 || arch->chip == CHIP_TU116BM ||
arch->chip == CHIP_TU116GL || arch->chip == CHIP_TU116M) {
if (is_chip_TU116(arch))
return 0;
}
return sm * 8;
}
else if(major == 8) return sm * 4;

View File

@@ -8,6 +8,7 @@
#include "../common/uarch.hpp"
#include "../common/global.hpp"
#include "../common/gpu.hpp"
#include "pci.hpp"
#include "chips.hpp"
// Any clock multiplier
@@ -361,3 +362,8 @@ void free_uarch_struct(struct uarch* arch) {
free(arch->chip_str);
free(arch);
}
bool is_chip_TU116(struct uarch* arch) {
return arch->chip == CHIP_TU116 || arch->chip == CHIP_TU116BM ||
arch->chip == CHIP_TU116GL || arch->chip == CHIP_TU116M;
}

View File

@@ -13,5 +13,6 @@ char* get_str_cc(struct uarch* arch);
char* get_str_chip(struct uarch* arch);
char* get_str_process(struct uarch* arch);
void free_uarch_struct(struct uarch* arch);
bool is_chip_TU116(struct uarch* arch);
#endif

37
src/hsa/chips.hpp Normal file
View File

@@ -0,0 +1,37 @@
#ifndef __HSA_GPUCHIPS__
#define __HSA_GPUCHIPS__
#include <cstdint>
typedef uint32_t GPUCHIP;
enum {
CHIP_UNKNOWN_HSA,
// VEGA (TODO)
// ...
// RDNA
CHIP_NAVI_10,
CHIP_NAVI_12,
CHIP_NAVI_14,
// RDNA2
// There are way more (eg Oberon)
// Maybe we'll add them in the future.
CHIP_NAVI_21,
CHIP_NAVI_22,
CHIP_NAVI_23,
CHIP_NAVI_24,
// RDNA3
// There are way more as well.
// Supporting Navi only for now.
CHIP_NAVI_31,
CHIP_NAVI_32,
CHIP_NAVI_33,
// RDNA4
CHIP_NAVI_44,
CHIP_NAVI_48,
// CDNA
CHIP_ARCTURUS, // MI100 series
CHIP_ALDEBARAN, // MI200 series
CHIP_AQUA_VANJARAM, // MI300 series
CHIP_CDNA_NEXT // MI350 series
};
#endif

137
src/hsa/hsa.cpp Normal file
View File

@@ -0,0 +1,137 @@
#include <cstring>
#include <cstdlib>
#include <cstdio>
#include <iostream>
#include <iomanip>
#include <hsa/hsa.h>
#include <hsa/hsa_ext_amd.h>
#include "hsa.hpp"
#include "uarch.hpp"
#include "../common/global.hpp"
#include "../common/uarch.hpp"
struct agent_info {
unsigned deviceId; // ID of the target GPU device
char gpu_name[64];
char vendor_name[64];
char device_mkt_name[64];
uint32_t max_clock_freq;
uint32_t compute_unit;
};
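// Error-handling helper: on any HSA failure, print a human-readable status
// string (falling back to the raw numeric code) together with file and line,
// and return the error code from the enclosing function.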
#define RET_IF_HSA_ERR(err) { \
if ((err) != HSA_STATUS_SUCCESS) { \
char err_val[12]; \
char* err_str = NULL; \
if (hsa_status_string(err, \
(const char**)&err_str) != HSA_STATUS_SUCCESS) { \
snprintf(&(err_val[0]), sizeof(err_val), "%#x", (uint32_t)err); \
err_str = &(err_val[0]); \
} \
printErr("HSA failure at: %s:%d\n", __FILE__, __LINE__); \
printErr("Call returned %s\n", err_str); \
return (err); \
} \
}
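// Callback for hsa_iterate_agents(): for every GPU agent, copy the gfx name,
// vendor name, marketing name, maximum clock frequency and compute unit count
// into the agent_info struct passed via the data pointer.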
hsa_status_t agent_callback(hsa_agent_t agent, void *data) {
struct agent_info* info = reinterpret_cast<struct agent_info *>(data);
hsa_device_type_t type;
hsa_status_t err = hsa_agent_get_info(agent, HSA_AGENT_INFO_DEVICE, &type);
RET_IF_HSA_ERR(err);
if (type == HSA_DEVICE_TYPE_GPU) {
err = hsa_agent_get_info(agent, HSA_AGENT_INFO_NAME, info->gpu_name);
RET_IF_HSA_ERR(err);
err = hsa_agent_get_info(agent, HSA_AGENT_INFO_VENDOR_NAME, info->vendor_name);
RET_IF_HSA_ERR(err);
err = hsa_agent_get_info(agent, (hsa_agent_info_t) HSA_AMD_AGENT_INFO_PRODUCT_NAME, &info->device_mkt_name);
RET_IF_HSA_ERR(err);
err = hsa_agent_get_info(agent, (hsa_agent_info_t) HSA_AMD_AGENT_INFO_MAX_CLOCK_FREQUENCY, &info->max_clock_freq);
RET_IF_HSA_ERR(err);
err = hsa_agent_get_info(agent, (hsa_agent_info_t) HSA_AMD_AGENT_INFO_COMPUTE_UNIT_COUNT, &info->compute_unit);
RET_IF_HSA_ERR(err);
}
return HSA_STATUS_SUCCESS;
}
struct topology_h* get_topology_info(struct agent_info info) {
struct topology_h* topo = (struct topology_h*) emalloc(sizeof(struct topology_h));
topo->compute_units = info.compute_unit;
return topo;
}
struct gpu_info* get_gpu_info_hsa(int gpu_idx) {
struct gpu_info* gpu = (struct gpu_info*) emalloc(sizeof(struct gpu_info));
gpu->pci = NULL;
gpu->idx = gpu_idx;
if(gpu->idx < 0) {
printErr("GPU index must be equal or greater than zero");
return NULL;
}
if(gpu->idx > 0) {
// Currently we only support fetching GPU 0.
return NULL;
}
hsa_status_t err = hsa_init();
if (err != HSA_STATUS_SUCCESS) {
printErr("Failed to initialize HSA runtime");
return NULL;
}
struct agent_info info;
info.deviceId = gpu_idx;
// Iterate over all agents in the system
err = hsa_iterate_agents(agent_callback, &info);
if (err != HSA_STATUS_SUCCESS) {
printErr("Failed to iterate HSA agents");
hsa_shut_down();
return NULL;
}
if (strcmp(info.vendor_name, "AMD") != 0) {
printErr("HSA vendor name is: '%s'. Only AMD is supported!", info.vendor_name);
hsa_shut_down();
return NULL;
}
gpu->vendor = GPU_VENDOR_AMD;
gpu->freq = info.max_clock_freq;
gpu->topo_h = get_topology_info(info);
gpu->name = (char *) emalloc(sizeof(char) * (strlen(info.device_mkt_name) + 1));
strcpy(gpu->name, info.device_mkt_name);
gpu->arch = get_uarch_from_hsa(gpu, info.gpu_name);
if (gpu->arch == NULL) {
hsa_shut_down();
return NULL;
}
// Shut down the HSA runtime
err = hsa_shut_down();
if (err != HSA_STATUS_SUCCESS) {
printErr("Failed to shutdown HSA runtime");
return NULL;
}
return gpu;
}
char* get_str_cu(struct gpu_info* gpu) {
return get_str_generic(gpu->topo_h->compute_units);
}

9
src/hsa/hsa.hpp Normal file
View File

@@ -0,0 +1,9 @@
#ifndef __HSA_GPU__
#define __HSA_GPU__
#include "../common/gpu.hpp"
struct gpu_info* get_gpu_info_hsa(int gpu_idx);
char* get_str_cu(struct gpu_info* gpu);
#endif

321
src/hsa/uarch.cpp Normal file
View File

@@ -0,0 +1,321 @@
#include <cstdlib>
#include <cstdint>
#include <cstring>
#include "../common/uarch.hpp"
#include "../common/global.hpp"
#include "../common/gpu.hpp"
#include "chips.hpp"
// MICROARCH values
enum {
UARCH_UNKNOWN,
// GCN (Graphics Core Next)
// Empty for now
// ...
// RDNA (Radeon DNA)
UARCH_RDNA,
UARCH_RDNA2,
UARCH_RDNA3,
UARCH_RDNA4,
// CDNA (Compute DNA)
UARCH_CDNA,
UARCH_CDNA2,
UARCH_CDNA3,
UARCH_CDNA4
};
static const char *uarch_str[] = {
/*[UARCH_UNKNOWN] = */ STRING_UNKNOWN,
/*[UARCH_RDNA] = */ "RDNA",
/*[UARCH_RDNA2] = */ "RDNA2",
/*[UARCH_RDNA3] = */ "RDNA3",
/*[UARCH_RDNA4] = */ "RDNA4",
/*[UARCH_CDNA] = */ "CDNA",
/*[UARCH_CDNA2] = */ "CDNA2",
/*[UARCH_CDNA3] = */ "CDNA3",
/*[UARCH_CDNA4] = */ "CDNA4",
};
// Sources:
// - https://rocm.docs.amd.com/en/latest/reference/gpu-arch-specs.html
// - https://www.techpowerup.com
//
// This is sometimes referred to as the LLVM target, but also as the shader ISA.
//
// The LLVM target *usually* maps to a specific architecture. However, there
// are cases where this is not true:
// MI8 is GCN3.0 with LLVM target gfx803
// MI6 is GCN4.0 with LLVM target gfx803
// or
// Strix Point can be gfx1150 or gfx1151
//
// NOTE: GCN chips are stored for completeness, but they are
// not actively supported.
enum {
TARGET_UNKNOWN_HSA,
/// GCN (Graphics Core Next)
/// ------------------------
// GCN 1.0
TARGET_GFX600,
TARGET_GFX601,
TARGET_GFX602,
// GCN 2.0
TARGET_GFX700,
TARGET_GFX701,
TARGET_GFX702,
TARGET_GFX703,
TARGET_GFX704,
TARGET_GFX705,
// GCN 3.0 / 4.0
TARGET_GFX801,
TARGET_GFX802,
TARGET_GFX803,
TARGET_GFX805,
TARGET_GFX810,
// GCN 5.0
TARGET_GFX900,
TARGET_GFX902,
TARGET_GFX904,
// GCN 5.1
TARGET_GFX906,
// ???
TARGET_GFX909,
TARGET_GFX90C,
/// RDNA (Radeon DNA)
/// -----------------
// RDNA1
TARGET_GFX1010,
TARGET_GFX1011,
TARGET_GFX1012,
// RDNA2
TARGET_GFX1013, // Oberon
TARGET_GFX1030,
TARGET_GFX1031,
TARGET_GFX1032,
TARGET_GFX1033,
TARGET_GFX1034,
TARGET_GFX1035, // ??
TARGET_GFX1036, // ??
// RDNA3
TARGET_GFX1100,
TARGET_GFX1101,
TARGET_GFX1102,
TARGET_GFX1103, // ???
// RDNA3.5
TARGET_GFX1150, // Strix Point
TARGET_GFX1151, // Strix Halo / Strix Point
TARGET_GFX1152, // Krackan Point
TARGET_GFX1153, // ???
// RDNA4
TARGET_GFX1200,
TARGET_GFX1201,
TARGET_GFX1250, // ???
TARGET_GFX1251, // ???
/// CDNA (Compute DNA)
/// ------------------
// CDNA
TARGET_GFX908,
// CDNA2
TARGET_GFX90A,
// CDNA3
TARGET_GFX942,
// CDNA4
TARGET_GFX950
};
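// Chained else-if lookup macros: CHECK_UARCH_START / CHECK_UARCH / CHECK_UARCH_END
// build a chip -> (chip name, microarchitecture, process) table, reporting a bug
// and falling back to UNKNOWN when the chip id has no entry.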
#define CHECK_UARCH_START if (false) {}
#define CHECK_UARCH(arch, chip_, str, uarch, process) \
else if (arch->chip == chip_) fill_uarch(arch, str, uarch, process);
#define CHECK_UARCH_END else { if(arch->chip != CHIP_UNKNOWN_HSA) printBug("map_chip_to_uarch_hsa: Unknown chip id: %d", arch->chip); fill_uarch(arch, STRING_UNKNOWN, UARCH_UNKNOWN, UNK); }
void fill_uarch(struct uarch* arch, char const *str, MICROARCH u, uint32_t process) {
arch->chip_str = (char *) emalloc(sizeof(char) * (strlen(str)+1));
strcpy(arch->chip_str, str);
arch->uarch = u;
arch->process = process;
}
// On chiplet-based chips (such as Navi 31, Navi 32, etc.),
// there are two different processes: the MCD process and the
// process of the rest of the chip. They might differ, and here
// we just take one - the MCD process for now.
//
// TODO: Should we differentiate?
void map_chip_to_uarch_hsa(struct uarch* arch) {
CHECK_UARCH_START
// RDNA
CHECK_UARCH(arch, CHIP_NAVI_10, "Navi 10", UARCH_RDNA, 7)
CHECK_UARCH(arch, CHIP_NAVI_12, "Navi 12", UARCH_RDNA, 7)
CHECK_UARCH(arch, CHIP_NAVI_14, "Navi 14", UARCH_RDNA, 7)
CHECK_UARCH(arch, CHIP_NAVI_21, "Navi 21", UARCH_RDNA2, 7)
CHECK_UARCH(arch, CHIP_NAVI_22, "Navi 22", UARCH_RDNA2, 7)
CHECK_UARCH(arch, CHIP_NAVI_23, "Navi 23", UARCH_RDNA2, 7)
CHECK_UARCH(arch, CHIP_NAVI_24, "Navi 24", UARCH_RDNA2, 6)
CHECK_UARCH(arch, CHIP_NAVI_31, "Navi 31", UARCH_RDNA3, 6)
CHECK_UARCH(arch, CHIP_NAVI_32, "Navi 32", UARCH_RDNA3, 6)
CHECK_UARCH(arch, CHIP_NAVI_33, "Navi 33", UARCH_RDNA3, 6)
CHECK_UARCH(arch, CHIP_NAVI_44, "Navi 44", UARCH_RDNA4, 4)
CHECK_UARCH(arch, CHIP_NAVI_48, "Navi 48", UARCH_RDNA4, 4)
// CDNA
// NOTE: We will not show chip name for CDNA, thus use empty str
CHECK_UARCH(arch, CHIP_ARCTURUS, "", UARCH_CDNA, 7)
CHECK_UARCH(arch, CHIP_ALDEBARAN, "", UARCH_CDNA2, 6)
CHECK_UARCH(arch, CHIP_AQUA_VANJARAM, "", UARCH_CDNA3, 6)
CHECK_UARCH(arch, CHIP_CDNA_NEXT, "", UARCH_CDNA4, 6) // big difference between MCD and rest of the chip process
CHECK_UARCH_END
}
#define CHECK_TGT_START if (false) {}
#define CHECK_TGT(target, llvm_target, chip) \
else if (target == llvm_target) return chip;
#define CHECK_TGT_END else { printBug("LLVM target '%d' has no matching chip", target); return CHIP_UNKNOWN_HSA; }
// We have at least 2 choices to infer the chip:
//
// - LLVM target (e.g., gfx1101 is Navi 32)
// - PCI ID (e.g., 0x7470 is Navi 32)
//
// For now we use the first approach, which has some known
// issues, as mentioned in the enum above.
// However, PCI detection is not perfect either, since it is
// quite hard to find PCI IDs for old hardware.
GPUCHIP get_chip_from_target_hsa(int32_t target) {
CHECK_TGT_START
/// RDNA
/// -------------------------------------------
CHECK_TGT(target, TARGET_GFX1010, CHIP_NAVI_10)
CHECK_TGT(target, TARGET_GFX1011, CHIP_NAVI_12)
CHECK_TGT(target, TARGET_GFX1012, CHIP_NAVI_14)
// CHECK_TGT(target, TARGET_GFX1013, TODO)
/// RDNA2
/// -------------------------------------------
CHECK_TGT(target, TARGET_GFX1030, CHIP_NAVI_21)
CHECK_TGT(target, TARGET_GFX1031, CHIP_NAVI_22)
CHECK_TGT(target, TARGET_GFX1032, CHIP_NAVI_23)
CHECK_TGT(target, TARGET_GFX1033, CHIP_NAVI_21)
CHECK_TGT(target, TARGET_GFX1034, CHIP_NAVI_24)
// CHECK_TGT(target, TARGET_GFX1035, TODO)
// CHECK_TGT(target, TARGET_GFX1036, TODO)
/// RDNA3
/// -------------------------------------------
CHECK_TGT(target, TARGET_GFX1100, CHIP_NAVI_31)
CHECK_TGT(target, TARGET_GFX1101, CHIP_NAVI_32)
CHECK_TGT(target, TARGET_GFX1102, CHIP_NAVI_33)
// CHECK_TGT(target, TARGET_GFX1103, TODO)
/// RDNA3.5
/// -------------------------------------------
// CHECK_TGT(target, TARGET_GFX1150, TODO)
// CHECK_TGT(target, TARGET_GFX1151, TODO)
// CHECK_TGT(target, TARGET_GFX1152, TODO)
// CHECK_TGT(target, TARGET_GFX1153, TODO)
/// RDNA4
/// -------------------------------------------
CHECK_TGT(target, TARGET_GFX1200, CHIP_NAVI_44)
CHECK_TGT(target, TARGET_GFX1201, CHIP_NAVI_48)
// CHECK_TGT(target, TARGET_GFX1250, TODO)
// CHECK_TGT(target, TARGET_GFX1251, TODO)
/// CDNA
/// -------------------------------------------
CHECK_TGT(target, TARGET_GFX908, CHIP_ARCTURUS)
/// CDNA2
/// -------------------------------------------
CHECK_TGT(target, TARGET_GFX90A, CHIP_ALDEBARAN)
/// CDNA3
/// -------------------------------------------
CHECK_TGT(target, TARGET_GFX942, CHIP_AQUA_VANJARAM)
/// CDNA4
/// -------------------------------------------
CHECK_TGT(target, TARGET_GFX950, CHIP_CDNA_NEXT)
CHECK_TGT_END
}
#define CHECK_TGT_STR_START if (false) {}
#define CHECK_TGT_STR(target, llvm_target, chip) \
else if (strcmp(target, llvm_target) == 0) return chip;
#define CHECK_TGT_STR_END else { return TARGET_UNKNOWN_HSA; }
// Maps the LLVM target string to the enum value
int32_t get_llvm_target_from_str(char* target) {
// TODO: Autogenerate this
// TODO: Add all, not only the ones we support in get_chip_from_target_hsa
CHECK_TGT_STR_START
CHECK_TGT_STR(target, "gfx1010", TARGET_GFX1010)
CHECK_TGT_STR(target, "gfx1011", TARGET_GFX1011)
CHECK_TGT_STR(target, "gfx1012", TARGET_GFX1012)
CHECK_TGT_STR(target, "gfx1013", TARGET_GFX1013)
CHECK_TGT_STR(target, "gfx1030", TARGET_GFX1030)
CHECK_TGT_STR(target, "gfx1031", TARGET_GFX1031)
CHECK_TGT_STR(target, "gfx1032", TARGET_GFX1032)
CHECK_TGT_STR(target, "gfx1033", TARGET_GFX1033)
CHECK_TGT_STR(target, "gfx1034", TARGET_GFX1034)
CHECK_TGT_STR(target, "gfx1035", TARGET_GFX1035)
CHECK_TGT_STR(target, "gfx1036", TARGET_GFX1036)
CHECK_TGT_STR(target, "gfx1100", TARGET_GFX1100)
CHECK_TGT_STR(target, "gfx1101", TARGET_GFX1101)
CHECK_TGT_STR(target, "gfx1102", TARGET_GFX1102)
CHECK_TGT_STR(target, "gfx1103", TARGET_GFX1103)
CHECK_TGT_STR(target, "gfx1200", TARGET_GFX1200)
CHECK_TGT_STR(target, "gfx1201", TARGET_GFX1201)
CHECK_TGT_STR(target, "gfx1250", TARGET_GFX1250)
CHECK_TGT_STR(target, "gfx1251", TARGET_GFX1251)
CHECK_TGT_STR(target, "gfx908", TARGET_GFX908)
CHECK_TGT_STR(target, "gfx90a", TARGET_GFX90A)
CHECK_TGT_STR(target, "gfx942", TARGET_GFX942)
CHECK_TGT_STR(target, "gfx950", TARGET_GFX950)
CHECK_TGT_STR_END
}
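// Entry point of the detection chain: gfx name (LLVM target string) -> target
// enum -> chip -> microarchitecture and manufacturing process.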
struct uarch* get_uarch_from_hsa(struct gpu_info* gpu, char* gpu_name) {
struct uarch* arch = (struct uarch*) emalloc(sizeof(struct uarch));
arch->llvm_target = get_llvm_target_from_str(gpu_name);
if (arch->llvm_target == TARGET_UNKNOWN_HSA) {
printErr("Unknown LLVM target: '%s'", gpu_name);
return NULL;
}
arch->chip_str = NULL;
arch->chip = get_chip_from_target_hsa(arch->llvm_target);
map_chip_to_uarch_hsa(arch);
return arch;
}
bool is_uarch_valid(struct uarch* arch) {
if (arch == NULL) {
printBug("Invalid uarch: arch is NULL");
return false;
}
if (arch->uarch >= UARCH_UNKNOWN && arch->uarch <= UARCH_CDNA4) {
return true;
}
else {
printBug("Invalid uarch: %d", arch->uarch);
return false;
}
}
bool is_cdna(struct uarch* arch) {
return arch->uarch == UARCH_CDNA ||
arch->uarch == UARCH_CDNA2 ||
arch->uarch == UARCH_CDNA3 ||
arch->uarch == UARCH_CDNA4;
}
char* get_str_chip(struct uarch* arch) {
// We don't want to show CDNA chip names, as they add
// no value: each CDNA architecture maps one-to-one
// to a chip.
if (is_cdna(arch)) return NULL;
return arch->chip_str;
}
const char* get_str_uarch_hsa(struct uarch* arch) {
if (!is_uarch_valid(arch)) {
return NULL;
}
return uarch_str[arch->uarch];
}

13
src/hsa/uarch.hpp Normal file
View File

@@ -0,0 +1,13 @@
#ifndef __HSA_UARCH__
#define __HSA_UARCH__
#include "../common/gpu.hpp"
struct uarch;
struct uarch* get_uarch_from_hsa(struct gpu_info* gpu, char* gpu_name);
char* get_str_uarch_hsa(struct uarch* arch);
char* get_str_process(struct uarch* arch); // TODO: Shouldn't we define this in the cpp?
char* get_str_chip(struct uarch* arch);
#endif