6 Commits

Author    SHA1        Message           Date
Dr-Noob   84e6021a95  Remove TODO       2025-10-26 10:47:27 +01:00
Dr-Noob   a4916255cf  Fix               2025-10-26 10:44:09 +01:00
Dr-Noob   b5dc30d4b3  Add matrix cores  2025-10-26 10:42:25 +01:00
Dr-Noob   2fa90179b4  Fix               2025-10-26 10:28:41 +01:00
Dr-Noob   711936be81  Show XCDs         2025-10-26 10:27:51 +01:00
Dr-Noob   94a9a440f0  Basic support     2025-10-23 21:40:14 +02:00
3 changed files with 45 additions and 120 deletions


@@ -1,24 +1,5 @@
 #!/bin/bash
-print_help() {
-  cat << EOF
-Usage: $0 <backends> [build_type]
-  <backends>   MANDATORY. Comma-separated list of backends to enable.
-               Valid options: hsa, intel, cuda
-               Example: hsa,cuda
-  [build_type] OPTIONAL. Build type. Valid options: debug, release (default: release)
-Examples:
-  $0 hsa,intel debug
-  $0 cuda
-  $0 hsa,intel,cuda release
-EOF
-}
 # gpufetch build script
 set -e
@@ -26,79 +7,19 @@ rm -rf build/ gpufetch
 mkdir build/
 cd build/
-if [ "$1" == "--help" ]
+if [ "$1" == "debug" ]
 then
-  echo "gpufetch build script"
-  echo
-  print_help
-  exit 0
-fi
-if [[ $# -lt 1 ]]; then
-  echo "ERROR: At least one backend must be specified."
-  echo
-  print_help
-  exit 1
-fi
-# Determine if last argument is build type
-LAST_ARG="${!#}"
-if [[ "$LAST_ARG" == "debug" || "$LAST_ARG" == "release" ]]; then
-  BUILD_TYPE="$LAST_ARG"
-  BACKEND_ARG="${1}"
+  BUILD_TYPE="Debug"
 else
-  BUILD_TYPE="release"
-  BACKEND_ARG="${1}"
+  BUILD_TYPE="Release"
 fi
-# Split comma-separated backends into an array
-IFS=',' read -r -a BACKENDS <<< "$BACKEND_ARG"
-# Validate build type
-if [[ "$BUILD_TYPE" != "debug" && "$BUILD_TYPE" != "release" ]]
-then
-  echo "Error: Invalid build type '$BUILD_TYPE'."
-  echo "Valid options are: debug, release"
-  exit 1
-fi
-# From lower to upper case
-CMAKE_FLAGS="-DCMAKE_BUILD_TYPE=${BUILD_TYPE^}"
-# Validate backends
-VALID_BACKENDS=("hsa" "intel" "cuda")
-for BACKEND in "${BACKENDS[@]}"; do
-  case "$BACKEND" in
-    hsa)
-      CMAKE_FLAGS+=" -DENABLE_HSA_BACKEND=ON"
-      ;;
-    intel)
-      CMAKE_FLAGS+=" -DENABLE_INTEL_BACKEND=ON"
-      ;;
-    cuda)
-      CMAKE_FLAGS+=" -DENABLE_CUDA_BACKEND=ON"
-      ;;
-    *)
-      echo "ERROR: Invalid backend '$BACKEND'."
-      echo "Valid options: ${VALID_BACKENDS[*]}"
-      exit 1
-      ;;
-  esac
-done
-# You can also manually specify the compilation flags.
-# If you need to, just run the cmake command directly
-# instead of using this script.
-#
-# Here you will find some help:
-#
 # In case you have CUDA installed but it is not detected,
 # - set CMAKE_CUDA_COMPILER to your nvcc binary:
 # - set CMAKE_CUDA_COMPILER_TOOLKIT_ROOT to the CUDA root dir
 # for example:
 # cmake -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DCMAKE_CUDA_COMPILER=/usr/local/cuda/bin/nvcc -DCMAKE_CUDA_COMPILER_TOOLKIT_ROOT=/usr/local/cuda/ ..
-#
 # In case you want to explicitely disable a backend, you can:
 # Disable CUDA backend:
 # cmake -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DENABLE_CUDA_BACKEND=OFF ..
@@ -107,9 +28,7 @@ done
 # Disable Intel backend:
 # cmake -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DENABLE_INTEL_BACKEND=OFF ..
-echo "$0: Running cmake $CMAKE_FLAGS"
-echo
-cmake $CMAKE_FLAGS ..
+cmake -DCMAKE_BUILD_TYPE=$BUILD_TYPE ..
 os=$(uname)
 if [ "$os" == 'Linux' ]; then


@@ -72,7 +72,7 @@ static const AttributeField ATTRIBUTE_INFO[] = {
   { ATTRIBUTE_FREQUENCY, "Max Frequency:", "Max Freq.:" },
   { ATTRIBUTE_PEAK, "Peak Performance:", "Peak Perf.:" },
   { ATTRIBUTE_COMPUTE_UNITS, "Compute Units (CUs):", "CUs" },
-  { ATTRIBUTE_MATRIX_CORES, "Matrix Cores:", "Matrix Cores:" },
+  { ATTRIBUTE_MATRIX_CORES, "Matrix Cores: ", "Matrix Cores:" },
   { ATTRIBUTE_XCDS, "XCDs:", "XCDs" },
   { ATTRIBUTE_LDS_SIZE, "LDS size:", "LDS:" },
   { ATTRIBUTE_STREAMINGMP, "SMs:", "SMs:" },


@@ -1,6 +1,3 @@
-// patched cuda.cpp for cuda13 by cloudy
 #include <cuda_runtime.h>
 #include <cstring>
 #include <cstdlib>
@@ -17,20 +14,25 @@ bool print_gpu_cuda(struct gpu_info* gpu) {
   char* cc = get_str_cc(gpu->arch);
   printf("%s (Compute Capability %s)\n", gpu->name, cc);
   free(cc);
   return true;
 }
 struct cache* get_cache_info(cudaDeviceProp prop) {
   struct cache* cach = (struct cache*) emalloc(sizeof(struct cache));
   cach->L2 = (struct cach*) emalloc(sizeof(struct cach));
   cach->L2->size = prop.l2CacheSize;
   cach->L2->num_caches = 1;
   cach->L2->exists = true;
   return cach;
 }
 int get_tensor_cores(struct uarch* arch, int sm, int major) {
   if(major == 7) {
+    // TU116 does not have tensor cores!
+    // https://www.anandtech.com/show/13973/nvidia-gtx-1660-ti-review-feat-evga-xc-gaming/2
     if (is_chip_TU116(arch))
       return 0;
     return sm * 8;
@@ -41,57 +43,57 @@ int get_tensor_cores(struct uarch* arch, int sm, int major) {
 struct topology_c* get_topology_info(struct uarch* arch, cudaDeviceProp prop) {
   struct topology_c* topo = (struct topology_c*) emalloc(sizeof(struct topology_c));
   topo->streaming_mp = prop.multiProcessorCount;
   topo->cores_per_mp = _ConvertSMVer2Cores(prop.major, prop.minor);
   topo->cuda_cores = topo->streaming_mp * topo->cores_per_mp;
   topo->tensor_cores = get_tensor_cores(arch, topo->streaming_mp, prop.major);
   return topo;
 }
 int32_t guess_clock_multipilier(struct gpu_info* gpu, struct memory* mem) {
+  // Guess clock multiplier
   int32_t clk_mul = 1;
   int32_t clk8 = abs((mem->freq/8) - gpu->freq);
   int32_t clk4 = abs((mem->freq/4) - gpu->freq);
   int32_t clk2 = abs((mem->freq/2) - gpu->freq);
   int32_t clk1 = abs((mem->freq/1) - gpu->freq);
   int32_t min = mem->freq;
   if(clkm_possible_for_uarch(8, gpu->arch) && min > clk8) { clk_mul = 8; min = clk8; }
   if(clkm_possible_for_uarch(4, gpu->arch) && min > clk4) { clk_mul = 4; min = clk4; }
   if(clkm_possible_for_uarch(2, gpu->arch) && min > clk2) { clk_mul = 2; min = clk2; }
   if(clkm_possible_for_uarch(1, gpu->arch) && min > clk1) { clk_mul = 1; min = clk1; }
   return clk_mul;
 }
 struct memory* get_memory_info(struct gpu_info* gpu, cudaDeviceProp prop) {
   struct memory* mem = (struct memory*) emalloc(sizeof(struct memory));
-  int val = 0;
   mem->size_bytes = (unsigned long long) prop.totalGlobalMem;
+  mem->freq = prop.memoryClockRate * 0.001f;
-  if (cudaDeviceGetAttribute(&val, cudaDevAttrMemoryClockRate, gpu->idx) == cudaSuccess) {
-    if (val > 1000000)
-      mem->freq = (float)val / 1000000.0f;
-    else
-      mem->freq = (float)val * 0.001f;
-  } else {
-    mem->freq = 0.0f;
-  }
   mem->bus_width = prop.memoryBusWidth;
   mem->clk_mul = guess_clock_multipilier(gpu, mem);
   mem->type = guess_memtype_from_cmul_and_uarch(mem->clk_mul, gpu->arch);
-  if (mem->clk_mul > 0)
+  // Fix frequency returned from CUDA to show real frequency
   mem->freq = mem->freq / mem->clk_mul;
   return mem;
 }
+// Compute peak performance when using CUDA cores
 int64_t get_peak_performance_cuda(struct gpu_info* gpu) {
   return gpu->freq * 1000000 * gpu->topo_c->cuda_cores * 2;
 }
+// Compute peak performance when using tensor cores
 int64_t get_peak_performance_tcu(cudaDeviceProp prop, struct gpu_info* gpu) {
+  // Volta / Turing tensor cores performs 4x4x4 FP16 matrix multiplication
+  // Ampere tensor cores performs 8x4x8 FP16 matrix multiplicacion
   if(prop.major == 7) return gpu->freq * 1000000 * 4 * 4 * 4 * 2 * gpu->topo_c->tensor_cores;
   else if(prop.major == 8) return gpu->freq * 1000000 * 8 * 4 * 8 * 2 * gpu->topo_c->tensor_cores;
   else return 0;
@@ -113,7 +115,8 @@ struct gpu_info* get_gpu_info_cuda(struct pci_dev *devices, int gpu_idx) {
   }
   int num_gpus = -1;
-  cudaError_t err = cudaGetDeviceCount(&num_gpus);
+  cudaError_t err = cudaSuccess;
+  err = cudaGetDeviceCount(&num_gpus);
   if(gpu_idx == 0) {
     printf("\r%*c\r", (int) strlen(CUDA_DRIVER_START_WARNING), ' ');
@@ -131,6 +134,7 @@ struct gpu_info* get_gpu_info_cuda(struct pci_dev *devices, int gpu_idx) {
   }
   if(gpu->idx+1 > num_gpus) {
+    // Master is trying to query an invalid GPU
     return NULL;
   }
@@ -140,25 +144,15 @@ struct gpu_info* get_gpu_info_cuda(struct pci_dev *devices, int gpu_idx) {
     return NULL;
   }
-  int core_clk = 0;
+  gpu->freq = deviceProp.clockRate * 1e-3f;
-  if (cudaDeviceGetAttribute(&core_clk, cudaDevAttrClockRate, gpu->idx) == cudaSuccess) {
-    if (core_clk > 1000000)
-      gpu->freq = core_clk / 1000000.0f;
-    else
-      gpu->freq = core_clk * 0.001f;
-  } else {
-    gpu->freq = 0.0f;
-  }
   gpu->vendor = GPU_VENDOR_NVIDIA;
-  gpu->name = (char *) emalloc(strlen(deviceProp.name) + 1);
+  gpu->name = (char *) emalloc(sizeof(char) * (strlen(deviceProp.name) + 1));
   strcpy(gpu->name, deviceProp.name);
   if((gpu->pci = get_pci_from_pciutils(devices, PCI_VENDOR_ID_NVIDIA, gpu_idx)) == NULL) {
     printErr("Unable to find a valid device for vendor id 0x%.4X using pciutils", PCI_VENDOR_ID_NVIDIA);
     return NULL;
   }
   gpu->arch = get_uarch_from_cuda(gpu);
   gpu->cach = get_cache_info(deviceProp);
   gpu->mem = get_memory_info(gpu, deviceProp);
@@ -169,7 +163,19 @@ struct gpu_info* get_gpu_info_cuda(struct pci_dev *devices, int gpu_idx) {
   return gpu;
 }
-char* get_str_sm(struct gpu_info* gpu) { return get_str_generic(gpu->topo_c->streaming_mp); }
-char* get_str_cores_sm(struct gpu_info* gpu) { return get_str_generic(gpu->topo_c->cores_per_mp); }
-char* get_str_cuda_cores(struct gpu_info* gpu) { return get_str_generic(gpu->topo_c->cuda_cores); }
-char* get_str_tensor_cores(struct gpu_info* gpu) { return get_str_generic(gpu->topo_c->tensor_cores); }
+char* get_str_sm(struct gpu_info* gpu) {
+  return get_str_generic(gpu->topo_c->streaming_mp);
+}
+char* get_str_cores_sm(struct gpu_info* gpu) {
+  return get_str_generic(gpu->topo_c->cores_per_mp);
+}
+char* get_str_cuda_cores(struct gpu_info* gpu) {
+  return get_str_generic(gpu->topo_c->cuda_cores);
+}
+char* get_str_tensor_cores(struct gpu_info* gpu) {
+  return get_str_generic(gpu->topo_c->tensor_cores);
+}
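
For reference, the peak-performance figures follow directly from get_peak_performance_tcu in the cuda.cpp hunk above: each tensor core retires one MxNxK FP16 matrix multiply-accumulate per clock, i.e. 2*M*N*K FLOPs, with a 4x4x4 shape on Volta/Turing (major 7) and 8x4x8 on Ampere (major 8). The snippet below is a minimal standalone sketch of that arithmetic, not gpufetch code; the function name peak_tensor_flops and the clock/tensor-core counts in main() are hypothetical example values.

// Standalone sketch (hypothetical values), mirroring the formula in get_peak_performance_tcu.
#include <cstdint>
#include <cstdio>

static int64_t peak_tensor_flops(int64_t freq_mhz, int64_t tensor_cores,
                                 int64_t m, int64_t n, int64_t k) {
  // clock in Hz * (2 * M * N * K) FLOPs per tensor core per cycle * number of tensor cores
  return freq_mhz * 1000000 * m * n * k * 2 * tensor_cores;
}

int main() {
  // Hypothetical Turing-class GPU (major 7): 1770 MHz, 272 tensor cores, 4x4x4 FP16 MMA
  int64_t turing = peak_tensor_flops(1770, 272, 4, 4, 4);
  // Hypothetical Ampere-class GPU (major 8): 1700 MHz, 328 tensor cores, 8x4x8 FP16 MMA
  int64_t ampere = peak_tensor_flops(1700, 328, 8, 4, 8);
  printf("Turing-class peak: %.1f TFLOP/s\n", turing / 1e12); // ~61.6
  printf("Ampere-class peak: %.1f TFLOP/s\n", ampere / 1e12); // ~285.5
  return 0;
}

The factor of 2 counts each fused multiply-add as two floating-point operations, which is also why get_peak_performance_cuda multiplies freq * cuda_cores by 2.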