From 1928780b1ab5b92ed633a0466a484d6ed526b97d Mon Sep 17 00:00:00 2001
From: Agnes Leroy
Date: Thu, 31 Oct 2024 10:38:59 +0100
Subject: [PATCH] chore(gpu): increase sm for rtxa6000

---
 backends/tfhe-cuda-backend/cuda/src/device.cu | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/backends/tfhe-cuda-backend/cuda/src/device.cu b/backends/tfhe-cuda-backend/cuda/src/device.cu
index 5177db7216..72ef7f1e5f 100644
--- a/backends/tfhe-cuda-backend/cuda/src/device.cu
+++ b/backends/tfhe-cuda-backend/cuda/src/device.cu
@@ -268,17 +268,20 @@ void cuda_drop_async(void *ptr, cudaStream_t stream, uint32_t gpu_index) {
 /// Get the maximum size for the shared memory
 int cuda_get_max_shared_memory(uint32_t gpu_index) {
   int max_shared_memory = 0;
-  cudaDeviceGetAttribute(&max_shared_memory, cudaDevAttrMaxSharedMemoryPerBlock,
-                         gpu_index);
-  check_cuda_error(cudaGetLastError());
 #if CUDA_ARCH == 900
   max_shared_memory = 226000;
 #elif CUDA_ARCH == 890
   max_shared_memory = 100000;
+#elif CUDA_ARCH == 860
+  max_shared_memory = 100000;
 #elif CUDA_ARCH == 800
   max_shared_memory = 163000;
 #elif CUDA_ARCH == 700
   max_shared_memory = 95000;
+#else
+  cudaDeviceGetAttribute(&max_shared_memory, cudaDevAttrMaxSharedMemoryPerBlock,
+                         gpu_index);
+  check_cuda_error(cudaGetLastError());
 #endif
   return max_shared_memory;
 }
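
Note (context, not part of the patch): the per-CUDA_ARCH constants above sit slightly below the opt-in shared-memory-per-block limits of each architecture, which are larger than the default limit reported by cudaDevAttrMaxSharedMemoryPerBlock (48 KB on these GPUs); the runtime query is now only the fallback for architectures without a hard-coded value. The standalone CUDA sketch below is a minimal illustration of the difference between the default and opt-in limits and of the per-kernel opt-in step needed to use the larger amount. The kernel demo_kernel and its shared-memory usage are hypothetical and are not taken from the TFHE-rs sources.

#include <cstdio>
#include <cuda_runtime.h>

// Hypothetical kernel using dynamic shared memory (illustration only).
__global__ void demo_kernel(float *out) {
  extern __shared__ float buf[];
  buf[threadIdx.x] = (float)threadIdx.x;
  __syncthreads();
  out[threadIdx.x] = buf[threadIdx.x];
}

int main() {
  int device = 0;
  cudaSetDevice(device);

  // Default per-block limit (what the removed runtime query returned) vs. the
  // larger opt-in limit that the hard-coded per-arch values approximate.
  int default_smem = 0, optin_smem = 0;
  cudaDeviceGetAttribute(&default_smem, cudaDevAttrMaxSharedMemoryPerBlock, device);
  cudaDeviceGetAttribute(&optin_smem, cudaDevAttrMaxSharedMemoryPerBlockOptin, device);
  printf("default: %d bytes, opt-in: %d bytes\n", default_smem, optin_smem);

  // Using more dynamic shared memory than the default limit requires an
  // explicit per-kernel opt-in; here we request the full opt-in amount.
  size_t wanted = (size_t)optin_smem;
  if (wanted > (size_t)default_smem)
    cudaFuncSetAttribute(demo_kernel, cudaFuncAttributeMaxDynamicSharedMemorySize,
                         (int)wanted);

  float *out;
  cudaMalloc(&out, 256 * sizeof(float));
  demo_kernel<<<1, 256, wanted>>>(out);
  cudaDeviceSynchronize();
  cudaFree(out);
  return 0;
}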