cuda: enable flash attention

ggml added an option to disable flash attention, so explicitly enable it
Michael Yang 2 months ago
commit b42aba40ed
1 changed file with 1 addition and 0 deletions

CMakeLists.txt (+1, -0)

@@ -23,6 +23,7 @@ set(GGML_SCHED_MAX_COPIES 4)
 set(GGML_LLAMAFILE ON)
 set(GGML_CUDA_PEER_MAX_BATCH_SIZE 128)
 set(GGML_CUDA_GRAPHS ON)
+set(GGML_CUDA_FA ON)
 
 if((CMAKE_OSX_ARCHITECTURES AND NOT CMAKE_OSX_ARCHITECTURES MATCHES "arm64")
     OR (NOT CMAKE_OSX_ARCHITECTURES AND NOT CMAKE_SYSTEM_PROCESSOR MATCHES "arm|aarch64|ARM64|ARMv[0-9]+"))
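
For readers wiring this up outside the ollama tree, here is a minimal sketch of how a parent CMake project can pin this ggml option before pulling ggml in. The project name and the ggml path are assumptions for illustration, not from the commit:

    # A minimal sketch, assuming ggml is vendored in ./ggml;
    # project name and layout are illustrative, not from the commit.
    cmake_minimum_required(VERSION 3.13)
    project(example LANGUAGES C CXX)

    # With policy CMP0077 set to NEW (the default at this minimum
    # CMake version), a normal variable that already exists takes
    # precedence over a subproject's option() default, so setting it
    # here pins flash attention on before ggml is configured.
    set(GGML_CUDA_FA ON)

    # GGML_CUDA_FA only takes effect when the CUDA backend is built.
    set(GGML_CUDA ON)

    add_subdirectory(ggml)

Note that because this is a plain set() rather than a cache entry, it also shadows any -DGGML_CUDA_FA=... passed on the cmake command line, so the choice is unconditional for this build, which matches the intent of the commit message.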