#!/bin/bash
# build_cuda.sh — compile the ggml CUDA backend into a single shared library.
#
# Output: ggml-cuda.dll on Windows (MSYS/MinGW), libggml-cuda.so elsewhere.
# Requires: CUDA toolkit (nvcc) on PATH; run from the ggml source directory.
set -euo pipefail

# Pick the platform-appropriate shared-library name. `uname -s` reports
# "Windows_NT" under some environments and "MINGW64_NT-<ver>" under MSYS2,
# hence the prefix glob match.
os="$(uname -s)"
if [[ "$os" == "Windows_NT" || "$os" == "MINGW64_NT"* ]]; then
  output="ggml-cuda.dll"
else
  output="libggml-cuda.so"
fi

# -t: parallelize compilation across all cores.
# --generate-code: embed SASS + PTX for each supported architecture
# (Maxwell through Ampere) so the library runs on a range of GPUs.
# The *.cu globs are intentionally unquoted so the shell expands them.
nvcc \
  -t "$(nproc)" \
  --generate-code=arch=compute_50,code=[compute_50,sm_50] \
  --generate-code=arch=compute_52,code=[compute_52,sm_52] \
  --generate-code=arch=compute_61,code=[compute_61,sm_61] \
  --generate-code=arch=compute_70,code=[compute_70,sm_70] \
  --generate-code=arch=compute_75,code=[compute_75,sm_75] \
  --generate-code=arch=compute_80,code=[compute_80,sm_80] \
  -DGGML_CUDA_DMMV_X=32 \
  -DGGML_CUDA_PEER_MAX_BATCH_SIZE=128 \
  -DGGML_CUDA_MMV_Y=1 \
  -DGGML_USE_CUDA=1 \
  -DGGML_SHARED=1 \
  -DGGML_BUILD=1 \
  -DGGML_USE_LLAMAFILE \
  -D_GNU_SOURCE \
  -DCMAKE_POSITION_INDEPENDENT_CODE=on \
  -Wno-deprecated-gpu-targets \
  --forward-unknown-to-host-compiler \
  -use_fast_math \
  -link \
  -shared \
  -I. \
  -lcuda -lcublas -lcudart -lcublasLt \
  -O3 \
  -o "$output" \
  ggml-cuda.cu \
  ggml-cuda/*.cu \
  ggml-cuda/template-instances/fattn-wmma*.cu \
  ggml-cuda/template-instances/mmq*.cu \
  ggml-cuda/template-instances/fattn-vec*q4_0-q4_0.cu \
  ggml-cuda/template-instances/fattn-vec*q8_0-q8_0.cu \
  ggml-cuda/template-instances/fattn-vec*f16-f16.cu \
  ggml.c ggml-backend.c ggml-alloc.c ggml-quants.c sgemm.cpp
# Optional feature flags, disabled by default:
# -DGGML_CUDA_USE_GRAPHS=1
# -DGGML_CUDA_FA_ALL_QUANTS=1