# gen_windows.ps1 — Windows build script for the llama.cpp (gguf) submodule
  1. #!powershell
  2. $ErrorActionPreference = "Stop"
  3. function init_vars {
  4. $script:patches = @("0001-Expose-callable-API-for-server.patch")
  5. $script:cmakeDefs = @("-DBUILD_SHARED_LIBS=on", "-DLLAMA_NATIVE=off", "-DLLAMA_F16C=off", "-DLLAMA_FMA=off", "-DLLAMA_AVX512=off", "-DLLAMA_AVX2=off", "-DLLAMA_AVX=on", "-DLLAMA_K_QUANTS=on", "-DLLAMA_ACCELERATE=on", "-A","x64")
  6. $script:cmakeTargets = @("ggml", "ggml_static", "llama", "build_info", "common", "ext_server_shared", "llava_static")
  7. if ($env:CGO_CFLAGS -contains "-g") {
  8. $script:cmakeDefs += @("-DCMAKE_VERBOSE_MAKEFILE=on", "-DLLAMA_SERVER_VERBOSE=on")
  9. $script:config = "RelWithDebInfo"
  10. } else {
  11. $script:cmakeDefs += @("-DLLAMA_SERVER_VERBOSE=off")
  12. $script:config = "Release"
  13. }
  14. }
  15. function git_module_setup {
  16. # TODO add flags to skip the init/patch logic to make it easier to mod llama.cpp code in-repo
  17. & git submodule init
  18. if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE)}
  19. & git submodule update --force gguf
  20. if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE)}
  21. }
  22. function apply_patches {
  23. # Wire up our CMakefile
  24. if (!(Select-String -Path "gguf/examples/server/CMakeLists.txt" -Pattern 'ollama')) {
  25. Add-Content -Path "gguf/examples/server/CMakeLists.txt" -Value 'include (../../../CMakeLists.txt) # ollama'
  26. }
  27. # Avoid duplicate main symbols when we link into the cgo binary
  28. $content = Get-Content -Path "./gguf/examples/server/server.cpp"
  29. $content = $content -replace 'int main\(', 'int __main('
  30. Set-Content -Path "./gguf/examples/server/server.cpp" -Value $content
  31. }
  32. function build {
  33. write-host "generating config with: cmake -S gguf -B $script:buildDir $script:cmakeDefs"
  34. & cmake --version
  35. & cmake -S gguf -B $script:buildDir $script:cmakeDefs
  36. if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE)}
  37. write-host "building with: cmake --build $script:buildDir --config $script:config ($script:cmakeTargets | ForEach-Object { "--target", $_ })"
  38. & cmake --build $script:buildDir --config $script:config ($script:cmakeTargets | ForEach-Object { "--target", $_ })
  39. if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE)}
  40. }
  41. function install {
  42. rm -ea 0 -recurse -force -path "${script:buildDir}/lib"
  43. md "${script:buildDir}/lib" -ea 0 > $null
  44. cp "${script:buildDir}/bin/${script:config}/ext_server_shared.dll" "${script:buildDir}/lib"
  45. cp "${script:buildDir}/bin/${script:config}/llama.dll" "${script:buildDir}/lib"
  46. }
  47. function cleanup {
  48. Set-Location "gguf/examples/server"
  49. git checkout CMakeLists.txt server.cpp
  50. }
  51. init_vars
  52. git_module_setup
  53. apply_patches
  54. # first build CPU based
  55. $script:buildDir="gguf/build/windows/cpu"
  56. build
  57. install
  58. # Then build cuda as a dynamically loaded library
  59. init_vars
  60. $script:buildDir="gguf/build/windows/cuda"
  61. $script:cmakeDefs += @("-DLLAMA_CUBLAS=ON")
  62. build
  63. install
  64. # TODO - actually implement ROCm support on windows
  65. $script:buildDir="gguf/build/windows/rocm"
  66. rm -ea 0 -recurse -force -path "${script:buildDir}/lib"
  67. md "${script:buildDir}/lib" -ea 0 > $null
  68. echo $null >> "${script:buildDir}/lib/.generated"
  69. cleanup
  70. write-host "`ngo generate completed"