gen_windows.ps1

#!powershell
$ErrorActionPreference = "Stop"
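# init_vars sets the patch list, the baseline CMake defines (shared libs on,
# plain AVX only - AVX2/AVX512/FMA/F16C disabled), and picks RelWithDebInfo
# when CGO_CFLAGS requests debug info, Release otherwise.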
function init_vars {
    $script:patches = @("0001-Expose-callable-API-for-server.patch")
    $script:cmakeDefs = @("-DBUILD_SHARED_LIBS=on", "-DLLAMA_NATIVE=off", "-DLLAMA_F16C=off", "-DLLAMA_FMA=off", "-DLLAMA_AVX512=off", "-DLLAMA_AVX2=off", "-DLLAMA_AVX=on", "-DLLAMA_K_QUANTS=on", "-DLLAMA_ACCELERATE=on", "-A", "x64")
    # -like "*-g*" checks for -g anywhere in CGO_CFLAGS (-contains on a string only matches the exact value "-g")
    if ($env:CGO_CFLAGS -like "*-g*") {
        $script:cmakeDefs += @("-DCMAKE_VERBOSE_MAKEFILE=on")
        $script:config = "RelWithDebInfo"
    } else {
        $script:config = "Release"
    }
}
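# git_module_setup initializes the gguf submodule and force-updates it,
# exiting on any non-zero git exit code.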
function git_module_setup {
    # TODO add flags to skip the init/patch logic to make it easier to mod llama.cpp code in-repo
    & git submodule init
    if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE) }
    & git submodule update --force gguf
    if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE) }
}
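# apply_patches deletes gguf/examples/server/server.h before patching
# (presumably so the patch applies cleanly on re-runs) and then applies
# each patch in $script:patches to the gguf submodule.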
function apply_patches {
    rm -erroraction ignore -path "gguf/examples/server/server.h"
    foreach ($patch in $script:patches) {
        write-host "Applying patch $patch"
        & git -C gguf apply ../patches/$patch
        if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE) }
    }
}
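# build configures the gguf tree into $script:buildDir with the accumulated
# CMake defines, then builds the selected configuration.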
function build {
    write-host "generating config with: cmake -S gguf -B $script:buildDir $script:cmakeDefs"
    & cmake --version
    & cmake -S gguf -B $script:buildDir $script:cmakeDefs
    if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE) }
    write-host "building with: cmake --build $script:buildDir --config $script:config"
    & cmake --build $script:buildDir --config $script:config
    if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE) }
}
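# install wipes any previous $script:installDir and installs the build there
# via cmake --install.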
function install {
    rm -erroraction ignore -recurse -force -path $script:installDir
    & cmake --install $script:buildDir --prefix $script:installDir --config $script:config
    if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE) }
}
init_vars
git_module_setup
apply_patches

# first build CPU based
$script:buildDir="gguf/build/wincpu"
$script:installDir="gguf/build/wincpu/dist"
build
# install
md gguf/build/lib -ea 0
md gguf/build/wincpu/dist/lib -ea 0
mv gguf/build/wincpu/bin/$script:config/ext_server_shared.dll gguf/build/wincpu/dist/lib/cpu_server.dll
# Nope, this barfs on lots of symbol problems
#mv gguf/build/wincpu/examples/server/$script:config/ext_server_shared.dll gguf/build/wincpu/dist/lib/cpu_server.lib
# Nope: this needs lots of include paths to pull in things like msvcprt.lib and other deps
# & cl.exe `
#   gguf/build/wincpu/examples/server/$script:config/ext_server.lib `
#   gguf/build/wincpu/common/$script:config/common.lib `
#   gguf/build/wincpu/$script:config/llama.lib `
#   gguf/build/wincpu/$script:config/ggml_static.lib `
#   /link /DLL /DEF:cpu_server.def /NOENTRY /MACHINE:X64 /OUT:gguf/build/wincpu/dist/lib/cpu_server.dll
# if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE) }

# Then build cuda as a dynamically loaded library
init_vars
$script:buildDir="gguf/build/wincuda"
$script:installDir="gguf/build/wincuda/dist"
$script:cmakeDefs += @("-DLLAMA_CUBLAS=ON", "-DBUILD_SHARED_LIBS=on")
build
install
cp gguf/build/wincuda/dist/bin/ext_server_shared.dll gguf/build/lib/cuda_server.dll
# TODO - more to do here to create a usable dll

# TODO - implement ROCm support on windows
md gguf/build/winrocm/lib -ea 0
echo $null >> gguf/build/winrocm/lib/.generated