gen_windows.ps1
  1. #!powershell
# Windows build script for the llama.cpp native libraries (CPU + CUDA),
# driven by `go generate` (see the completion message at the bottom).
# Abort on the first unhandled PowerShell error.
  2. $ErrorActionPreference = "Stop"
  3. function init_vars {
  4. $script:llamacppDir = "../llama.cpp"
  5. $script:patches = @("0001-Expose-callable-API-for-server.patch")
  6. $script:cmakeDefs = @("-DBUILD_SHARED_LIBS=on", "-DLLAMA_NATIVE=off", "-DLLAMA_F16C=off", "-DLLAMA_FMA=off", "-DLLAMA_AVX512=off", "-DLLAMA_AVX2=off", "-DLLAMA_AVX=on", "-A","x64")
  7. $script:cmakeTargets = @("ggml", "ggml_static", "llama", "build_info", "common", "ext_server_shared", "llava_static")
  8. if ($env:CGO_CFLAGS -contains "-g") {
  9. $script:cmakeDefs += @("-DCMAKE_VERBOSE_MAKEFILE=on", "-DLLAMA_SERVER_VERBOSE=on")
  10. $script:config = "RelWithDebInfo"
  11. } else {
  12. $script:cmakeDefs += @("-DLLAMA_SERVER_VERBOSE=off")
  13. $script:config = "Release"
  14. }
  15. }
  16. function git_module_setup {
  17. # TODO add flags to skip the init/patch logic to make it easier to mod llama.cpp code in-repo
  18. & git submodule init
  19. if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE)}
  20. & git submodule update --force "${script:llamacppDir}"
  21. if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE)}
  22. }
  23. function apply_patches {
  24. # Wire up our CMakefile
  25. if (!(Select-String -Path "${script:llamacppDir}/examples/server/CMakeLists.txt" -Pattern 'ollama')) {
  26. Add-Content -Path "${script:llamacppDir}/examples/server/CMakeLists.txt" -Value 'include (../../../ext_server/CMakeLists.txt) # ollama'
  27. }
  28. # Avoid duplicate main symbols when we link into the cgo binary
  29. $content = Get-Content -Path "${script:llamacppDir}/examples/server/server.cpp"
  30. $content = $content -replace 'int main\(', 'int __main('
  31. Set-Content -Path "${script:llamacppDir}/examples/server/server.cpp" -Value $content
  32. }
  33. function build {
  34. write-host "generating config with: cmake -S ${script:llamacppDir} -B $script:buildDir $script:cmakeDefs"
  35. & cmake --version
  36. & cmake -S "${script:llamacppDir}" -B $script:buildDir $script:cmakeDefs
  37. if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE)}
  38. write-host "building with: cmake --build $script:buildDir --config $script:config ($script:cmakeTargets | ForEach-Object { "--target", $_ })"
  39. & cmake --build $script:buildDir --config $script:config ($script:cmakeTargets | ForEach-Object { "--target", $_ })
  40. if ($LASTEXITCODE -ne 0) { exit($LASTEXITCODE)}
  41. }
  42. function install {
  43. rm -ea 0 -recurse -force -path "${script:buildDir}/lib"
  44. md "${script:buildDir}/lib" -ea 0 > $null
  45. cp "${script:buildDir}/bin/${script:config}/ext_server_shared.dll" "${script:buildDir}/lib"
  46. cp "${script:buildDir}/bin/${script:config}/llama.dll" "${script:buildDir}/lib"
  47. # Display the dll dependencies in the build log
  48. dumpbin /dependents "${script:buildDir}/bin/${script:config}/ext_server_shared.dll" | select-string ".dll"
  49. }
  50. function cleanup {
  51. Set-Location "${script:llamacppDir}/examples/server"
  52. git checkout CMakeLists.txt server.cpp
  53. }
# --- main driver ---------------------------------------------------------
# Build order matters: vars must be (re)initialized before each backend pass
# because the CUDA pass appends to $script:cmakeDefs.
  54. init_vars
  55. git_module_setup
  56. apply_patches
  57. # first build CPU based
  58. $script:buildDir="${script:llamacppDir}/build/windows/cpu"
  59. build
  60. install
  61. # Then build cuda as a dynamically loaded library
# Re-run init_vars to reset cmakeDefs to the CPU baseline before adding CUDA.
  62. init_vars
  63. $script:buildDir="${script:llamacppDir}/build/windows/cuda"
  64. $script:cmakeDefs += @("-DLLAMA_CUBLAS=ON")
  65. build
  66. install
  67. # TODO - actually implement ROCm support on windows
# ROCm stub: create an empty lib dir with a .generated marker so downstream
# tooling that expects the directory layout does not fail.
  68. $script:buildDir="${script:llamacppDir}/build/windows/rocm"
  69. rm -ea 0 -recurse -force -path "${script:buildDir}/lib"
  70. md "${script:buildDir}/lib" -ea 0 > $null
  71. echo $null >> "${script:buildDir}/lib/.generated"
  72. cleanup
  73. write-host "`ngo generate completed"