CMakeLists.txt

# Ollama specific CMakefile to include in llama.cpp/examples/server
set(TARGET ext_server)
option(LLAMA_SERVER_VERBOSE "Build verbose logging option for Server" ON)

# Build a shared library on Windows and a static library elsewhere.
if (WIN32)
    add_library(${TARGET} SHARED ../../../ext_server/ext_server.cpp ../../llama.cpp)
else()
    add_library(${TARGET} STATIC ../../../ext_server/ext_server.cpp ../../llama.cpp)
endif()

target_include_directories(${TARGET} PRIVATE ../../common)
target_include_directories(${TARGET} PRIVATE ../..)
target_include_directories(${TARGET} PRIVATE ../../..)
target_compile_features(${TARGET} PRIVATE cxx_std_11)
target_compile_definitions(${TARGET} PUBLIC LLAMA_SERVER_LIBRARY=1)
target_link_libraries(${TARGET} PRIVATE ggml llava common)
set_target_properties(${TARGET} PROPERTIES POSITION_INDEPENDENT_CODE ON)
target_compile_definitions(${TARGET} PRIVATE SERVER_VERBOSE=$<BOOL:${LLAMA_SERVER_VERBOSE}>)
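
# Note: the $<BOOL:...> generator expression collapses the option to a
# literal 1 or 0 at generate time, so the long-hand equivalent of the
# line above would be:
#
#   if (LLAMA_SERVER_VERBOSE)
#       target_compile_definitions(${TARGET} PRIVATE SERVER_VERBOSE=1)
#   else()
#       target_compile_definitions(${TARGET} PRIVATE SERVER_VERBOSE=0)
#   endif()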
install(TARGETS ext_server LIBRARY)

# When the CUDA toolkit is available, add its include directories;
# on Windows also link NVML (the NVIDIA Management Library).
if (CUDAToolkit_FOUND)
    target_include_directories(${TARGET} PRIVATE ${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES})
    if (WIN32)
        target_link_libraries(${TARGET} PRIVATE nvml)
    endif()
endif()
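
# Usage sketch: the header comment says this file is meant to be included
# from llama.cpp/examples/server. One way a parent CMakeLists.txt might
# pull it in (the include() call and the destination filename here are
# assumptions; the actual build scripts may wire it up differently):
#
#   include(${CMAKE_CURRENT_SOURCE_DIR}/CMakeLists.ollama.txt)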