CMakeLists.txt

# Ollama specific CMakefile to include in llama.cpp/examples/server
set(TARGET ext_server)
option(LLAMA_SERVER_VERBOSE "Build verbose logging option for Server" ON)
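# Build the server core as a static library rather than a standalone
# executable; the include paths below point back into the llama.cpp tree.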
add_library(${TARGET} STATIC ../../../ext_server/ext_server.cpp)
target_include_directories(${TARGET} PRIVATE ../../common)
target_include_directories(${TARGET} PRIVATE ../..)
target_include_directories(${TARGET} PRIVATE ../../..)
target_compile_features(${TARGET} PRIVATE cxx_std_11)
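# LLAMA_SERVER_LIBRARY is exported PUBLIC so dependents see the same
# library-mode macro; the server sources presumably use it to compile
# without a standalone main().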
target_compile_definitions(${TARGET} PUBLIC LLAMA_SERVER_LIBRARY=1)
target_link_libraries(${TARGET} PRIVATE common llama llava ${CMAKE_THREAD_LIBS_INIT})
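# SERVER_VERBOSE resolves to 1 or 0 at generate time based on the
# LLAMA_SERVER_VERBOSE option above.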
target_compile_definitions(${TARGET} PRIVATE
    SERVER_VERBOSE=$<BOOL:${LLAMA_SERVER_VERBOSE}>
)
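# For shared builds, compile the static objects as position-independent code
# and wrap them in a shared library that can be loaded at runtime.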
if (BUILD_SHARED_LIBS)
    set_target_properties(ext_server PROPERTIES POSITION_INDEPENDENT_CODE ON)
    target_compile_definitions(ext_server PRIVATE LLAMA_SHARED LLAMA_BUILD)
    add_library(ext_server_shared SHARED $<TARGET_OBJECTS:ext_server>)
    target_link_libraries(ext_server_shared PRIVATE ggml llama llava common ${CMAKE_THREAD_LIBS_INIT})
    install(TARGETS ext_server_shared LIBRARY)
endif()
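# CUDA builds pull in the toolkit headers; on Windows, NVML (the NVIDIA
# Management Library) is linked into the shared library for GPU queries.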
if (CUDAToolkit_FOUND)
    target_include_directories(${TARGET} PRIVATE ${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES})
    # ext_server_shared only exists when BUILD_SHARED_LIBS is ON, so guard
    # the NVML link against a missing target.
    if (WIN32 AND TARGET ext_server_shared)
        target_link_libraries(ext_server_shared PRIVATE nvml)
    endif()
endif()