cmake_minimum_required(VERSION 3.12)

project(jllama CXX)

# Pinned llama.cpp release tag, consumed by the FetchContent_Declare() below.
# Bump this to upgrade the vendored llama.cpp.
set(LLAMA_CPP_TAG b1645)

# todo: Is there a better way to build the library than copy & pasting the build argument cmake definition of llama.cpp?
include(build-args.cmake)
@@ -35,6 +37,9 @@ if(NOT DEFINED OS_NAME)
3537 OUTPUT_STRIP_TRAILING_WHITESPACE
3638 )
3739endif ()
# Fail fast if OS detection produced an empty/false value — every install
# path below embeds ${OS_NAME}, so continuing would scatter files wrongly.
if(NOT OS_NAME)
    message(FATAL_ERROR "Could not determine OS name")
endif()
3843
3944# find which architecture we build for if not set (make sure to run mvn compile first)
4045if (NOT DEFINED OS_ARCH)
@@ -44,6 +49,21 @@ if(NOT DEFINED OS_ARCH)
4449 OUTPUT_STRIP_TRAILING_WHITESPACE
4550 )
4651endif ()
# Fail fast if architecture detection produced an empty/false value —
# ${OS_ARCH} is part of the resource install path below.
if(NOT OS_ARCH)
    message(FATAL_ERROR "Could not determine CPU architecture")
endif()
# Directory (inside the Java resources tree) where the built native
# libraries are placed so Maven can package them into the jar.
# Quoted: ${CMAKE_SOURCE_DIR} may contain spaces.
set(JLLAMA_DIR "${CMAKE_SOURCE_DIR}/src/main/resources/de/kherud/llama/${OS_NAME}/${OS_ARCH}")
message(STATUS "Installing files to ${JLLAMA_DIR}")

# Check out llama.cpp at the pinned tag instead of vendoring it as a
# git submodule / in-tree copy.
include(FetchContent)
FetchContent_Declare(
    llama.cpp
    GIT_REPOSITORY https://github.com/ggerganov/llama.cpp.git
    GIT_TAG ${LLAMA_CPP_TAG}
)
FetchContent_MakeAvailable(llama.cpp)
4767
# The JNI shared library that bridges Java and llama.cpp.
add_library(jllama SHARED src/main/cpp/jllama.cpp)
4969
@@ -53,27 +73,21 @@ target_include_directories(jllama PRIVATE
5373 src/main/cpp
5474)
5575
# llama.cpp targets (common, llama) now come from FetchContent; the old
# add_subdirectory(src/main/cpp/llama.cpp) is gone.
target_link_libraries(jllama PRIVATE common llama ${LLAMA_EXTRA_LIBS})

target_compile_features(jllama PRIVATE cxx_std_11)

# Place the built binaries directly into the Java resources tree so they
# get packaged into the jar. ${JLLAMA_DIR} stays quoted — the source path
# may contain spaces. NOTE(review): on Windows only the Release config
# output dir is redirected; other configs land in the default location —
# confirm that is intentional.
if(OS_NAME STREQUAL "Windows")
    set_target_properties(jllama llama PROPERTIES
        RUNTIME_OUTPUT_DIRECTORY_RELEASE "${JLLAMA_DIR}"
    )
else()
    set_target_properties(jllama llama PROPERTIES
        LIBRARY_OUTPUT_DIRECTORY "${JLLAMA_DIR}"
    )
endif()
7389
if(LLAMA_METAL)
    # Copy the Metal shader next to the libraries; llama.cpp loads
    # ggml-metal.metal at runtime. ${llama.cpp_SOURCE_DIR} is the
    # lowercase-name variable populated by FetchContent_MakeAvailable.
    configure_file("${llama.cpp_SOURCE_DIR}/ggml-metal.metal" "${JLLAMA_DIR}/ggml-metal.metal" COPYONLY)
endif()