about summary refs log tree commit diff
path: root/vcpkg/ports/llama-cpp
diff options
context:
space:
mode:
Diffstat (limited to 'vcpkg/ports/llama-cpp')
-rw-r--r--  vcpkg/ports/llama-cpp/cmake-config.diff  33
-rw-r--r--  vcpkg/ports/llama-cpp/pkgconfig.diff     12
-rw-r--r--  vcpkg/ports/llama-cpp/portfile.cmake     67
-rw-r--r--  vcpkg/ports/llama-cpp/vcpkg.json         32
4 files changed, 144 insertions, 0 deletions
diff --git a/vcpkg/ports/llama-cpp/cmake-config.diff b/vcpkg/ports/llama-cpp/cmake-config.diff
new file mode 100644
index 0000000..a259a8e
--- /dev/null
+++ b/vcpkg/ports/llama-cpp/cmake-config.diff
@@ -0,0 +1,33 @@
+diff --git a/cmake/llama-config.cmake.in b/cmake/llama-config.cmake.in
+index 90cbec5..884938f 100644
+--- a/cmake/llama-config.cmake.in
++++ b/cmake/llama-config.cmake.in
+@@ -6,10 +6,10 @@ set(LLAMA_SHARED_LIB @BUILD_SHARED_LIBS@)
+ @PACKAGE_INIT@
+
+ set_and_check(LLAMA_INCLUDE_DIR "@PACKAGE_LLAMA_INCLUDE_INSTALL_DIR@")
+-set_and_check(LLAMA_LIB_DIR "@PACKAGE_LLAMA_LIB_INSTALL_DIR@")
+-set_and_check(LLAMA_BIN_DIR "@PACKAGE_LLAMA_BIN_INSTALL_DIR@")
++#set_and_check(LLAMA_LIB_DIR "@PACKAGE_LLAMA_LIB_INSTALL_DIR@")
++#set_and_check(LLAMA_BIN_DIR "@PACKAGE_LLAMA_BIN_INSTALL_DIR@")
+
+-find_package(ggml REQUIRED HINTS ${LLAMA_LIB_DIR}/cmake)
++find_package(ggml REQUIRED CONFIG)
+
+ find_library(llama_LIBRARY llama
+ REQUIRED
+@@ -17,6 +17,7 @@ find_library(llama_LIBRARY llama
+ NO_CMAKE_FIND_ROOT_PATH
+ )
+
++if(NOT TARGET llama)
+ add_library(llama UNKNOWN IMPORTED)
+ set_target_properties(llama
+ PROPERTIES
+@@ -26,5 +27,6 @@ set_target_properties(llama
+ IMPORTED_LOCATION "${llama_LIBRARY}"
+ INTERFACE_COMPILE_FEATURES c_std_90
+ POSITION_INDEPENDENT_CODE ON)
++endif()
+
+ check_required_components(Llama)
diff --git a/vcpkg/ports/llama-cpp/pkgconfig.diff b/vcpkg/ports/llama-cpp/pkgconfig.diff
new file mode 100644
index 0000000..a1828bb
--- /dev/null
+++ b/vcpkg/ports/llama-cpp/pkgconfig.diff
@@ -0,0 +1,12 @@
+diff --git a/cmake/llama.pc.in b/cmake/llama.pc.in
+index 6fb58b5..8a283e7 100644
+--- a/cmake/llama.pc.in
++++ b/cmake/llama.pc.in
+@@ -6,5 +6,6 @@ includedir=@CMAKE_INSTALL_FULL_INCLUDEDIR@
+ Name: llama
+ Description: Port of Facebook's LLaMA model in C/C++
+ Version: @LLAMA_INSTALL_VERSION@
+-Libs: -L${libdir} -lggml -lggml-base -lllama
++Requires: ggml
++Libs: -L${libdir} -lllama
+ Cflags: -I${includedir}
diff --git a/vcpkg/ports/llama-cpp/portfile.cmake b/vcpkg/ports/llama-cpp/portfile.cmake
new file mode 100644
index 0000000..c91d8f4
--- /dev/null
+++ b/vcpkg/ports/llama-cpp/portfile.cmake
@@ -0,0 +1,67 @@
+vcpkg_from_github(
+ OUT_SOURCE_PATH SOURCE_PATH
+ REPO ggml-org/llama.cpp
+ REF b${VERSION}
+ SHA512 c823aa1739a84b6fd50255a2c2c92e9da1cec55c62791886424b4ac126759bf9b63710e3c366fac6a004dbed0175b77756acef85ad495792142e671381b2026a
+ HEAD_REF master
+ PATCHES
+ cmake-config.diff
+ pkgconfig.diff
+)
+file(REMOVE_RECURSE "${SOURCE_PATH}/ggml/include" "${SOURCE_PATH}/ggml/src")
+
+vcpkg_check_features(OUT_FEATURE_OPTIONS options
+ FEATURES
+ download LLAMA_CURL
+ tools LLAMA_BUILD_TOOLS
+)
+
+vcpkg_cmake_configure(
+ SOURCE_PATH "${SOURCE_PATH}"
+ OPTIONS
+ ${options}
+ -DGGML_CCACHE=OFF
+ -DLLAMA_ALL_WARNINGS=OFF
+ -DLLAMA_BUILD_TESTS=OFF
+ -DLLAMA_BUILD_EXAMPLES=OFF
+ -DLLAMA_BUILD_SERVER=OFF
+ -DLLAMA_USE_SYSTEM_GGML=ON
+ -DVCPKG_LOCK_FIND_PACKAGE_Git=OFF
+)
+
+vcpkg_cmake_install()
+vcpkg_cmake_config_fixup(CONFIG_PATH "lib/cmake/llama")
+vcpkg_copy_pdbs()
+vcpkg_fixup_pkgconfig()
+
+file(INSTALL "${SOURCE_PATH}/gguf-py/gguf" DESTINATION "${CURRENT_PACKAGES_DIR}/tools/${PORT}/gguf-py")
+file(RENAME "${CURRENT_PACKAGES_DIR}/bin/convert_hf_to_gguf.py" "${CURRENT_PACKAGES_DIR}/tools/${PORT}/convert-hf-to-gguf.py")
+file(REMOVE "${CURRENT_PACKAGES_DIR}/debug/bin/convert_hf_to_gguf.py")
+
+if("tools" IN_LIST FEATURES)
+ vcpkg_copy_tools(
+ TOOL_NAMES
+ llama-batched-bench
+ llama-bench
+ llama-cli
+ llama-cvector-generator
+ llama-export-lora
+ llama-gguf-split
+ llama-imatrix
+ llama-mtmd-cli
+ llama-perplexity
+ llama-quantize
+ llama-run
+ llama-tokenize
+ llama-tts
+ AUTO_CLEAN
+ )
+endif()
+
+file(REMOVE_RECURSE "${CURRENT_PACKAGES_DIR}/debug/include")
+file(REMOVE_RECURSE "${CURRENT_PACKAGES_DIR}/debug/share")
+vcpkg_clean_executables_in_bin(FILE_NAMES none)
+
+set(gguf-py-license "${CURRENT_BUILDTREES_DIR}/${TARGET_TRIPLET}-rel/gguf-py LICENSE")
+file(COPY_FILE "${SOURCE_PATH}/gguf-py/LICENSE" "${gguf-py-license}")
+vcpkg_install_copyright(FILE_LIST "${SOURCE_PATH}/LICENSE" "${gguf-py-license}")
diff --git a/vcpkg/ports/llama-cpp/vcpkg.json b/vcpkg/ports/llama-cpp/vcpkg.json
new file mode 100644
index 0000000..b587e28
--- /dev/null
+++ b/vcpkg/ports/llama-cpp/vcpkg.json
@@ -0,0 +1,32 @@
+{
+ "name": "llama-cpp",
+ "version": "6550",
+ "description": "LLM inference in C/C++",
+ "homepage": "https://github.com/ggml-org/llama.cpp",
+ "license": "MIT",
+ "dependencies": [
+ "ggml",
+ {
+ "name": "vcpkg-cmake",
+ "host": true
+ },
+ {
+ "name": "vcpkg-cmake-config",
+ "host": true
+ }
+ ],
+ "features": {
+ "download": {
+ "description": "Support downloading a model from an URL",
+ "dependencies": [
+ {
+ "name": "curl",
+ "default-features": false
+ }
+ ]
+ },
+ "tools": {
+ "description": "Build tools"
+ }
+ }
+}