{ "name": "llama-cpp", "version": "6550", "description": "LLM inference in C/C++", "homepage": "https://github.com/ggml-org/llama.cpp", "license": "MIT", "dependencies": [ "ggml", { "name": "vcpkg-cmake", "host": true }, { "name": "vcpkg-cmake-config", "host": true } ], "features": { "download": { "description": "Support downloading a model from a URL", "dependencies": [ { "name": "curl", "default-features": false } ] }, "tools": { "description": "Build tools" } } }