| Field | Value | Date |
|---|---|---|
| author | Ethan Morgan <ethan@gweithio.com> | 2026-02-14 16:44:06 +0000 |
| committer | Ethan Morgan <ethan@gweithio.com> | 2026-02-14 16:44:06 +0000 |
| commit | 54409423f767d8b1cf30cb7d0efca6b4ca138823 (patch) | |
| tree | d915ac7828703ce4b963efdd9728a1777ba18c1e /vcpkg/ports/openvino/vcpkg.json | |
Diffstat (limited to 'vcpkg/ports/openvino/vcpkg.json')
| Mode | File | Lines |
|---|---|---|
| -rw-r--r-- | vcpkg/ports/openvino/vcpkg.json | 181 |
1 file changed, 181 insertions, 0 deletions
diff --git a/vcpkg/ports/openvino/vcpkg.json b/vcpkg/ports/openvino/vcpkg.json
new file mode 100644
index 0000000..8abed2c
--- /dev/null
+++ b/vcpkg/ports/openvino/vcpkg.json
@@ -0,0 +1,181 @@
+{
+  "$schema": "https://raw.githubusercontent.com/microsoft/vcpkg-tool/main/docs/vcpkg.schema.json",
+  "name": "openvino",
+  "version": "2025.3.0",
+  "maintainers": "OpenVINO Developers <openvino@intel.com>",
+  "summary": "This is a port for Open Visual Inference And Optimization toolkit for AI inference",
+  "description": [
+    "Intel® Distribution of OpenVINO™ toolkit is an open-source toolkit for optimizing ",
+    "and deploying AI inference. It can be used to develop applications and solutions based ",
+    "on deep learning tasks, such as: emulation of human vision, automatic speech recognition, ",
+    "natural language processing, recommendation systems, etc. It provides high-performance ",
+    "and rich deployment options, from edge to cloud"
+  ],
+  "homepage": "https://github.com/openvinotoolkit/openvino",
+  "documentation": "https://docs.openvino.ai/latest/index.html",
+  "license": "Apache-2.0",
+  "supports": "!uwp & !x86 & !(android & arm32)",
+  "dependencies": [
+    "nlohmann-json",
+    "pugixml",
+    {
+      "name": "tbb",
+      "version>=": "2021.10.0#2"
+    },
+    {
+      "name": "vcpkg-cmake",
+      "host": true
+    },
+    {
+      "name": "vcpkg-cmake-config",
+      "host": true
+    },
+    {
+      "name": "vcpkg-get-python-packages",
+      "host": true
+    },
+    {
+      "name": "xbyak",
+      "platform": "!(arm | uwp)",
+      "version>=": "6.73"
+    }
+  ],
+  "default-features": [
+    "auto",
+    "auto-batch",
+    {
+      "name": "cpu",
+      "platform": "!(windows & arm)"
+    },
+    {
+      "name": "gpu",
+      "platform": "x64 & !(osx | uwp)"
+    },
+    "hetero",
+    "ir",
+    {
+      "name": "npu",
+      "platform": "x64 & !(osx | uwp)"
+    },
+    "onnx",
+    "paddle",
+    "pytorch",
+    "tensorflow",
+    "tensorflow-lite"
+  ],
+  "features": {
+    "auto": {
+      "description": "Enables Auto plugin for inference"
+    },
+    "auto-batch": {
+      "description": "Enables Auto Batch plugin for inference, useful for throughput mode"
+    },
+    "cpu": {
+      "description": "Enables CPU plugin for inference",
+      "supports": "!(windows & arm)"
+    },
+    "gpu": {
+      "description": "Enables GPU plugin for inference",
+      "supports": "x64 & !(osx | uwp)",
+      "dependencies": [
+        "opencl",
+        "rapidjson"
+      ]
+    },
+    "hetero": {
+      "description": "Enables Hetero plugin for inference"
+    },
+    "ir": {
+      "description": "Enables IR frontend for reading models in OpenVINO IR format"
+    },
+    "npu": {
+      "description": "Enables NPU plugin for inference",
+      "supports": "x64 & !(osx | uwp)",
+      "dependencies": [
+        "level-zero"
+      ]
+    },
+    "onnx": {
+      "description": "Enables ONNX frontend for reading models in ONNX format",
+      "dependencies": [
+        {
+          "name": "onnx",
+          "version>=": "1.16.2"
+        },
+        {
+          "name": "protobuf",
+          "version>=": "3.21.2"
+        },
+        {
+          "name": "protobuf",
+          "host": true,
+          "version>=": "3.21.2"
+        }
+      ]
+    },
+    "paddle": {
+      "description": "Enables PaddlePaddle frontend for reading models in PaddlePaddle format",
+      "dependencies": [
+        {
+          "name": "protobuf",
+          "version>=": "3.21.2"
+        },
+        {
+          "name": "protobuf",
+          "host": true,
+          "version>=": "3.21.2"
+        }
+      ]
+    },
+    "pytorch": {
+      "description": "Enables PyTorch frontend to convert models in PyTorch format"
+    },
+    "tensorflow": {
+      "description": "Enables TensorFlow frontend for reading models in TensorFlow format",
+      "dependencies": [
+        {
+          "$comment": "to workaround a linking issue for 2025.2.0, 2025.3.0 openvino[core,tensorflow]",
+          "name": "openvino",
+          "default-features": false,
+          "features": [
+            "pytorch"
+          ],
+          "platform": "linux & static"
+        },
+        {
+          "name": "protobuf",
+          "version>=": "3.21.2"
+        },
+        {
+          "name": "protobuf",
+          "host": true,
+          "version>=": "3.21.2"
+        },
+        "snappy"
+      ]
+    },
+    "tensorflow-lite": {
+      "description": "Enables TensorFlow Lite frontend for reading models in TensorFlow Lite format",
+      "dependencies": [
+        {
+          "name": "flatbuffers",
+          "version>=": "2.0.6"
+        },
+        {
+          "name": "flatbuffers",
+          "host": true,
+          "version>=": "2.0.6"
+        },
+        {
+          "$comment": "to workaround a linking issue for 2025.2.0, 2025.3.0 openvino[core,tensorflow]",
+          "name": "openvino",
+          "default-features": false,
+          "features": [
+            "pytorch"
+          ],
+          "platform": "linux & static"
+        }
+      ]
+    }
+  }
+}