about summary refs log tree commit diff
path: root/vcpkg/ports/openvino
diff options
context:
space:
mode:
Diffstat (limited to 'vcpkg/ports/openvino')
-rw-r--r--  vcpkg/ports/openvino/onednn_gpu_includes.patch   38
-rw-r--r--  vcpkg/ports/openvino/portfile.cmake             165
-rw-r--r--  vcpkg/ports/openvino/usage                        4
-rw-r--r--  vcpkg/ports/openvino/vcpkg.json                 181
4 files changed, 388 insertions, 0 deletions
diff --git a/vcpkg/ports/openvino/onednn_gpu_includes.patch b/vcpkg/ports/openvino/onednn_gpu_includes.patch
new file mode 100644
index 0000000..0ce5957
--- /dev/null
+++ b/vcpkg/ports/openvino/onednn_gpu_includes.patch
@@ -0,0 +1,38 @@
+diff --git a/src/plugins/intel_gpu/src/graph/CMakeLists.txt b/src/plugins/intel_gpu/src/graph/CMakeLists.txt
+index b3ee51e242..31477ec128 100644
+--- a/src/plugins/intel_gpu/src/graph/CMakeLists.txt
++++ b/src/plugins/intel_gpu/src/graph/CMakeLists.txt
+@@ -82,7 +82,7 @@ macro(ov_gpu_add_backend_target)
+
+ # We use onednn headers all over the graph module, so we have to append includes to all backends and add a dependency between targets
+ if (ENABLE_ONEDNN_FOR_GPU)
+- target_include_directories(${ARG_NAME} SYSTEM BEFORE PRIVATE $<TARGET_PROPERTY:onednn_gpu_tgt,INTERFACE_INCLUDE_DIRECTORIES>)
++ target_include_directories(${ARG_NAME} BEFORE PRIVATE $<TARGET_PROPERTY:onednn_gpu_tgt,INTERFACE_INCLUDE_DIRECTORIES>)
+ add_dependencies(openvino_intel_gpu_${IMPL_TYPE}_obj onednn_gpu_tgt)
+ endif()
+ endmacro()
+diff --git a/src/plugins/intel_gpu/src/runtime/CMakeLists.txt b/src/plugins/intel_gpu/src/runtime/CMakeLists.txt
+index 85dfec05c4..3f72a41949 100644
+--- a/src/plugins/intel_gpu/src/runtime/CMakeLists.txt
++++ b/src/plugins/intel_gpu/src/runtime/CMakeLists.txt
+@@ -56,7 +56,7 @@ if(OV_COMPILER_IS_INTEL_LLVM)
+ endif()
+
+ if(ENABLE_ONEDNN_FOR_GPU)
+- ov_target_link_libraries_as_system(${TARGET_NAME} PUBLIC onednn_gpu_tgt)
++ target_link_libraries(${TARGET_NAME} PUBLIC onednn_gpu_tgt)
+ endif()
+
+ ov_set_threading_interface_for(${TARGET_NAME})
+diff --git a/src/plugins/intel_gpu/thirdparty/CMakeLists.txt b/src/plugins/intel_gpu/thirdparty/CMakeLists.txt
+index e47b2b1ffd..e03c95fdeb 100644
+--- a/src/plugins/intel_gpu/thirdparty/CMakeLists.txt
++++ b/src/plugins/intel_gpu/thirdparty/CMakeLists.txt
+@@ -173,7 +173,6 @@ if(ENABLE_ONEDNN_FOR_GPU)
+ set_target_properties(onednn_gpu_tgt PROPERTIES
+ INTERFACE_LINK_LIBRARIES $<BUILD_INTERFACE:${ONEDNN_GPU_LIB_PATH}>
+ INTERFACE_INCLUDE_DIRECTORIES "$<BUILD_INTERFACE:${LIB_INCLUDE_DIRS}>"
+- INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${LIB_INCLUDE_DIRS}"
+ INTERFACE_COMPILE_DEFINITIONS "${LIB_DEFINITIONS}"
+ )
+ add_dependencies(onednn_gpu_tgt onednn_gpu_build)
diff --git a/vcpkg/ports/openvino/portfile.cmake b/vcpkg/ports/openvino/portfile.cmake
new file mode 100644
index 0000000..2297e5d
--- /dev/null
+++ b/vcpkg/ports/openvino/portfile.cmake
@@ -0,0 +1,165 @@
+vcpkg_from_github(
+ OUT_SOURCE_PATH SOURCE_PATH
+ REPO openvinotoolkit/openvino
+ REF "${VERSION}"
+ SHA512 97241f147c4e74054b787b1bb694ab1051df661ae049e75d0ffa54cec71325155e2a54ded777f09709f7e6a7ff740c7e69d2f3bd73320f1711330d07e427922b
+ HEAD_REF master
+ PATCHES
+ onednn_gpu_includes.patch
+)
+
+vcpkg_check_features(OUT_FEATURE_OPTIONS FEATURE_OPTIONS
+ FEATURES
+ cpu ENABLE_INTEL_CPU
+ gpu ENABLE_INTEL_GPU
+ npu ENABLE_INTEL_NPU
+ auto ENABLE_AUTO
+ hetero ENABLE_HETERO
+ auto-batch ENABLE_AUTO_BATCH
+ ir ENABLE_OV_IR_FRONTEND
+ onnx ENABLE_OV_ONNX_FRONTEND
+ paddle ENABLE_OV_PADDLE_FRONTEND
+ pytorch ENABLE_OV_PYTORCH_FRONTEND
+ tensorflow ENABLE_OV_TF_FRONTEND
+ tensorflow-lite ENABLE_OV_TF_LITE_FRONTEND
+)
+
+if(ENABLE_INTEL_GPU)
+ # python is required for conversion of OpenCL source files into .cpp.
+ vcpkg_find_acquire_program(PYTHON3)
+
+ # remove 'rapidjson' directory and use vcpkg's one to comply with ODR
+ file(REMOVE_RECURSE "${SOURCE_PATH}/src/plugins/intel_gpu/thirdparty/rapidjson")
+
+ vcpkg_from_github(
+ OUT_SOURCE_PATH DEP_SOURCE_PATH
+ REPO oneapi-src/oneDNN
+ REF 8edf6bb60fdaa2769f3cebf44bd2ee94da00744c
+ SHA512 4e413c93306d81b0c4823789990f9a99ce96cd7a523db9a347e399db0cdf0af3eb4aef449bc5821fe9023ae22655677b2be7897b9c4bfb974d11aab17b017d20
+ )
+ file(COPY "${DEP_SOURCE_PATH}/" DESTINATION "${SOURCE_PATH}/src/plugins/intel_gpu/thirdparty/onednn_gpu")
+
+ list(APPEND FEATURE_OPTIONS
+ "-DENABLE_SYSTEM_OPENCL=ON"
+ "-DPython3_EXECUTABLE=${PYTHON3}")
+endif()
+
+if(ENABLE_INTEL_CPU)
+ vcpkg_from_github(
+ OUT_SOURCE_PATH DEP_SOURCE_PATH
+ REPO openvinotoolkit/oneDNN
+ REF 3d7a6f1d068d8ae08f189aa4baa93d177bc07507
+ SHA512 f5de57f7a8972b4c3aefe359beeb51bd9a5cde6bbc6316891f73148555ef9f299145449faf7d761a8e801fb8ddee68b4455d2b9486067915fe7f445f6b099d6c
+ )
+ file(COPY "${DEP_SOURCE_PATH}/" DESTINATION "${SOURCE_PATH}/src/plugins/intel_cpu/thirdparty/onednn")
+
+ vcpkg_from_github(
+ OUT_SOURCE_PATH DEP_SOURCE_PATH
+ REPO openvinotoolkit/mlas
+ REF d1bc25ec4660cddd87804fcf03b2411b5dfb2e94
+ SHA512 8d6dd319924135b7b22940d623305bf200b812ae64cde79000709de4fad429fbd43794301ef16e6f10ed7132777b7a73e9f30ecae7c030aea80d57d7c0ce4500
+ )
+ file(COPY "${DEP_SOURCE_PATH}/" DESTINATION "${SOURCE_PATH}/src/plugins/intel_cpu/thirdparty/mlas")
+
+ if(VCPKG_TARGET_ARCHITECTURE MATCHES "arm")
+ # scons (python tool) is required for ARM Compute Library building
+ vcpkg_find_acquire_program(PYTHON3)
+
+ x_vcpkg_get_python_packages(
+ PYTHON_VERSION 3
+ PYTHON_EXECUTABLE ${PYTHON3}
+ PACKAGES scons
+ OUT_PYTHON_VAR OV_PYTHON_WITH_SCONS
+ )
+
+ list(APPEND FEATURE_OPTIONS "-DPython3_EXECUTABLE=${OV_PYTHON_WITH_SCONS}")
+
+ vcpkg_from_github(
+ OUT_SOURCE_PATH DEP_SOURCE_PATH
+ REPO ARM-software/ComputeLibrary
+ REF v25.03
+ SHA512 a7c9f8138631aabe24cfe68021d3cdaf6716b69dbcf183694217ca87720efd399f5d809f9fd4522a435a6a991855bcf40d5c6fa6189d77ee8ca5caa1f9ade95c
+ )
+ file(COPY "${DEP_SOURCE_PATH}/" DESTINATION "${SOURCE_PATH}/src/plugins/intel_cpu/thirdparty/ComputeLibrary")
+
+ vcpkg_from_github(
+ OUT_SOURCE_PATH DEP_SOURCE_PATH
+ REPO ARM-software/kleidiai
+ REF eaf63a6ae9a903fb4fa8a4d004a974995011f444
+ SHA512 2eed2183927037ab3841daeae2a0df3dfaa680ae4dea5db98247d6d7dd3f897d5109929098eb1b08e3a0797ddc03013acdb449642435df12a11cffbe4f5d2674
+ )
+ file(COPY "${DEP_SOURCE_PATH}/" DESTINATION "${SOURCE_PATH}/src/plugins/intel_cpu/thirdparty/kleidiai")
+ endif()
+endif()
+
+if(ENABLE_INTEL_NPU)
+ list(APPEND FEATURE_OPTIONS
+ "-DENABLE_INTEL_NPU_INTERNAL=OFF"
+ "-DENABLE_SYSTEM_LEVEL_ZERO=ON")
+
+ vcpkg_from_github(
+ OUT_SOURCE_PATH DEP_SOURCE_PATH
+ REPO intel/level-zero-npu-extensions
+ REF f8bba8915e0a5fe8317f7aa48007ecc5a8c179ca
+ SHA512 a93b907159c67fe76634869d71c5434756f32d6e6e81ae86cdc517499576dc5691221dde7821bbaf9d39bf3cd62066a032fd30be26e7662f2b78b046c0ddd2f6
+ )
+ file(COPY "${DEP_SOURCE_PATH}/" DESTINATION "${SOURCE_PATH}/src/plugins/intel_npu/thirdparty/level-zero-ext")
+endif()
+
+if(ENABLE_OV_TF_FRONTEND OR ENABLE_OV_ONNX_FRONTEND OR ENABLE_OV_PADDLE_FRONTEND)
+ list(APPEND FEATURE_OPTIONS "-DENABLE_SYSTEM_PROTOBUF=ON")
+endif()
+
+if(ENABLE_OV_TF_FRONTEND)
+ list(APPEND FEATURE_OPTIONS "-DENABLE_SYSTEM_SNAPPY=ON")
+endif()
+
+if(ENABLE_OV_TF_LITE_FRONTEND)
+ list(APPEND FEATURE_OPTIONS "-DENABLE_SYSTEM_FLATBUFFERS=ON")
+endif()
+
+if(CMAKE_HOST_WIN32)
+ list(APPEND FEATURE_OPTIONS "-DENABLE_API_VALIDATOR=OFF")
+endif()
+
+vcpkg_find_acquire_program(PKGCONFIG)
+
+vcpkg_cmake_configure(
+ SOURCE_PATH "${SOURCE_PATH}"
+ OPTIONS
+ ${FEATURE_OPTIONS}
+ "-DCMAKE_DISABLE_FIND_PACKAGE_OpenCV=ON"
+ "-DCPACK_GENERATOR=VCPKG"
+ "-DENABLE_CLANG_FORMAT=OFF"
+ "-DENABLE_CPPLINT=OFF"
+ "-DENABLE_JS=OFF"
+ "-DENABLE_NCC_STYLE=OFF"
+ "-DENABLE_PYTHON=OFF"
+ "-DENABLE_SAMPLES=OFF"
+ "-DENABLE_SYSTEM_PUGIXML=ON"
+ "-DENABLE_SYSTEM_TBB=ON"
+ "-DENABLE_TBBBIND_2_5=OFF"
+ "-DENABLE_TEMPLATE=OFF"
+ "-DENABLE_OV_JAX_FRONTEND=OFF"
+ "-DPKG_CONFIG_EXECUTABLE=${PKGCONFIG}"
+)
+
+vcpkg_cmake_install()
+
+vcpkg_cmake_config_fixup()
+
+vcpkg_copy_pdbs()
+
+file(REMOVE_RECURSE "${CURRENT_PACKAGES_DIR}/debug/share")
+file(REMOVE_RECURSE "${CURRENT_PACKAGES_DIR}/debug/include")
+
+vcpkg_install_copyright(
+ FILE_LIST
+ "${SOURCE_PATH}/LICENSE"
+ "${SOURCE_PATH}/licensing/third-party-programs.txt"
+ "${SOURCE_PATH}/licensing/onednn_third-party-programs.txt"
+ "${SOURCE_PATH}/licensing/runtime-third-party-programs.txt"
+ COMMENT
+ "OpenVINO License")
+
+file(INSTALL "${CMAKE_CURRENT_LIST_DIR}/usage" DESTINATION "${CURRENT_PACKAGES_DIR}/share/${PORT}")
diff --git a/vcpkg/ports/openvino/usage b/vcpkg/ports/openvino/usage
new file mode 100644
index 0000000..87db5b1
--- /dev/null
+++ b/vcpkg/ports/openvino/usage
@@ -0,0 +1,4 @@
+openvino provides CMake targets:
+
+ find_package(OpenVINO REQUIRED)
+ target_link_libraries(main PRIVATE openvino::runtime)
diff --git a/vcpkg/ports/openvino/vcpkg.json b/vcpkg/ports/openvino/vcpkg.json
new file mode 100644
index 0000000..8abed2c
--- /dev/null
+++ b/vcpkg/ports/openvino/vcpkg.json
@@ -0,0 +1,181 @@
+{
+ "$schema": "https://raw.githubusercontent.com/microsoft/vcpkg-tool/main/docs/vcpkg.schema.json",
+ "name": "openvino",
+ "version": "2025.3.0",
+ "maintainers": "OpenVINO Developers <openvino@intel.com>",
+ "summary": "This is a port for Open Visual Inference And Optimization toolkit for AI inference",
+ "description": [
+ "Intel® Distribution of OpenVINO™ toolkit is an open-source toolkit for optimizing ",
+ "and deploying AI inference. It can be used to develop applications and solutions based ",
+ "on deep learning tasks, such as: emulation of human vision, automatic speech recognition, ",
+ "natural language processing, recommendation systems, etc. It provides high-performance ",
+ "and rich deployment options, from edge to cloud"
+ ],
+ "homepage": "https://github.com/openvinotoolkit/openvino",
+ "documentation": "https://docs.openvino.ai/latest/index.html",
+ "license": "Apache-2.0",
+ "supports": "!uwp & !x86 & !(android & arm32)",
+ "dependencies": [
+ "nlohmann-json",
+ "pugixml",
+ {
+ "name": "tbb",
+ "version>=": "2021.10.0#2"
+ },
+ {
+ "name": "vcpkg-cmake",
+ "host": true
+ },
+ {
+ "name": "vcpkg-cmake-config",
+ "host": true
+ },
+ {
+ "name": "vcpkg-get-python-packages",
+ "host": true
+ },
+ {
+ "name": "xbyak",
+ "platform": "!(arm | uwp)",
+ "version>=": "6.73"
+ }
+ ],
+ "default-features": [
+ "auto",
+ "auto-batch",
+ {
+ "name": "cpu",
+ "platform": "!(windows & arm)"
+ },
+ {
+ "name": "gpu",
+ "platform": "x64 & !(osx | uwp)"
+ },
+ "hetero",
+ "ir",
+ {
+ "name": "npu",
+ "platform": "x64 & !(osx | uwp)"
+ },
+ "onnx",
+ "paddle",
+ "pytorch",
+ "tensorflow",
+ "tensorflow-lite"
+ ],
+ "features": {
+ "auto": {
+ "description": "Enables Auto plugin for inference"
+ },
+ "auto-batch": {
+ "description": "Enables Auto Batch plugin for inference, useful for throughput mode"
+ },
+ "cpu": {
+ "description": "Enables CPU plugin for inference",
+ "supports": "!(windows & arm)"
+ },
+ "gpu": {
+ "description": "Enables GPU plugin for inference",
+ "supports": "x64 & !(osx | uwp)",
+ "dependencies": [
+ "opencl",
+ "rapidjson"
+ ]
+ },
+ "hetero": {
+ "description": "Enables Hetero plugin for inference"
+ },
+ "ir": {
+ "description": "Enables IR frontend for reading models in OpenVINO IR format"
+ },
+ "npu": {
+ "description": "Enables NPU plugin for inference",
+ "supports": "x64 & !(osx | uwp)",
+ "dependencies": [
+ "level-zero"
+ ]
+ },
+ "onnx": {
+ "description": "Enables ONNX frontend for reading models in ONNX format",
+ "dependencies": [
+ {
+ "name": "onnx",
+ "version>=": "1.16.2"
+ },
+ {
+ "name": "protobuf",
+ "version>=": "3.21.2"
+ },
+ {
+ "name": "protobuf",
+ "host": true,
+ "version>=": "3.21.2"
+ }
+ ]
+ },
+ "paddle": {
+ "description": "Enables PaddlePaddle frontend for reading models in PaddlePaddle format",
+ "dependencies": [
+ {
+ "name": "protobuf",
+ "version>=": "3.21.2"
+ },
+ {
+ "name": "protobuf",
+ "host": true,
+ "version>=": "3.21.2"
+ }
+ ]
+ },
+ "pytorch": {
+ "description": "Enables PyTorch frontend to convert models in PyTorch format"
+ },
+ "tensorflow": {
+ "description": "Enables TensorFlow frontend for reading models in TensorFlow format",
+ "dependencies": [
+ {
+ "$comment": "to workaround a linking issue for 2025.2.0, 2025.3.0 openvino[core,tensorflow]",
+ "name": "openvino",
+ "default-features": false,
+ "features": [
+ "pytorch"
+ ],
+ "platform": "linux & static"
+ },
+ {
+ "name": "protobuf",
+ "version>=": "3.21.2"
+ },
+ {
+ "name": "protobuf",
+ "host": true,
+ "version>=": "3.21.2"
+ },
+ "snappy"
+ ]
+ },
+ "tensorflow-lite": {
+ "description": "Enables TensorFlow Lite frontend for reading models in TensorFlow Lite format",
+ "dependencies": [
+ {
+ "name": "flatbuffers",
+ "version>=": "2.0.6"
+ },
+ {
+ "name": "flatbuffers",
+ "host": true,
+ "version>=": "2.0.6"
+ },
+ {
+ "$comment": "to workaround a linking issue for 2025.2.0, 2025.3.0 openvino[core,tensorflow]",
+ "name": "openvino",
+ "default-features": false,
+ "features": [
+ "pytorch"
+ ],
+ "platform": "linux & static"
+ }
+ ]
+ }
+ }
+}