author     Ethan Morgan <ethan@gweithio.com>    2026-02-14 16:44:06 +0000
committer  Ethan Morgan <ethan@gweithio.com>    2026-02-14 16:44:06 +0000
commit     54409423f767d8b1cf30cb7d0efca6b4ca138823 (HEAD, master)
tree       d915ac7828703ce4b963efdd9728a1777ba18c1e /vcpkg/ports/libtorch

    move to own git server

Diffstat (limited to 'vcpkg/ports/libtorch')
 -rw-r--r--  vcpkg/ports/libtorch/fix-cmake.patch              404
 -rw-r--r--  vcpkg/ports/libtorch/fix-dist-cuda.patch           22
 -rw-r--r--  vcpkg/ports/libtorch/fix-glog.patch                42
 -rw-r--r--  vcpkg/ports/libtorch/fix-osx.patch                 16
 -rw-r--r--  vcpkg/ports/libtorch/fix-pytorch-pr-156630.patch   29
 -rw-r--r--  vcpkg/ports/libtorch/fix-vulkan.patch              43
 -rw-r--r--  vcpkg/ports/libtorch/kineto.patch                  34
 -rw-r--r--  vcpkg/ports/libtorch/portfile.cmake               240
 -rw-r--r--  vcpkg/ports/libtorch/vcpkg.json                   173

9 files changed, 1003 insertions(+), 0 deletions(-)
diff --git a/vcpkg/ports/libtorch/fix-cmake.patch b/vcpkg/ports/libtorch/fix-cmake.patch
new file mode 100644
index 0000000..436efde
--- /dev/null
+++ b/vcpkg/ports/libtorch/fix-cmake.patch
@@ -0,0 +1,404 @@
+diff --git a/CMakeLists.txt b/CMakeLists.txt
+index 07edb30..1ca26e3 100644
+--- a/CMakeLists.txt
++++ b/CMakeLists.txt
+@@ -1247,8 +1247,7 @@ if(USE_MIMALLOC)
+ set(MI_BUILD_OBJECT OFF)
+ set(MI_BUILD_TESTS OFF)
+ add_definitions(-DUSE_MIMALLOC)
+- add_subdirectory(third_party/mimalloc)
+- include_directories(third_party/mimalloc/include)
++ find_package(mimalloc CONFIG REQUIRED)
+ endif()
+
+ if(USE_MIMALLOC AND USE_MIMALLOC_ON_MKL)
+@@ -1316,10 +1315,7 @@ if(BUILD_SHARED_LIBS)
+ DIRECTORY ${PROJECT_SOURCE_DIR}/cmake/Modules_CUDA_fix
+ DESTINATION share/cmake/Caffe2/
+ COMPONENT dev)
+- install(
+- FILES ${PROJECT_SOURCE_DIR}/cmake/Modules/FindCUDAToolkit.cmake
+- DESTINATION share/cmake/Caffe2/
+- COMPONENT dev)
++
+ install(
+ FILES ${PROJECT_SOURCE_DIR}/cmake/Modules/FindCUSPARSELT.cmake
+ DESTINATION share/cmake/Caffe2/
+diff --git a/aten/src/ATen/CMakeLists.txt b/aten/src/ATen/CMakeLists.txt
+index 085af37..1c61f27 100644
+--- a/aten/src/ATen/CMakeLists.txt
++++ b/aten/src/ATen/CMakeLists.txt
+@@ -515,7 +515,7 @@ if(NOT EMSCRIPTEN AND NOT INTERN_BUILD_MOBILE)
+ list(APPEND ATen_THIRD_PARTY_INCLUDE ${CMAKE_BINARY_DIR}/include)
+ link_directories(${CMAKE_BINARY_DIR}/sleef/lib)
+ else()
+- add_library(sleef SHARED IMPORTED)
++ add_library(sleef UNKNOWN IMPORTED)
+ find_library(SLEEF_LIBRARY sleef)
+ if(NOT SLEEF_LIBRARY)
+ message(FATAL_ERROR "Cannot find sleef")
+@@ -523,7 +523,7 @@ if(NOT EMSCRIPTEN AND NOT INTERN_BUILD_MOBILE)
+ message("Found sleef: ${SLEEF_LIBRARY}")
+ set_target_properties(sleef PROPERTIES IMPORTED_LOCATION "${SLEEF_LIBRARY}")
+ endif()
+- list(APPEND ATen_CPU_DEPENDENCY_LIBS sleef)
++ list(APPEND ATen_CPU_DEPENDENCY_LIBS ${SLEEF_LIBRARY})
+
+ if(NOT MSVC)
+ set(CMAKE_C_FLAGS_DEBUG ${OLD_CMAKE_C_FLAGS_DEBUG})
+@@ -534,8 +534,8 @@ if(USE_CUDA AND NOT USE_ROCM)
+ add_definitions(-DCUTLASS_ENABLE_TENSOR_CORE_MMA=1)
+ add_definitions(-DCUTLASS_ENABLE_SM90_EXTENDED_MMA_SHAPES=1)
+ add_definitions(-DCUTE_SM90_EXTENDED_MMA_SHAPES_ENABLED)
+- list(APPEND ATen_CUDA_INCLUDE ${CMAKE_CURRENT_SOURCE_DIR}/../../../third_party/cutlass/include)
+- list(APPEND ATen_CUDA_INCLUDE ${CMAKE_CURRENT_SOURCE_DIR}/../../../third_party/cutlass/tools/util/include)
++ find_package(NvidiaCutlass CONFIG REQUIRED)
++ list(APPEND ATen_CUDA_DEPENDENCY_LIBS nvidia::cutlass::cutlass)
+ if($ENV{ATEN_STATIC_CUDA})
+ list(APPEND ATen_CUDA_DEPENDENCY_LIBS
+ ${CUDA_LIBRARIES}
+diff --git a/c10/CMakeLists.txt b/c10/CMakeLists.txt
+index 34577ca..5462cfe 100644
+--- a/c10/CMakeLists.txt
++++ b/c10/CMakeLists.txt
+@@ -121,8 +121,7 @@ if(NOT BUILD_LIBTORCHLESS)
+ endif()
+
+ if(USE_MIMALLOC)
+- target_link_libraries(c10 PRIVATE "mimalloc-static")
+- add_dependencies(c10 mimalloc-static)
++ target_link_libraries(c10 PRIVATE "mimalloc")
+ endif()
+
+ if(LINUX)
+@@ -163,7 +162,7 @@ if(NOT BUILD_LIBTORCHLESS)
+ # Note: for now, we will put all export path into one single Caffe2Targets group
+ # to deal with the cmake deployment need. Inside the Caffe2Targets set, the
+ # individual libraries like libc10.so and libcaffe2.so are still self-contained.
+- install(TARGETS c10 EXPORT Caffe2Targets DESTINATION lib)
++ install(TARGETS c10 EXPORT Caffe2Targets RUNTIME DESTINATION bin LIBRARY DESTINATION lib ARCHIVE DESTINATION lib)
+ endif()
+
+ install(DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
+@@ -173,5 +172,5 @@ install(FILES ${CMAKE_BINARY_DIR}/c10/macros/cmake_macros.h
+ DESTINATION include/c10/macros)
+
+ if(MSVC AND C10_BUILD_SHARED_LIBS)
+- install(FILES $<TARGET_PDB_FILE:c10> DESTINATION lib OPTIONAL)
++ install(FILES $<TARGET_PDB_FILE:c10> DESTINATION bin OPTIONAL)
+ endif()
+diff --git a/c10/cuda/CMakeLists.txt b/c10/cuda/CMakeLists.txt
+index 3327dab..4a0476f 100644
+--- a/c10/cuda/CMakeLists.txt
++++ b/c10/cuda/CMakeLists.txt
+@@ -82,7 +82,7 @@ if(NOT BUILD_LIBTORCHLESS)
+ # Note: for now, we will put all export path into one single Caffe2Targets group
+ # to deal with the cmake deployment need. Inside the Caffe2Targets set, the
+ # individual libraries like libc10.so and libcaffe2.so are still self-contained.
+-install(TARGETS c10_cuda EXPORT Caffe2Targets DESTINATION lib)
++install(TARGETS c10_cuda EXPORT Caffe2Targets RUNTIME DESTINATION "${TORCH_INSTALL_BIN_DIR}" LIBRARY DESTINATION "${TORCH_INSTALL_LIB_DIR}" ARCHIVE DESTINATION "${TORCH_INSTALL_LIB_DIR}")
+
+ endif()
+
+diff --git a/caffe2/CMakeLists.txt b/caffe2/CMakeLists.txt
+index d2d23b7..51930dd 100644
+--- a/caffe2/CMakeLists.txt
++++ b/caffe2/CMakeLists.txt
+@@ -86,7 +86,7 @@ endif()
+ # ---[ Caffe2 build
+ # Note: the folders that are being commented out have not been properly
+ # addressed yet.
+-
++if(FALSE)
+ if(NOT MSVC AND USE_XNNPACK)
+ if(NOT TARGET fxdiv)
+ set(FXDIV_BUILD_TESTS OFF CACHE BOOL "")
+@@ -96,6 +96,7 @@ if(NOT MSVC AND USE_XNNPACK)
+ "${CMAKE_BINARY_DIR}/FXdiv")
+ endif()
+ endif()
++endif()
+
+ add_subdirectory(core)
+ add_subdirectory(serialize)
+@@ -557,7 +558,7 @@ if(USE_CUDA)
+ endif()
+
+ target_link_libraries(caffe2_nvrtc PRIVATE caffe2::nvrtc ${DELAY_LOAD_FLAGS})
+- install(TARGETS caffe2_nvrtc DESTINATION "${TORCH_INSTALL_LIB_DIR}")
++ install(TARGETS caffe2_nvrtc RUNTIME DESTINATION "${TORCH_INSTALL_BIN_DIR}" LIBRARY DESTINATION "${TORCH_INSTALL_LIB_DIR}" ARCHIVE DESTINATION "${TORCH_INSTALL_LIB_DIR}")
+ if(USE_NCCL)
+ list(APPEND Caffe2_GPU_SRCS
+ ${TORCH_SRC_DIR}/csrc/cuda/nccl.cpp)
+@@ -1134,7 +1135,7 @@ if(USE_XPU)
+ endif()
+ endif()
+
+-if(NOT MSVC AND USE_XNNPACK)
++if(FALSE)
+ TARGET_LINK_LIBRARIES(torch_cpu PRIVATE fxdiv)
+ endif()
+
+@@ -1251,11 +1252,11 @@ if(USE_KINETO)
+ ${TORCH_ROOT}/third_party/kineto/libkineto/src)
+ endif()
+
+-target_include_directories(torch_cpu PRIVATE
+- ${TORCH_ROOT}/third_party/cpp-httplib)
++target_link_libraries(torch_cpu PRIVATE httplib::httplib nlohmann_json::nlohmann_json)
++if(APPLE)
++ target_link_libraries(torch_cpu PUBLIC "-framework CoreFoundation" "-framework CFNetwork")
+
+-target_include_directories(torch_cpu PRIVATE
+- ${TORCH_ROOT}/third_party/nlohmann/include)
++endif()
+
+ install(DIRECTORY "${TORCH_SRC_DIR}/csrc"
+ DESTINATION ${TORCH_INSTALL_INCLUDE_DIR}/torch
+@@ -1536,17 +1537,17 @@ endif()
+
+ caffe2_interface_library(torch torch_library)
+
+-install(TARGETS torch_cpu torch_cpu_library EXPORT Caffe2Targets DESTINATION "${TORCH_INSTALL_LIB_DIR}")
++install(TARGETS torch_cpu torch_cpu_library EXPORT Caffe2Targets RUNTIME DESTINATION "${TORCH_INSTALL_BIN_DIR}" LIBRARY DESTINATION "${TORCH_INSTALL_LIB_DIR}" ARCHIVE DESTINATION "${TORCH_INSTALL_LIB_DIR}")
+
+ if(USE_CUDA)
+- install(TARGETS torch_cuda torch_cuda_library EXPORT Caffe2Targets DESTINATION "${TORCH_INSTALL_LIB_DIR}")
++ install(TARGETS torch_cuda torch_cuda_library EXPORT Caffe2Targets RUNTIME DESTINATION "${TORCH_INSTALL_BIN_DIR}" LIBRARY DESTINATION "${TORCH_INSTALL_LIB_DIR}" ARCHIVE DESTINATION "${TORCH_INSTALL_LIB_DIR}")
+ elseif(USE_ROCM)
+- install(TARGETS torch_hip torch_hip_library EXPORT Caffe2Targets DESTINATION "${TORCH_INSTALL_LIB_DIR}")
++ install(TARGETS torch_hip torch_hip_library EXPORT Caffe2Targets RUNTIME DESTINATION "${TORCH_INSTALL_BIN_DIR}" LIBRARY DESTINATION "${TORCH_INSTALL_LIB_DIR}" ARCHIVE DESTINATION "${TORCH_INSTALL_LIB_DIR}")
+ elseif(USE_XPU)
+- install(TARGETS torch_xpu torch_xpu_library EXPORT Caffe2Targets DESTINATION "${TORCH_INSTALL_LIB_DIR}")
++ install(TARGETS torch_xpu torch_xpu_library EXPORT Caffe2Targets RUNTIME DESTINATION "${TORCH_INSTALL_BIN_DIR}" LIBRARY DESTINATION "${TORCH_INSTALL_LIB_DIR}" ARCHIVE DESTINATION "${TORCH_INSTALL_LIB_DIR}")
+ endif()
+
+-install(TARGETS torch torch_library EXPORT Caffe2Targets DESTINATION "${TORCH_INSTALL_LIB_DIR}")
++install(TARGETS torch torch_library EXPORT Caffe2Targets RUNTIME DESTINATION "${TORCH_INSTALL_BIN_DIR}" LIBRARY DESTINATION "${TORCH_INSTALL_LIB_DIR}" ARCHIVE DESTINATION "${TORCH_INSTALL_LIB_DIR}")
+
+ target_link_libraries(torch PUBLIC torch_cpu_library)
+
+@@ -1685,7 +1686,7 @@ if(BUILD_SHARED_LIBS)
+ target_link_libraries(torch_global_deps torch::nvtoolsext)
+ endif()
+ endif()
+- install(TARGETS torch_global_deps DESTINATION "${TORCH_INSTALL_LIB_DIR}")
++ install(TARGETS torch_global_deps RUNTIME DESTINATION "${TORCH_INSTALL_BIN_DIR}" LIBRARY DESTINATION "${TORCH_INSTALL_LIB_DIR}" ARCHIVE DESTINATION "${TORCH_INSTALL_LIB_DIR}")
+ endif()
+
+ # ---[ Caffe2 HIP sources.
+diff --git a/cmake/Codegen.cmake b/cmake/Codegen.cmake
+index 724d993..f743939 100644
+--- a/cmake/Codegen.cmake
++++ b/cmake/Codegen.cmake
+@@ -36,11 +36,13 @@ endfunction()
+ ################################################################################
+
+ # -- [ Deterine commit hash
+-execute_process(
+- COMMAND "${Python_EXECUTABLE}" -c "from tools.generate_torch_version import get_sha;print(get_sha('.'), end='')"
+- OUTPUT_VARIABLE COMMIT_SHA
+- WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/..
+-)
++if(NOT DEFINED COMMIT_SHA)
++ execute_process(
++ COMMAND "${Python_EXECUTABLE}" -c "from tools.generate_torch_version import get_sha;print(get_sha('.'), end='')"
++ OUTPUT_VARIABLE COMMIT_SHA
++ WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/..
++ )
++endif()
+
+ # ---[ Write the macros file
+ configure_file(
+diff --git a/cmake/Dependencies.cmake b/cmake/Dependencies.cmake
+index 5227204..9acaf75 100644
+--- a/cmake/Dependencies.cmake
++++ b/cmake/Dependencies.cmake
+@@ -46,6 +46,7 @@ if(USE_CUDA)
+ # A helper variable recording the list of Caffe2 dependent libraries
+ # torch::cudart is dealt with separately, due to CUDA_ADD_LIBRARY
+ # design reason (it adds CUDA_LIBRARIES itself).
++ find_package(NvidiaCutlass CONFIG REQUIRED)
+ set(Caffe2_PUBLIC_CUDA_DEPENDENCY_LIBS )
+ if(NOT CAFFE2_USE_NVRTC)
+ caffe2_update_option(USE_NVRTC OFF)
+@@ -378,6 +379,9 @@ if(INTERN_BUILD_MOBILE OR NOT DISABLE_NNPACK_AND_FAMILY)
+ set(USE_PTHREADPOOL ON CACHE BOOL "" FORCE)
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DUSE_PTHREADPOOL")
+
++ find_package(unofficial-pthreadpool CONFIG REQUIRED)
++ add_library(pthreadpool ALIAS unofficial::pthreadpool)
++
+ if(NOT TARGET pthreadpool)
+ if(USE_SYSTEM_PTHREADPOOL)
+ add_library(pthreadpool SHARED IMPORTED)
+@@ -412,6 +416,9 @@ endif()
+ if(NOT CMAKE_SYSTEM_PROCESSOR MATCHES "^(s390x|ppc64le)$")
+ # ---[ Caffe2 uses cpuinfo library in the thread pool
+ # ---[ But it doesn't support s390x/powerpc and thus not used on s390x/powerpc
++ find_package(cpuinfo CONFIG REQUIRED)
++ add_library(cpuinfo ALIAS cpuinfo::cpuinfo)
++ add_library(clog ALIAS cpuinfo::clog)
+ if(NOT TARGET cpuinfo AND USE_SYSTEM_CPUINFO)
+ add_library(cpuinfo SHARED IMPORTED)
+ find_library(CPUINFO_LIBRARY cpuinfo)
+@@ -476,8 +483,10 @@ endif()
+
+ # ---[ NNPACK
+ if(USE_NNPACK)
+- include(${CMAKE_CURRENT_LIST_DIR}/External/nnpack.cmake)
+- if(NNPACK_FOUND)
++
++ find_package(unofficial-nnpack CONFIG REQUIRED)
++ add_library(nnpack ALIAS unofficial::nnpack::nnpack)
++ if(nnpack_FOUND)
+ if(TARGET nnpack)
+ # ---[ NNPACK is being built together with Caffe2: explicitly specify dependency
+ list(APPEND Caffe2_DEPENDENCY_LIBS nnpack)
+@@ -492,6 +501,12 @@ if(USE_NNPACK)
+ endif()
+
+ # ---[ XNNPACK
++if(USE_XNNPACK)
++ find_package(unofficial-xnnpack CONFIG REQUIRED)
++ add_library(XNNPACK ALIAS unofficial::xnnpack::XNNPACK)
++ add_library(microkernels-prod ALIAS unofficial::xnnpack::microkernels-prod)
++ list(APPEND Caffe2_DEPENDENCY_LIBS XNNPACK microkernels-prod)
++endif()
+ if(USE_XNNPACK AND NOT USE_SYSTEM_XNNPACK)
+ if(NOT DEFINED XNNPACK_SOURCE_DIR)
+ set(XNNPACK_SOURCE_DIR "${CAFFE2_THIRD_PARTY_ROOT}/XNNPACK" CACHE STRING "XNNPACK source directory")
+@@ -702,7 +717,7 @@ if(BUILD_TEST OR BUILD_MOBILE_BENCHMARK OR BUILD_MOBILE_TEST)
+ endif()
+
+ # ---[ FBGEMM
+-if(USE_FBGEMM)
++if(FALSE)
+ set(CAFFE2_THIRD_PARTY_ROOT "${PROJECT_SOURCE_DIR}/third_party")
+ if(NOT DEFINED FBGEMM_SOURCE_DIR)
+ set(FBGEMM_SOURCE_DIR "${CAFFE2_THIRD_PARTY_ROOT}/fbgemm" CACHE STRING "FBGEMM source directory")
+@@ -755,6 +770,9 @@ endif()
+
+ if(USE_FBGEMM)
+ caffe2_update_option(USE_FBGEMM ON)
++ find_package(fbgemmLibrary REQUIRED)
++ list(APPEND Caffe2_DEPENDENCY_LIBS fbgemm)
++
+ else()
+ caffe2_update_option(USE_FBGEMM OFF)
+ message(WARNING
+@@ -804,6 +822,8 @@ if(USE_ITT)
+ endif()
+
+ # ---[ Caffe2 depends on FP16 library for half-precision conversions
++find_package(unofficial-fp16 CONFIG REQUIRED)
++add_library(fp16 ALIAS unofficial::fp16::fp16)
+ if(NOT TARGET fp16 AND NOT USE_SYSTEM_FP16)
+ set(CAFFE2_THIRD_PARTY_ROOT "${PROJECT_SOURCE_DIR}/third_party")
+ # PSIMD is required by FP16
+@@ -1155,6 +1175,7 @@ if(USE_DISTRIBUTED AND USE_TENSORPIPE)
+ if(MSVC)
+ message(WARNING "Tensorpipe cannot be used on Windows.")
+ else()
++ if(FALSE)
+ if(USE_CUDA)
+ set(TP_USE_CUDA ON CACHE BOOL "" FORCE)
+ set(TP_ENABLE_CUDA_IPC ON CACHE BOOL "" FORCE)
+@@ -1177,6 +1198,9 @@ if(USE_DISTRIBUTED AND USE_TENSORPIPE)
+ if(CMAKE_VERSION VERSION_GREATER_EQUAL "4.0.0")
+ unset(CMAKE_POLICY_VERSION_MINIMUM)
+ endif()
++ endif()
++ find_package(unofficial-tensorpipe CONFIG REQUIRED)
++ add_library(tensorpipe ALIAS unofficial::tensorpipe::tensorpipe)
+
+ list(APPEND Caffe2_DEPENDENCY_LIBS tensorpipe)
+ list(APPEND Caffe2_DEPENDENCY_LIBS nlohmann)
+@@ -1329,7 +1353,7 @@ if(CAFFE2_CMAKE_BUILDING_WITH_MAIN_REPO AND NOT INTERN_DISABLE_ONNX)
+ caffe2_interface_library(onnx onnx_library)
+ endif()
+ list(APPEND Caffe2_DEPENDENCY_WHOLE_LINK_LIBS onnx_library)
+- else()
++ elseif(FALSE)
+ add_library(onnx SHARED IMPORTED)
+ find_library(ONNX_LIBRARY onnx)
+ if(NOT ONNX_LIBRARY)
+@@ -1345,6 +1369,8 @@ if(CAFFE2_CMAKE_BUILDING_WITH_MAIN_REPO AND NOT INTERN_DISABLE_ONNX)
+ message("-- Found onnx: ${ONNX_LIBRARY} ${ONNX_PROTO_LIBRARY}")
+ list(APPEND Caffe2_DEPENDENCY_LIBS onnx_proto onnx)
+ endif()
++ find_package(ONNX CONFIG REQUIRED)
++ list(APPEND Caffe2_DEPENDENCY_LIBS ONNX::onnx ONNX::onnx_proto)
+ # Recover the build shared libs option.
+ set(BUILD_SHARED_LIBS ${TEMP_BUILD_SHARED_LIBS})
+ endif()
+@@ -1515,6 +1541,9 @@ if(NOT INTERN_BUILD_MOBILE)
+ endif()
+
+ if(USE_KLEIDIAI)
++ find_package(KleidiAI CONFIG REQUIRED)
++ list(APPEND Caffe2_DEPENDENCY_LIBS KleidiAI::kleidiai)
++ elseif(FALSE)
+ if(CMAKE_C_COMPILER_ID STREQUAL "Clang" AND CMAKE_C_COMPILER_VERSION VERSION_LESS "11" )
+ message(WARNING "KleidiAI: Using non-supported Clang version. Expected 11 or newer, received ${CMAKE_C_COMPILER_VERSION}.")
+ endif()
+@@ -1586,6 +1615,7 @@ endif()
+ #
+ set(TEMP_BUILD_SHARED_LIBS ${BUILD_SHARED_LIBS})
+ set(BUILD_SHARED_LIBS OFF CACHE BOOL "Build shared libs" FORCE)
++if(0)
+ add_subdirectory(${PROJECT_SOURCE_DIR}/third_party/fmt)
+
+ # Disable compiler feature checks for `fmt`.
+@@ -1596,7 +1626,8 @@ add_subdirectory(${PROJECT_SOURCE_DIR}/third_party/fmt)
+ # `fmt` is compatible with a superset of the compilers that PyTorch is, it
+ # shouldn't be too bad to just disable the checks.
+ set_target_properties(fmt-header-only PROPERTIES INTERFACE_COMPILE_FEATURES "")
+-
++endif()
++find_package(fmt REQUIRED)
+ list(APPEND Caffe2_DEPENDENCY_LIBS fmt::fmt-header-only)
+ set(BUILD_SHARED_LIBS ${TEMP_BUILD_SHARED_LIBS} CACHE BOOL "Build shared libs" FORCE)
+
+@@ -1745,13 +1776,10 @@ if(USE_KINETO)
+ endif()
+ endif()
+
+-# Include google/FlatBuffers
+-include(${CMAKE_CURRENT_LIST_DIR}/FlatBuffers.cmake)
+
+-# Include cpp-httplib
+-add_library(httplib INTERFACE IMPORTED)
+-target_include_directories(httplib SYSTEM INTERFACE ${PROJECT_SOURCE_DIR}/third_party/cpp-httplib)
++find_package(FlatBuffers REQUIRED)
++add_library(flatbuffers ALIAS flatbuffers::flatbuffers)
++find_package(httplib REQUIRED)
++find_package(nlohmann_json REQUIRED)
++add_library(nlohmann ALIAS nlohmann_json)
+
+-# Include nlohmann-json
+-add_library(nlohmann INTERFACE IMPORTED)
+-include_directories(nlohmann SYSTEM INTERFACE ${PROJECT_SOURCE_DIR}/third_party/nlohmann/include)
+diff --git a/torch/CMakeLists.txt b/torch/CMakeLists.txt
+index 8b8ebdc..1777c0e 100644
+--- a/torch/CMakeLists.txt
++++ b/torch/CMakeLists.txt
+@@ -59,18 +59,7 @@ set(TORCH_PYTHON_INCLUDE_DIRECTORIES
+ ${CMAKE_BINARY_DIR}
+ ${CMAKE_BINARY_DIR}/aten/src
+ ${CMAKE_BINARY_DIR}/caffe2/aten/src
+- ${CMAKE_BINARY_DIR}/third_party
+- ${CMAKE_BINARY_DIR}/third_party/onnx
+-
+ ${TORCH_ROOT}/third_party/valgrind-headers
+-
+- ${TORCH_ROOT}/third_party/gloo
+- ${TORCH_ROOT}/third_party/onnx
+- ${TORCH_ROOT}/third_party/flatbuffers/include
+- ${TORCH_ROOT}/third_party/kineto/libkineto/include
+- ${TORCH_ROOT}/third_party/cpp-httplib
+- ${TORCH_ROOT}/third_party/nlohmann/include
+-
+ ${TORCH_SRC_DIR}/csrc
+ ${TORCH_SRC_DIR}/csrc/api/include
+ ${TORCH_SRC_DIR}/lib
diff --git a/vcpkg/ports/libtorch/fix-dist-cuda.patch b/vcpkg/ports/libtorch/fix-dist-cuda.patch
new file mode 100644
index 0000000..65e4bc1
--- /dev/null
+++ b/vcpkg/ports/libtorch/fix-dist-cuda.patch
@@ -0,0 +1,22 @@
+diff --git a/cmake/Dependencies.cmake b/cmake/Dependencies.cmake
+index 9acaf75..4f44d3e 100644
+--- a/cmake/Dependencies.cmake
++++ b/cmake/Dependencies.cmake
+@@ -1205,6 +1205,8 @@ if(USE_DISTRIBUTED AND USE_TENSORPIPE)
+ list(APPEND Caffe2_DEPENDENCY_LIBS tensorpipe)
+ list(APPEND Caffe2_DEPENDENCY_LIBS nlohmann)
+ if(USE_CUDA)
++ add_library(tensorpipe_cuda ALIAS unofficial::tensorpipe::tensorpipe_cuda)
++
+ list(APPEND Caffe2_CUDA_DEPENDENCY_LIBS tensorpipe_cuda)
+ elseif(USE_ROCM)
+ message(WARNING "TensorPipe doesn't yet support ROCm")
+@@ -1279,7 +1281,7 @@ if(USE_GLOO)
+ endif()
+ # Pick the right dependency depending on USE_CUDA
+ list(APPEND Caffe2_DEPENDENCY_LIBS gloo)
+- if(USE_CUDA)
++ if(FALSE)
+ list(APPEND Caffe2_CUDA_DEPENDENCY_LIBS gloo_cuda)
+ elseif(USE_ROCM)
+ list(APPEND Caffe2_HIP_DEPENDENCY_LIBS gloo_hip)
diff --git a/vcpkg/ports/libtorch/fix-glog.patch b/vcpkg/ports/libtorch/fix-glog.patch
new file mode 100644
index 0000000..429c933
--- /dev/null
+++ b/vcpkg/ports/libtorch/fix-glog.patch
@@ -0,0 +1,42 @@
+diff --git a/c10/util/Logging.cpp b/c10/util/Logging.cpp
+index c463658..a523040 100644
+--- a/c10/util/Logging.cpp
++++ b/c10/util/Logging.cpp
+@@ -362,7 +362,7 @@ void UpdateLoggingLevelsFromFlags() {
+
+ void ShowLogInfoToStderr() {
+ FLAGS_logtostderr = 1;
+- FLAGS_minloglevel = std::min(FLAGS_minloglevel, google::GLOG_INFO);
++ FLAGS_minloglevel = std::min<int>(FLAGS_minloglevel, google::GLOG_INFO);
+ }
+ } // namespace c10
+
+diff --git a/c10/util/Logging.cpp b/c10/util/Logging.cpp
+index a523040..04375b6 100644
+--- a/c10/util/Logging.cpp
++++ b/c10/util/Logging.cpp
+@@ -295,23 +295,13 @@ C10_DEFINE_int(
+ google::GLOG_WARNING,
+ "The minimum log level that caffe2 will output.");
+
+-// Google glog's api does not have an external function that allows one to check
+-// if glog is initialized or not. It does have an internal function - so we are
+-// declaring it here. This is a hack but has been used by a bunch of others too
+-// (e.g. Torch).
+-namespace google {
+-namespace glog_internal_namespace_ {
+-bool IsGoogleLoggingInitialized();
+-} // namespace glog_internal_namespace_
+-} // namespace google
+-
+ namespace c10 {
+ namespace {
+
+ void initGoogleLogging(char const* name) {
+ #if !defined(_MSC_VER)
+ // This trick can only be used on UNIX platforms
+- if (!::google::glog_internal_namespace_::IsGoogleLoggingInitialized())
++ if (!::google::IsGoogleLoggingInitialized())
+ #endif
+ {
+ ::google::InitGoogleLogging(name);
diff --git a/vcpkg/ports/libtorch/fix-osx.patch b/vcpkg/ports/libtorch/fix-osx.patch
new file mode 100644
index 0000000..f864151
--- /dev/null
+++ b/vcpkg/ports/libtorch/fix-osx.patch
@@ -0,0 +1,16 @@
+diff --git a/CMakeLists.txt b/CMakeLists.txt
+index 463788e..1ca26e3 100644
+--- a/CMakeLists.txt
++++ b/CMakeLists.txt
+@@ -796,7 +796,10 @@ if(NOT CMAKE_BUILD_TYPE)
+ endif()
+
+ # The below means we are cross compiling for arm64 or x86_64 on MacOSX
+-if(NOT IOS
++if(TRUE)
++ message(STATUS "Using custom protoc executable: ${PROTOBUF_PROTOC_EXECUTABLE}")
++ message(STATUS "Using custom caffe2 protoc executable: ${CAFFE2_CUSTOM_PROTOC_EXECUTABLE}")
++elseif(NOT IOS
+ AND CMAKE_SYSTEM_NAME STREQUAL "Darwin"
+ AND CMAKE_OSX_ARCHITECTURES MATCHES "^(x86_64|arm64)$")
+ set(CROSS_COMPILING_MACOSX TRUE)
diff --git a/vcpkg/ports/libtorch/fix-pytorch-pr-156630.patch b/vcpkg/ports/libtorch/fix-pytorch-pr-156630.patch
new file mode 100644
index 0000000..47dd78a
--- /dev/null
+++ b/vcpkg/ports/libtorch/fix-pytorch-pr-156630.patch
@@ -0,0 +1,29 @@
+diff --git a/aten/src/ATen/native/cuda/SegmentReduce.cu b/aten/src/ATen/native/cuda/SegmentReduce.cu
+index 04bec04..3ea8af2 100644
+--- a/aten/src/ATen/native/cuda/SegmentReduce.cu
++++ b/aten/src/ATen/native/cuda/SegmentReduce.cu
+@@ -1,5 +1,6 @@
+ #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
+ #include <ATen/native/SegmentReduce.h>
++#include <cuda_runtime.h>
+
+ #include <ATen/core/Tensor.h>
+ #include <ATen/Dispatch.h>
+@@ -17,6 +18,10 @@
+ #include <ATen/ops/cumsum.h>
+ #endif
+
++// SegmentReduce compilation with CUDA-12.9 causes NVCC crash on Windows
++// See https://github.com/pytorch/pytorch/issues/156181
++#if !defined(_WIN32) || CUDART_VERSION < 12090
++
+ namespace at::native {
+
+ namespace {
+@@ -600,3 +605,5 @@ REGISTER_DISPATCH(
+ &_segment_reduce_offsets_backward_cuda_kernel);
+
+ } // namespace at::native
++
++#endif // !defined(_WIN32) || CUDART_VERSION < 12090
+\ No newline at end of file
diff --git a/vcpkg/ports/libtorch/fix-vulkan.patch b/vcpkg/ports/libtorch/fix-vulkan.patch
new file mode 100644
index 0000000..0842aa0
--- /dev/null
+++ b/vcpkg/ports/libtorch/fix-vulkan.patch
@@ -0,0 +1,43 @@
+diff --git a/CMakeLists.txt b/CMakeLists.txt
+index 1ca26e3..7d3442c 100644
+--- a/CMakeLists.txt
++++ b/CMakeLists.txt
+@@ -1104,7 +1104,8 @@ if(NOT MSVC)
+ else()
+ # Define export functions for AOTI.
+ add_compile_definitions(EXPORT_AOTI_FUNCTIONS)
+-
++ # needed for vulkan compile
++ add_compile_definitions(_USE_MATH_DEFINES) # math.h macros like M_PI, M_SQRT2, M_2_SQRTPI, etc.
+ # skip unwanted includes from windows.h
+ add_compile_definitions(WIN32_LEAN_AND_MEAN)
+ # Windows SDK broke compatibility since version 25131, but introduced this
+diff --git a/aten/src/ATen/native/vulkan/api/Allocator.h b/aten/src/ATen/native/vulkan/api/Allocator.h
+index a89c333..a9a41ef 100644
+--- a/aten/src/ATen/native/vulkan/api/Allocator.h
++++ b/aten/src/ATen/native/vulkan/api/Allocator.h
+@@ -50,7 +50,11 @@
+ #pragma clang diagnostic ignored "-Winconsistent-missing-destructor-override"
+ #endif /* __clang__ */
+
+-#include <include/vk_mem_alloc.h>
++#if __has_include(<vma/vk_mem_alloc.h>)
++#include <vma/vk_mem_alloc.h>
++#else
++#include <vk_mem_alloc.h>
++#endif
+
+ #ifdef __clang__
+ #pragma clang diagnostic pop
+diff --git a/aten/src/ATen/native/vulkan/api/QueryPool.cpp b/aten/src/ATen/native/vulkan/api/QueryPool.cpp
+index 9c0c7fb..ae4a81f 100644
+--- a/aten/src/ATen/native/vulkan/api/QueryPool.cpp
++++ b/aten/src/ATen/native/vulkan/api/QueryPool.cpp
+@@ -9,6 +9,7 @@
+ #include <iomanip>
+ #include <iostream>
+ #include <utility>
++#include <algorithm>
+
+ namespace at {
+ namespace native {
diff --git a/vcpkg/ports/libtorch/kineto.patch b/vcpkg/ports/libtorch/kineto.patch
new file mode 100644
index 0000000..8081062
--- /dev/null
+++ b/vcpkg/ports/libtorch/kineto.patch
@@ -0,0 +1,34 @@
+diff --git a/libkineto/CMakeLists.txt b/libkineto/CMakeLists.txt
+index 7d36ffb9d4..8f97998114 100644
+--- a/libkineto/CMakeLists.txt
++++ b/libkineto/CMakeLists.txt
+@@ -111,27 +111,8 @@ endif()
+ target_compile_options(kineto_base PRIVATE "${KINETO_COMPILE_OPTIONS}")
+ target_compile_options(kineto_api PRIVATE "${KINETO_COMPILE_OPTIONS}")
+
+-if(NOT TARGET fmt)
+- if(NOT FMT_SOURCE_DIR)
+- set(FMT_SOURCE_DIR "${LIBKINETO_THIRDPARTY_DIR}/fmt"
+- CACHE STRING "fmt source directory from submodules")
+- endif()
+-
+- # Build FMT.
+- # FMT and some other libraries use BUILD_SHARED_LIBS to control
+- # the library type.
+- # Save and restore the value after configuring FMT
+- set(TEMP_BUILD_SHARED_LIBS ${BUILD_SHARED_LIBS})
+- set(BUILD_SHARED_LIBS OFF CACHE BOOL "Build shared libs" FORCE)
+- set(FMT_LIBRARY_TYPE static CACHE STRING "Set lib type to static")
+- add_subdirectory("${FMT_SOURCE_DIR}" "${LIBKINETO_BINARY_DIR}/fmt")
+- set_property(TARGET fmt PROPERTY POSITION_INDEPENDENT_CODE ON)
+- set(BUILD_SHARED_LIBS ${TEMP_BUILD_SHARED_LIBS} CACHE BOOL "Build shared libs" FORCE)
+-endif()
+-
+-set(FMT_INCLUDE_DIR "${FMT_SOURCE_DIR}/include")
+-message(STATUS "Kineto: FMT_SOURCE_DIR = ${FMT_SOURCE_DIR}")
+-message(STATUS "Kineto: FMT_INCLUDE_DIR = ${FMT_INCLUDE_DIR}")
++find_package(fmt REQUIRED)
++
+ if (NOT CUPTI_INCLUDE_DIR)
+ set(CUPTI_INCLUDE_DIR "${CUDA_SOURCE_DIR}/extras/CUPTI/include")
+ endif()
diff --git a/vcpkg/ports/libtorch/portfile.cmake b/vcpkg/ports/libtorch/portfile.cmake
new file mode 100644
index 0000000..4b06823
--- /dev/null
+++ b/vcpkg/ports/libtorch/portfile.cmake
@@ -0,0 +1,240 @@
+vcpkg_check_linkage(ONLY_DYNAMIC_LIBRARY)
+
+vcpkg_from_github(
+ OUT_SOURCE_PATH SOURCE_PATH
+ REPO pytorch/pytorch
+ REF "v${VERSION}"
+ SHA512 a9fc2252af9031c2cd46dde558c491aea8bc322fb80157a7760f300a44b759d4bfe866f030fbb974b80493057cfff4dd512498f99a100ed6d05bf620258ed37e
+ HEAD_REF master
+ PATCHES
+ fix-cmake.patch
+ fix-osx.patch
+ fix-vulkan.patch
+ fix-glog.patch
+ fix-pytorch-pr-156630.patch # https://github.com/pytorch/pytorch/pull/156630
+ fix-dist-cuda.patch
+ )
+
+file(REMOVE_RECURSE "${SOURCE_PATH}/caffe2/core/macros.h") # We must use generated header files
+
+vcpkg_from_github(
+ OUT_SOURCE_PATH src_kineto
+ REPO pytorch/kineto
+ REF d9753139d181b9ff42872465aac0e5d3018be415
+ SHA512 f037fac78e566c40108acf9eace55a8f67a2c5b71f298fd3cd17bf22cf05240c260fd89f017fa411656a7505ec9073a06a3048e191251d5cfc4b52c237b37d0b
+ HEAD_REF main
+ PATCHES
+ kineto.patch
+)
+file(COPY "${src_kineto}/" DESTINATION "${SOURCE_PATH}/third_party/kineto")
+
+vcpkg_from_github(
+ OUT_SOURCE_PATH src_cudnn
+ REPO NVIDIA/cudnn-frontend # new port ?
+ REF 2533f5e5c1877fd76266133c1479ef1643ce3a8b # 1.6.1
+ SHA512 8caacdf9f7dbd6ce55507f5f7165db8640b681e2a7dfd6a841de8eaa3489cff5ba41d11758cc464320b2ff9a491f8234e1749580cf43cac702f07cf82611e084
+ HEAD_REF main
+)
+file(COPY "${src_cudnn}/" DESTINATION "${SOURCE_PATH}/third_party/cudnn_frontend")
+
+
+file(REMOVE
+ "${SOURCE_PATH}/cmake/Modules/FindBLAS.cmake"
+ "${SOURCE_PATH}/cmake/Modules/FindLAPACK.cmake"
+ "${SOURCE_PATH}/cmake/Modules/FindCUDA.cmake"
+ "${SOURCE_PATH}/cmake/Modules/FindCUDAToolkit.cmake"
+ "${SOURCE_PATH}/cmake/Modules/Findpybind11.cmake"
+)
+
+find_program(FLATC NAMES flatc PATHS "${CURRENT_HOST_INSTALLED_DIR}/tools/flatbuffers" REQUIRED NO_DEFAULT_PATH NO_CMAKE_PATH)
+message(STATUS "Using flatc: ${FLATC}")
+
+vcpkg_execute_required_process(
+ COMMAND ${FLATC} --cpp --no-prefix --scoped-enums --gen-mutable mobile_bytecode.fbs
+ LOGNAME codegen-flatc-mobile_bytecode
+ WORKING_DIRECTORY "${SOURCE_PATH}/torch/csrc/jit/serialization"
+)
+
+find_program(PROTOC NAMES protoc PATHS "${CURRENT_HOST_INSTALLED_DIR}/tools/protobuf" REQUIRED NO_DEFAULT_PATH NO_CMAKE_PATH)
+message(STATUS "Using protoc: ${PROTOC}")
+
+x_vcpkg_get_python_packages(
+ PYTHON_VERSION 3
+ PACKAGES typing-extensions pyyaml
+ # numpy
+ OUT_PYTHON_VAR PYTHON3
+)
+
+message(STATUS "Using Python3: ${PYTHON3}")
+
+vcpkg_check_features(OUT_FEATURE_OPTIONS FEATURE_OPTIONS
+ FEATURES
+ dist USE_DISTRIBUTED # MPI, Gloo, TensorPipe
+ zstd USE_ZSTD
+ fbgemm USE_FBGEMM
+ opencv USE_OPENCV
+ opencl USE_OPENCL
+ mkldnn USE_MKLDNN
+ cuda USE_CUDA
+ cuda USE_CUDNN
+ cuda USE_NCCL
+ cuda USE_SYSTEM_NCCL
+ cuda USE_NVRTC
+ cuda AT_CUDA_ENABLED
+ cuda AT_CUDNN_ENABLED
+ cuda USE_MAGMA
+ vulkan USE_VULKAN
+ vulkan USE_VULKAN_RELAXED_PRECISION
+ rocm USE_ROCM # This is an alternative to cuda not a feature! (Not in vcpkg.json!) -> disabled
+ llvm USE_LLVM
+ mpi USE_MPI
+ nnpack USE_NNPACK # todo: check use of `DISABLE_NNPACK_AND_FAMILY`
+# No feature in vcpkg yet, so disabled -> requires numpy built by vcpkg itself
+ python BUILD_PYTHON
+ python USE_NUMPY
+ glog USE_GLOG
+ gflags USE_GFLAGS
+)
+
+if("dist" IN_LIST FEATURES)
+ if(VCPKG_TARGET_IS_LINUX OR VCPKG_TARGET_IS_OSX)
+ list(APPEND FEATURE_OPTIONS -DUSE_TENSORPIPE=ON)
+ endif()
+ if(VCPKG_TARGET_IS_OSX)
+ list(APPEND FEATURE_OPTIONS -DUSE_LIBUV=ON)
+ endif()
+ list(APPEND FEATURE_OPTIONS -DUSE_GLOO=${VCPKG_TARGET_IS_LINUX})
+endif()
+
+if("cuda" IN_LIST FEATURES)
+ vcpkg_find_cuda(OUT_CUDA_TOOLKIT_ROOT cuda_toolkit_root)
+ list(APPEND FEATURE_OPTIONS
+ "-DCMAKE_CUDA_COMPILER=${NVCC}"
+ "-DCUDAToolkit_ROOT=${cuda_toolkit_root}"
+ )
+endif()
+
+if("vulkan" IN_LIST FEATURES) # Vulkan::glslc in FindVulkan.cmake
+ find_program(GLSLC NAMES glslc PATHS "${CURRENT_HOST_INSTALLED_DIR}/tools/shaderc" REQUIRED)
+ message(STATUS "Using glslc: ${GLSLC}")
+ list(APPEND FEATURE_OPTIONS "-DVulkan_GLSLC_EXECUTABLE:FILEPATH=${GLSLC}")
+endif()
+
+set(TARGET_IS_MOBILE OFF)
+if(VCPKG_TARGET_IS_ANDROID OR VCPKG_TARGET_IS_IOS)
+ set(TARGET_IS_MOBILE ON)
+endif()
+
+set(TARGET_IS_APPLE OFF)
+if(VCPKG_TARGET_IS_IOS OR VCPKG_TARGET_IS_OSX)
+ set(TARGET_IS_APPLE ON)
+endif()
+
+string(COMPARE EQUAL "${VCPKG_CRT_LINKAGE}" "static" USE_STATIC_RUNTIME)
+vcpkg_cmake_configure(
+ SOURCE_PATH "${SOURCE_PATH}"
+ DISABLE_PARALLEL_CONFIGURE
+ OPTIONS
+ ${FEATURE_OPTIONS}
+ -DProtobuf_PROTOC_EXECUTABLE:FILEPATH=${PROTOC}
+ -DCAFFE2_CUSTOM_PROTOC_EXECUTABLE:FILEPATH=${PROTOC}
+ -DPython_EXECUTABLE:FILEPATH=${PYTHON3}
+ -DPython3_EXECUTABLE:FILEPATH=${PYTHON3}
+ -DBUILD_PYTHON=OFF
+ -DUSE_NUMPY=OFF
+ -DCAFFE2_STATIC_LINK_CUDA=ON
+ -DCAFFE2_USE_MSVC_STATIC_RUNTIME=${USE_STATIC_RUNTIME}
+ -DBUILD_CUSTOM_PROTOBUF=OFF
+ -DBUILD_PYTHON=OFF
+ -DUSE_LITE_PROTO=OFF
+ -DBUILD_TEST=OFF
+ -DATEN_NO_TEST=ON
+ -DUSE_SYSTEM_LIBS=ON
+ -DUSE_METAL=OFF
+ -DUSE_FLASH_ATTENTION=OFF
+ -DUSE_PYTORCH_METAL=OFF
+ -DUSE_PYTORCH_METAL_EXPORT=OFF
+ -DUSE_PYTORCH_QNNPACK:BOOL=OFF
+ -DUSE_ITT=OFF
+ -DUSE_ROCKSDB=ON
+ -DUSE_OBSERVERS=OFF
+ -DUSE_KINETO=OFF
+ -DUSE_ROCM=OFF
+ -DUSE_NUMA=OFF
+ -DUSE_SYSTEM_LIBS=ON
+ -DBUILD_JNI=${VCPKG_TARGET_IS_ANDROID}
+ -DUSE_NNAPI=${VCPKG_TARGET_IS_ANDROID}
+ ${BLAS_OPTIONS}
+ # BLAS=MKL not supported in this port
+ -DUSE_MKLDNN=OFF
+ -DUSE_MKLDNN_CBLAS=OFF
+ #-DCAFFE2_USE_MKL=ON
+ #-DAT_MKL_ENABLED=ON
+ -DAT_MKLDNN_ENABLED=OFF
+ -DUSE_OPENCL=ON
+ -DUSE_KINETO=OFF #
+        # Should be enabled in the future along with the "python" feature (currently disabled)
+ # OPTIONS_RELEASE
+ # -DPYTHON_LIBRARY=${CURRENT_INSTALLED_DIR}/lib/python311.lib
+ # OPTIONS_DEBUG
+ # -DPYTHON_LIBRARY=${CURRENT_INSTALLED_DIR}/debug/lib/python311_d.lib
+ MAYBE_UNUSED_VARIABLES
+ USE_NUMA
+ USE_SYSTEM_BIND11
+ MKLDNN_CPU_RUNTIME
+ PYTHON_LIBRARY
+)
+
+vcpkg_cmake_install()
+vcpkg_copy_pdbs()
+
+vcpkg_cmake_config_fixup(PACKAGE_NAME Caffe2 CONFIG_PATH "share/cmake/Caffe2" DO_NOT_DELETE_PARENT_CONFIG_PATH)
+vcpkg_cmake_config_fixup(PACKAGE_NAME torch CONFIG_PATH "share/cmake/Torch" DO_NOT_DELETE_PARENT_CONFIG_PATH)
+vcpkg_cmake_config_fixup(PACKAGE_NAME ATen CONFIG_PATH "share/cmake/ATen" )
+
+vcpkg_replace_string("${CURRENT_PACKAGES_DIR}/share/torch/TorchConfig.cmake" "/../../../" "/../../")
+
+# Traverse the folder and remove "some" empty folders
+function(cleanup_once folder)
+ if(NOT IS_DIRECTORY "${folder}")
+ return()
+ endif()
+ file(GLOB paths LIST_DIRECTORIES true "${folder}/*")
+ list(LENGTH paths count)
+ # 1. remove if the given folder is empty
+ if(count EQUAL 0)
+ file(REMOVE_RECURSE "${folder}")
+ message(STATUS "Removed ${folder}")
+ return()
+ endif()
+ # 2. repeat the operation for hop 1 sub-directories
+ foreach(path ${paths})
+ cleanup_once(${path})
+ endforeach()
+endfunction()
+
+# Some folders may contain empty folders. They will become empty after `cleanup_once`.
+# Repeat given times to delete new empty folders.
+function(cleanup_repeat folder repeat)
+ if(NOT IS_DIRECTORY "${folder}")
+ return()
+ endif()
+ while(repeat GREATER_EQUAL 1)
+ math(EXPR repeat "${repeat} - 1" OUTPUT_FORMAT DECIMAL)
+ cleanup_once("${folder}")
+ endwhile()
+endfunction()
+
+cleanup_repeat("${CURRENT_PACKAGES_DIR}/include" 5)
+cleanup_repeat("${CURRENT_PACKAGES_DIR}/lib/site-packages" 13)
+
+file(REMOVE_RECURSE
+ "${CURRENT_PACKAGES_DIR}/debug/include"
+ "${CURRENT_PACKAGES_DIR}/debug/share"
+)
+
+vcpkg_install_copyright(FILE_LIST "${SOURCE_PATH}/LICENSE")
+
+
+set(VCPKG_POLICY_DLLS_WITHOUT_EXPORTS enabled) # torch_global_deps.dll is built from an empty .c file and exists only to carry link dependencies
+
diff --git a/vcpkg/ports/libtorch/vcpkg.json b/vcpkg/ports/libtorch/vcpkg.json
new file mode 100644
index 0000000..4a62c0c
--- /dev/null
+++ b/vcpkg/ports/libtorch/vcpkg.json
@@ -0,0 +1,173 @@
+{
+ "name": "libtorch",
+ "version": "2.7.1",
+ "description": "Tensors and Dynamic neural networks in Python with strong GPU acceleration",
+ "homepage": "https://pytorch.org/",
+ "license": null,
+ "supports": "(windows & !static) | osx | linux",
+ "dependencies": [
+ "blas",
+ "cpp-httplib",
+ "cpuinfo",
+ "eigen3",
+ {
+ "name": "fbgemm",
+ "platform": "x64"
+ },
+ "flatbuffers",
+ {
+ "name": "flatbuffers",
+ "host": true
+ },
+ "fmt",
+ "foxi",
+ "fp16",
+ "gemmlowp",
+ "lapack",
+ "lmdb",
+ "mimalloc",
+ "nlohmann-json",
+ "onnx",
+ "onnx-optimizer",
+ "opencl",
+ "pocketfft",
+ "protobuf",
+ {
+ "name": "protobuf",
+ "host": true
+ },
+ "pthreadpool",
+ "pybind11",
+ "sleef",
+ {
+ "name": "vcpkg-cmake",
+ "host": true
+ },
+ {
+ "name": "vcpkg-cmake-config",
+ "host": true
+ },
+ {
+ "name": "vcpkg-get-python-packages",
+ "host": true
+ },
+ "xnnpack"
+ ],
+ "default-features": [
+ "gflags",
+ "glog",
+ "opencv",
+ "zstd"
+ ],
+ "features": {
+ "cuda": {
+ "description": "Build with CUDA GPU backend",
+ "supports": "(windows & x64 & !staticcrt) | (linux & x64)",
+ "dependencies": [
+ "cuda",
+ "cudnn",
+ {
+ "name": "gloo",
+ "features": [
+ "cuda"
+ ],
+ "platform": "linux"
+ },
+ "magma",
+ {
+ "name": "nccl",
+ "platform": "linux"
+ },
+ "nvidia-cutlass",
+ {
+ "name": "tensorpipe",
+ "features": [
+ "cuda"
+ ],
+ "platform": "linux"
+ }
+ ]
+ },
+ "dist": {
+ "description": "Use distributed with MPI, Gloo, libuv, TensorPipe",
+ "supports": "linux | windows",
+ "dependencies": [
+ {
+ "name": "gloo",
+ "platform": "linux"
+ },
+ {
+ "name": "libtorch",
+ "default-features": false,
+ "features": [
+ "mpi"
+ ],
+ "platform": "linux"
+ },
+ {
+ "name": "libuv",
+ "platform": "windows | osx"
+ },
+ {
+ "name": "tensorpipe",
+ "platform": "linux"
+ }
+ ]
+ },
+ "gflags": {
+ "description": "Build with gflags",
+ "dependencies": [
+ "gflags"
+ ]
+ },
+ "glog": {
+ "description": "Build with glog",
+ "dependencies": [
+ "glog"
+ ]
+ },
+ "llvm": {
+ "description": "Build with LLVM",
+ "dependencies": [
+ "llvm"
+ ]
+ },
+ "mpi": {
+ "description": "Build with MPI",
+ "dependencies": [
+ "mpi"
+ ]
+ },
+ "nnpack": {
+ "description": "Build with NNPack",
+ "supports": "linux | osx",
+ "dependencies": [
+ "nnpack"
+ ]
+ },
+ "opencv": {
+ "description": "Build with OpenCV",
+ "dependencies": [
+ "opencv"
+ ]
+ },
+ "vulkan": {
+ "description": "Build with Vulkan GPU backend",
+ "dependencies": [
+ {
+ "name": "shaderc",
+ "host": true
+ },
+ "vulkan",
+ "vulkan-loader",
+ "vulkan-memory-allocator"
+ ]
+ },
+ "zstd": {
+ "description": "Build with ZSTD",
+ "dependencies": [
+ "zstd"
+ ]
+ }
+ }
+}
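
For reference, with this overlay enabled (e.g. `vcpkg install libtorch --overlay-ports=vcpkg/ports` from the repository root), the port installs TorchConfig.cmake via the vcpkg_cmake_config_fixup calls in portfile.cmake, so a downstream project can consume it through the usual find_package(Torch) flow. A minimal consumer sketch follows; the example-app target and source names are placeholders, and it assumes the vcpkg toolchain file is passed via CMAKE_TOOLCHAIN_FILE.

    # Minimal consumer CMakeLists.txt sketch for the libtorch port above.
    cmake_minimum_required(VERSION 3.25)
    project(example-app LANGUAGES CXX)

    # TorchConfig.cmake is installed by this port under share/torch
    # (vcpkg_cmake_config_fixup(PACKAGE_NAME torch ...) in portfile.cmake).
    find_package(Torch CONFIG REQUIRED)

    # TORCH_CXX_FLAGS and TORCH_LIBRARIES are provided by TorchConfig.cmake.
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${TORCH_CXX_FLAGS}")

    add_executable(example-app example-app.cpp)
    target_link_libraries(example-app PRIVATE ${TORCH_LIBRARIES})
    set_property(TARGET example-app PROPERTY CXX_STANDARD 17)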