commit: a53e55843d4fc5ec93c954b602a0c7f8d39a4418
Author: Alfredo Tupone <tupone <AT> gentoo <DOT> org>
AuthorDate: Tue Jan 27 07:08:00 2026 +0000
Commit: Alfredo Tupone <tupone <AT> gentoo <DOT> org>
CommitDate: Tue Jan 27 07:08:37 2026 +0000
URL: https://gitweb.gentoo.org/repo/gentoo.git/commit/?id=a53e5584
sci-ml/caffe2: add 2.10.0
Signed-off-by: Alfredo Tupone <tupone <AT> gentoo.org>
sci-ml/caffe2/Manifest | 1 +
sci-ml/caffe2/caffe2-2.10.0.ebuild | 406 +++++++++++++++++++++++++
sci-ml/caffe2/files/caffe2-2.10.0-gentoo.patch | 220 ++++++++++++++
3 files changed, 627 insertions(+)
diff --git a/sci-ml/caffe2/Manifest b/sci-ml/caffe2/Manifest
index 71de6ac421f8..35010993e955 100644
--- a/sci-ml/caffe2/Manifest
+++ b/sci-ml/caffe2/Manifest
@@ -1,5 +1,6 @@
DIST composable_kernel-7fe50dc3.tar.gz 5380728 BLAKE2B c89c346d8e2d7a93a9cf26409e477fcdd25c43bc3f99d904c3bfe1bc282c6844ef2f2c80aceabe3bf4494db3457285384d5de5a22281aa426ba7479af82b0caf SHA512 a62f92e2dd7da944bd34bab6cf3bf624f630dc316d29c755e9fd523343c3f7648b0b7e0c9a0c8f5e9654477599ae8be9dac687d4054b0390f064ac2e40fc1cd3
DIST composable_kernel-8086bbe3.tar.gz 4418862 BLAKE2B b710e3d4586899443ec01044dad19fd2f992c351e2f65ba526dfcc47cc65c095beaf8ac21a8f71c02a0eb524d364e817b27241a9198884f2bdae9924b51e24e4 SHA512 8410b5a1c864d71f3034ef0d9d1245078856d09cc191faec59856c229bf11d89ae291036d735cb5cec4f1d72e6e9e8f6921833147f9619d30cfab8722d3a9f63
DIST flash-attention-2.7.4.gh.tar.gz 5841323 BLAKE2B 432999d763f2b3d732580ddfea5d3e01370351db0656546259a5e500a07516dd03c98828bfb55855dabe4adc651033b5d97ea4725ca46158b9970f0fbc662710 SHA512 05a4afb09e666f7404d6a3f8b5256e7bed6eba60a6f1bde2b7dbb96d318975f0b458c2521c7a38d88e97b6e4c27f29077cf787849daf82586e33f43a3d9a84b3
+DIST pytorch-2.10.0.tar.gz 62555251 BLAKE2B 6b48b4d3d3802a82d37231c472032f8c9390bdbfe9e810ff811f5521737b46794db51e87f7aad3928be917923ae8ac9c619b14d8d0cfacfc115dffb38347bd43 SHA512 929b1be42954f22e2091e0696d6175c8f30752925ce0bbe3a60a9393aff1f0afb228fa5efaf1ce26e78cd9e99294d150109e9ba9cb3243c90e9ec06cc082f74d
DIST pytorch-2.8.0.tar.gz 56565754 BLAKE2B a8f07513b92f9293f8322508f9fc73a462f89fe51cb1f280af371cee19cbe7e2bf900ba2b3c43fd08ea415566db441a6d6310d77f18477e957641be311a361a5 SHA512 448e9dad4aa10f1793d35e6ffe9f0f69b7719d41e6eccceb687a8d0c148e22d03e4f76170a05308ef9323a7aea41aa74605077ae1d68c6d949f13b3340ebf310
DIST pytorch-2.9.1.tar.gz 55764697 BLAKE2B b22e154034f8a25aa3ef949eb6b0456777e11fe5f97de56c6112d93a2e154db425e97848911af458d179f03d7154956f53b715c7b9d7e7f074e0baceac35dad8 SHA512 d7098408d44e0fee9ded4afd6622df6f08757bf02eee878ae25b62a275f82eb16f96a07027c670c6ffdd431c8714c569249bd8518ac8828a504e99908b8c38b1
diff --git a/sci-ml/caffe2/caffe2-2.10.0.ebuild b/sci-ml/caffe2/caffe2-2.10.0.ebuild
new file mode 100644
index 000000000000..c95b5716ae7f
--- /dev/null
+++ b/sci-ml/caffe2/caffe2-2.10.0.ebuild
@@ -0,0 +1,406 @@
+# Copyright 2022-2026 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+EAPI=8
+
+PYTHON_COMPAT=( python3_{11..14} )
+ROCM_VERSION=6.1
+inherit python-single-r1 cmake cuda flag-o-matic prefix rocm toolchain-funcs
+
+MYPN=pytorch
+MYP=${MYPN}-${PV}
+
+# caffe2-2.9.0 depends on future version of composable kernel
+# TODO: replace it with DEPEND in the future
+CK_COMMIT=7fe50dc3da2069d6645d9deb8c017a876472a977
+CK_P=composable_kernel-${CK_COMMIT:0:8}
+
+FLASH_PV=2.7.4
+FLASH_PN=flash-attention
+FLASH_P=${FLASH_PN}-${FLASH_PV}
+FLASH_ATT_URI="https://github.com/Dao-AILab/${FLASH_PN}/archive/refs/tags/v${FLASH_PV}.tar.gz -> ${FLASH_P}.gh.tar.gz"
+
+AOTRITON_PV=0.9.2b
+AOTRITON_PN=aotriton
+AOTRITON_P=${AOTRITON_PN}-${AOTRITON_PV}
+AOTRITON_tar=${AOTRITON_P}-manylinux_2_28_x86_64-rocm6.3-shared.tar.gz
+
+DESCRIPTION="A deep learning framework"
+HOMEPAGE="https://pytorch.org/"
+SRC_URI="
+	https://github.com/pytorch/${MYPN}/archive/refs/tags/v${PV}.tar.gz -> ${MYP}.tar.gz
+ rocm? (
+		https://github.com/ROCm/composable_kernel/archive/${CK_COMMIT}.tar.gz
+ -> ${CK_P}.tar.gz
+ )
+ cuda? (
+ flash? ( ${FLASH_ATT_URI} )
+ memefficient? ( ${FLASH_ATT_URI} )
+ )
+"
+
+S="${WORKDIR}"/${MYP}
+
+LICENSE="BSD"
+SLOT="0"
+KEYWORDS="~amd64 ~arm64"
+IUSE="cuda cusparselt distributed fbgemm flash gloo memefficient mkl mpi nccl nnpack +numpy
+ onednn openblas opencl openmp qnnpack rocm xnnpack"
+RESTRICT="test"
+REQUIRED_USE="
+ ${PYTHON_REQUIRED_USE}
+ mpi? ( distributed )
+ gloo? ( distributed )
+ ?? ( cuda rocm )
+ rocm? (
+ || ( ${ROCM_REQUIRED_USE} )
+ )
+ flash? ( || ( cuda rocm ) )
+ memefficient? ( || ( cuda rocm ) )
+ nccl? ( rocm )
+"
+
+RDEPEND="
+ ${PYTHON_DEPS}
+ dev-cpp/abseil-cpp:=
+ dev-cpp/gflags:=
+ >=dev-cpp/glog-0.5.0:=
+ dev-libs/cpuinfo
+ dev-libs/libfmt:=
+ dev-cpp/opentelemetry-cpp
+ dev-libs/protobuf:=
+ dev-libs/sleef
+ ~sci-ml/kineto-0.4.0_p20250617
+ sci-ml/onnx
+ virtual/lapack
+ cuda? (
+ dev-libs/cudnn
+ >=sci-ml/cudnn-frontend-1.12.0:=
+ >=dev-util/nvidia-cuda-toolkit-12.9:=[profiler]
+ cusparselt? ( dev-libs/cusparselt )
+ )
+ fbgemm? ( sci-ml/FBGEMM )
+ gloo? ( >=sci-ml/gloo-2025.06.04[cuda?,rocm?] )
+ mpi? ( virtual/mpi )
+ nnpack? (
+ sci-ml/NNPACK
+ dev-libs/pthreadpool
+ )
+ numpy? ( $(python_gen_cond_dep '
+ dev-python/numpy[${PYTHON_USEDEP}]
+ ') )
+ onednn? ( sci-ml/oneDNN )
+ opencl? ( virtual/opencl )
+ qnnpack? (
+ !sci-libs/QNNPACK
+ sci-ml/gemmlowp
+ dev-libs/pthreadpool
+ )
+ rocm? (
+ nccl? ( >=dev-libs/rccl-6.3:= <dev-libs/rccl-7.2:= )
+ >=dev-util/hip-6.3:= <dev-util/hip-7.2:=
+ >=dev-util/roctracer-6.3:= <dev-util/roctracer-7.2:=
+ >=sci-libs/hipBLAS-6.3:= <sci-libs/hipBLAS-7.2:=[rocsolver(+)]
+ >=sci-libs/hipBLASLt-6.3:= <sci-libs/hipBLASLt-7.2:=
+ >=sci-libs/hipFFT-6.3:= <sci-libs/hipFFT-7.2:=
+ >=sci-libs/hipRAND-6.3:= <sci-libs/hipRAND-7.2:=
+ >=sci-libs/hipSOLVER-6.3:= <sci-libs/hipSOLVER-7.2:=
+ >=sci-libs/hipSPARSE-6.3:= <sci-libs/hipSPARSE-7.2:=
+ >=sci-libs/miopen-6.3:= <sci-libs/miopen-7.2:=
+ >=sci-libs/rocBLAS-6.3:= <sci-libs/rocBLAS-7.2:=
+ >=sci-libs/rocRAND-6.3:= <sci-libs/rocRAND-7.2:=
+ >=sci-libs/rocSOLVER-6.3:= <sci-libs/rocSOLVER-7.2:=
+ memefficient? ( =sci-libs/aotriton-bin-0.11*:= )
+		distributed? ( >=dev-util/rocm-smi-6.3:= <dev-util/rocm-smi-7.2:= )
+ )
+ distributed? (
+ !rocm? ( sci-ml/tensorpipe[cuda?] )
+ dev-cpp/cpp-httplib:=
+ )
+ xnnpack? (
+ >=sci-ml/XNNPACK-2024.11
+ dev-libs/pthreadpool
+ )
+ mkl? ( sci-libs/mkl )
+ openblas? ( sci-libs/openblas )
+"
+
+DEPEND="
+ ${RDEPEND}
+ dev-cpp/nlohmann_json
+ dev-libs/flatbuffers
+ dev-libs/FXdiv
+ dev-libs/pocketfft
+ dev-libs/psimd
+ sci-ml/FP16
+ $(python_gen_cond_dep '
+ dev-python/pybind11[${PYTHON_USEDEP}]
+ dev-python/pyyaml[${PYTHON_USEDEP}]
+ dev-python/typing-extensions[${PYTHON_USEDEP}]
+ ')
+ cuda? ( >=dev-libs/cutlass-3.9.2[tools(+)] )
+ onednn? ( sci-ml/ideep )
+ rocm? (
+ >=sci-libs/hipCUB-6.3:= <sci-libs/hipCUB-7.2:=
+ >=sci-libs/rocPRIM-6.3:= <sci-libs/rocPRIM-7.2:=
+ >=sci-libs/rocThrust-6.3:= <sci-libs/rocThrust-7.2:=
+ )
+ qnnpack? ( dev-libs/clog )
+"
+
+PATCHES=(
+ "${FILESDIR}"/${PN}-2.5.1-unbundle_fmt.patch
+ "${FILESDIR}"/${PN}-2.5.1-unbundle_kineto.patch
+ "${FILESDIR}"/${PN}-2.8.0-unbundle_pocketfft.patch
+ "${FILESDIR}"/${PN}-2.5.1-cudnn_include_fix.patch
+ "${FILESDIR}"/${PN}-2.4.0-cpp-httplib.patch
+ "${FILESDIR}"/${PN}-2.5.1-glog-0.6.0.patch
+ "${FILESDIR}"/${PN}-2.6.0-rocm-fix-std-cpp17.patch
+ "${FILESDIR}"/${PN}-2.7.0-glog-0.7.1.patch
+ "${FILESDIR}"/${PN}-2.7.1-aotriton-fixes.patch
+ "${FILESDIR}"/${PN}-2.8.0-rocm-minus-flash.patch
+ "${FILESDIR}"/${PN}-2.9.0-cmake.patch
+ "${FILESDIR}"/${PN}-2.9.0-rocm-distributed-link.patch
+ "${FILESDIR}"/${PN}-2.9.1-torch_cpu.patch
+ "${FILESDIR}"/${P}-gentoo.patch
+)
+
+src_prepare() {
+ if use cuda && ( use flash || use memefficient ); then
+ mv "${WORKDIR}"/${FLASH_P}/* third_party/${FLASH_PN}/ || die
+ fi
+ filter-lto #bug 862672
+
+ # Unbundle fmt
+ sed -i \
+ -e 's|::fmt-header-only||' \
+ c10/CMakeLists.txt \
+ cmake/Dependencies.cmake \
+ torch/CMakeLists.txt \
+ || die
+
+ # tensorpipe is in system, not a build target of caffe2
+	sed -e '/target_compile_options_if_supported(tensorpipe/d' -i cmake/Dependencies.cmake || die
+
+ # Drop third_party from CMake tree
+ sed -i \
+ -e '/add_subdirectory.*third_party/d' \
+ CMakeLists.txt \
+ cmake/Dependencies.cmake \
+ cmake/ProtoBuf.cmake \
+ aten/src/ATen/CMakeLists.txt \
+ || die
+ # Change libc10* path
+ sed -i \
+ -e "/EXPORT/s|DESTINATION lib)|DESTINATION $(get_libdir))|" \
+ c10/cuda/CMakeLists.txt \
+ c10/CMakeLists.txt \
+ c10/hip/CMakeLists.txt \
+ || die
+
+ # Change libaotriton path
+ sed -i \
+ -e "s|}/lib|}/$(get_libdir)|g" \
+ cmake/External/aotriton.cmake \
+ || die
+
+ # Noisy warnings from Logging.h
+ sed -i 's/-Wextra-semi//' cmake/public/utils.cmake || die
+
+ cmake_src_prepare
+ pushd torch/csrc/jit/serialization > /dev/null || die
+ flatc --cpp --gen-mutable --scoped-enums mobile_bytecode.fbs || die
+ popd > /dev/null || die
+
+ # prefixify the hardcoded paths, after all patches are applied
+ hprefixify \
+ aten/CMakeLists.txt \
+ caffe2/CMakeLists.txt \
+ cmake/Metal.cmake \
+ cmake/Modules/*.cmake \
+ cmake/Modules_CUDA_fix/FindCUDNN.cmake \
+ cmake/Modules_CUDA_fix/upstream/FindCUDA/make2cmake.cmake \
+		cmake/Modules_CUDA_fix/upstream/FindPackageHandleStandardArgs.cmake \
+ cmake/public/LoadHIP.cmake \
+ cmake/public/cuda.cmake \
+ cmake/Dependencies.cmake \
+ torch/CMakeLists.txt \
+ CMakeLists.txt
+
+ if use rocm; then
+ sed -e "s:/opt/rocm:/usr:" \
+ -e "s:lib/cmake:$(get_libdir)/cmake:g" \
+ -i cmake/public/LoadHIP.cmake || die
+
+		# TODO: delete, when caffe2 depends on systemwide composable_kernel
+		sed -e "s:third_party/composable_kernel:../composable_kernel-${CK_COMMIT}:g" \
+ -i aten/src/ATen/CMakeLists.txt || die
+
+ # Bug 959808: fix for gfx101x targets
+		pushd "${WORKDIR}/composable_kernel-${CK_COMMIT}" > /dev/null || die
+ eapply "${FILESDIR}"/composable-kernel-7fe50dc-expand-isa.patch
+ popd > /dev/null || die
+
+ if tc-is-clang; then
+			# Systemwide gcc (for absl and at::TensorBase) + hipcc (llvm>=18) need abi-compat=17.
+			# But systemwide clang>=18 + hipcc (>=llvm-18) need opposite!
+			# See also: https://github.com/llvm/llvm-project/issues/102443#issuecomment-2329726287
+			sed -e '/-fclang-abi-compat=17/d' -i cmake/Dependencies.cmake || die
+ fi
+
+		# Workaround for libc++ issue https://github.com/llvm/llvm-project/issues/100802
+		sed -e 's/std::memcpy/memcpy/g' -i torch/headeronly/util/Half.h || die
+
+ # Typo: https://github.com/pytorch/pytorch/pull/166502
+		sed -e 's/gloo_hiop/gloo_hip/' -i cmake/Modules/FindGloo.cmake || die
+
+ ebegin "HIPifying cuda sources"
+ ${EPYTHON} tools/amd_build/build_amd.py || die
+ eend $?
+ fi
+}
+
+src_configure() {
+ if use cuda && [[ -z ${TORCH_CUDA_ARCH_LIST} ]]; then
+		ewarn "WARNING: caffe2 is being built with its default CUDA compute capabilities: 3.5 and 7.0."
+		ewarn "These may not be optimal for your GPU."
+		ewarn ""
+		ewarn "To configure caffe2 with the CUDA compute capability that is optimal for your GPU,"
+		ewarn "set TORCH_CUDA_ARCH_LIST in your make.conf, and re-emerge caffe2."
+		ewarn "For example, to use CUDA capability 7.5 & 3.5, add: TORCH_CUDA_ARCH_LIST=7.5 3.5"
+		ewarn "For a Maxwell model GPU, an example value would be: TORCH_CUDA_ARCH_LIST=Maxwell"
+		ewarn ""
+		ewarn "You can look up your GPU's CUDA compute capability at https://developer.nvidia.com/cuda-gpus"
+		ewarn "or by running /opt/cuda/extras/demo_suite/deviceQuery | grep 'CUDA Capability'"
+ fi
+
+ local mycmakeargs=(
+ -DBUILD_CUSTOM_PROTOBUF=OFF
+ -DBUILD_TEST=OFF
+ -DLIBSHM_INSTALL_LIB_SUBDIR="${EPREFIX}"/usr/$(get_libdir)
+ -DPython_EXECUTABLE="${PYTHON}"
+ -DTORCH_INSTALL_LIB_DIR="${EPREFIX}"/usr/$(get_libdir)
+ -DUSE_CCACHE=OFF
+ -DUSE_CUDA=$(usex cuda)
+ -DUSE_DISTRIBUTED=$(usex distributed)
+ -DUSE_FBGEMM=$(usex fbgemm)
+ -DUSE_FLASH_ATTENTION=$(usex flash)
+ -DUSE_GFLAGS=ON
+ -DUSE_GLOG=ON
+ -DUSE_GLOO=$(usex gloo)
+ -DUSE_ITT=OFF
+ -DUSE_KINETO=ON
+ -DUSE_KLEIDIAI=OFF # TODO
+ -DUSE_MAGMA=OFF # TODO: In GURU as sci-libs/magma
+ -DUSE_MEM_EFF_ATTENTION=$(usex memefficient)
+ -DUSE_MKLDNN=$(usex onednn)
+ -DUSE_MPI=$(usex mpi)
+ -DUSE_NCCL=OFF
+ -DUSE_NNPACK=$(usex nnpack)
+ -DUSE_NUMA=OFF
+ -DUSE_NUMPY=$(usex numpy)
+ -DUSE_OPENCL=$(usex opencl)
+ -DUSE_OPENMP=$(usex openmp)
+ -DUSE_PYTORCH_QNNPACK=$(usex qnnpack)
+ -DUSE_PYTORCH_METAL=OFF
+ -DUSE_ROCM=$(usex rocm)
+ -DUSE_SYSTEM_CPUINFO=ON
+ -DUSE_SYSTEM_EIGEN_INSTALL=ON
+ -DUSE_SYSTEM_FP16=ON
+ -DUSE_SYSTEM_FXDIV=ON
+ -DUSE_SYSTEM_GLOO=ON
+ -DUSE_SYSTEM_NVTX=ON
+ -DUSE_SYSTEM_ONNX=ON
+ -DUSE_SYSTEM_PSIMD=ON
+ -DUSE_SYSTEM_PTHREADPOOL=ON
+ -DUSE_SYSTEM_PYBIND11=ON
+ -DUSE_SYSTEM_SLEEF=ON
+ -DUSE_SYSTEM_XNNPACK=$(usex xnnpack)
+		-DUSE_TENSORPIPE=$(use distributed && use !rocm && echo ON || echo OFF)
+ -DUSE_UCC=OFF
+ -DUSE_VALGRIND=OFF
+ -DUSE_XNNPACK=$(usex xnnpack)
+ -DUSE_XPU=OFF
+ -Wno-dev
+ )
+
+ if use mkl; then
+ mycmakeargs+=(-DBLAS=MKL)
+ elif use openblas; then
+ mycmakeargs+=(-DBLAS=OpenBLAS)
+ else
+ mycmakeargs+=(-DBLAS=Generic -DBLAS_LIBRARIES=)
+ fi
+
+ if use cuda; then
+ addpredict "/dev/nvidiactl" # bug 867706
+ addpredict "/dev/char"
+ addpredict "/proc/self/task" # bug 926116
+
+ mycmakeargs+=(
+ -DUSE_CUDNN=ON
+			-DTORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST:-3.5 7.0}"
+			-DUSE_NCCL=OFF # TODO: NVIDIA Collective Communication Library
+ -DCMAKE_CUDA_FLAGS="$(cuda_gccdir -f | tr -d \")"
+ -DUSE_CUSPARSELT=$(usex cusparselt)
+ )
+ elif use rocm; then
+ export PYTORCH_ROCM_ARCH="$(get_amdgpu_flags)"
+
+ if use memefficient; then
+ export AOTRITON_INSTALLED_PREFIX="${ESYSROOT}/usr"
+ fi
+
+ mycmakeargs+=(
+ -DUSE_NCCL=$(usex nccl)
+ -DUSE_SYSTEM_NCCL=ON
+ -DCMAKE_REQUIRE_FIND_PACKAGE_HIP=ON
+		-DUSE_ROCM_CK_SDPA=OFF # requires flash + aiter, works only on gfx90a/gfx942/gfx950
+ )
+
+ # ROCm libraries produce too much warnings
+	append-cxxflags -Wno-deprecated-declarations -Wno-unused-result -Wno-unused-value
+ fi
+
+ if use onednn; then
+ mycmakeargs+=(
+ -DMKLDNN_FOUND=ON
+ -DMKLDNN_LIBRARIES=dnnl
+			-DMKLDNN_INCLUDE_DIR="${ESYSROOT}/usr/include/oneapi/dnnl"
+ )
+ fi
+
+ cmake_src_configure
+}
+
+src_compile() {
+ PYTORCH_BUILD_VERSION=${PV} \
+ PYTORCH_BUILD_NUMBER=0 \
+ cmake_src_compile
+}
+
+python_install() {
+ python_domodule python/torch
+ mkdir "${D}"$(python_get_sitedir)/torch/bin || die
+ mkdir "${D}"$(python_get_sitedir)/torch/lib || die
+ mkdir "${D}"$(python_get_sitedir)/torch/include || die
+ ln -s ../../../../../include/torch \
+		"${D}$(python_get_sitedir)"/torch/include/torch || die # bug 923269
+ ln -s ../../../../../bin/torch_shm_manager \
+ "${D}"/$(python_get_sitedir)/torch/bin/torch_shm_manager || die
+ ln -s ../../../../../$(get_libdir)/libtorch_global_deps.so \
+		"${D}"/$(python_get_sitedir)/torch/lib/libtorch_global_deps.so || die
+}
+
+src_install() {
+ cmake_src_install
+
+ # Used by pytorch ebuild
+ insinto "/var/lib/${PN}"
+ doins "${BUILD_DIR}"/CMakeCache.txt
+
+ rm -rf python
+ mkdir -p python/torch || die
+ cp torch/version.py python/torch/ || die
+ python_install
+}
diff --git a/sci-ml/caffe2/files/caffe2-2.10.0-gentoo.patch b/sci-ml/caffe2/files/caffe2-2.10.0-gentoo.patch
new file mode 100644
index 000000000000..b211756a49cc
--- /dev/null
+++ b/sci-ml/caffe2/files/caffe2-2.10.0-gentoo.patch
@@ -0,0 +1,220 @@
+--- a/CMakeLists.txt
++++ b/CMakeLists.txt
+@@ -1084,7 +1084,7 @@
+ set(CMAKE_COLOR_DIAGNOSTICS ON)
+ endif()
+ if(NOT MSVC)
+- string(APPEND CMAKE_CXX_FLAGS " -O2 -fPIC")
++ string(APPEND CMAKE_CXX_FLAGS " -O2")
+
+ # This prevents use of `c10::optional`, `c10::nullopt` etc within the codebase
+ string(APPEND CMAKE_CXX_FLAGS " -DC10_NODEPRECATED")
+@@ -1095,7 +1095,6 @@
+ # Details at http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1459
+ string(APPEND CMAKE_CXX_FLAGS " -Wall")
+ string(APPEND CMAKE_CXX_FLAGS " -Wextra")
+- append_cxx_flag_if_supported("-Werror=return-type" CMAKE_CXX_FLAGS)
+ append_cxx_flag_if_supported("-Werror=non-virtual-dtor" CMAKE_CXX_FLAGS)
+ append_cxx_flag_if_supported("-Werror=braced-scalar-init" CMAKE_CXX_FLAGS)
+ append_cxx_flag_if_supported("-Werror=range-loop-construct" CMAKE_CXX_FLAGS)
+
+@@ -1083,7 +1082,6 @@
+ endif()
+ append_cxx_flag_if_supported("-fno-math-errno" CMAKE_CXX_FLAGS)
+ append_cxx_flag_if_supported("-fno-trapping-math" CMAKE_CXX_FLAGS)
+- append_cxx_flag_if_supported("-Werror=format" CMAKE_CXX_FLAGS)
+ if(CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 13)
+ append_cxx_flag_if_supported("-Wno-dangling-reference" CMAKE_CXX_FLAGS)
+ append_cxx_flag_if_supported("-Wno-error=dangling-reference" CMAKE_CXX_FLAGS)
+ append_cxx_flag_if_supported("-Wno-error=redundant-move" CMAKE_CXX_FLAGS)
+--- a/aten/src/ATen/native/quantized/cpu/qnnpack/CMakeLists.txt
++++ b/aten/src/ATen/native/quantized/cpu/qnnpack/CMakeLists.txt
+@@ -323,7 +323,7 @@
+ set_target_properties(pytorch_qnnpack PROPERTIES PUBLIC_HEADER include/qnnpack_func.h)
+
+ # ---[ Configure clog
+-if(NOT TARGET clog)
++if(FALSE)
+ set(CLOG_BUILD_TESTS OFF CACHE BOOL "")
+ set(CLOG_RUNTIME_TYPE "${CPUINFO_RUNTIME_TYPE}" CACHE STRING "")
+ add_subdirectory(
+@@ -335,7 +335,8 @@
+ target_compile_options(clog PRIVATE "-Wno-unused-result")
+ endif()
+ endif()
+-target_link_libraries(pytorch_qnnpack PUBLIC clog)
++find_library(CLOG_LIBRARY NAMES clog REQUIRED)
++target_link_libraries(pytorch_qnnpack PUBLIC ${CLOG_LIBRARY})
+
+ # ---[ Configure cpuinfo
+ if(NOT TARGET cpuinfo AND USE_SYSTEM_CPUINFO)
+--- a/caffe2/CMakeLists.txt
++++ b/caffe2/CMakeLists.txt
+@@ -87,7 +87,7 @@ endif()
+ # Note: the folders that are being commented out have not been properly
+ # addressed yet.
+
+-if(NOT MSVC AND USE_XNNPACK)
++if(FALSE)
+ if(NOT TARGET fxdiv)
+ set(FXDIV_BUILD_TESTS OFF CACHE BOOL "")
+ set(FXDIV_BUILD_BENCHMARKS OFF CACHE BOOL "")
+@@ -1212,7 +1212,6 @@ if(USE_XPU)
+ endif()
+
+ if(NOT MSVC AND USE_XNNPACK)
+- TARGET_LINK_LIBRARIES(torch_cpu PRIVATE fxdiv)
+ endif()
+
+ # ==========================================================
+@@ -1324,17 +1323,6 @@
+ target_include_directories(torch_cpu PRIVATE
+ "/usr/include/kineto")
+
+-if(USE_KINETO)
+- target_include_directories(torch_cpu PRIVATE
+- ${TORCH_ROOT}/third_party/kineto/libkineto/src)
+-endif()
+-
+-target_include_directories(torch_cpu PRIVATE
+- ${TORCH_ROOT}/third_party/cpp-httplib)
+-
+-target_include_directories(torch_cpu PRIVATE
+- ${TORCH_ROOT}/third_party/nlohmann/include)
+-
+ install(DIRECTORY
+ "${TORCH_SRC_DIR}/csrc"
+ "${TORCH_SRC_DIR}/headeronly"
+--- a/cmake/Codegen.cmake
++++ b/cmake/Codegen.cmake
+@@ -64,7 +64,7 @@ if(INTERN_BUILD_ATEN_OPS)
+ if(MSVC)
+ set(OPT_FLAG "/fp:strict ")
+ else(MSVC)
+- set(OPT_FLAG "-O3 ")
++ set(OPT_FLAG " ")
+ if("${CMAKE_BUILD_TYPE}" MATCHES "Debug")
+ set(OPT_FLAG " ")
+ endif()
+--- a/cmake/Dependencies.cmake
++++ b/cmake/Dependencies.cmake
+@@ -492,7 +492,9 @@
+ set_property(TARGET pytorch_qnnpack PROPERTY POSITION_INDEPENDENT_CODE ON)
+ set_property(TARGET cpuinfo PROPERTY POSITION_INDEPENDENT_CODE ON)
+ # QNNPACK depends on gemmlowp headers
+- target_include_directories(pytorch_qnnpack PRIVATE "${CAFFE2_THIRD_PARTY_ROOT}/gemmlowp")
++ find_package(gemmlowp REQUIRED)
++ get_target_property(GEMMLOWP_INCLUDE_DIRS gemmlowp::gemmlowp INTERFACE_INCLUDE_DIRECTORIES)
++ target_include_directories(pytorch_qnnpack PRIVATE ${GEMMLOWP_INCLUDE_DIRS})
+ endif()
+
+ list(APPEND Caffe2_DEPENDENCY_LIBS pytorch_qnnpack)
+@@ -593,7 +591,7 @@
+ find_library(microkernels-prod_LIBRARY microkernels-prod)
+ set_property(TARGET XNNPACK PROPERTY IMPORTED_LOCATION "${XNNPACK_LIBRARY}")
+ set_property(TARGET microkernels-prod PROPERTY IMPORTED_LOCATION "${microkernels-prod_LIBRARY}")
+- if(NOT XNNPACK_LIBRARY OR NOT microkernels-prod_LIBRARY)
++ if(FALSE)
+ message(FATAL_ERROR "Cannot find XNNPACK")
+ endif()
+ message("-- Found XNNPACK: ${XNNPACK_LIBRARY}")
+@@ -674,7 +672,7 @@ if(BUILD_TEST OR BUILD_MOBILE_BENCHMARK OR BUILD_MOBILE_TEST)
+ endif()
+
+ # ---[ FBGEMM
+-if(USE_FBGEMM)
++if(FALSE)
+ set(CAFFE2_THIRD_PARTY_ROOT "${PROJECT_SOURCE_DIR}/third_party")
+ if(NOT DEFINED FBGEMM_SOURCE_DIR)
+ set(FBGEMM_SOURCE_DIR "${CAFFE2_THIRD_PARTY_ROOT}/fbgemm" CACHE STRING "FBGEMM source directory")
+@@ -698,6 +696,7 @@ if(USE_FBGEMM)
+ endif()
+
+ if(USE_FBGEMM)
++ list(APPEND Caffe2_DEPENDENCY_LIBS fbgemm)
+ caffe2_update_option(USE_FBGEMM ON)
+ else()
+ caffe2_update_option(USE_FBGEMM OFF)
+@@ -1156,7 +1155,6 @@
+ endif()
+ set(TP_BUILD_LIBUV ON CACHE BOOL "" FORCE)
+ add_compile_options(-DTORCH_USE_LIBUV)
+- include_directories(BEFORE SYSTEM ${CMAKE_CURRENT_LIST_DIR}/../third_party/tensorpipe/third_party/libuv/include)
+ set(TP_STATIC_OR_SHARED STATIC CACHE STRING "" FORCE)
+
+ # Tensorpipe uses cuda_add_library
+@@ -1676,11 +1676,9 @@
+
+ # Include cpp-httplib
+ add_library(httplib INTERFACE IMPORTED)
+-target_include_directories(httplib SYSTEM INTERFACE ${PROJECT_SOURCE_DIR}/third_party/cpp-httplib)
+
+ # Include nlohmann-json
+ add_library(nlohmann INTERFACE IMPORTED)
+-include_directories(nlohmann SYSTEM INTERFACE ${PROJECT_SOURCE_DIR}/third_party/nlohmann/include)
+
+ # Include moodycamel
+ add_library(moodycamel INTERFACE IMPORTED)
+--- a/cmake/External/nnpack.cmake
++++ b/cmake/External/nnpack.cmake
+@@ -56,7 +56,7 @@
+ set(PTHREADPOOL_SOURCE_DIR "${CAFFE2_THIRD_PARTY_ROOT}/pthreadpool" CACHE STRING "pthreadpool source directory")
+ set(GOOGLETEST_SOURCE_DIR "${CAFFE2_THIRD_PARTY_ROOT}/googletest" CACHE STRING "Google Test source directory")
+
+- if(NOT TARGET nnpack)
++ if(FALSE)
+ set(NNPACK_BUILD_TESTS OFF CACHE BOOL "")
+ set(NNPACK_BUILD_BENCHMARKS OFF CACHE BOOL "")
+ set(NNPACK_LIBRARY_TYPE "static" CACHE STRING "")
+--- a/aten/src/ATen/CMakeLists.txt 2025-02-27 14:23:02.402742165 +0100
++++ b/aten/src/ATen/CMakeLists.txt 2025-02-27 14:23:40.445850718 +0100
+@@ -448,8 +448,6 @@
+ if(USE_CUDA)
+ list(APPEND ATen_CUDA_INCLUDE ${CMAKE_CURRENT_SOURCE_DIR}/cuda)
+ # Next two lines are needed because TunableOp uses third-party/fmt
+- list(APPEND ATen_CUDA_INCLUDE $<TARGET_PROPERTY:fmt::fmt-header-only,INTERFACE_INCLUDE_DIRECTORIES>)
+- list(APPEND ATen_CUDA_DEPENDENCY_LIBS fmt::fmt-header-only)
+ list(APPEND ATen_CUDA_CU_SRCS
+ ${cuda_cu}
+ ${native_cuda_cu}
+@@ -521,8 +519,6 @@
+ endif()
+
+ # Next two lines are needed because TunableOp uses third-party/fmt
+- list(APPEND ATen_HIP_INCLUDE $<TARGET_PROPERTY:fmt::fmt-header-only,INTERFACE_INCLUDE_DIRECTORIES>)
+- list(APPEND ATen_HIP_DEPENDENCY_LIBS fmt::fmt-header-only)
+ if(USE_FLASH_ATTENTION AND USE_ROCM_CK_SDPA)
+ list(APPEND ATen_HIP_INCLUDE ${CMAKE_CURRENT_SOURCE_DIR}/native/transformers/hip/flash_attn/ck)
+ endif()
+--- a/torch/CMakeLists.txt
++++ b/torch/CMakeLists.txt
+@@ -59,16 +59,10 @@
+ ${CMAKE_BINARY_DIR}/aten/src
+ ${CMAKE_BINARY_DIR}/caffe2/aten/src
+ ${CMAKE_BINARY_DIR}/third_party
+- ${CMAKE_BINARY_DIR}/third_party/onnx
+
+ ${TORCH_ROOT}/third_party/valgrind-headers
+
+- ${TORCH_ROOT}/third_party/gloo
+- ${TORCH_ROOT}/third_party/onnx
+- ${TORCH_ROOT}/third_party/flatbuffers/include
+ "/usr/include/kineto"
+- ${TORCH_ROOT}/third_party/cpp-httplib
+- ${TORCH_ROOT}/third_party/nlohmann/include
+
+ ${TORCH_SRC_DIR}/csrc
+ ${TORCH_SRC_DIR}/csrc/api/include
+--- a/cmake/FlatBuffers.cmake
++++ b/cmake/FlatBuffers.cmake
+@@ -1,10 +1 @@
+-set(FlatBuffers_Include ${PROJECT_SOURCE_DIR}/third_party/flatbuffers/include)
+-file(GLOB FlatBuffers_Library_SRCS
+- ${FlatBuffers_Include}/flatbuffers/*.h
+-)
+ add_library(flatbuffers INTERFACE)
+-target_sources(
+- flatbuffers
+- INTERFACE ${FlatBuffers_Library_SRCS}
+-)
+-target_include_directories(flatbuffers INTERFACE ${FlatBuffers_Include})
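The ewarn block in src_configure above explains how to pin CUDA compute capabilities via TORCH_CUDA_ARCH_LIST in make.conf. A minimal usage sketch, assuming a standard Portage layout with a directory-style package.accept_keywords; the file path, the "8.6" capability, and the flag values below are illustrative examples and not part of this commit:

# Illustrative only: accept the ~arch keyword for 2.10.0, pin a CUDA arch list,
# then build the new version. Adjust values for your own GPU and profile.
echo "~sci-ml/caffe2-2.10.0 ~amd64" >> /etc/portage/package.accept_keywords/caffe2
echo 'TORCH_CUDA_ARCH_LIST="8.6"' >> /etc/portage/make.conf
emerge --ask "=sci-ml/caffe2-2.10.0"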