commit: c3f23775dd9ca3a029ec653a4a5bdb46b432937a
Author: Alfredo Tupone <tupone <AT> gentoo <DOT> org>
AuthorDate: Fri Oct 24 17:02:43 2025 +0000
Commit: Alfredo Tupone <tupone <AT> gentoo <DOT> org>
CommitDate: Fri Oct 24 18:01:25 2025 +0000
URL: https://gitweb.gentoo.org/repo/gentoo.git/commit/?id=c3f23775
sci-ml/caffe2: drop 2.7.1-r5
Signed-off-by: Alfredo Tupone <tupone <AT> gentoo.org>
sci-ml/caffe2/Manifest | 1 -
sci-ml/caffe2/caffe2-2.7.1-r5.ebuild | 389 -----------------------
sci-ml/caffe2/files/caffe2-2.7.0-cmake.patch | 40 ---
sci-ml/caffe2/files/caffe2-2.7.0-gentoo.patch | 157 ---------
sci-ml/caffe2/files/caffe2-2.7.0-llvm.patch | 15 -
sci-ml/caffe2/files/caffe2-2.7.1-ck-config.patch | 38 ---
6 files changed, 640 deletions(-)
diff --git a/sci-ml/caffe2/Manifest b/sci-ml/caffe2/Manifest
index b45b9d6bcc18..898b3d979d20 100644
--- a/sci-ml/caffe2/Manifest
+++ b/sci-ml/caffe2/Manifest
@@ -1,6 +1,5 @@
DIST composable_kernel-7fe50dc3.tar.gz 5380728 BLAKE2B c89c346d8e2d7a93a9cf26409e477fcdd25c43bc3f99d904c3bfe1bc282c6844ef2f2c80aceabe3bf4494db3457285384d5de5a22281aa426ba7479af82b0caf SHA512 a62f92e2dd7da944bd34bab6cf3bf624f630dc316d29c755e9fd523343c3f7648b0b7e0c9a0c8f5e9654477599ae8be9dac687d4054b0390f064ac2e40fc1cd3
DIST composable_kernel-8086bbe3.tar.gz 4418862 BLAKE2B b710e3d4586899443ec01044dad19fd2f992c351e2f65ba526dfcc47cc65c095beaf8ac21a8f71c02a0eb524d364e817b27241a9198884f2bdae9924b51e24e4 SHA512 8410b5a1c864d71f3034ef0d9d1245078856d09cc191faec59856c229bf11d89ae291036d735cb5cec4f1d72e6e9e8f6921833147f9619d30cfab8722d3a9f63
DIST flash-attention-2.7.4.gh.tar.gz 5841323 BLAKE2B 432999d763f2b3d732580ddfea5d3e01370351db0656546259a5e500a07516dd03c98828bfb55855dabe4adc651033b5d97ea4725ca46158b9970f0fbc662710 SHA512 05a4afb09e666f7404d6a3f8b5256e7bed6eba60a6f1bde2b7dbb96d318975f0b458c2521c7a38d88e97b6e4c27f29077cf787849daf82586e33f43a3d9a84b3
-DIST pytorch-2.7.1.tar.gz 50203605 BLAKE2B 3f4b2643d86fe9ff30b2f335353dfe6a8e222bcc12143bc5d09268fb37bfd42f9451620e6e0db225c3c3e7930c999115fdd2ed62b7eae93b0d5e233270c7c760 SHA512 a9fc2252af9031c2cd46dde558c491aea8bc322fb80157a7760f300a44b759d4bfe866f030fbb974b80493057cfff4dd512498f99a100ed6d05bf620258ed37e
DIST pytorch-2.8.0.tar.gz 56565754 BLAKE2B a8f07513b92f9293f8322508f9fc73a462f89fe51cb1f280af371cee19cbe7e2bf900ba2b3c43fd08ea415566db441a6d6310d77f18477e957641be311a361a5 SHA512 448e9dad4aa10f1793d35e6ffe9f0f69b7719d41e6eccceb687a8d0c148e22d03e4f76170a05308ef9323a7aea41aa74605077ae1d68c6d949f13b3340ebf310
DIST pytorch-2.9.0.tar.gz 55750268 BLAKE2B 943459ec60a4e1f5e36766defc7018fbf9722fb1564b723c2a7ebcb2a5d8b1735f0b1542dc67a77f788af3e2454ea6261dbdee5beb2bcfa4af2e58ca566edc93 SHA512 2ecdc0eac39ecee68b0f4c98e498424cde00c45bbeeff576c8778046f97119cd02885498b072352dd3cdd9aecd02baf61cdc5554bce8d757b30c673053a0cc80
diff --git a/sci-ml/caffe2/caffe2-2.7.1-r5.ebuild b/sci-ml/caffe2/caffe2-2.7.1-r5.ebuild
deleted file mode 100644
index 795093a146a1..000000000000
--- a/sci-ml/caffe2/caffe2-2.7.1-r5.ebuild
+++ /dev/null
@@ -1,389 +0,0 @@
-# Copyright 2022-2025 Gentoo Authors
-# Distributed under the terms of the GNU General Public License v2
-
-EAPI=8
-
-PYTHON_COMPAT=( python3_{11..13} )
-ROCM_VERSION=6.1
-inherit python-single-r1 cmake cuda flag-o-matic prefix rocm toolchain-funcs
-
-MYPN=pytorch
-MYP=${MYPN}-${PV}
-
-# caffe2-2.6.0 depends on future version of composable kernel
-# TODO: replace it with RDEPEND in the future
-CK_COMMIT=8086bbe3a78d931eb96fe12fdc014082e18d18d3
-CK_P=composable_kernel-${CK_COMMIT:0:8}
-
-FLASH_PV=2.7.4
-FLASH_PN=flash-attention
-FLASH_P=${FLASH_PN}-${FLASH_PV}
-
-AOTRITON_PV=0.9.2b
-AOTRITON_PN=aotriton
-AOTRITON_P=${AOTRITON_PN}-${AOTRITON_PV}
-AOTRITON_tar=${AOTRITON_P}-manylinux_2_28_x86_64-rocm6.3-shared.tar.gz
-
-DESCRIPTION="A deep learning framework"
-HOMEPAGE="https://pytorch.org/"
-SRC_URI="
- https://github.com/pytorch/${MYPN}/archive/refs/tags/v${PV}.tar.gz -> ${MYP}.tar.gz
- rocm? (
- https://github.com/ROCm/composable_kernel/archive/${CK_COMMIT}.tar.gz
- -> ${CK_P}.tar.gz
- )
- flash? (
- https://github.com/Dao-AILab/${FLASH_PN}/archive/refs/tags/v${FLASH_PV}.tar.gz
- -> ${FLASH_P}.gh.tar.gz
- )
-"
-
-S="${WORKDIR}"/${MYP}
-
-LICENSE="BSD"
-SLOT="0"
-KEYWORDS="~amd64 ~arm64"
-IUSE="cuda cusparselt distributed fbgemm flash gloo memefficient mkl mpi
nnpack +numpy
- onednn openblas opencl openmp qnnpack rocm xnnpack"
-RESTRICT="test"
-REQUIRED_USE="
- ${PYTHON_REQUIRED_USE}
- mpi? ( distributed )
- gloo? ( distributed )
- ?? ( cuda rocm )
- rocm? (
- || ( ${ROCM_REQUIRED_USE} )
- !flash
- )
-"
-
-RDEPEND="
- ${PYTHON_DEPS}
- dev-cpp/abseil-cpp:=
- dev-cpp/gflags:=
- >=dev-cpp/glog-0.5.0
- dev-cpp/nlohmann_json
- dev-cpp/opentelemetry-cpp
- dev-libs/cpuinfo
- dev-libs/libfmt:=
- dev-libs/protobuf:=
- dev-libs/pthreadpool
- dev-libs/sleef
- sci-ml/foxi
- ~sci-ml/kineto-0.4.0_p20250214
- <sci-ml/onnx-1.18.0
- virtual/lapack
- cuda? (
- dev-libs/cudnn
- >=sci-ml/cudnn-frontend-1.0.3:0/8
- >=dev-util/nvidia-cuda-toolkit-12.9:=[profiler]
- cusparselt? ( dev-libs/cusparselt )
- )
- fbgemm? ( sci-ml/FBGEMM )
- gloo? ( <=sci-ml/gloo-2023.12.03[cuda?] )
- mpi? ( virtual/mpi )
- nnpack? ( sci-ml/NNPACK )
- numpy? ( $(python_gen_cond_dep '
- dev-python/numpy[${PYTHON_USEDEP}]
- ') )
- onednn? ( =sci-ml/oneDNN-3.5* )
- opencl? ( virtual/opencl )
- qnnpack? (
- !sci-libs/QNNPACK
- sci-ml/gemmlowp
- )
- rocm? (
- >=dev-libs/rccl-6.1 <dev-libs/rccl-6.5
- >=dev-util/hip-6.1 <dev-util/hip-6.5
- >=dev-util/roctracer-6.1 <dev-util/roctracer-6.5
- >=sci-libs/hipBLAS-6.1 <sci-libs/hipBLAS-6.5
- >=sci-libs/hipBLASLt-6.1 <sci-libs/hipBLASLt-6.5
- >=sci-libs/hipCUB-6.1 <sci-libs/hipCUB-6.5
- >=sci-libs/hipFFT-6.1 <sci-libs/hipFFT-6.5
- >=sci-libs/hipRAND-6.1 <sci-libs/hipRAND-6.5
- >=sci-libs/hipSOLVER-6.1 <sci-libs/hipSOLVER-6.5
- >=sci-libs/hipSPARSE-6.1 <sci-libs/hipSPARSE-6.5
- >=sci-libs/miopen-6.1 <sci-libs/miopen-6.5
- >=sci-libs/rocPRIM-6.1 <sci-libs/rocPRIM-6.5
- >=sci-libs/rocThrust-6.1 <sci-libs/rocThrust-6.5
- memefficient? ( sci-libs/aotriton-bin:0/0.9 )
- )
- distributed? (
- sci-ml/tensorpipe[cuda?]
- dev-cpp/cpp-httplib
- )
- xnnpack? ( >=sci-ml/XNNPACK-2024.11 )
- mkl? ( sci-libs/mkl )
- openblas? ( sci-libs/openblas )
-"
-
-DEPEND="
- ${RDEPEND}
- dev-libs/flatbuffers
- dev-libs/FXdiv
- dev-libs/pocketfft
- dev-libs/psimd
- sci-ml/FP16
- $(python_gen_cond_dep '
- dev-python/pybind11[${PYTHON_USEDEP}]
- dev-python/pyyaml[${PYTHON_USEDEP}]
- dev-python/typing-extensions[${PYTHON_USEDEP}]
- ')
- cuda? ( ~dev-libs/cutlass-3.8.0 )
- onednn? ( sci-ml/ideep )
- qnnpack? ( dev-libs/clog )
-"
-
-PATCHES=(
- "${FILESDIR}"/${PN}-2.5.1-unbundle_fmt.patch
- "${FILESDIR}"/${PN}-2.5.1-unbundle_kineto.patch
- "${FILESDIR}"/${PN}-2.5.1-cudnn_include_fix.patch
- "${FILESDIR}"/${PN}-2.7.0-gentoo.patch
- "${FILESDIR}"/${PN}-2.4.0-cpp-httplib.patch
- "${FILESDIR}"/${PN}-2.5.1-glog-0.6.0.patch
- "${FILESDIR}"/${PN}-2.5.1-newfix-functorch-install.patch
- "${FILESDIR}"/${PN}-2.6.0-rocm-fix-std-cpp17.patch
- "${FILESDIR}"/${PN}-2.7.0-cmake.patch
- "${FILESDIR}"/${PN}-2.7.0-glog-0.7.1.patch
- "${FILESDIR}"/${PN}-2.7.0-llvm.patch
- "${FILESDIR}"/${PN}-2.7.1-ck-config.patch
- "${FILESDIR}"/${PN}-2.7.1-aotriton-fixes.patch
-)
-
-src_prepare() {
- if use flash; then
- mv "${WORKDIR}"/${FLASH_P}/* third_party/${FLASH_PN}/ || die
- fi
- filter-lto #bug 862672
-
- # Unbundle fmt
- sed -i \
- -e 's|::fmt-header-only||' \
- c10/CMakeLists.txt \
- cmake/Dependencies.cmake \
- torch/CMakeLists.txt \
- || die
-
- # Drop third_party from CMake tree
- sed -i \
- -e '/add_subdirectory.*third_party/d' \
- CMakeLists.txt \
- cmake/Dependencies.cmake \
- cmake/ProtoBuf.cmake \
- aten/src/ATen/CMakeLists.txt \
- || die
- # Change libc10* path
- sed -i \
- -e "/EXPORT/s|DESTINATION lib)|DESTINATION $(get_libdir))|" \
- c10/cuda/CMakeLists.txt \
- c10/CMakeLists.txt \
- c10/hip/CMakeLists.txt \
- || die
- sed -i \
- -e '/Using pocketfft in directory:/d' \
- cmake/Dependencies.cmake \
- || die
-
- # Change libaotriton path
- sed -i \
- -e "s|}/lib|}/$(get_libdir)|g" \
- cmake/External/aotriton.cmake \
- || die
-
- # Noisy warnings from Logging.h
- sed -i 's/-Wextra-semi//' cmake/public/utils.cmake || die
-
- cmake_src_prepare
- pushd torch/csrc/jit/serialization || die
- flatc --cpp --gen-mutable --scoped-enums mobile_bytecode.fbs || die
- popd
-
- # prefixify the hardcoded paths, after all patches are applied
- hprefixify \
- aten/CMakeLists.txt \
- caffe2/CMakeLists.txt \
- cmake/Metal.cmake \
- cmake/Modules/*.cmake \
- cmake/Modules_CUDA_fix/FindCUDNN.cmake \
- cmake/Modules_CUDA_fix/upstream/FindCUDA/make2cmake.cmake \
- cmake/Modules_CUDA_fix/upstream/FindPackageHandleStandardArgs.cmake \
- cmake/public/LoadHIP.cmake \
- cmake/public/cuda.cmake \
- cmake/Dependencies.cmake \
- torch/CMakeLists.txt \
- CMakeLists.txt
-
- if use rocm; then
- sed -e "s:/opt/rocm:/usr:" \
- -e "s:lib/cmake:$(get_libdir)/cmake:g" \
- -i cmake/public/LoadHIP.cmake || die
-
- # TODO: delete, when caffe2 depends on systemwide composable_kernel
- sed -e "s:third_party/composable_kernel:../composable_kernel-${CK_COMMIT}:g" \
- -i aten/src/ATen/CMakeLists.txt || die
-
- # Bug 959808: fix for gfx101x targets
- pushd "${WORKDIR}/composable_kernel-${CK_COMMIT}" > /dev/null || die
- eapply "${FILESDIR}"/composable-kernel-6.4.1-expand-isa.patch
- popd > /dev/null || die
-
- if tc-is-clang; then
- # Systemwide gcc (for absl and at::TensorBase) + hipcc (llvm>=18) need abi-compat=17.
- # But systemwide clang>=18 + hipcc (>=llvm-18) need opposite!
- # See also: https://github.com/llvm/llvm-project/issues/102443#issuecomment-2329726287
- sed '/-fclang-abi-compat=17/d' -i cmake/Dependencies.cmake || die
- fi
-
- # Workaround for libc++ issue https://github.com/llvm/llvm-project/issues/100802
- sed 's/std::memcpy/memcpy/g' -i c10/util/Half.h || die
-
- ebegin "HIPifying cuda sources"
- ${EPYTHON} tools/amd_build/build_amd.py || die
- eend $?
- fi
-}
-
-src_configure() {
- if use cuda && [[ -z ${TORCH_CUDA_ARCH_LIST} ]]; then
- ewarn "WARNING: caffe2 is being built with its default CUDA
compute capabilities: 3.5 and 7.0."
- ewarn "These may not be optimal for your GPU."
- ewarn ""
- ewarn "To configure caffe2 with the CUDA compute capability
that is optimal for your GPU,"
- ewarn "set TORCH_CUDA_ARCH_LIST in your make.conf, and
re-emerge caffe2."
- ewarn "For example, to use CUDA capability 7.5 & 3.5, add:
TORCH_CUDA_ARCH_LIST=7.5 3.5"
- ewarn "For a Maxwell model GPU, an example value would be:
TORCH_CUDA_ARCH_LIST=Maxwell"
- ewarn ""
- ewarn "You can look up your GPU's CUDA compute capability at
https://developer.nvidia.com/cuda-gpus"
- ewarn "or by running /opt/cuda/extras/demo_suite/deviceQuery |
grep 'CUDA Capability'"
- fi
-
- local mycmakeargs=(
- -DBUILD_CUSTOM_PROTOBUF=OFF
- -DLIBSHM_INSTALL_LIB_SUBDIR="${EPREFIX}"/usr/$(get_libdir)
- -DPython_EXECUTABLE="${PYTHON}"
- -DTORCH_INSTALL_LIB_DIR="${EPREFIX}"/usr/$(get_libdir)
- -DUSE_CCACHE=OFF
- -DUSE_CUDA=$(usex cuda)
- -DUSE_DISTRIBUTED=$(usex distributed)
- -DUSE_FAKELOWP=OFF
- -DUSE_FBGEMM=$(usex fbgemm)
- -DUSE_FLASH_ATTENTION=$(usex flash)
- -DUSE_GFLAGS=ON
- -DUSE_GLOG=ON
- -DUSE_GLOO=$(usex gloo)
- -DUSE_ITT=OFF
- -DUSE_KINETO=ON
- -DUSE_KLEIDIAI=OFF # TODO
- -DUSE_MAGMA=OFF # TODO: In GURU as sci-libs/magma
- -DUSE_MEM_EFF_ATTENTION=$(usex memefficient)
- -DUSE_MKLDNN=$(usex onednn)
- -DUSE_MPI=$(usex mpi)
- -DUSE_NCCL=OFF
- -DUSE_NNPACK=$(usex nnpack)
- -DUSE_NUMA=OFF
- -DUSE_NUMPY=$(usex numpy)
- -DUSE_OPENCL=$(usex opencl)
- -DUSE_OPENMP=$(usex openmp)
- -DUSE_PYTORCH_QNNPACK=$(usex qnnpack)
- -DUSE_PYTORCH_METAL=OFF
- -DUSE_ROCM=$(usex rocm)
- -DUSE_SYSTEM_CPUINFO=ON
- -DUSE_SYSTEM_EIGEN_INSTALL=ON
- -DUSE_SYSTEM_FP16=ON
- -DUSE_SYSTEM_FXDIV=ON
- -DUSE_SYSTEM_GLOO=ON
- -DUSE_SYSTEM_NVTX=ON
- -DUSE_SYSTEM_ONNX=ON
- -DUSE_SYSTEM_PSIMD=ON
- -DUSE_SYSTEM_PTHREADPOOL=ON
- -DUSE_SYSTEM_PYBIND11=ON
- -DUSE_SYSTEM_SLEEF=ON
- -DUSE_SYSTEM_XNNPACK=$(usex xnnpack)
- -DUSE_TENSORPIPE=$(usex distributed)
- -DUSE_UCC=OFF
- -DUSE_VALGRIND=OFF
- -DUSE_XNNPACK=$(usex xnnpack)
- -DUSE_XPU=OFF
- -Wno-dev
- )
-
- if use mkl; then
- mycmakeargs+=(-DBLAS=MKL)
- elif use openblas; then
- mycmakeargs+=(-DBLAS=OpenBLAS)
- else
- mycmakeargs+=(-DBLAS=Generic -DBLAS_LIBRARIES=)
- fi
-
- if use cuda; then
- addpredict "/dev/nvidiactl" # bug 867706
- addpredict "/dev/char"
- addpredict "/proc/self/task" # bug 926116
-
- mycmakeargs+=(
- -DUSE_CUDNN=ON
- -DTORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST:-3.5 7.0}"
- -DUSE_NCCL=OFF # TODO: NVIDIA Collective Communication Library
- -DCMAKE_CUDA_FLAGS="$(cuda_gccdir -f | tr -d \")"
- -DUSE_CUSPARSELT=$(usex cusparselt)
- )
- elif use rocm; then
- export PYTORCH_ROCM_ARCH="$(get_amdgpu_flags)"
-
- if use memefficient; then
- export AOTRITON_INSTALLED_PREFIX="${ESYSROOT}/usr"
- fi
-
- mycmakeargs+=(
- -DUSE_NCCL=ON
- -DUSE_SYSTEM_NCCL=ON
- -DCMAKE_REQUIRE_FIND_PACKAGE_HIP=ON
- )
-
- # ROCm libraries produce too much warnings
- append-cxxflags -Wno-deprecated-declarations -Wno-unused-result -Wno-unused-value
- fi
-
- if use onednn; then
- mycmakeargs+=(
- -DMKLDNN_FOUND=ON
- -DMKLDNN_LIBRARIES=dnnl
- -DMKLDNN_INCLUDE_DIR="${ESYSROOT}/usr/include/oneapi/dnnl"
- )
- fi
-
- cmake_src_configure
-}
-
-src_compile() {
- PYTORCH_BUILD_VERSION=${PV} \
- PYTORCH_BUILD_NUMBER=0 \
- cmake_src_compile
-}
-
-python_install() {
- python_domodule python/torch
- mkdir "${D}"$(python_get_sitedir)/torch/bin || die
- mkdir "${D}"$(python_get_sitedir)/torch/lib || die
- mkdir "${D}"$(python_get_sitedir)/torch/include || die
- ln -s ../../../../../include/torch \
- "${D}$(python_get_sitedir)"/torch/include/torch || die # bug
923269
- ln -s ../../../../../bin/torch_shm_manager \
- "${D}"/$(python_get_sitedir)/torch/bin/torch_shm_manager || die
- ln -s ../../../../../$(get_libdir)/libtorch_global_deps.so \
- "${D}"/$(python_get_sitedir)/torch/lib/libtorch_global_deps.so
|| die
-}
-
-src_install() {
- cmake_src_install
-
- # Used by pytorch ebuild
- insinto "/var/lib/${PN}"
- doins "${BUILD_DIR}"/CMakeCache.txt
- dostrip -x /var/lib/${PN}/functorch.so
-
- rm -rf python
- mkdir -p python/torch || die
- cp torch/version.py python/torch/ || die
- python_install
-}
diff --git a/sci-ml/caffe2/files/caffe2-2.7.0-cmake.patch b/sci-ml/caffe2/files/caffe2-2.7.0-cmake.patch
deleted file mode 100644
index 008dfe560105..000000000000
--- a/sci-ml/caffe2/files/caffe2-2.7.0-cmake.patch
+++ /dev/null
@@ -1,40 +0,0 @@
--- a/.ci/pytorch/test_example_code/CMakeLists.txt 2025-06-24 11:57:17.268200696 +0200
-+++ b/.ci/pytorch/test_example_code/CMakeLists.txt 2025-06-24 11:57:27.656239353 +0200
-@@ -1,4 +1,4 @@
--cmake_minimum_required(VERSION 3.0 FATAL_ERROR)
-+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
- project(simple-torch-test)
-
- find_package(Torch REQUIRED)
--- a/aten/src/ATen/test/test_install/CMakeLists.txt 2025-06-24 11:54:39.366613030 +0200
-+++ b/aten/src/ATen/test/test_install/CMakeLists.txt 2025-06-24 11:54:49.938652376 +0200
-@@ -1,4 +1,4 @@
--cmake_minimum_required(VERSION 3.0)
-+cmake_minimum_required(VERSION 3.5)
- find_package(ATen REQUIRED)
- include_directories(${ATEN_INCLUDE_DIR})
-
--- a/android/test_app/app/CMakeLists.txt 2025-06-24 11:49:00.371351384 +0200
-+++ b/android/test_app/app/CMakeLists.txt 2025-06-24 11:49:12.083394978 +0200
-@@ -1,4 +1,4 @@
--cmake_minimum_required(VERSION 3.4.1)
-+cmake_minimum_required(VERSION 3.5)
- set(PROJECT_NAME pytorch_testapp_jni)
- project(${PROJECT_NAME} CXX)
- set(CMAKE_CXX_STANDARD 17 CACHE STRING "The C++ standard whose features are requested to build this target.")
--- a/android/pytorch_android/CMakeLists.txt 2025-06-24 11:58:48.551540427 +0200
-+++ b/android/pytorch_android/CMakeLists.txt 2025-06-24 11:58:59.802582301 +0200
-@@ -1,4 +1,4 @@
--cmake_minimum_required(VERSION 3.4.1)
-+cmake_minimum_required(VERSION 3.5)
- option(BUILD_LITE_INTERPRETER "Master flag to build pytorch_jni_lite" ON)
- message(
- STATUS
--- a/android/pytorch_android_torchvision/CMakeLists.txt 2025-06-24 12:04:49.205884981 +0200
-+++ b/android/pytorch_android_torchvision/CMakeLists.txt 2025-06-24 12:04:58.357919901 +0200
-@@ -1,4 +1,4 @@
--cmake_minimum_required(VERSION 3.4.1)
-+cmake_minimum_required(VERSION 3.5)
- project(pytorch_vision_jni CXX)
- set(CMAKE_CXX_STANDARD 17 CACHE STRING "The C++ standard whose features are requested to build this target.")
- set(CMAKE_VERBOSE_MAKEFILE ON)
diff --git a/sci-ml/caffe2/files/caffe2-2.7.0-gentoo.patch b/sci-ml/caffe2/files/caffe2-2.7.0-gentoo.patch
deleted file mode 100644
index 78011bc46cdf..000000000000
--- a/sci-ml/caffe2/files/caffe2-2.7.0-gentoo.patch
+++ /dev/null
@@ -1,157 +0,0 @@
---- a/CMakeLists.txt
-+++ b/CMakeLists.txt
-@@ -989,12 +989,11 @@ endif()
- # third_party/FBGEMM
- include(cmake/public/utils.cmake)
- if(NOT MSVC)
-- string(APPEND CMAKE_CXX_FLAGS " -O2 -fPIC")
-+ string(APPEND CMAKE_CXX_FLAGS " -O2")
- # Eigen fails to build with some versions, so convert this to a warning
- # Details at http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1459
- string(APPEND CMAKE_CXX_FLAGS " -Wall")
- string(APPEND CMAKE_CXX_FLAGS " -Wextra")
-- append_cxx_flag_if_supported("-Werror=return-type" CMAKE_CXX_FLAGS)
- append_cxx_flag_if_supported("-Werror=non-virtual-dtor" CMAKE_CXX_FLAGS)
- append_cxx_flag_if_supported("-Werror=braced-scalar-init" CMAKE_CXX_FLAGS)
- append_cxx_flag_if_supported("-Werror=range-loop-construct" CMAKE_CXX_FLAGS)
-@@ -1092,7 +1091,6 @@
- endif()
- append_cxx_flag_if_supported("-fno-math-errno" CMAKE_CXX_FLAGS)
- append_cxx_flag_if_supported("-fno-trapping-math" CMAKE_CXX_FLAGS)
-- append_cxx_flag_if_supported("-Werror=format" CMAKE_CXX_FLAGS)
- if(CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 13)
- append_cxx_flag_if_supported("-Wno-dangling-reference" CMAKE_CXX_FLAGS)
- append_cxx_flag_if_supported("-Wno-error=dangling-reference"
CMAKE_CXX_FLAGS)
- append_cxx_flag_if_supported("-Wno-error=redundant-move" CMAKE_CXX_FLAGS)
---- a/aten/src/ATen/native/quantized/cpu/qnnpack/CMakeLists.txt
-+++ b/aten/src/ATen/native/quantized/cpu/qnnpack/CMakeLists.txt
-@@ -323,16 +323,8 @@ set_target_properties(pytorch_qnnpack PROPERTIES PUBLIC_HEADER include/pytorch_q
- set_target_properties(pytorch_qnnpack PROPERTIES PUBLIC_HEADER include/qnnpack_func.h)
-
- # ---[ Configure clog
--if(NOT TARGET clog)
-- set(CLOG_BUILD_TESTS OFF CACHE BOOL "")
-- set(CLOG_RUNTIME_TYPE "${CPUINFO_RUNTIME_TYPE}" CACHE STRING "")
-- add_subdirectory(
-- "${CLOG_SOURCE_DIR}"
-- "${CONFU_DEPENDENCIES_BINARY_DIR}/clog")
-- # We build static version of clog but a dynamic library may indirectly depend on it
-- set_property(TARGET clog PROPERTY POSITION_INDEPENDENT_CODE ON)
--endif()
--target_link_libraries(pytorch_qnnpack PUBLIC clog)
-+find_library(CLOG_LIBRARY NAMES clog REQUIRED)
-+target_link_libraries(pytorch_qnnpack PUBLIC ${CLOG_LIBRARY})
-
- # ---[ Configure cpuinfo
- if(NOT TARGET cpuinfo AND USE_SYSTEM_CPUINFO)
---- a/caffe2/CMakeLists.txt
-+++ b/caffe2/CMakeLists.txt
-@@ -87,7 +87,7 @@ endif()
- # Note: the folders that are being commented out have not been properly
- # addressed yet.
-
--if(NOT MSVC AND USE_XNNPACK)
-+if(FALSE)
- if(NOT TARGET fxdiv)
- set(FXDIV_BUILD_TESTS OFF CACHE BOOL "")
- set(FXDIV_BUILD_BENCHMARKS OFF CACHE BOOL "")
-@@ -1135,7 +1135,6 @@ if(USE_XPU)
- endif()
-
- if(NOT MSVC AND USE_XNNPACK)
-- TARGET_LINK_LIBRARIES(torch_cpu PRIVATE fxdiv)
- endif()
-
- # ==========================================================
---- a/cmake/Codegen.cmake
-+++ b/cmake/Codegen.cmake
-@@ -64,7 +64,7 @@ if(INTERN_BUILD_ATEN_OPS)
- if(MSVC)
- set(OPT_FLAG "/fp:strict ")
- else(MSVC)
-- set(OPT_FLAG "-O3 ")
-+ set(OPT_FLAG " ")
- if("${CMAKE_BUILD_TYPE}" MATCHES "Debug")
- set(OPT_FLAG " ")
- endif()
---- a/cmake/Dependencies.cmake
-+++ b/cmake/Dependencies.cmake
-@@ -467,7 +467,9 @@
- set_property(TARGET pytorch_qnnpack PROPERTY POSITION_INDEPENDENT_CODE ON)
- set_property(TARGET cpuinfo PROPERTY POSITION_INDEPENDENT_CODE ON)
- # QNNPACK depends on gemmlowp headers
-- target_include_directories(pytorch_qnnpack PRIVATE "${CAFFE2_THIRD_PARTY_ROOT}/gemmlowp")
-+ find_package(gemmlowp REQUIRED)
-+ get_target_property(GEMMLOWP_INCLUDE_DIRS gemmlowp::gemmlowp INTERFACE_INCLUDE_DIRECTORIES)
-+ target_include_directories(pytorch_qnnpack PRIVATE ${GEMMLOWP_INCLUDE_DIRS})
- endif()
-
- list(APPEND Caffe2_DEPENDENCY_LIBS pytorch_qnnpack)
-@@ -562,7 +564,7 @@
- find_library(microkernels-prod_LIBRARY microkernels-prod)
- set_property(TARGET XNNPACK PROPERTY IMPORTED_LOCATION "${XNNPACK_LIBRARY}")
-- set_property(TARGET microkernels-prod PROPERTY IMPORTED_LOCATION "${microkernels-prod_LIBRARY}")
-- if(NOT XNNPACK_LIBRARY or NOT microkernels-prod_LIBRARY)
-+ if(FALSE)
- message(FATAL_ERROR "Cannot find XNNPACK")
- endif()
- message("-- Found XNNPACK: ${XNNPACK_LIBRARY}")
-@@ -699,7 +701,7 @@ if(BUILD_TEST OR BUILD_MOBILE_BENCHMARK OR BUILD_MOBILE_TEST)
- endif()
-
- # ---[ FBGEMM
--if(USE_FBGEMM)
-+if(FALSE)
- set(CAFFE2_THIRD_PARTY_ROOT "${PROJECT_SOURCE_DIR}/third_party")
- if(NOT DEFINED FBGEMM_SOURCE_DIR)
- set(FBGEMM_SOURCE_DIR "${CAFFE2_THIRD_PARTY_ROOT}/fbgemm" CACHE STRING "FBGEMM source directory")
-@@ -751,6 +753,7 @@ if(USE_FBGEMM)
- endif()
-
- if(USE_FBGEMM)
-+ list(APPEND Caffe2_DEPENDENCY_LIBS fbgemm)
- caffe2_update_option(USE_FBGEMM ON)
- else()
- caffe2_update_option(USE_FBGEMM OFF)
---- a/cmake/External/nnpack.cmake
-+++ b/cmake/External/nnpack.cmake
-@@ -56,7 +56,7 @@
- set(PTHREADPOOL_SOURCE_DIR "${CAFFE2_THIRD_PARTY_ROOT}/pthreadpool" CACHE STRING "pthreadpool source directory")
- set(GOOGLETEST_SOURCE_DIR "${CAFFE2_THIRD_PARTY_ROOT}/googletest" CACHE STRING "Google Test source directory")
-
-- if(NOT TARGET nnpack)
-+ if(FALSE)
- set(NNPACK_BUILD_TESTS OFF CACHE BOOL "")
- set(NNPACK_BUILD_BENCHMARKS OFF CACHE BOOL "")
- set(NNPACK_LIBRARY_TYPE "static" CACHE STRING "")
---- a/cmake/public/utils.cmake
-+++ b/cmake/public/utils.cmake
-@@ -439,8 +439,6 @@ function(torch_compile_options libname)
- endif()
-
- # Use -O2 for release builds (-O3 doesn't improve perf, and -Os results in perf regression)
-- target_compile_options(${libname} PRIVATE
-- $<$<AND:$<COMPILE_LANGUAGE:CXX>,$<OR:$<CONFIG:Release>,$<CONFIG:RelWithDebInfo>>>:-O2>)
-
- endfunction()
-
---- a/aten/src/ATen/CMakeLists.txt 2025-02-27 14:23:02.402742165 +0100
-+++ b/aten/src/ATen/CMakeLists.txt 2025-02-27 14:23:40.445850718 +0100
-@@ -301,8 +301,6 @@
- if(USE_CUDA)
- list(APPEND ATen_CUDA_INCLUDE ${CMAKE_CURRENT_SOURCE_DIR}/cuda)
- # Next two lines are needed because TunableOp uses third-party/fmt
-- list(APPEND ATen_CUDA_INCLUDE $<TARGET_PROPERTY:fmt::fmt-header-only,INTERFACE_INCLUDE_DIRECTORIES>)
-- list(APPEND ATen_CUDA_DEPENDENCY_LIBS fmt::fmt-header-only)
- list(APPEND ATen_CUDA_CU_SRCS
- ${cuda_cu}
- ${native_cuda_cu}
-@@ -315,8 +313,6 @@
- list(APPEND ATen_HIP_INCLUDE ${CMAKE_CURRENT_SOURCE_DIR}/../../../third_party/composable_kernel/include)
- list(APPEND ATen_HIP_INCLUDE ${CMAKE_CURRENT_SOURCE_DIR}/../../../third_party/composable_kernel/library/include)
- # Next two lines are needed because TunableOp uses third-party/fmt
-- list(APPEND ATen_HIP_INCLUDE $<TARGET_PROPERTY:fmt::fmt-header-only,INTERFACE_INCLUDE_DIRECTORIES>)
-- list(APPEND ATen_HIP_DEPENDENCY_LIBS fmt::fmt-header-only)
- if(USE_FLASH_ATTENTION)
- list(APPEND ATen_HIP_INCLUDE ${CMAKE_CURRENT_SOURCE_DIR}/native/transformers/hip/flash_attn/ck)
- endif()
diff --git a/sci-ml/caffe2/files/caffe2-2.7.0-llvm.patch b/sci-ml/caffe2/files/caffe2-2.7.0-llvm.patch
deleted file mode 100644
index e0818fa31e71..000000000000
--- a/sci-ml/caffe2/files/caffe2-2.7.0-llvm.patch
+++ /dev/null
@@ -1,15 +0,0 @@
---- a/c10/util/strong_type.h 2025-06-29 10:28:19.365533325 +0200
-+++ b/c10/util/strong_type.h 2025-06-29 10:28:40.944598046 +0200
-@@ -1604,12 +1604,6 @@
- return hash<T>::operator()(value_of(tt));
- }
- };
--template <typename T, typename Tag, typename ... M>
--struct is_arithmetic<::strong::type<T, Tag, M...>>
-- : is_base_of<::strong::arithmetic::modifier<::strong::type<T, Tag, M...>>,
-- ::strong::type<T, Tag, M...>>
--{
--};
-
- #if STRONG_HAS_STD_FORMAT
- template<typename T, typename Tag, typename... M, typename Char>
diff --git a/sci-ml/caffe2/files/caffe2-2.7.1-ck-config.patch b/sci-ml/caffe2/files/caffe2-2.7.1-ck-config.patch
deleted file mode 100644
index 8e77ab0ef465..000000000000
--- a/sci-ml/caffe2/files/caffe2-2.7.1-ck-config.patch
+++ /dev/null
@@ -1,38 +0,0 @@
-Use generated CK config.h rather than system
-
-Upstream commit: https://github.com/pytorch/pytorch/commit/38e81a53324146d445a81eb8f80bccebe623eb35
---- a/aten/src/ATen/CMakeLists.txt
-+++ b/aten/src/ATen/CMakeLists.txt
-@@ -343,9 +343,32 @@ if(USE_CUDA)
- endif()
-
- if(USE_ROCM)
-+ # NOTE: The PyTorch build does not actually add_subdirectory
-+ # third_party/composable_kernel or use it as a CMake library. What is used
-+ # is header only, so this should be ok, except that the CMake build generates
-+ # a ck/config.h. We just do that part here. Without this, the ck.h from the
-+ # ROCM SDK may get accidentally used instead.
-+ function(_pytorch_rocm_generate_ck_conf)
-+ set(CK_ENABLE_INT8 "ON")
-+ set(CK_ENABLE_FP16 "ON")
-+ set(CK_ENABLE_FP32 "ON")
-+ set(CK_ENABLE_FP64 "ON")
-+ set(CK_ENABLE_BF16 "ON")
-+ set(CK_ENABLE_FP8 "ON")
-+ set(CK_ENABLE_BF8 "ON")
-+ set(CK_USE_XDL "ON")
-+ set(CK_USE_WMMA "ON")
-+ configure_file(
-+ "${Torch_SOURCE_DIR}/third_party/composable_kernel/include/ck/config.h.in"
-+ "${CMAKE_CURRENT_BINARY_DIR}/composable_kernel/ck/config.h"
-+ )
-+ endfunction()
- list(APPEND ATen_HIP_INCLUDE ${CMAKE_CURRENT_SOURCE_DIR}/hip)
- list(APPEND ATen_HIP_INCLUDE ${CMAKE_CURRENT_SOURCE_DIR}/../../../third_party/composable_kernel/include)
- list(APPEND ATen_HIP_INCLUDE ${CMAKE_CURRENT_SOURCE_DIR}/../../../third_party/composable_kernel/library/include)
-+ list(APPEND ATen_HIP_INCLUDE ${CMAKE_CURRENT_BINARY_DIR}/composable_kernel)
-+ _pytorch_rocm_generate_ck_conf()
-+
- # Next two lines are needed because TunableOp uses third-party/fmt
- list(APPEND ATen_HIP_INCLUDE $<TARGET_PROPERTY:fmt::fmt-header-only,INTERFACE_INCLUDE_DIRECTORIES>)
- list(APPEND ATen_HIP_DEPENDENCY_LIBS fmt::fmt-header-only)