commit:     3fb4d83f6c7e652758a961870fc5301e245e21d6
Author:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
AuthorDate: Fri Oct 24 16:55:50 2025 +0000
Commit:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
CommitDate: Fri Oct 24 18:01:16 2025 +0000
URL:        https://gitweb.gentoo.org/repo/gentoo.git/commit/?id=3fb4d83f

sci-ml/caffe2: add 2.9.0

Signed-off-by: Alfredo Tupone <tupone <AT> gentoo.org>

 sci-ml/caffe2/Manifest                             |   2 +
 sci-ml/caffe2/caffe2-2.9.0.ebuild                  | 399 +++++++++++++++++++++
 sci-ml/caffe2/files/caffe2-2.9.0-cmake.patch       |  53 +++
 sci-ml/caffe2/files/caffe2-2.9.0-gentoo.patch      | 231 ++++++++++++
 .../composable-kernel-7fe50dc-expand-isa.patch     | 141 ++++++++
 sci-ml/caffe2/metadata.xml                         |   1 +
 6 files changed, 827 insertions(+)

diff --git a/sci-ml/caffe2/Manifest b/sci-ml/caffe2/Manifest
index e25dafff34ea..b45b9d6bcc18 100644
--- a/sci-ml/caffe2/Manifest
+++ b/sci-ml/caffe2/Manifest
@@ -1,4 +1,6 @@
+DIST composable_kernel-7fe50dc3.tar.gz 5380728 BLAKE2B c89c346d8e2d7a93a9cf26409e477fcdd25c43bc3f99d904c3bfe1bc282c6844ef2f2c80aceabe3bf4494db3457285384d5de5a22281aa426ba7479af82b0caf SHA512 a62f92e2dd7da944bd34bab6cf3bf624f630dc316d29c755e9fd523343c3f7648b0b7e0c9a0c8f5e9654477599ae8be9dac687d4054b0390f064ac2e40fc1cd3
 DIST composable_kernel-8086bbe3.tar.gz 4418862 BLAKE2B b710e3d4586899443ec01044dad19fd2f992c351e2f65ba526dfcc47cc65c095beaf8ac21a8f71c02a0eb524d364e817b27241a9198884f2bdae9924b51e24e4 SHA512 8410b5a1c864d71f3034ef0d9d1245078856d09cc191faec59856c229bf11d89ae291036d735cb5cec4f1d72e6e9e8f6921833147f9619d30cfab8722d3a9f63
 DIST flash-attention-2.7.4.gh.tar.gz 5841323 BLAKE2B 432999d763f2b3d732580ddfea5d3e01370351db0656546259a5e500a07516dd03c98828bfb55855dabe4adc651033b5d97ea4725ca46158b9970f0fbc662710 SHA512 05a4afb09e666f7404d6a3f8b5256e7bed6eba60a6f1bde2b7dbb96d318975f0b458c2521c7a38d88e97b6e4c27f29077cf787849daf82586e33f43a3d9a84b3
 DIST pytorch-2.7.1.tar.gz 50203605 BLAKE2B 3f4b2643d86fe9ff30b2f335353dfe6a8e222bcc12143bc5d09268fb37bfd42f9451620e6e0db225c3c3e7930c999115fdd2ed62b7eae93b0d5e233270c7c760 SHA512 a9fc2252af9031c2cd46dde558c491aea8bc322fb80157a7760f300a44b759d4bfe866f030fbb974b80493057cfff4dd512498f99a100ed6d05bf620258ed37e
 DIST pytorch-2.8.0.tar.gz 56565754 BLAKE2B a8f07513b92f9293f8322508f9fc73a462f89fe51cb1f280af371cee19cbe7e2bf900ba2b3c43fd08ea415566db441a6d6310d77f18477e957641be311a361a5 SHA512 448e9dad4aa10f1793d35e6ffe9f0f69b7719d41e6eccceb687a8d0c148e22d03e4f76170a05308ef9323a7aea41aa74605077ae1d68c6d949f13b3340ebf310
+DIST pytorch-2.9.0.tar.gz 55750268 BLAKE2B 943459ec60a4e1f5e36766defc7018fbf9722fb1564b723c2a7ebcb2a5d8b1735f0b1542dc67a77f788af3e2454ea6261dbdee5beb2bcfa4af2e58ca566edc93 SHA512 2ecdc0eac39ecee68b0f4c98e498424cde00c45bbeeff576c8778046f97119cd02885498b072352dd3cdd9aecd02baf61cdc5554bce8d757b30c673053a0cc80

diff --git a/sci-ml/caffe2/caffe2-2.9.0.ebuild b/sci-ml/caffe2/caffe2-2.9.0.ebuild
new file mode 100644
index 000000000000..58002595e3b3
--- /dev/null
+++ b/sci-ml/caffe2/caffe2-2.9.0.ebuild
@@ -0,0 +1,399 @@
+# Copyright 2022-2025 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+EAPI=8
+
+PYTHON_COMPAT=( python3_{11..14} )
+ROCM_VERSION=6.1
+inherit python-single-r1 cmake cuda flag-o-matic prefix rocm toolchain-funcs
+
+MYPN=pytorch
+MYP=${MYPN}-${PV}
+
+# caffe2-2.9.0 depends on a future version of composable kernel
+# TODO: replace it with DEPEND in the future
+CK_COMMIT=7fe50dc3da2069d6645d9deb8c017a876472a977
+CK_P=composable_kernel-${CK_COMMIT:0:8}
+
+FLASH_PV=2.7.4
+FLASH_PN=flash-attention
+FLASH_P=${FLASH_PN}-${FLASH_PV}
+FLASH_ATT_URI="https://github.com/Dao-AILab/${FLASH_PN}/archive/refs/tags/v${FLASH_PV}.tar.gz -> ${FLASH_P}.gh.tar.gz"
+
+AOTRITON_PV=0.9.2b
+AOTRITON_PN=aotriton
+AOTRITON_P=${AOTRITON_PN}-${AOTRITON_PV}
+AOTRITON_tar=${AOTRITON_P}-manylinux_2_28_x86_64-rocm6.3-shared.tar.gz
+
+DESCRIPTION="A deep learning framework"
+HOMEPAGE="https://pytorch.org/"
+SRC_URI="
+       https://github.com/pytorch/${MYPN}/archive/refs/tags/v${PV}.tar.gz -> ${MYP}.tar.gz
+       rocm? (
+               https://github.com/ROCm/composable_kernel/archive/${CK_COMMIT}.tar.gz
+               -> ${CK_P}.tar.gz
+       )
+       cuda? (
+               flash? ( ${FLASH_ATT_URI} )
+               memefficient? ( ${FLASH_ATT_URI} )
+       )
+"
+
+S="${WORKDIR}"/${MYP}
+
+LICENSE="BSD"
+SLOT="0"
+KEYWORDS="~amd64 ~arm64"
+IUSE="cuda cusparselt distributed fbgemm flash gloo memefficient mkl mpi nccl nnpack +numpy
+       onednn openblas opencl openmp qnnpack rocm xnnpack"
+RESTRICT="test"
+REQUIRED_USE="
+       ${PYTHON_REQUIRED_USE}
+       mpi? ( distributed )
+       gloo? ( distributed )
+       ?? ( cuda rocm )
+       rocm? (
+               || ( ${ROCM_REQUIRED_USE} )
+       )
+       flash? ( || ( cuda rocm ) )
+       memefficient? ( || ( cuda rocm ) )
+       nccl? ( rocm )
+"
+
+RDEPEND="
+       ${PYTHON_DEPS}
+       dev-cpp/abseil-cpp:=
+       dev-cpp/gflags:=
+       >=dev-cpp/glog-0.5.0:=
+       dev-cpp/nlohmann_json
+       dev-libs/cpuinfo
+       dev-libs/libfmt:=
+       dev-libs/protobuf:=
+       dev-libs/sleef
+       ~sci-ml/kineto-0.4.0_p20250617
+       <sci-ml/onnx-1.18.0
+       virtual/lapack
+       cuda? (
+               dev-libs/cudnn
+               >=sci-ml/cudnn-frontend-1.12.0:=
+               >=dev-util/nvidia-cuda-toolkit-12.9:=[profiler]
+               cusparselt? ( dev-libs/cusparselt )
+       )
+       fbgemm? ( sci-ml/FBGEMM )
+       gloo? ( >=sci-ml/gloo-2025.06.04[cuda?] )
+       mpi? ( virtual/mpi )
+       nnpack? (
+               sci-ml/NNPACK
+               dev-libs/pthreadpool
+       )
+       numpy? ( $(python_gen_cond_dep '
+               dev-python/numpy[${PYTHON_USEDEP}]
+               ') )
+       onednn? ( =sci-ml/oneDNN-3.5* )
+       opencl? ( virtual/opencl )
+       qnnpack? (
+               !sci-libs/QNNPACK
+               sci-ml/gemmlowp
+               dev-libs/pthreadpool
+       )
+       rocm? (
+               nccl? ( >=dev-libs/rccl-6.3:= <dev-libs/rccl-7.1:= )
+               >=dev-util/hip-6.3:=       <dev-util/hip-7.1:=
+               >=dev-util/roctracer-6.3:= <dev-util/roctracer-7.1:=
+               >=sci-libs/hipBLAS-6.3:=   <sci-libs/hipBLAS-7.1:=
+               >=sci-libs/hipBLASLt-6.3:= <sci-libs/hipBLASLt-7.1:=
+               >=sci-libs/hipFFT-6.3:=    <sci-libs/hipFFT-7.1:=
+               >=sci-libs/hipRAND-6.3:=   <sci-libs/hipRAND-7.1:=
+               >=sci-libs/hipSOLVER-6.3:= <sci-libs/hipSOLVER-7.1:=
+               >=sci-libs/hipSPARSE-6.3:= <sci-libs/hipSPARSE-7.1:=
+               >=sci-libs/miopen-6.3:=    <sci-libs/miopen-7.1:=
+               >=sci-libs/rocBLAS-6.3:=   <sci-libs/rocBLAS-7.1:=
+               >=sci-libs/rocRAND-6.3:=   <sci-libs/rocRAND-7.1:=
+               >=sci-libs/rocSOLVER-6.3:= <sci-libs/rocSOLVER-7.1:=
+               memefficient? ( sci-libs/aotriton-bin:0/0.11 )
+       )
+       distributed? (
+               !rocm? ( sci-ml/tensorpipe[cuda?] )
+               dev-cpp/cpp-httplib:=
+       )
+       xnnpack? (
+               >=sci-ml/XNNPACK-2024.11
+               dev-libs/pthreadpool
+       )
+       mkl? ( sci-libs/mkl )
+       openblas? ( sci-libs/openblas )
+"
+
+DEPEND="
+       ${RDEPEND}
+       dev-cpp/opentelemetry-cpp
+       dev-libs/flatbuffers
+       dev-libs/FXdiv
+       dev-libs/pocketfft
+       dev-libs/psimd
+       sci-ml/FP16
+       $(python_gen_cond_dep '
+               dev-python/pybind11[${PYTHON_USEDEP}]
+               dev-python/pyyaml[${PYTHON_USEDEP}]
+               dev-python/typing-extensions[${PYTHON_USEDEP}]
+       ')
+       cuda? ( >=dev-libs/cutlass-3.9.2[tools(+)] )
+       onednn? ( sci-ml/ideep )
+       rocm? (
+               >=sci-libs/hipCUB-6.3:=    <sci-libs/hipCUB-7.1:=
+               >=sci-libs/rocPRIM-6.3:=   <sci-libs/rocPRIM-7.1:=
+               >=sci-libs/rocThrust-6.3:= <sci-libs/rocThrust-7.1:=
+       )
+       qnnpack? ( dev-libs/clog )
+"
+
+PATCHES=(
+       "${FILESDIR}"/${PN}-2.5.1-unbundle_fmt.patch
+       "${FILESDIR}"/${PN}-2.5.1-unbundle_kineto.patch
+       "${FILESDIR}"/${PN}-2.8.0-unbundle_pocketfft.patch
+       "${FILESDIR}"/${PN}-2.5.1-cudnn_include_fix.patch
+       "${FILESDIR}"/${P}-gentoo.patch
+       "${FILESDIR}"/${PN}-2.4.0-cpp-httplib.patch
+       "${FILESDIR}"/${PN}-2.5.1-glog-0.6.0.patch
+       "${FILESDIR}"/${PN}-2.5.1-newfix-functorch-install.patch
+       "${FILESDIR}"/${PN}-2.6.0-rocm-fix-std-cpp17.patch
+       "${FILESDIR}"/${PN}-2.8.0-cmake.patch
+       "${FILESDIR}"/${PN}-2.7.0-glog-0.7.1.patch
+       "${FILESDIR}"/${PN}-2.7.1-aotriton-fixes.patch
+       "${FILESDIR}"/${PN}-2.8.0-rocm-minus-flash.patch
+       "${FILESDIR}"/${P}-cmake.patch
+)
+
+src_prepare() {
+       if use cuda && ( use flash || use memefficient ); then
+               mv "${WORKDIR}"/${FLASH_P}/* third_party/${FLASH_PN}/ || die
+       fi
+       filter-lto #bug 862672
+
+       # Unbundle fmt
+       sed -i \
+               -e 's|::fmt-header-only||' \
+               c10/CMakeLists.txt \
+               cmake/Dependencies.cmake \
+               torch/CMakeLists.txt \
+               || die
+
+       # Drop third_party from CMake tree
+       sed -i \
+               -e '/add_subdirectory.*third_party/d' \
+               CMakeLists.txt \
+               cmake/Dependencies.cmake \
+               cmake/ProtoBuf.cmake \
+               aten/src/ATen/CMakeLists.txt \
+               || die
+       # Change libc10* path
+       sed -i \
+               -e "/EXPORT/s|DESTINATION lib)|DESTINATION $(get_libdir))|" \
+               c10/cuda/CMakeLists.txt \
+               c10/CMakeLists.txt \
+               c10/hip/CMakeLists.txt \
+               || die
+
+       # Change libaotriton path
+       sed -i \
+               -e "s|}/lib|}/$(get_libdir)|g" \
+               cmake/External/aotriton.cmake \
+               || die
+
+       # Noisy warnings from Logging.h
+       sed -i 's/-Wextra-semi//' cmake/public/utils.cmake || die
+
+       cmake_src_prepare
+       pushd torch/csrc/jit/serialization > /dev/null || die
+       flatc --cpp --gen-mutable --scoped-enums mobile_bytecode.fbs || die
+       popd > /dev/null || die
+
+       # prefixify the hardcoded paths, after all patches are applied
+       hprefixify \
+               aten/CMakeLists.txt \
+               caffe2/CMakeLists.txt \
+               cmake/Metal.cmake \
+               cmake/Modules/*.cmake \
+               cmake/Modules_CUDA_fix/FindCUDNN.cmake \
+               cmake/Modules_CUDA_fix/upstream/FindCUDA/make2cmake.cmake \
+               cmake/Modules_CUDA_fix/upstream/FindPackageHandleStandardArgs.cmake \
+               cmake/public/LoadHIP.cmake \
+               cmake/public/cuda.cmake \
+               cmake/Dependencies.cmake \
+               torch/CMakeLists.txt \
+               CMakeLists.txt
+
+       if use rocm; then
+               sed -e "s:/opt/rocm:/usr:" \
+                       -e "s:lib/cmake:$(get_libdir)/cmake:g" \
+                       -i cmake/public/LoadHIP.cmake || die
+
+               # TODO: delete when caffe2 depends on systemwide composable_kernel
+               sed -e "s:third_party/composable_kernel:../composable_kernel-${CK_COMMIT}:g" \
+                       -i aten/src/ATen/CMakeLists.txt || die
+
+               # Bug 959808: fix for gfx101x targets
+               pushd "${WORKDIR}/composable_kernel-${CK_COMMIT}" > /dev/null || die
+               eapply "${FILESDIR}"/composable-kernel-7fe50dc-expand-isa.patch
+               popd > /dev/null || die
+
+               if tc-is-clang; then
+                       # Systemwide gcc (for absl and at::TensorBase) + hipcc (llvm>=18) need abi-compat=17.
+                       # But systemwide clang>=18 + hipcc (>=llvm-18) need the opposite!
+                       # See also: https://github.com/llvm/llvm-project/issues/102443#issuecomment-2329726287
+                       sed '/-fclang-abi-compat=17/d' -i cmake/Dependencies.cmake || die
+               fi
+
+               # Workaround for libc++ issue https://github.com/llvm/llvm-project/issues/100802
+               sed 's/std::memcpy/memcpy/g' -i torch/headeronly/util/Half.h || die
+
+               ebegin "HIPifying cuda sources"
+               ${EPYTHON} tools/amd_build/build_amd.py || die
+               eend $?
+       fi
+}
+
+src_configure() {
+       if use cuda && [[ -z ${TORCH_CUDA_ARCH_LIST} ]]; then
+               ewarn "WARNING: caffe2 is being built with its default CUDA 
compute capabilities: 3.5 and 7.0."
+               ewarn "These may not be optimal for your GPU."
+               ewarn ""
+               ewarn "To configure caffe2 with the CUDA compute capability 
that is optimal for your GPU,"
+               ewarn "set TORCH_CUDA_ARCH_LIST in your make.conf, and 
re-emerge caffe2."
+               ewarn "For example, to use CUDA capability 7.5 & 3.5, add: 
TORCH_CUDA_ARCH_LIST=7.5 3.5"
+               ewarn "For a Maxwell model GPU, an example value would be: 
TORCH_CUDA_ARCH_LIST=Maxwell"
+               ewarn ""
+               ewarn "You can look up your GPU's CUDA compute capability at 
https://developer.nvidia.com/cuda-gpus";
+               ewarn "or by running /opt/cuda/extras/demo_suite/deviceQuery | 
grep 'CUDA Capability'"
+       fi
+
+       local mycmakeargs=(
+               -DBUILD_CUSTOM_PROTOBUF=OFF
+               -DLIBSHM_INSTALL_LIB_SUBDIR="${EPREFIX}"/usr/$(get_libdir)
+               -DPython_EXECUTABLE="${PYTHON}"
+               -DTORCH_INSTALL_LIB_DIR="${EPREFIX}"/usr/$(get_libdir)
+               -DUSE_CCACHE=OFF
+               -DUSE_CUDA=$(usex cuda)
+               -DUSE_DISTRIBUTED=$(usex distributed)
+               -DUSE_FBGEMM=$(usex fbgemm)
+               -DUSE_FLASH_ATTENTION=$(usex flash)
+               -DUSE_GFLAGS=ON
+               -DUSE_GLOG=ON
+               -DUSE_GLOO=$(usex gloo)
+               -DUSE_ITT=OFF
+               -DUSE_KINETO=ON
+               -DUSE_KLEIDIAI=OFF # TODO
+               -DUSE_MAGMA=OFF # TODO: In GURU as sci-libs/magma
+               -DUSE_MEM_EFF_ATTENTION=$(usex memefficient)
+               -DUSE_MKLDNN=$(usex onednn)
+               -DUSE_MPI=$(usex mpi)
+               -DUSE_NCCL=OFF
+               -DUSE_NNPACK=$(usex nnpack)
+               -DUSE_NUMA=OFF
+               -DUSE_NUMPY=$(usex numpy)
+               -DUSE_OPENCL=$(usex opencl)
+               -DUSE_OPENMP=$(usex openmp)
+               -DUSE_PYTORCH_QNNPACK=$(usex qnnpack)
+               -DUSE_PYTORCH_METAL=OFF
+               -DUSE_ROCM=$(usex rocm)
+               -DUSE_SYSTEM_CPUINFO=ON
+               -DUSE_SYSTEM_EIGEN_INSTALL=ON
+               -DUSE_SYSTEM_FP16=ON
+               -DUSE_SYSTEM_FXDIV=ON
+               -DUSE_SYSTEM_GLOO=ON
+               -DUSE_SYSTEM_NVTX=ON
+               -DUSE_SYSTEM_ONNX=ON
+               -DUSE_SYSTEM_PSIMD=ON
+               -DUSE_SYSTEM_PTHREADPOOL=ON
+               -DUSE_SYSTEM_PYBIND11=ON
+               -DUSE_SYSTEM_SLEEF=ON
+               -DUSE_SYSTEM_XNNPACK=$(usex xnnpack)
+               -DUSE_TENSORPIPE=$(use distributed && use !rocm && echo ON || echo OFF)
+               -DUSE_UCC=OFF
+               -DUSE_VALGRIND=OFF
+               -DUSE_XNNPACK=$(usex xnnpack)
+               -DUSE_XPU=OFF
+               -Wno-dev
+       )
+
+       if use mkl; then
+               mycmakeargs+=(-DBLAS=MKL)
+       elif use openblas; then
+               mycmakeargs+=(-DBLAS=OpenBLAS)
+       else
+               mycmakeargs+=(-DBLAS=Generic -DBLAS_LIBRARIES=)
+       fi
+
+       if use cuda; then
+               addpredict "/dev/nvidiactl" # bug 867706
+               addpredict "/dev/char"
+               addpredict "/proc/self/task" # bug 926116
+
+               mycmakeargs+=(
+                       -DUSE_CUDNN=ON
+                       -DTORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST:-3.5 7.0}"
+                       -DUSE_NCCL=OFF # TODO: NVIDIA Collective Communication Library
+                       -DCMAKE_CUDA_FLAGS="$(cuda_gccdir -f | tr -d \")"
+                       -DUSE_CUSPARSELT=$(usex cusparselt)
+               )
+       elif use rocm; then
+               export PYTORCH_ROCM_ARCH="$(get_amdgpu_flags)"
+
+               if use memefficient; then
+                       export AOTRITON_INSTALLED_PREFIX="${ESYSROOT}/usr"
+               fi
+
+               mycmakeargs+=(
+                       -DUSE_NCCL=$(usex nccl)
+                       -DUSE_SYSTEM_NCCL=ON
+                       -DCMAKE_REQUIRE_FIND_PACKAGE_HIP=ON
+                       -DUSE_ROCM_CK_SDPA=OFF # requires flash + aiter, works only on gfx90a/gfx942/gfx950
+               )
+
+               # ROCm libraries produce too many warnings
+               append-cxxflags -Wno-deprecated-declarations -Wno-unused-result -Wno-unused-value
+       fi
+
+       if use onednn; then
+               mycmakeargs+=(
+                       -DMKLDNN_FOUND=ON
+                       -DMKLDNN_LIBRARIES=dnnl
+                       -DMKLDNN_INCLUDE_DIR="${ESYSROOT}/usr/include/oneapi/dnnl"
+               )
+       fi
+
+       cmake_src_configure
+}
+
+src_compile() {
+       PYTORCH_BUILD_VERSION=${PV} \
+       PYTORCH_BUILD_NUMBER=0 \
+       cmake_src_compile
+}
+
+python_install() {
+       python_domodule python/torch
+       mkdir "${D}"$(python_get_sitedir)/torch/bin || die
+       mkdir "${D}"$(python_get_sitedir)/torch/lib || die
+       mkdir "${D}"$(python_get_sitedir)/torch/include || die
+       ln -s ../../../../../include/torch \
+               "${D}$(python_get_sitedir)"/torch/include/torch || die # bug 
923269
+       ln -s ../../../../../bin/torch_shm_manager \
+               "${D}"/$(python_get_sitedir)/torch/bin/torch_shm_manager || die
+       ln -s ../../../../../$(get_libdir)/libtorch_global_deps.so \
+               "${D}"/$(python_get_sitedir)/torch/lib/libtorch_global_deps.so 
|| die
+}
+
+src_install() {
+       cmake_src_install
+
+       # Used by pytorch ebuild
+       insinto "/var/lib/${PN}"
+       doins "${BUILD_DIR}"/CMakeCache.txt
+       dostrip -x /var/lib/${PN}/functorch.so
+
+       rm -rf python
+       mkdir -p python/torch || die
+       cp torch/version.py python/torch/ || die
+       python_install
+}

diff --git a/sci-ml/caffe2/files/caffe2-2.9.0-cmake.patch b/sci-ml/caffe2/files/caffe2-2.9.0-cmake.patch
new file mode 100644
index 000000000000..4249d28f941d
--- /dev/null
+++ b/sci-ml/caffe2/files/caffe2-2.9.0-cmake.patch
@@ -0,0 +1,53 @@
+--- a/.ci/pytorch/test_example_code/CMakeLists.txt     2025-10-24 18:25:10.584717735 +0200
++++ b/.ci/pytorch/test_example_code/CMakeLists.txt     2025-10-24 18:25:17.195639297 +0200
+@@ -1,4 +1,4 @@
+-cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
++cmake_minimum_required(VERSION 3.10 FATAL_ERROR)
+ project(simple-torch-test)
+ 
+ find_package(Torch REQUIRED)
+--- a/aten/src/ATen/native/quantized/cpu/qnnpack/CMakeLists.txt        2025-10-24 18:26:46.165583599 +0200
++++ b/aten/src/ATen/native/quantized/cpu/qnnpack/CMakeLists.txt        2025-10-24 18:27:00.085418425 +0200
+@@ -4,7 +4,7 @@
+ # This source code is licensed under the BSD-style license found in the
+ # LICENSE file in the root directory of this source tree.
+ 
+-cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
++cmake_minimum_required(VERSION 3.10 FATAL_ERROR)
+ 
+ include(GNUInstallDirs)
+ 
+--- a/aten/src/ATen/test/test_install/CMakeLists.txt   2025-10-24 18:27:36.180990366 +0200
++++ b/aten/src/ATen/test/test_install/CMakeLists.txt   2025-10-24 18:27:46.812864333 +0200
+@@ -1,4 +1,4 @@
+-cmake_minimum_required(VERSION 3.5)
++cmake_minimum_required(VERSION 3.10)
+ find_package(ATen REQUIRED)
+ include_directories(${ATEN_INCLUDE_DIR})
+ 
+--- a/aten/src/ATen/nnapi/CMakeLists.txt       2025-10-24 18:28:58.948009238 +0200
++++ b/aten/src/ATen/nnapi/CMakeLists.txt       2025-10-24 18:29:11.040865888 +0200
+@@ -1,6 +1,6 @@
+ # Define this to build the NNAPI binding out of tree.
+ if(PYTORCH_NNAPI_STANDALONE)
+-  cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
++  cmake_minimum_required(VERSION 3.10 FATAL_ERROR)
+   project(pytorch_nnapi)
+ 
+   set(CMAKE_CXX_STANDARD 14 CACHE STRING "The C++ standard whose features are requested to build this target.")
+--- a/android/pytorch_android_torchvision/CMakeLists.txt       2025-10-24 18:29:31.653621543 +0200
++++ b/android/pytorch_android_torchvision/CMakeLists.txt       2025-10-24 18:29:41.668502824 +0200
+@@ -1,4 +1,4 @@
+-cmake_minimum_required(VERSION 3.5)
++cmake_minimum_required(VERSION 3.10)
+ project(pytorch_vision_jni CXX)
+ set(CMAKE_CXX_STANDARD 17 CACHE STRING "The C++ standard whose features are requested to build this target.")
+ #_cmake_modify_IGNORE set(CMAKE_VERBOSE_MAKEFILE ON)
+--- a/android/pytorch_android/CMakeLists.txt   2025-10-24 18:29:54.079355700 +0200
++++ b/android/pytorch_android/CMakeLists.txt   2025-10-24 18:30:39.793813797 +0200
+@@ -1,4 +1,4 @@
+-cmake_minimum_required(VERSION 3.5)
++cmake_minimum_required(VERSION 3.10)
+ option(BUILD_LITE_INTERPRETER "Master flag to build pytorch_jni_lite" ON)
+ message(
+   STATUS

diff --git a/sci-ml/caffe2/files/caffe2-2.9.0-gentoo.patch b/sci-ml/caffe2/files/caffe2-2.9.0-gentoo.patch
new file mode 100644
index 000000000000..5d4f3c3abeb2
--- /dev/null
+++ b/sci-ml/caffe2/files/caffe2-2.9.0-gentoo.patch
@@ -0,0 +1,231 @@
+--- a/CMakeLists.txt
++++ b/CMakeLists.txt
+@@ -1016,7 +1016,7 @@
+   set(CMAKE_COLOR_DIAGNOSTICS ON)
+ endif()
+ if(NOT MSVC)
+-  string(APPEND CMAKE_CXX_FLAGS " -O2 -fPIC")
++  string(APPEND CMAKE_CXX_FLAGS " -O2")
+ 
+   # This prevents use of `c10::optional`, `c10::nullopt` etc within the codebase
+   string(APPEND CMAKE_CXX_FLAGS " -DC10_NODEPRECATED")
+@@ -1027,7 +1027,6 @@
+   # Details at http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1459
+   string(APPEND CMAKE_CXX_FLAGS " -Wall")
+   string(APPEND CMAKE_CXX_FLAGS " -Wextra")
+-  append_cxx_flag_if_supported("-Werror=return-type" CMAKE_CXX_FLAGS)
+   append_cxx_flag_if_supported("-Werror=non-virtual-dtor" CMAKE_CXX_FLAGS)
+   append_cxx_flag_if_supported("-Werror=braced-scalar-init" CMAKE_CXX_FLAGS)
+   append_cxx_flag_if_supported("-Werror=range-loop-construct" CMAKE_CXX_FLAGS)
+
+@@ -1083,7 +1082,6 @@
+   endif()
+   append_cxx_flag_if_supported("-fno-math-errno" CMAKE_CXX_FLAGS)
+   append_cxx_flag_if_supported("-fno-trapping-math" CMAKE_CXX_FLAGS)
+-  append_cxx_flag_if_supported("-Werror=format" CMAKE_CXX_FLAGS)
+   if(CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 13)
+     append_cxx_flag_if_supported("-Wno-dangling-reference" CMAKE_CXX_FLAGS)
+     append_cxx_flag_if_supported("-Wno-error=dangling-reference" CMAKE_CXX_FLAGS)
+     append_cxx_flag_if_supported("-Wno-error=redundant-move" CMAKE_CXX_FLAGS)
+--- a/aten/src/ATen/native/quantized/cpu/qnnpack/CMakeLists.txt
++++ b/aten/src/ATen/native/quantized/cpu/qnnpack/CMakeLists.txt
+@@ -323,7 +323,7 @@
+ set_target_properties(pytorch_qnnpack PROPERTIES PUBLIC_HEADER include/qnnpack_func.h)
+ 
+ # ---[ Configure clog
+-if(NOT TARGET clog)
++if(FALSE)
+   set(CLOG_BUILD_TESTS OFF CACHE BOOL "")
+   set(CLOG_RUNTIME_TYPE "${CPUINFO_RUNTIME_TYPE}" CACHE STRING "")
+   add_subdirectory(
+@@ -335,7 +335,8 @@
+     target_compile_options(clog PRIVATE "-Wno-unused-result")
+   endif()
+ endif()
+-target_link_libraries(pytorch_qnnpack PUBLIC clog)
++find_library(CLOG_LIBRARY NAMES clog REQUIRED)
++target_link_libraries(pytorch_qnnpack PUBLIC ${CLOG_LIBRARY})
+ 
+ # ---[ Configure cpuinfo
+ if(NOT TARGET cpuinfo AND USE_SYSTEM_CPUINFO)
+--- a/caffe2/CMakeLists.txt
++++ b/caffe2/CMakeLists.txt
+@@ -87,7 +87,7 @@ endif()
+ # Note: the folders that are being commented out have not been properly
+ # addressed yet.
+ 
+-if(NOT MSVC AND USE_XNNPACK)
++if(FALSE)
+   if(NOT TARGET fxdiv)
+     set(FXDIV_BUILD_TESTS OFF CACHE BOOL "")
+     set(FXDIV_BUILD_BENCHMARKS OFF CACHE BOOL "")
+@@ -1205,7 +1205,6 @@ if(USE_XPU)
+ endif()
+ 
+ if(NOT MSVC AND USE_XNNPACK)
+-  TARGET_LINK_LIBRARIES(torch_cpu PRIVATE fxdiv)
+ endif()
+ 
+ # ==========================================================
+@@ -1317,17 +1316,6 @@
+ target_include_directories(torch_cpu PRIVATE
+   "/usr/include/kineto")
+ 
+-if(USE_KINETO)
+-  target_include_directories(torch_cpu PRIVATE
+-    ${TORCH_ROOT}/third_party/kineto/libkineto/src)
+-endif()
+-
+-target_include_directories(torch_cpu PRIVATE
+-  ${TORCH_ROOT}/third_party/cpp-httplib)
+-
+-target_include_directories(torch_cpu PRIVATE
+-  ${TORCH_ROOT}/third_party/nlohmann/include)
+-
+ install(DIRECTORY
+   "${TORCH_SRC_DIR}/csrc"
+   "${TORCH_SRC_DIR}/headeronly"
+--- a/cmake/Codegen.cmake
++++ b/cmake/Codegen.cmake
+@@ -64,7 +64,7 @@ if(INTERN_BUILD_ATEN_OPS)
+   if(MSVC)
+     set(OPT_FLAG "/fp:strict ")
+   else(MSVC)
+-    set(OPT_FLAG "-O3 ")
++    set(OPT_FLAG " ")
+     if("${CMAKE_BUILD_TYPE}" MATCHES "Debug")
+       set(OPT_FLAG " ")
+     endif()
+--- a/cmake/Dependencies.cmake
++++ b/cmake/Dependencies.cmake
+@@ -484,7 +484,9 @@
+       set_property(TARGET pytorch_qnnpack PROPERTY POSITION_INDEPENDENT_CODE ON)
+       set_property(TARGET cpuinfo PROPERTY POSITION_INDEPENDENT_CODE ON)
+       # QNNPACK depends on gemmlowp headers
+-      target_include_directories(pytorch_qnnpack PRIVATE "${CAFFE2_THIRD_PARTY_ROOT}/gemmlowp")
++      find_package(gemmlowp REQUIRED)
++      get_target_property(GEMMLOWP_INCLUDE_DIRS gemmlowp::gemmlowp INTERFACE_INCLUDE_DIRECTORIES)
++      target_include_directories(pytorch_qnnpack PRIVATE ${GEMMLOWP_INCLUDE_DIRS})
+     endif()
+ 
+     list(APPEND Caffe2_DEPENDENCY_LIBS pytorch_qnnpack)
+@@ -579,7 +581,7 @@
+   find_library(microkernels-prod_LIBRARY microkernels-prod)
+   set_property(TARGET XNNPACK PROPERTY IMPORTED_LOCATION "${XNNPACK_LIBRARY}")
+   set_property(TARGET microkernels-prod PROPERTY IMPORTED_LOCATION "${microkernels-prod_LIBRARY}")
+-  if(NOT XNNPACK_LIBRARY OR NOT microkernels-prod_LIBRARY)
++  if(FALSE)
+     message(FATAL_ERROR "Cannot find XNNPACK")
+   endif()
+   message("-- Found XNNPACK: ${XNNPACK_LIBRARY}")
+@@ -660,7 +662,7 @@ if(BUILD_TEST OR BUILD_MOBILE_BENCHMARK OR BUILD_MOBILE_TEST)
+ endif()
+ 
+ # ---[ FBGEMM
+-if(USE_FBGEMM)
++if(FALSE)
+   set(CAFFE2_THIRD_PARTY_ROOT "${PROJECT_SOURCE_DIR}/third_party")
+   if(NOT DEFINED FBGEMM_SOURCE_DIR)
+     set(FBGEMM_SOURCE_DIR "${CAFFE2_THIRD_PARTY_ROOT}/fbgemm" CACHE STRING "FBGEMM source directory")
+@@ -684,6 +686,7 @@ if(USE_FBGEMM)
+ endif()
+ 
+ if(USE_FBGEMM)
++  list(APPEND Caffe2_DEPENDENCY_LIBS fbgemm)
+   caffe2_update_option(USE_FBGEMM ON)
+ else()
+   caffe2_update_option(USE_FBGEMM OFF)
+@@ -1127,7 +1127,6 @@
+     endif()
+     set(TP_BUILD_LIBUV ON CACHE BOOL "" FORCE)
+     add_compile_options(-DTORCH_USE_LIBUV)
+-    include_directories(BEFORE SYSTEM ${CMAKE_CURRENT_LIST_DIR}/../third_party/tensorpipe/third_party/libuv/include)
+     set(TP_STATIC_OR_SHARED STATIC CACHE STRING "" FORCE)
+ 
+     # Tensorpipe uses cuda_add_library
+@@ -1699,11 +1699,9 @@
+ 
+ # Include cpp-httplib
+ add_library(httplib INTERFACE IMPORTED)
+-target_include_directories(httplib SYSTEM INTERFACE ${PROJECT_SOURCE_DIR}/third_party/cpp-httplib)
+ 
+ # Include nlohmann-json
+ add_library(nlohmann INTERFACE IMPORTED)
+-include_directories(nlohmann SYSTEM INTERFACE ${PROJECT_SOURCE_DIR}/third_party/nlohmann/include)
+ 
+ # Include moodycamel
+ add_library(moodycamel INTERFACE IMPORTED)
+--- a/cmake/External/nnpack.cmake
++++ b/cmake/External/nnpack.cmake
+@@ -56,7 +56,7 @@
+   set(PTHREADPOOL_SOURCE_DIR "${CAFFE2_THIRD_PARTY_ROOT}/pthreadpool" CACHE STRING "pthreadpool source directory")
+   set(GOOGLETEST_SOURCE_DIR "${CAFFE2_THIRD_PARTY_ROOT}/googletest" CACHE STRING "Google Test source directory")
+ 
+-  if(NOT TARGET nnpack)
++  if(FALSE)
+     set(NNPACK_BUILD_TESTS OFF CACHE BOOL "")
+     set(NNPACK_BUILD_BENCHMARKS OFF CACHE BOOL "")
+     set(NNPACK_LIBRARY_TYPE "static" CACHE STRING "")
+--- a/cmake/public/utils.cmake
++++ b/cmake/public/utils.cmake
+@@ -440,8 +440,6 @@ function(torch_compile_options libname)
+   endif()
+ 
+   # Use -O2 for release builds (-O3 doesn't improve perf, and -Os results in perf regression)
+-  target_compile_options(${libname} PRIVATE
+-      $<$<AND:$<COMPILE_LANGUAGE:CXX>,$<OR:$<CONFIG:Release>,$<CONFIG:RelWithDebInfo>>>:-O2>)
+ 
+ endfunction()
+ 
+--- a/aten/src/ATen/CMakeLists.txt     2025-02-27 14:23:02.402742165 +0100
++++ b/aten/src/ATen/CMakeLists.txt     2025-02-27 14:23:40.445850718 +0100
+@@ -415,8 +415,6 @@
+ if(USE_CUDA)
+   list(APPEND ATen_CUDA_INCLUDE ${CMAKE_CURRENT_SOURCE_DIR}/cuda)
+   # Next two lines are needed because TunableOp uses third-party/fmt
+-  list(APPEND ATen_CUDA_INCLUDE $<TARGET_PROPERTY:fmt::fmt-header-only,INTERFACE_INCLUDE_DIRECTORIES>)
+-  list(APPEND ATen_CUDA_DEPENDENCY_LIBS fmt::fmt-header-only)
+   list(APPEND ATen_CUDA_CU_SRCS
+     ${cuda_cu}
+     ${native_cuda_cu}
+@@ -437,8 +435,6 @@
+   endif()
+
+   # Next two lines are needed because TunableOp uses third-party/fmt
+-  list(APPEND ATen_HIP_INCLUDE $<TARGET_PROPERTY:fmt::fmt-header-only,INTERFACE_INCLUDE_DIRECTORIES>)
+-  list(APPEND ATen_HIP_DEPENDENCY_LIBS fmt::fmt-header-only)
+   if(USE_FLASH_ATTENTION AND USE_ROCM_CK_SDPA)
+     list(APPEND ATen_HIP_INCLUDE ${CMAKE_CURRENT_SOURCE_DIR}/native/transformers/hip/flash_attn/ck)
+   endif()
+--- a/torch/CMakeLists.txt
++++ b/torch/CMakeLists.txt
+@@ -59,16 +59,10 @@
+     ${CMAKE_BINARY_DIR}/aten/src
+     ${CMAKE_BINARY_DIR}/caffe2/aten/src
+     ${CMAKE_BINARY_DIR}/third_party
+-    ${CMAKE_BINARY_DIR}/third_party/onnx
+ 
+     ${TORCH_ROOT}/third_party/valgrind-headers
+ 
+-    ${TORCH_ROOT}/third_party/gloo
+-    ${TORCH_ROOT}/third_party/onnx
+-    ${TORCH_ROOT}/third_party/flatbuffers/include
+     "/usr/include/kineto"
+-    ${TORCH_ROOT}/third_party/cpp-httplib
+-    ${TORCH_ROOT}/third_party/nlohmann/include
+ 
+     ${TORCH_SRC_DIR}/csrc
+     ${TORCH_SRC_DIR}/csrc/api/include
+--- a/cmake/FlatBuffers.cmake
++++ b/cmake/FlatBuffers.cmake
+@@ -1,10 +1 @@
+-set(FlatBuffers_Include ${PROJECT_SOURCE_DIR}/third_party/flatbuffers/include)
+-file(GLOB FlatBuffers_Library_SRCS
+-  ${FlatBuffers_Include}/flatbuffers/*.h
+-)
+ add_library(flatbuffers INTERFACE)
+-target_sources(
+-  flatbuffers
+-  INTERFACE ${FlatBuffers_Library_SRCS}
+-)
+-target_include_directories(flatbuffers INTERFACE ${FlatBuffers_Include})

diff --git a/sci-ml/caffe2/files/composable-kernel-7fe50dc-expand-isa.patch b/sci-ml/caffe2/files/composable-kernel-7fe50dc-expand-isa.patch
new file mode 100644
index 000000000000..53dcaf71fb44
--- /dev/null
+++ b/sci-ml/caffe2/files/composable-kernel-7fe50dc-expand-isa.patch
@@ -0,0 +1,141 @@
+Fix for "undeclared identifier 'CK_BUFFER_RESOURCE_3RD_DWORD'" for 
AMDGPU_TARGETS="gfx1012".
+Combines of 3 patches from 
https://github.com/ROCm/composable_kernel/issues/775#issuecomment-2726315348
+
+Bug: https://bugs.gentoo.org/947583
+Bug: https://bugs.gentoo.org/show_bug.cgi?id=959808
+--- a/include/ck/ck.hpp
++++ b/include/ck/ck.hpp
+@@ -78,7 +78,7 @@
+ #define CK_BUFFER_RESOURCE_3RD_DWORD -1
+ #elif defined(__gfx803__) || defined(__gfx900__) || defined(__gfx906__) || defined(__gfx9__)
+ #define CK_BUFFER_RESOURCE_3RD_DWORD 0x00020000
+-#elif defined(__gfx103__)
++#elif defined(__gfx101__) || defined(__gfx103__)
+ #define CK_BUFFER_RESOURCE_3RD_DWORD 0x31014000
+ #elif defined(__gfx11__) || defined(__gfx12__)
+ #define CK_BUFFER_RESOURCE_3RD_DWORD 0x31004000
+@@ -86,12 +86,12 @@
+ 
+ // FMA instruction
+ #ifndef __HIP_DEVICE_COMPILE__                   // for host code, define nothing
+-#elif defined(__gfx803__) || defined(__gfx900__) // for GPU code
+-#define CK_USE_AMD_V_MAC_F32
+-#elif defined(__gfx906__) || defined(__gfx9__) || defined(__gfx103__) // for GPU code
++#elif defined(__gfx906__) || defined(__gfx9__) || defined(__gfx103__) || defined(__gfx1011__) || defined(__gfx1012__) // for GPU code
+ #define CK_USE_AMD_V_FMAC_F32
+ #define CK_USE_AMD_V_DOT2_F32_F16
+ #define CK_USE_AMD_V_DOT4_I32_I8
++#elif defined(__gfx803__) || defined(__gfx900__) || defined(__gfx101__) // for GPU code
++#define CK_USE_AMD_V_MAC_F32
+ #elif defined(__gfx11__) || defined(__gfx12__)
+ #define CK_USE_AMD_V_FMAC_F32
+ #define CK_USE_AMD_V_DOT2_F32_F16
+--- a/include/ck/tensor_operation/gpu/device/impl/device_batched_gemm_multiple_d_dl.hpp
++++ b/include/ck/tensor_operation/gpu/device/impl/device_batched_gemm_multiple_d_dl.hpp
+@@ -71,7 +71,7 @@ __launch_bounds__(CK_MAX_THREAD_PER_BLOCK, CK_MIN_BLOCK_PER_CU)
+         const Block2CTileMap block_2_ctile_map)
+ {
+ #if(defined(__gfx906__) || defined(__gfx908__) || defined(__gfx90a__) || defined(__gfx94__) || \
+-    defined(__gfx103__) || defined(__gfx11__) || defined(__gfx12__))
++    defined(__gfx101__) || defined(__gfx103__) || defined(__gfx11__) || defined(__gfx12__))
+ 
+     const index_t num_blocks_per_batch =
+         __builtin_amdgcn_readfirstlane(get_grid_size() / batch_count);
+--- a/include/ck/tensor_operation/gpu/device/impl/device_gemm_multiple_d_dl.hpp
++++ b/include/ck/tensor_operation/gpu/device/impl/device_gemm_multiple_d_dl.hpp
+@@ -50,7 +50,7 @@ __launch_bounds__(CK_MAX_THREAD_PER_BLOCK, CK_MIN_BLOCK_PER_CU)
+         const CGridDesc_M0_M10_M11_N0_N10_N11 e_grid_desc_m0_m10_m11_n0_n10_n11,
+         const Block2CTileMap block_2_ctile_map)
+ {
+-#if(defined(__gfx906__) || defined(__gfx9__) || defined(__gfx103__) || defined(__gfx11__) || \
++#if(defined(__gfx906__) || defined(__gfx9__) || defined(__gfx101__) || defined(__gfx103__) || defined(__gfx11__) || \
+     defined(__gfx12__))
+ 
+     constexpr index_t shared_block_size =
+--- a/include/ck/tensor_operation/gpu/device/impl/device_grouped_conv_bwd_weight_dl.hpp
++++ b/include/ck/tensor_operation/gpu/device/impl/device_grouped_conv_bwd_weight_dl.hpp
+@@ -48,7 +48,7 @@ __launch_bounds__(CK_MAX_THREAD_PER_BLOCK, CK_MIN_BLOCK_PER_CU)
+         const Block2CTileMap block_2_ctile_map,
+         const ComputePtrOffsetOfBatch compute_ptr_offset_of_batch)
+ {
+-#if(defined(__gfx906__) || defined(__gfx103__) || defined(__gfx90a__) || 
defined(__gfx908__) || \
++#if(defined(__gfx906__) || defined(__gfx101__) || defined(__gfx103__) || 
defined(__gfx90a__) || defined(__gfx908__) || \
+     defined(__gfx94__) || defined(__gfx11__) || defined(__gfx12__))
+     const index_t num_blocks_per_batch =
+         __builtin_amdgcn_readfirstlane(get_grid_size() / batch_count);
+--- a/include/ck/tensor_operation/gpu/device/impl/device_grouped_conv_fwd_dl_multiple_d_nhwc_kyxc_nhwk.hpp
++++ b/include/ck/tensor_operation/gpu/device/impl/device_grouped_conv_fwd_dl_multiple_d_nhwc_kyxc_nhwk.hpp
+@@ -90,7 +90,7 @@ __launch_bounds__(CK_MAX_THREAD_PER_BLOCK, CK_MIN_BLOCK_PER_CU)
+         const Block2CTileMap block_2_ctile_map,
+         const ComputePtrOffsetOfBatch compute_ptr_offset_of_batch)
+ {
+-#if(defined(__gfx906__) || defined(__gfx103__) || defined(__gfx90a__) || defined(__gfx908__) || \
++#if(defined(__gfx906__) || defined(__gfx101__) || defined(__gfx103__) || defined(__gfx90a__) || defined(__gfx908__) || \
+     defined(__gfx94__) || defined(__gfx11__) || defined(__gfx12__))
+     // offset base pointer for each work-group
+     const index_t num_blocks_per_batch =
+--- a/include/ck/tensor_operation/gpu/device/impl/device_grouped_conv_fwd_dl_nhwc_kyxc_nhwk.hpp
++++ b/include/ck/tensor_operation/gpu/device/impl/device_grouped_conv_fwd_dl_nhwc_kyxc_nhwk.hpp
+@@ -106,7 +106,7 @@ __launch_bounds__(CK_MAX_THREAD_PER_BLOCK, CK_MIN_BLOCK_PER_CU)
+         const Block2CTileMap block_2_ctile_map,
+         const ComputePtrOffsetOfBatch compute_ptr_offset_of_batch)
+ {
+-#if(defined(__gfx906__) || defined(__gfx103__) || defined(__gfx11__) || defined(__gfx12__))
++#if(defined(__gfx906__) || defined(__gfx101__) || defined(__gfx103__) || defined(__gfx11__) || defined(__gfx12__))
+     // offset base pointer for each work-group
+     const index_t num_blocks_per_batch =
+         __builtin_amdgcn_readfirstlane(get_grid_size() / batch_count);
+--- a/include/ck/tensor_operation/gpu/device/impl/device_grouped_gemm_multiple_d_dl.hpp
++++ b/include/ck/tensor_operation/gpu/device/impl/device_grouped_gemm_multiple_d_dl.hpp
+@@ -40,7 +40,7 @@ __launch_bounds__(CK_MAX_THREAD_PER_BLOCK, CK_MIN_BLOCK_PER_CU)
+                                       const BElementwiseOperation b_element_op,
+                                       const CDEElementwiseOperation cde_element_op)
+ {
+-#if(defined(__gfx906__) || defined(__gfx908__) || defined(__gfx90a__) || defined(__gfx103__) || \
++#if(defined(__gfx906__) || defined(__gfx908__) || defined(__gfx90a__) || defined(__gfx101__) || defined(__gfx103__) || \
+     defined(__gfx11__) || defined(__gfx94__) || defined(__gfx12__))
+     __shared__ char p_shared[GridwiseGemm::GetSharedMemoryNumberOfByte()];
+ 
+--- a/include/ck/tensor_operation/gpu/grid/gridwise_gemm_dpp.hpp
++++ b/include/ck/tensor_operation/gpu/grid/gridwise_gemm_dpp.hpp
+@@ -28,7 +28,7 @@ __launch_bounds__(CK_MAX_THREAD_PER_BLOCK, CK_MIN_BLOCK_PER_CU)
+ #endif
+     kernel_gemm_dpp(const typename GridwiseGemm::Argument karg)
+ {
+-#if(defined(__gfx103__) || defined(__gfx11__))
++#if(defined(__gfx101__) || defined(__gfx103__) || defined(__gfx11__))
+     __shared__ char p_shared[GridwiseGemm::GetSharedMemoryNumberOfByte()];
+ 
+     const auto a_grid_desc_ak0_m_ak1 = amd_wave_read_first_lane(
+--- a/include/ck/tensor_operation/gpu/grid/gridwise_tensor_rearrange.hpp
++++ b/include/ck/tensor_operation/gpu/grid/gridwise_tensor_rearrange.hpp
+@@ -36,7 +36,7 @@ __launch_bounds__(CK_MAX_THREAD_PER_BLOCK, CK_MIN_BLOCK_PER_CU)
+                             const ComputePtrOffsetOfStridedBatch compute_ptr_offset_of_batch)
+ {
+ #if(defined(__gfx906__) || defined(__gfx908__) || defined(__gfx90a__) || defined(__gfx94__) || \
+-    defined(__gfx103__) || defined(__gfx11__) || defined(__gfx12__))
++    defined(__gfx101__) || defined(__gfx103__) || defined(__gfx11__) || defined(__gfx12__))
+     GridwiseTensorRearrangeKernel::Run(in_grid_desc,
+                                        p_in_global,
+                                        out_grid_desc,
+--- a/include/ck_tile/core/config.hpp
++++ b/include/ck_tile/core/config.hpp
+@@ -9,6 +9,9 @@
+ #if defined(__gfx942__) || defined(__gfx950__)
+ #define __gfx94__
+ #endif
++#if defined(__gfx1010__) || defined(__gfx1011__) || defined(__gfx1012__)
++#define __gfx101__
++#endif
+ #if defined(__gfx1030__) || defined(__gfx1031__) || defined(__gfx1032__) || \
+     defined(__gfx1034__) || defined(__gfx1035__) || defined(__gfx1036__) || \
+     defined(__gfx10_3_generic__)
+@@ -200,7 +203,7 @@
+ #elif defined(__gfx803__) || defined(__gfx900__) || defined(__gfx906__) || \
+     defined(__gfx9__) // for GPU code
+ #define CK_TILE_BUFFER_RESOURCE_3RD_DWORD 0x00020000
+-#elif defined(__gfx103__) // for GPU code
++#elif defined(__gfx101__) || defined(__gfx103__) // for GPU code
+ #define CK_TILE_BUFFER_RESOURCE_3RD_DWORD 0x31014000
+ #elif defined(__gfx11__) || defined(__gfx12__) // for GPU code
+ #define CK_TILE_BUFFER_RESOURCE_3RD_DWORD 0x31004000

diff --git a/sci-ml/caffe2/metadata.xml b/sci-ml/caffe2/metadata.xml
index 961dd624f404..16e166ccaf7b 100644
--- a/sci-ml/caffe2/metadata.xml
+++ b/sci-ml/caffe2/metadata.xml
@@ -21,6 +21,7 @@
                <flag name="qnnpack">Use QNNPACK</flag>
                <flag name="rocm">Enable ROCm gpu computing support</flag>
                <flag name="xnnpack">Use <pkg>sci-ml/XNNPACK</pkg></flag>
+               <flag name="nccl">Use <pkg>dev-libs/rccl</pkg> (NCCL 
compatible) backend for distributed operations</flag>
        </use>
        <upstream>
                <remote-id type="github">pytorch/pytorch</remote-id>

