commit:     fe3e646a7e00b5995b8e0d5d03dea436ec75b7d8
Author:     Paul Zander <negril.nx+gentoo <AT> gmail <DOT> com>
AuthorDate: Mon Jan 26 14:13:23 2026 +0000
Commit:     Paul Zander <negril.nx+gentoo <AT> gmail <DOT> com>
CommitDate: Mon Jan 26 14:13:23 2026 +0000
URL:        https://gitweb.gentoo.org/repo/proj/guru.git/commit/?id=fe3e646a

sci-misc/llama-cpp: add 0_pre7836

Signed-off-by: Paul Zander <negril.nx+gentoo <AT> gmail.com>

 sci-misc/llama-cpp/Manifest                   |   1 +
 sci-misc/llama-cpp/llama-cpp-0_pre7836.ebuild | 162 ++++++++++++++++++++++++++
 2 files changed, 163 insertions(+)

diff --git a/sci-misc/llama-cpp/Manifest b/sci-misc/llama-cpp/Manifest
index 3fdd253743..5c86cf9090 100644
--- a/sci-misc/llama-cpp/Manifest
+++ b/sci-misc/llama-cpp/Manifest
@@ -5,3 +5,4 @@ DIST llama-cpp-0_pre6980.tar.gz 26431911 BLAKE2B b7d7c0dcdabde01acb816e73bc34456
 DIST llama-cpp-0_pre7276.tar.gz 27765814 BLAKE2B d0553ab1dd29c9d93a18c6217aab4553faf09e385a94b90732a537bbcf9bded54d5cda28553543e2c0cc71b6a157bfb80a48405f3f8281c51525757967b33e16 SHA512 3035fe53fea2ca3b0f35e479f4eaec75e38a2ea670600445776cd6fa696fc83ca19eb6dd7cd2ab1da69e78293c62318b5182e6e5b3423ae6c1f00854c5132a4c
 DIST llama-cpp-0_pre7611.tar.gz 28622786 BLAKE2B 3c345645c9bcf07d8a513b9e883619b31b5254581f73429d638403758429fd2dfc5f78a22d538e8d88eb6c1be74bf805481af697480727ed750492ddec5c37fe SHA512 c6c4780d7e68adfc385b57c6f7530423f8205bfa283572b0d414d55e143c03307e98676e41ad527c37d7837f831f8ff24be0f7bf59e366ea82f3802cdc946821
 DIST llama-cpp-0_pre7770.tar.gz 28797089 BLAKE2B 0ad614f16c19ff1339571dd90be566ff4ccedfd991dfeb948f0cedc54f8447a3e72e35ac392bb60d5bc8a44d1757be3bea0123eac02b694f5f0a8f2c2b941b6a SHA512 78896fdcf05330bb4b1fd86a985da56882d0166c6f276afda273dca183e4c7365decd5e9630c8d633e065162cbb06d6d8e4fdcf76be9768b238ee5053abaa3aa
+DIST llama-cpp-0_pre7836.tar.gz 28813563 BLAKE2B c843c0199b528114c23c58536552e2a7f581fda93364353e8aa38fda99ddb1942c421ad00be4daf86b49a2c311ef89bb6908b8900ca1ffb64df338cbfa11c354 SHA512 161176107de175d9b6fdc1ebfa9fe0cf6fd6968245730f7df7f17c6a179ef111685d0fed5a45031825dc4b07db270887a4307061f9918e375a1c65f261062d9d

diff --git a/sci-misc/llama-cpp/llama-cpp-0_pre7836.ebuild b/sci-misc/llama-cpp/llama-cpp-0_pre7836.ebuild
new file mode 100644
index 0000000000..ff61000fd0
--- /dev/null
+++ b/sci-misc/llama-cpp/llama-cpp-0_pre7836.ebuild
@@ -0,0 +1,162 @@
+# Copyright 2026 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+EAPI=8
+
+ROCM_VERSION="6.3"
+
+inherit cmake cuda rocm linux-info
+
+TINY_LLAMAS_COMMIT="99dd1a73db5a37100bd4ae633f4cfce6560e1567"
+
+DESCRIPTION="Port of Facebook's LLaMA model in C/C++"
+HOMEPAGE="https://github.com/ggml-org/llama.cpp"
+
+if [[ ${PV} == *9999* ]]; then
+       inherit git-r3
+       EGIT_REPO_URI="https://github.com/ggml-org/llama.cpp.git"
+else
+       MY_PV="b${PV#0_pre}"
+       SRC_URI="https://github.com/ggml-org/llama.cpp/archive/refs/tags/${MY_PV}.tar.gz -> ${P}.tar.gz"
+       S="${WORKDIR}/llama.cpp-${MY_PV}"
+       KEYWORDS="~amd64"
+fi
+
+SRC_URI+="
+       examples? (
+               https://huggingface.co/ggml-org/tiny-llamas/resolve/${TINY_LLAMAS_COMMIT}/stories15M-q4_0.gguf
+                       -> ggml-org_models_tinyllamas_stories15M-q4_0-${TINY_LLAMAS_COMMIT}.gguf
+       )
+"
+
+LICENSE="MIT"
+SLOT="0"
+CPU_FLAGS_X86=( avx avx2 f16c )
+
+# wmma USE explained here: https://github.com/ggml-org/llama.cpp/blob/master/docs/build.md#hip
+IUSE="curl openblas +openmp blis rocm cuda opencl vulkan flexiblas wmma examples"
+
+REQUIRED_USE="
+       ?? (
+               openblas
+               blis
+               flexiblas
+       )
+       wmma? (
+               rocm
+       )
+"
+
+# curl is needed for pulling models from huggingface
+# numpy is used by convert_hf_to_gguf.py
+CDEPEND="
+       curl? ( net-misc/curl:= )
+       openblas? ( sci-libs/openblas:= )
+       openmp? ( llvm-runtimes/openmp:= )
+       blis? ( sci-libs/blis:= )
+       flexiblas? ( sci-libs/flexiblas:= )
+       rocm? (
+               >=dev-util/hip-${ROCM_VERSION}:=
+               >=sci-libs/hipBLAS-${ROCM_VERSION}:=
+               wmma? (
+                       >=sci-libs/rocWMMA-${ROCM_VERSION}:=
+               )
+       )
+       cuda? ( dev-util/nvidia-cuda-toolkit:= )
+"
+DEPEND="${CDEPEND}
+       opencl? ( dev-util/opencl-headers )
+       vulkan? ( dev-util/vulkan-headers )
+"
+RDEPEND="${CDEPEND}
+       dev-python/numpy
+       opencl? ( dev-libs/opencl-icd-loader )
+       vulkan? ( media-libs/vulkan-loader )
+"
+BDEPEND="media-libs/shaderc"
+
+pkg_setup() {
+       if use rocm; then
+               linux-info_pkg_setup
+               if linux-info_get_any_version && linux_config_exists; then
+                       if ! linux_chkconfig_present HSA_AMD_SVM; then
+                               ewarn "To use ROCm/HIP, you need to have the HSA_AMD_SVM option enabled in your kernel."
+                       fi
+               fi
+       fi
+}
+
+src_prepare() {
+       use cuda && cuda_src_prepare
+       cmake_src_prepare
+       if use examples; then
+               mkdir -p "${BUILD_DIR}/tinyllamas" || die
+               cp "${DISTDIR}/ggml-org_models_tinyllamas_stories15M-q4_0-${TINY_LLAMAS_COMMIT}.gguf" \
+                       "${BUILD_DIR}/tinyllamas/stories15M-q4_0.gguf" || die
+       fi
+}
+
+src_configure() {
+       local mycmakeargs=(
+               -DLLAMA_BUILD_TESTS=OFF
+               -DLLAMA_BUILD_EXAMPLES=$(usex examples)
+               -DLLAMA_BUILD_SERVER=ON
+               -DCMAKE_SKIP_BUILD_RPATH=ON
+               -DGGML_NATIVE=0 # don't set march
+               -DGGML_RPC=ON
+               -DLLAMA_CURL=$(usex curl)
+               -DBUILD_NUMBER="1"
+               -DGENTOO_REMOVE_CMAKE_BLAS_HACK=ON
+               -DGGML_CUDA=$(usex cuda)
+               -DGGML_OPENCL=$(usex opencl)
+               -DGGML_OPENMP=$(usex openmp)
+               -DGGML_VULKAN=$(usex vulkan)
+
+               # avoid clashing with whisper.cpp
+               -DCMAKE_INSTALL_LIBDIR="${EPREFIX}/usr/$(get_libdir)/llama.cpp"
+               -DCMAKE_INSTALL_RPATH="${EPREFIX}/usr/$(get_libdir)/llama.cpp"
+       )
+
+       if use openblas ; then
+               mycmakeargs+=(
+                       -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS
+               )
+       fi
+
+       if use blis ; then
+               mycmakeargs+=(
+                       -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=FLAME
+               )
+       fi
+
+       if use flexiblas; then
+               mycmakeargs+=(
+                       -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=FlexiBLAS
+               )
+       fi
+
+       if use cuda; then
+               local -x CUDAHOSTCXX="$(cuda_gccdir)"
+               # tries to recreate dev symlinks
+               cuda_add_sandbox
+               addpredict "/dev/char/"
+       fi
+
+       if use rocm; then
+               rocm_use_hipcc
+               mycmakeargs+=(
+                       -DGGML_HIP=ON -DAMDGPU_TARGETS=$(get_amdgpu_flags)
+                       -DGGML_HIP_ROCWMMA_FATTN=$(usex wmma)
+               )
+       fi
+
+       cmake_src_configure
+}
+
+src_install() {
+       cmake_src_install
+       dobin "${BUILD_DIR}/bin/rpc-server"
+
+       # avoid clashing with whisper.cpp
+       rm -rf "${ED}/usr/include"
+}
