commit: dba4c2b3be4988d53c662f849e3785bf45488ddf
Author: Sergey Alirzaev <l29ah <AT> riseup <DOT> net>
AuthorDate: Sun Feb 23 17:54:20 2025 +0000
Commit: David Roman <davidroman96 <AT> gmail <DOT> com>
CommitDate: Sun Feb 23 17:54:20 2025 +0000
URL: https://gitweb.gentoo.org/repo/proj/guru.git/commit/?id=dba4c2b3
sci-misc/llama-cpp: add 0_pre4763
Signed-off-by: Sergey Alirzaev <l29ah <AT> riseup.net>
sci-misc/llama-cpp/Manifest | 1 +
sci-misc/llama-cpp/llama-cpp-0_pre4763.ebuild | 93 +++++++++++++++++++++++++++
2 files changed, 94 insertions(+)
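The bump follows the overlay's existing versioning scheme: Gentoo version 0_pre4763 corresponds to the upstream release tag b4763, which is what the ebuild fetches. A minimal sketch of that mapping, using only variables defined in the ebuild below (PV is set by hand here since this runs outside Portage):

    PV="0_pre4763"                 # Gentoo package version
    MY_PV="b${PV#0_pre}"           # strip the 0_pre prefix -> "b4763", the upstream tag
    echo "https://github.com/ggerganov/llama.cpp/archive/refs/tags/${MY_PV}.tar.gz"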
diff --git a/sci-misc/llama-cpp/Manifest b/sci-misc/llama-cpp/Manifest
index b7c2a37bb..3085f9c75 100644
--- a/sci-misc/llama-cpp/Manifest
+++ b/sci-misc/llama-cpp/Manifest
@@ -1 +1,2 @@
DIST llama-cpp-0_pre4576.tar.gz 20506059 BLAKE2B 8f011811e4df1f8d0c26b19f96a709980e078dc7e769b33cbbb03a852a29b489f80c8a1e298fecea53997068f6b7897e4536ba5db289aa445a1a6f16f98adce3 SHA512 21150721524283454ab53e370fdaf4e766f89fbb8d4b43072b10657d8c8b686630616cddbae7954147a2ba0360ad20c4643761f3774481e13a7b180812935c4e
+DIST llama-cpp-0_pre4763.tar.gz 20737582 BLAKE2B f6cb6885465e144c19698ac65410f59a6cc2b78d511968bc26c521ba90be87d102eb413e3ef903da30dae3336780e80a4c20cbbea30cc67375f790567e0e6e7a SHA512 c48923286e717d734a3414ae12182c869dd0a99fde722b46d48822a9cbcc5fc16ec5ade4108bd463990a3c9880ea58b559ba0a6975d04c348b474893df566bc9
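The Manifest entry records the distfile name, its size in bytes, and BLAKE2B/SHA512 checksums. It is normally regenerated rather than edited by hand; a typical command, run from inside the package directory, would be something like:

    ebuild llama-cpp-0_pre4763.ebuild manifest   # fetches the distfile if needed and rewrites Manifest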
diff --git a/sci-misc/llama-cpp/llama-cpp-0_pre4763.ebuild b/sci-misc/llama-cpp/llama-cpp-0_pre4763.ebuild
new file mode 100644
index 000000000..b4db64b49
--- /dev/null
+++ b/sci-misc/llama-cpp/llama-cpp-0_pre4763.ebuild
@@ -0,0 +1,93 @@
+# Copyright 2025 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+EAPI=8
+
+ROCM_VERSION="6.3"
+
+inherit cmake rocm
+
+if [[ "${PV}" != "9999" ]]; then
+ KEYWORDS="~amd64"
+ MY_PV="b${PV#0_pre}"
+ S="${WORKDIR}/llama.cpp-${MY_PV}"
+
+ SRC_URI="https://github.com/ggerganov/llama.cpp/archive/refs/tags/${MY_PV}.tar.gz -> ${P}.tar.gz"
+else
+ inherit git-r3
+ EGIT_REPO_URI="https://github.com/ggerganov/llama.cpp.git"
+fi
+
+DESCRIPTION="Port of Facebook's LLaMA model in C/C++"
+HOMEPAGE="https://github.com/ggerganov/llama.cpp"
+
+LICENSE="MIT"
+SLOT="0"
+CPU_FLAGS_X86=( avx avx2 f16c )
+IUSE="curl openblas blis hip"
+REQUIRED_USE="?? ( openblas blis )"
+
+AMDGPU_TARGETS_COMPAT=(
+ gfx900
+ gfx90c
+ gfx902
+ gfx1010
+ gfx1011
+ gfx1012
+ gfx1030
+ gfx1031
+ gfx1032
+ gfx1034
+ gfx1035
+ gfx1036
+ gfx1100
+ gfx1101
+ gfx1102
+ gfx1103
+ gfx1150
+ gfx1151
+)
+
+# curl is needed for pulling models from huggingface
+# numpy is used by convert_hf_to_gguf.py
+DEPEND="
+ curl? ( net-misc/curl:= )
+ openblas? ( sci-libs/openblas:= )
+ blis? ( sci-libs/blis:= )
+ hip? ( >=dev-util/hip-6.3:= )
+"
+RDEPEND="${DEPEND}
+ dev-python/numpy
+"
+PATCHES=( "${FILESDIR}/blas-ld.diff" )
+
+src_configure() {
+ local mycmakeargs=(
+ -DLLAMA_BUILD_TESTS=OFF
+ -DLLAMA_BUILD_SERVER=ON
+ -DCMAKE_SKIP_BUILD_RPATH=ON
+ -DGGML_NATIVE=0 # don't set march
+ -DLLAMA_CURL=$(usex curl ON OFF)
+ -DBUILD_NUMBER="1"
+ )
+
+ if use openblas ; then
+ mycmakeargs+=(
+ -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS
+ )
+ fi
+
+ if use blis ; then
+ mycmakeargs+=(
+ -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=FLAME
+ )
+ fi
+
+ if use hip; then
+ rocm_use_hipcc
+ mycmakeargs+=(
+ -DGGML_HIP=ON -DAMDGPU_TARGETS=$(get_amdgpu_flags)
+ )
+ fi
+
+ cmake_src_configure
+}
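For reference, the optional backends map one-to-one onto USE flags (names taken from the IUSE line above), and REQUIRED_USE allows at most one BLAS provider. A hypothetical /etc/portage/package.use entry enabling the HTTP model downloader and the ROCm backend might look like:

    # sci-misc/llama-cpp: pull models over HTTP, build the HIP/ROCm backend
    sci-misc/llama-cpp curl hip

With hip enabled, one of the amdgpu_targets_* flags derived from AMDGPU_TARGETS_COMPAT (for example amdgpu_targets_gfx1030) should also be set so get_amdgpu_flags can pass the matching -DAMDGPU_TARGETS value to CMake.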