commit:     e933540939add62cd4de6a464ddf73f912c423e6
Author:     Sergey Alirzaev <l29ah <AT> riseup <DOT> net>
AuthorDate: Mon Jan 27 11:52:32 2025 +0000
Commit:     David Roman <davidroman96 <AT> gmail <DOT> com>
CommitDate: Mon Jan 27 11:52:32 2025 +0000
URL:        https://gitweb.gentoo.org/repo/proj/guru.git/commit/?id=e9335409

sci-misc/llama-cpp: new package, add 0_pre4564, 9999

Signed-off-by: Sergey Alirzaev <l29ah <AT> riseup.net>

 sci-misc/llama-cpp/llama-cpp-0_pre4564.ebuild | 43 +++++++++++++++++++++++++++
 sci-misc/llama-cpp/llama-cpp-9999.ebuild      | 43 +++++++++++++++++++++++++++
 2 files changed, 86 insertions(+)

diff --git a/sci-misc/llama-cpp/llama-cpp-0_pre4564.ebuild b/sci-misc/llama-cpp/llama-cpp-0_pre4564.ebuild
new file mode 100644
index 000000000..5d2ad01ea
--- /dev/null
+++ b/sci-misc/llama-cpp/llama-cpp-0_pre4564.ebuild
@@ -0,0 +1,43 @@
+# Copyright 2023 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+EAPI=8
+
+inherit cmake llvm
+
+LLVM_MAX_SLOT=16
+
+EGIT_REPO_URI="https://github.com/ggerganov/llama.cpp.git";
+inherit git-r3
+
+if [[ "${PV}" != "9999" ]]; then
+       KEYWORDS="~amd64 ~arm64"
+       EGIT_COMMIT="b${PV#0_pre}"
+fi
+
+DESCRIPTION="Port of Facebook's LLaMA model in C/C++"
+HOMEPAGE="https://github.com/ggerganov/llama.cpp";
+
+LICENSE="MIT"
+SLOT="0"
+IUSE="cublas tests tools"
+CPU_FLAGS_X86=( avx avx2 f16c )
+
+DEPEND="
+       cublas? ( dev-util/nvidia-cuda-toolkit )"
+RDEPEND="${DEPEND}"
+BDEPEND="${DEPEND}"
+
+src_configure() {
+       local mycmakeargs=(
+               -DLLAMA_CUBLAS="$(usex cublas)"
+               -DLLAMA_BUILD_TESTS="$(usex tests)"
+               -DLLAMA_BUILD_SERVER=OFF
+               -DCMAKE_SKIP_BUILD_RPATH=ON
+               -DBUILD_NUMBER="1"
+       )
+       if use cublas ; then
+               addpredict /dev/nvidiactl
+       fi
+       cmake_src_configure
+}

diff --git a/sci-misc/llama-cpp/llama-cpp-9999.ebuild b/sci-misc/llama-cpp/llama-cpp-9999.ebuild
new file mode 100644
index 000000000..5d2ad01ea
--- /dev/null
+++ b/sci-misc/llama-cpp/llama-cpp-9999.ebuild
@@ -0,0 +1,43 @@
+# Copyright 2023 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+EAPI=8
+
+inherit cmake llvm
+
+LLVM_MAX_SLOT=16
+
+EGIT_REPO_URI="https://github.com/ggerganov/llama.cpp.git";
+inherit git-r3
+
+if [[ "${PV}" != "9999" ]]; then
+       KEYWORDS="~amd64 ~arm64"
+       EGIT_COMMIT="b${PV#0_pre}"
+fi
+
+DESCRIPTION="Port of Facebook's LLaMA model in C/C++"
+HOMEPAGE="https://github.com/ggerganov/llama.cpp";
+
+LICENSE="MIT"
+SLOT="0"
+IUSE="cublas tests tools"
+CPU_FLAGS_X86=( avx avx2 f16c )
+
+DEPEND="
+       cublas? ( dev-util/nvidia-cuda-toolkit )"
+RDEPEND="${DEPEND}"
+BDEPEND="${DEPEND}"
+
+src_configure() {
+       local mycmakeargs=(
+               -DLLAMA_CUBLAS="$(usex cublas)"
+               -DLLAMA_BUILD_TESTS="$(usex tests)"
+               -DLLAMA_BUILD_SERVER=OFF
+               -DCMAKE_SKIP_BUILD_RPATH=ON
+               -DBUILD_NUMBER="1"
+       )
+       if use cublas ; then
+               addpredict /dev/nvidiactl
+       fi
+       cmake_src_configure
+}

Reply via email to