commit:     b2cf10b4087169fd6ec3bf907138c938a282248e
Author:     Sergey Alirzaev <l29ah <AT> riseup <DOT> net>
AuthorDate: Mon Nov 10 02:24:29 2025 +0000
Commit:     David Roman <davidroman96 <AT> gmail <DOT> com>
CommitDate: Mon Nov 10 02:24:29 2025 +0000
URL:        https://gitweb.gentoo.org/repo/proj/guru.git/commit/?id=b2cf10b4

sci-misc/llama-cpp: + flexiblas backend

Signed-off-by: Sergey Alirzaev <l29ah <AT> riseup.net>

 sci-misc/llama-cpp/llama-cpp-0_pre6980.ebuild | 11 +++++++++--
 sci-misc/llama-cpp/llama-cpp-9999.ebuild      | 11 +++++++++--
 sci-misc/llama-cpp/metadata.xml               |  1 +
 3 files changed, 19 insertions(+), 4 deletions(-)

diff --git a/sci-misc/llama-cpp/llama-cpp-0_pre6980.ebuild b/sci-misc/llama-cpp/llama-cpp-0_pre6980.ebuild
index acbdfc0735..a81045e6d7 100644
--- a/sci-misc/llama-cpp/llama-cpp-0_pre6980.ebuild
+++ b/sci-misc/llama-cpp/llama-cpp-0_pre6980.ebuild
@@ -23,8 +23,8 @@ HOMEPAGE="https://github.com/ggml-org/llama.cpp";
 LICENSE="MIT"
 SLOT="0"
 CPU_FLAGS_X86=( avx avx2 f16c )
-IUSE="curl openblas +openmp blis hip cuda opencl vulkan"
-REQUIRED_USE="?? ( openblas blis )"
+IUSE="curl openblas +openmp blis hip cuda opencl vulkan flexiblas"
+REQUIRED_USE="?? ( openblas blis flexiblas )"
 
 # curl is needed for pulling models from huggingface
 # numpy is used by convert_hf_to_gguf.py
@@ -33,6 +33,7 @@ CDEPEND="
        openblas? ( sci-libs/openblas:= )
        openmp? ( llvm-runtimes/openmp:= )
        blis? ( sci-libs/blis:= )
+       flexiblas? ( sci-libs/flexiblas:= )
        hip? ( >=dev-util/hip-6.3:=
                >=sci-libs/hipBLAS-6.3:=
        )
@@ -99,6 +100,12 @@ src_configure() {
                )
        fi
 
+       if use flexiblas; then
+               mycmakeargs+=(
+                       -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=FlexiBLAS
+               )
+       fi
+
        if use cuda; then
                local -x CUDAHOSTCXX="$(cuda_gccdir)"
                # tries to recreate dev symlinks

diff --git a/sci-misc/llama-cpp/llama-cpp-9999.ebuild b/sci-misc/llama-cpp/llama-cpp-9999.ebuild
index acbdfc0735..a81045e6d7 100644
--- a/sci-misc/llama-cpp/llama-cpp-9999.ebuild
+++ b/sci-misc/llama-cpp/llama-cpp-9999.ebuild
@@ -23,8 +23,8 @@ HOMEPAGE="https://github.com/ggml-org/llama.cpp";
 LICENSE="MIT"
 SLOT="0"
 CPU_FLAGS_X86=( avx avx2 f16c )
-IUSE="curl openblas +openmp blis hip cuda opencl vulkan"
-REQUIRED_USE="?? ( openblas blis )"
+IUSE="curl openblas +openmp blis hip cuda opencl vulkan flexiblas"
+REQUIRED_USE="?? ( openblas blis flexiblas )"
 
 # curl is needed for pulling models from huggingface
 # numpy is used by convert_hf_to_gguf.py
@@ -33,6 +33,7 @@ CDEPEND="
        openblas? ( sci-libs/openblas:= )
        openmp? ( llvm-runtimes/openmp:= )
        blis? ( sci-libs/blis:= )
+       flexiblas? ( sci-libs/flexiblas:= )
        hip? ( >=dev-util/hip-6.3:=
                >=sci-libs/hipBLAS-6.3:=
        )
@@ -99,6 +100,12 @@ src_configure() {
                )
        fi
 
+       if use flexiblas; then
+               mycmakeargs+=(
+                       -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=FlexiBLAS
+               )
+       fi
+
        if use cuda; then
                local -x CUDAHOSTCXX="$(cuda_gccdir)"
                # tries to recreate dev symlinks

diff --git a/sci-misc/llama-cpp/metadata.xml b/sci-misc/llama-cpp/metadata.xml
index bd33ace1c0..63987291db 100644
--- a/sci-misc/llama-cpp/metadata.xml
+++ b/sci-misc/llama-cpp/metadata.xml
@@ -6,6 +6,7 @@
        </upstream>
        <use>
                <flag name="blis">Build a BLIS backend</flag>
+               <flag name="flexiblas">Build a FlexiBLAS backend</flag>
                <flag name="hip">Build a HIP (ROCm) backend</flag>
                <flag name="openblas">Build an OpenBLAS backend</flag>
                <flag name="opencl">Build an OpenCL backend, so far only works on Adreno and Intel GPUs</flag>

Reply via email to