commit:     bfaa93dcace907596bee18483e11f9831f5d6cf9
Author:     Yahor Berdnikau <egorr.berd <AT> gmail <DOT> com>
AuthorDate: Fri Jan 31 18:40:07 2025 +0000
Commit:     David Roman <davidroman96 <AT> gmail <DOT> com>
CommitDate: Fri Jan 31 18:40:59 2025 +0000
URL:        https://gitweb.gentoo.org/repo/proj/guru.git/commit/?id=bfaa93dc

app-misc/ollama: add 0.5.7

Bug: https://bugs.gentoo.org/946268
Signed-off-by: Yahor Berdnikau <egorr.berd <AT> gmail.com>

 app-misc/ollama/Manifest            |   2 +
 app-misc/ollama/ollama-0.5.7.ebuild | 113 ++++++++++++++++++++++++++++++++++++
 2 files changed, 115 insertions(+)

diff --git a/app-misc/ollama/Manifest b/app-misc/ollama/Manifest
new file mode 100644
index 000000000..0d00a5bb0
--- /dev/null
+++ b/app-misc/ollama/Manifest
@@ -0,0 +1,2 @@
+DIST ollama-0.5.7-deps.tar.xz 115361652 BLAKE2B 8694b35ef2545481c2e1f6a9d7c462177f25e78aab79288197ec4b33190a905c7e92e54734725cde7f122e1161cbdaf9c72ae40cbeb0b8ee0af9075e3dbb6691 SHA512 d7abe6266120cb9e731e53f6e14e16d8991e76fbab06348de945f53dc407788324b850308e7200616d92dd17a417ad0a2491eddd543bbe2cfee6a66c8ab81840
+DIST ollama-0.5.7.gh.tar.gz 2345089 BLAKE2B 7d5063e9f665ab2a957d449b38017e6a9bb435c938749161c711cfc35d8a0361e7f4db214e0782f3b51c70c909fc5be8b76ca342cda6163b5aca5fdd733c55d9 SHA512 ea8adcec4f8f932c422a400b8cafb4b983bfa0721cd14383ceb8e0a4f588ecd1289d2e1de46a916c1b34d13e5dab2825ef11a37fc3e797345348dea3bd9144fe

diff --git a/app-misc/ollama/ollama-0.5.7.ebuild 
b/app-misc/ollama/ollama-0.5.7.ebuild
new file mode 100644
index 000000000..408c054e1
--- /dev/null
+++ b/app-misc/ollama/ollama-0.5.7.ebuild
@@ -0,0 +1,113 @@
+# Copyright 2024-2025 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+EAPI=8
+
+ROCM_VERSION=6.1
+inherit go-module rocm
+
+DESCRIPTION="Get up and running with Llama 3, Mistral, Gemma, and other 
language models."
+HOMEPAGE="https://ollama.com";
+SRC_URI="https://github.com/ollama/${PN}/archive/refs/tags/v${PV}.tar.gz -> 
${P}.gh.tar.gz"
+SRC_URI+=" 
https://github.com/Tapchicoma/ebuild-deps/raw/refs/heads/main/go-deps/${PN}-${PV}-deps.tar.xz";
+S="${WORKDIR}/${PN}-${PV}"
+LICENSE="MIT"
+SLOT="0"
+
+IUSE="cuda video_cards_amdgpu
+cpu_flags_x86_avx cpu_flags_x86_avx2
+cpu_flags_x86_avx512f cpu_flags_x86_avx512vbmi cpu_flags_x86_avx512_vnni 
cpu_flags_x86_avx512_bf16
+"
+
+REQUIRED_USE="
+       cpu_flags_x86_avx2? ( cpu_flags_x86_avx )
+       cpu_flags_x86_avx512f? ( cpu_flags_x86_avx2 )
+       cpu_flags_x86_avx512vbmi? ( cpu_flags_x86_avx512f )
+       cpu_flags_x86_avx512_vnni? ( cpu_flags_x86_avx512f )
+       cpu_flags_x86_avx512_bf16? ( cpu_flags_x86_avx512f )
+"
+
+RDEPEND="
+       acct-group/ollama
+       acct-user/ollama
+"
+
+DEPEND="
+       >=dev-lang/go-1.23.4
+       >=dev-build/cmake-3.24
+       >=sys-devel/gcc-11.4.0
+       cuda? ( dev-util/nvidia-cuda-toolkit )
+       video_cards_amdgpu? (
+               sci-libs/hipBLAS[${ROCM_USEDEP}]
+       )
+"
+
+# Warn early -- before any fetching or building -- that the ROCm/AMD code
+# path is experimental, and point at upstream docker-based workarounds.
+pkg_pretend() {
+       if use video_cards_amdgpu; then
+               # Fixed grammar in the user-facing warning ("are" -> "is").
+               ewarn "WARNING: AMD support in this ebuild is experimental"
+               einfo "If you run into issues, especially compiling dev-libs/rocm-opencl-runtime"
+               einfo "you may try the docker image here https://github.com/ROCm/ROCm-docker"
+               einfo "and follow instructions here"
+               einfo "https://rocm.docs.amd.com/projects/install-on-linux/en/latest/how-to/docker.html"
+       fi
+}
+
+# Apply user patches, then strip -Werror from Go sources for ROCm builds.
+src_prepare() {
+       default
+
+       if use video_cards_amdgpu; then
+               # --hip-version gets appended to the compile flags which isn't a known flag.
+               # This causes rocm builds to fail because -Wunused-command-line-argument is turned on.
+               # Use nuclear option to fix this.
+               # Disable -Werror's from go modules.
+               # Bug fix: '-name ".go"' only matched files literally named
+               # ".go"; the glob "*.go" is required to reach the Go sources.
+               find "${S}" -name "*.go" -exec sed -i "s/ -Werror / /g" {} + || die
+       fi
+}
+
+# Build the ollama binary with the selected CPU features and GPU backends.
+src_compile() {
+       # Assemble the comma-separated feature list consumed by the upstream
+       # build; REQUIRED_USE chains the flags, so "avx" is always present
+       # first and the list never begins with a stray comma.
+       CUSTOM_CPU_FLAGS=""
+       use cpu_flags_x86_avx && CUSTOM_CPU_FLAGS+="avx"
+       use cpu_flags_x86_avx2 && CUSTOM_CPU_FLAGS+=",avx2"
+       use cpu_flags_x86_avx512f && CUSTOM_CPU_FLAGS+=",avx512"
+       use cpu_flags_x86_avx512vbmi && CUSTOM_CPU_FLAGS+=",avx512vbmi"
+       use cpu_flags_x86_avx512_vnni && CUSTOM_CPU_FLAGS+=",avx512vnni"
+       use cpu_flags_x86_avx512_bf16 && CUSTOM_CPU_FLAGS+=",avx512bf16"
+
+       # Build basic ollama executable with cpu features built in
+       export CUSTOM_CPU_FLAGS
+
+       if use video_cards_amdgpu; then
+               # Quote the command substitution: arguments of the export
+               # builtin undergo word splitting, which would break a
+               # multi-word GPU arch list.
+               export HIP_ARCHS="$(get_amdgpu_flags)"
+               export HIP_PATH="/usr"
+       else
+               export OLLAMA_SKIP_ROCM_GENERATE=1
+       fi
+
+       if ! use cuda; then
+               export OLLAMA_SKIP_CUDA_GENERATE=1
+       fi
+       emake dist
+}
+
+src_install() {
+       dobin dist/linux-${ARCH}/bin/ollama
+
+       if [[ -d "dist/linux-${ARCH}/lib/ollama" ]] ; then
+               insinto /usr/lib
+               doins -r dist/linux-${ARCH}/lib/ollama
+       fi
+
+       doinitd "${FILESDIR}"/ollama
+}
+
+# Create the log directory and hand it to the service user before the
+# image is merged to the live filesystem.
+# NOTE(review): PMS lists keepdir as a src_install-phase helper; verify it
+# is legal in pkg_preinst under EAPI 8, or move the keepdir call into
+# src_install.
+pkg_preinst() {
+       keepdir /var/log/ollama
+       fowners ollama:ollama /var/log/ollama
+}
+
+# Print a short quick-start hint after merging.
+pkg_postinst() {
+       einfo "Quick guide:"
+       einfo "ollama serve"
+       einfo "ollama run llama3:70b"
+       einfo "See available models at https://ollama.com/library"
+}

Reply via email to