commit:     2942da049443968012e3b573b5206753ab323711
Author:     Matthias Maier <tamiko <AT> gentoo <DOT> org>
AuthorDate: Fri May 23 18:51:41 2025 +0000
Commit:     Matthias Maier <tamiko <AT> gentoo <DOT> org>
CommitDate: Tue Jun  3 04:42:05 2025 +0000
URL:        https://gitweb.gentoo.org/repo/dev/tamiko.git/commit/?id=2942da04

sci-ml/ollama: add a live build variant

Signed-off-by: Matthias Maier <tamiko <AT> gentoo.org>

 .../files/ollama-0.6.3-use-GNUInstallDirs.patch    |  26 +++
 sci-ml/ollama/files/ollama.confd                   |  11 +
 sci-ml/ollama/files/ollama.init                    |   9 +
 sci-ml/ollama/files/ollama.service                 |  14 ++
 sci-ml/ollama/metadata.xml                         |  13 ++
 sci-ml/ollama/ollama-9999.ebuild                   | 259 +++++++++++++++++++++
 6 files changed, 332 insertions(+)

diff --git a/sci-ml/ollama/files/ollama-0.6.3-use-GNUInstallDirs.patch 
b/sci-ml/ollama/files/ollama-0.6.3-use-GNUInstallDirs.patch
new file mode 100644
index 0000000..5a69109
--- /dev/null
+++ b/sci-ml/ollama/files/ollama-0.6.3-use-GNUInstallDirs.patch
@@ -0,0 +1,26 @@
+From 3e250053458fa33c97023ff8943e8c5daa4beeca Mon Sep 17 00:00:00 2001
+From: Paul Zander <[email protected]>
+Date: Mon, 31 Mar 2025 18:07:47 +0200
+Subject: [PATCH] use GNUInstallDirs
+
+Signed-off-by: Paul Zander <[email protected]>
+
+diff --git a/CMakeLists.txt b/CMakeLists.txt
+index 9e107fe..79407ea 100644
+--- a/CMakeLists.txt
++++ b/CMakeLists.txt
+@@ -35,8 +35,9 @@ if (CMAKE_OSX_ARCHITECTURES MATCHES "x86_64")
+     set(CMAKE_INSTALL_RPATH "@loader_path")
+ endif()
+ 
+-set(OLLAMA_BUILD_DIR ${CMAKE_BINARY_DIR}/lib/ollama)
+-set(OLLAMA_INSTALL_DIR ${CMAKE_INSTALL_PREFIX}/lib/ollama)
++include(GNUInstallDirs)
++set(OLLAMA_BUILD_DIR ${CMAKE_BINARY_DIR}/${CMAKE_INSTALL_LIBDIR}/ollama)
++set(OLLAMA_INSTALL_DIR ${CMAKE_INSTALL_FULL_LIBDIR}/ollama)
+ 
+ set(CMAKE_RUNTIME_OUTPUT_DIRECTORY         ${OLLAMA_BUILD_DIR})
+ set(CMAKE_RUNTIME_OUTPUT_DIRECTORY_DEBUG   ${OLLAMA_BUILD_DIR})
+-- 
+2.49.0
+

diff --git a/sci-ml/ollama/files/ollama.confd b/sci-ml/ollama/files/ollama.confd
new file mode 100644
index 0000000..fccb3db
--- /dev/null
+++ b/sci-ml/ollama/files/ollama.confd
@@ -0,0 +1,11 @@
+# Ollama allows cross-origin requests from 127.0.0.1 and 0.0.0.0 by default.
+# Additional origins can be configured with OLLAMA_ORIGINS.
+# export OLLAMA_ORIGINS="<ip>"
+
+# log to syslog
+# output_logger="logger -t \"$RC_SVCNAME\" -p daemon.info"
+# error_logger="logger -t \"$RC_SVCNAME\" -p daemon.err"
+
+# log to file
+output_log="/var/log/ollama/ollama.log"
+error_log="/var/log/ollama/ollama.log"

diff --git a/sci-ml/ollama/files/ollama.init b/sci-ml/ollama/files/ollama.init
new file mode 100644
index 0000000..50e2a8d
--- /dev/null
+++ b/sci-ml/ollama/files/ollama.init
@@ -0,0 +1,9 @@
+#!/sbin/openrc-run
+# Copyright 1999-2025 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+supervisor=supervise-daemon
+description="Ollama Service"
+command="/usr/bin/ollama"
+command_args="serve"
+command_user="ollama:ollama"

diff --git a/sci-ml/ollama/files/ollama.service 
b/sci-ml/ollama/files/ollama.service
new file mode 100644
index 0000000..63bd355
--- /dev/null
+++ b/sci-ml/ollama/files/ollama.service
@@ -0,0 +1,14 @@
+[Unit]
+Description=Ollama Service
+After=network-online.target
+
+[Service]
+ExecStart=/usr/bin/ollama serve
+User=ollama
+Group=ollama
+Restart=always
+RestartSec=3
+Environment="PATH=$PATH"
+
+[Install]
+WantedBy=multi-user.target

diff --git a/sci-ml/ollama/metadata.xml b/sci-ml/ollama/metadata.xml
new file mode 100644
index 0000000..924f971
--- /dev/null
+++ b/sci-ml/ollama/metadata.xml
@@ -0,0 +1,13 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE pkgmetadata SYSTEM "https://www.gentoo.org/dtd/metadata.dtd">
+<pkgmetadata>
+       <!-- maintainer-needed -->
+       <use>
+               <flag name="cuda">Enable NVIDIA CUDA support</flag>
+               <flag name="mkl">Use <pkg>sci-libs/mkl</pkg> for blas, lapack 
and sparse blas routines</flag>
+               <flag name="rocm">Enable ROCm gpu computing support</flag>
+       </use>
+       <upstream>
+               <remote-id type="github">ollama/ollama</remote-id>
+       </upstream>
+</pkgmetadata>

diff --git a/sci-ml/ollama/ollama-9999.ebuild b/sci-ml/ollama/ollama-9999.ebuild
new file mode 100644
index 0000000..a5af406
--- /dev/null
+++ b/sci-ml/ollama/ollama-9999.ebuild
@@ -0,0 +1,259 @@
+# Copyright 2024-2025 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+EAPI=8
+
+# supports ROCM/HIP >=5.5, but we define 6.1 due to the eclass
+ROCM_VERSION=6.1
+inherit cuda rocm
+inherit cmake
+inherit go-module systemd toolchain-funcs
+
+DESCRIPTION="Get up and running with Llama 3, Mistral, Gemma, and other language models"
+HOMEPAGE="https://ollama.com"
+
+if [[ ${PV} == *9999* ]]; then
+       inherit git-r3
+       EGIT_REPO_URI="https://github.com/ollama/ollama.git"
+else
+       SRC_URI="
+               https://github.com/ollama/${PN}/archive/refs/tags/v${PV}.tar.gz 
-> ${P}.gh.tar.gz
+       "
+       KEYWORDS="~amd64"
+fi
+
+LICENSE="MIT"
+SLOT="0"
+
+X86_CPU_FLAGS=(
+       avx
+       f16c
+       avx2
+       fma3
+       avx512f
+       avx512vbmi
+       avx512_vnni
+       avx512_bf16
+)
+
+CPU_FLAGS=( "${X86_CPU_FLAGS[@]/#/cpu_flags_x86_}" )
+IUSE="${CPU_FLAGS[*]} cuda blas mkl rocm"
+
+COMMON_DEPEND="
+       cuda? (
+               dev-util/nvidia-cuda-toolkit:=
+       )
+       blas? (
+               !mkl? (
+                       virtual/blas
+               )
+               mkl? (
+                       sci-libs/mkl
+               )
+       )
+       rocm? (
+               >=sci-libs/hipBLAS-5.5:=[${ROCM_USEDEP}]
+       )
+"
+
+DEPEND="
+       ${COMMON_DEPEND}
+       >=dev-lang/go-1.23.4
+"
+
+RDEPEND="
+       ${COMMON_DEPEND}
+"
+
+PATCHES=(
+       "${FILESDIR}/${PN}-0.6.3-use-GNUInstallDirs.patch"
+)
+
+src_unpack() {
+       if [[ "${PV}" == *9999* ]]; then
+               git-r3_src_unpack
+               go-module_live_vendor
+       else
+               go-module_src_unpack
+       fi
+}
+
+src_prepare() {
+       cmake_src_prepare
+
+       sed \
+               -e "/set(GGML_CCACHE/s/ON/OFF/g" \
+               -e "/PRE_INCLUDE_REGEXES.*cu/d" \
+               -e "/PRE_INCLUDE_REGEXES.*hip/d" \
+               -i CMakeLists.txt || die sed
+
+       sed \
+               -e "s/ -O3//g" \
+               -i ml/backend/ggml/ggml/src/ggml-cpu/cpu.go || die sed
+
+       # fix library location
+       sed \
+               -e "s#lib/ollama#$(get_libdir)/ollama#g" \
+               -i CMakeLists.txt || die sed
+
+       sed \
+               -e "s/\"..\", \"lib\"/\"..\", \"$(get_libdir)\"/" \
+               -e "s#\"lib/ollama\"#\"$(get_libdir)/ollama\"#" \
+               -i \
+                       ml/backend/ggml/ggml/src/ggml.go \
+                       discover/path.go \
+               || die
+
+       if use amd64; then
+               if ! use cpu_flags_x86_avx; then
+                       sed -e "/ggml_add_cpu_backend_variant(sandybridge/s/^/# 
/g" -i ml/backend/ggml/ggml/src/CMakeLists.txt || die
+                       # AVX)
+               fi
+               if
+                       ! use cpu_flags_x86_avx ||
+                       ! use cpu_flags_x86_f16c ||
+                       ! use cpu_flags_x86_avx2 ||
+                       ! use cpu_flags_x86_fma3; then
+                       sed -e "/ggml_add_cpu_backend_variant(haswell/s/^/# /g" 
-i ml/backend/ggml/ggml/src/CMakeLists.txt || die
+                       # AVX F16C AVX2 FMA
+               fi
+               if
+                       ! use cpu_flags_x86_avx ||
+                       ! use cpu_flags_x86_f16c ||
+                       ! use cpu_flags_x86_avx2 ||
+                       ! use cpu_flags_x86_fma3 ||
+                       ! use cpu_flags_x86_avx512f; then
+                       sed -e "/ggml_add_cpu_backend_variant(skylakex/s/^/# 
/g" -i ml/backend/ggml/ggml/src/CMakeLists.txt ||  die
+                       # AVX F16C AVX2 FMA AVX512
+               fi
+               if
+                       ! use cpu_flags_x86_avx ||
+                       ! use cpu_flags_x86_f16c ||
+                       ! use cpu_flags_x86_avx2 ||
+                       ! use cpu_flags_x86_fma3 ||
+                       ! use cpu_flags_x86_avx512f ||
+                       ! use cpu_flags_x86_avx512vbmi ||
+                       ! use cpu_flags_x86_avx512_vnni; then
+                       sed -e "/ggml_add_cpu_backend_variant(icelake/s/^/# /g" 
-i ml/backend/ggml/ggml/src/CMakeLists.txt || die
+                       # AVX F16C AVX2 FMA AVX512 AVX512_VBMI AVX512_VNNI
+               fi
+               if
+                       ! use cpu_flags_x86_avx ||
+                       ! use cpu_flags_x86_f16c ||
+                       ! use cpu_flags_x86_avx2 ||
+                       ! use cpu_flags_x86_fma3 ||
+                       ! use cpu_flags_x86_avx512_vnni; then
+                       sed -e "/ggml_add_cpu_backend_variant(alderlake/s/^/# 
/g" -i ml/backend/ggml/ggml/src/CMakeLists.txt || die
+                       # AVX F16C AVX2 FMA AVX_VNNI
+               fi
+
+               if
+                       ! use cpu_flags_x86_avx ||
+                       ! use cpu_flags_x86_f16c ||
+                       ! use cpu_flags_x86_avx2 ||
+                       ! use cpu_flags_x86_fma3 ||
+                       ! use cpu_flags_x86_avx512f ||
+                       ! use cpu_flags_x86_avx512vbmi ||
+                       ! use cpu_flags_x86_avx512_vnni ||
+                       ! use cpu_flags_x86_avx512_bf16 ; then
+                       sed -e 
"/ggml_add_cpu_backend_variant(sapphirerapids/s/^/# /g" -i 
ml/backend/ggml/ggml/src/CMakeLists.txt || die
+                       #AVX F16C AVX2 FMA AVX512 AVX512_VBMI AVX512_VNNI 
AVX512_BF16
+               fi
+               : # ml/backend/ggml/ggml/src/CMakeLists.txt
+       fi
+
+       # default
+       # return
+       if use cuda; then
+               cuda_src_prepare
+       fi
+
+       if use rocm; then
+               # --hip-version gets appended to the compile flags which isn't 
a known flag.
+               # This causes rocm builds to fail because 
-Wunused-command-line-argument is turned on.
+               # Use nuclear option to fix this.
+               # Disable -Werror's from go modules.
+               # Bug fix: "-name .go" only matches files literally named ".go";
+               # the glob "*.go" is needed to strip -Werror from all Go sources.
+               find "${S}" -name "*.go" -exec sed -i "s/ -Werror / /g" {} + || 
die
+       fi
+}
+
+src_configure() {
+       local mycmakeargs=(
+               -DGGML_CCACHE="no"
+               -DGGML_BLAS="$(usex blas)"
+       )
+
+       if use blas; then
+               if use mkl; then
+                       mycmakeargs+=(
+                               -DGGML_BLAS_VENDOR="Intel"
+                       )
+               else
+                       mycmakeargs+=(
+                               -DGGML_BLAS_VENDOR="Generic"
+                       )
+               fi
+       fi
+       if use cuda; then
+               local -x CUDAHOSTCXX CUDAHOSTLD
+               CUDAHOSTCXX="$(cuda_gccdir)"
+               CUDAHOSTLD="$(tc-getCXX)"
+
+               cuda_add_sandbox -w
+               addpredict "/dev/char/"
+       else
+               mycmakeargs+=(
+                       -DCMAKE_CUDA_COMPILER="NOTFOUND"
+               )
+       fi
+
+       if use rocm; then
+               mycmakeargs+=(
+                       -DCMAKE_HIP_ARCHITECTURES="$(get_amdgpu_flags)"
+                       -DCMAKE_HIP_PLATFORM="amd"
+                       # ollama doesn't honor the default cmake options
+                       -DAMDGPU_TARGETS="$(get_amdgpu_flags)"
+               )
+
+               local -x HIP_PATH="${ESYSROOT}/usr"
+
+               check_amdgpu
+       else
+               mycmakeargs+=(
+                       -DCMAKE_HIP_COMPILER="NOTFOUND"
+               )
+       fi
+
+       cmake_src_configure
+}
+
+src_compile() {
+       ego build
+
+       cmake_src_compile
+}
+
+src_install() {
+       dobin ollama
+
+       cmake_src_install
+
+       newinitd "${FILESDIR}/ollama.init" "${PN}"
+       newconfd "${FILESDIR}/ollama.confd" "${PN}"
+
+       systemd_dounit "${FILESDIR}/ollama.service"
+}
+
+pkg_postinst() {
+       if [[ -z ${REPLACING_VERSIONS} ]] ; then
+               einfo "Quick guide:"
+               einfo "\tollama serve"
+               einfo "\tollama run llama3:70b"
+               einfo
+               einfo "See available models at https://ollama.com/library"
+       fi
+
+       if use cuda ; then
+               einfo "When using cuda the user running ${PN} has to be in the 
video group or it won't detect devices."
+       fi
+}

Reply via email to