branch: externals/ellama
commit 1d0da9110546fb8e415462691ee9d1008203914d
Author: Sergey Kostyaev <sskosty...@gmail.com>
Commit: Sergey Kostyaev <sskosty...@gmail.com>

    Improve ellama installation and configuration
    
    Updated the README.org file to provide clearer instructions on
    installing and configuring the ellama package. The default provider
    now falls back to the first available ollama model, and users can
    customize the configuration by specifying a different model or LLM
    provider.
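    
    As an illustration, a minimal customization of the new default could
    look like this; the llm-ollama backend and the model name are only
    example choices, not values fixed by this commit:
    
    #+BEGIN_SRC emacs-lisp
      (use-package ellama
        :init
        ;; any llm provider works here; ollama with qwen2.5:3b is just an
        ;; example of overriding the new nil default
        (require 'llm-ollama)
        (setopt ellama-provider
                (make-llm-ollama :chat-model "qwen2.5:3b")))
    #+END_SRC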
    
    Modified ellama.el to remove the hardcoded zephyr model and add
    functions to dynamically retrieve available ollama models. This allows
    for more flexible configuration without requiring manual updates when
    new models are added.
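    
    For reference, the new fallback used when ~ellama-provider~ is nil
    roughly amounts to the sketch below; all identifiers are defined in
    the diff that follows, and the form evaluates to nil when the ollama
    binary or a chat model is missing:
    
    #+BEGIN_SRC emacs-lisp
      ;; rough shape of the fallback: wrap the first locally installed
      ;; non-embedding model (taken from `ollama ls') in an llm provider
      (when (executable-find ellama-ollama-binary)
        (require 'llm-ollama)
        (make-llm-ollama
         :chat-model (car (ellama-get-ollama-chat-model-names))))
    #+END_SRC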
    
    Fix #227
---
 README.org | 15 +++++++--------
 ellama.el  | 54 ++++++++++++++++++++++++++++++++++++++++--------------
 2 files changed, 47 insertions(+), 22 deletions(-)

diff --git a/README.org b/README.org
index 209f3b34e2..aa2f5a1580 100644
--- a/README.org
+++ b/README.org
@@ -18,19 +18,18 @@ Assistant". Previous sentence was written by Ellama itself.
 
 ** Installation
 
-Just ~M-x~ ~package-install~ @@html:<kbd>@@Enter@@html:</kbd>@@ ~ellama~
-@@html:<kbd>@@Enter@@html:</kbd>@@. By default it uses
-[[https://github.com/jmorganca/ollama][ollama]] provider and
-[[https://ollama.ai/library/zephyr][zephyr]] model. If you ok with it,
-you need to install [[https://github.com/jmorganca/ollama][ollama]]
-and pull [[https://ollama.ai/library/zephyr][zephyr]] like this:
+Just ~M-x~ ~package-install~ @@html:<kbd>@@Enter@@html:</kbd>@@
+~ellama~ @@html:<kbd>@@Enter@@html:</kbd>@@. By default it uses [[https://github.com/jmorganca/ollama][ollama]]
+provider. If you ok with it, you need to install [[https://github.com/jmorganca/ollama][ollama]] and pull
+[[https://ollama.com/models][any ollama model]] like this:
 
 #+BEGIN_SRC shell
-  ollama pull zephyr
+  ollama pull qwen2.5:3b
 #+END_SRC
 
 You can use ~ellama~ with other model or other llm provider.
-In that case you should customize ellama configuration like this:
+Without any configuration, the first available ollama model will be used.
+You can customize ellama configuration like this:
 
 #+BEGIN_SRC  emacs-lisp
 (use-package ellama
diff --git a/ellama.el b/ellama.el
index dcb25ea3b1..dd07786a49 100644
--- a/ellama.el
+++ b/ellama.el
@@ -68,12 +68,7 @@
   :group 'ellama
   :type 'string)
 
-(defcustom ellama-provider
-  (progn
-    (declare-function make-llm-ollama "llm-ollama")
-    (require 'llm-ollama)
-    (make-llm-ollama
-     :chat-model "zephyr" :embedding-model "zephyr"))
+(defcustom ellama-provider nil
   "Backend LLM provider."
   :group 'ellama
   :type '(sexp :validate llm-standard-provider-p))
@@ -1815,7 +1810,9 @@ failure (with BUFFER current).
                          ellama--current-session))))
         (provider (if session
                       (ellama-session-provider session)
-                    (or (plist-get args :provider) ellama-provider)))
+                    (or (plist-get args :provider)
+                        ellama-provider
+                        (ellama-get-first-ollama-chat-model))))
         (buffer (or (plist-get args :buffer)
                     (when (ellama-session-p session)
                       (ellama-get-session-buffer (ellama-session-id session)))
@@ -2180,7 +2177,8 @@ the full response text when the request completes (with BUFFER current)."
                              (completing-read "Select model: " variants)
                              providers nil nil #'string=))
                     (or (plist-get args :provider)
-                        ellama-provider)))
+                        ellama-provider
+                        (ellama-get-first-ollama-chat-model))))
         (session (or (plist-get args :session)
                      (if (or create-session
                              current-prefix-arg
@@ -2727,17 +2725,45 @@ Call CALLBACK on result list of strings.  ARGS contains keys for fine control.
      (lambda (err)
        (user-error err)))))
 
+(defun ellama-get-ollama-model-names ()
+  "Get ollama model names."
+  (mapcar (lambda (s)
+           (car (split-string s)))
+         (seq-drop
+          (process-lines
+           (executable-find ellama-ollama-binary)
+           "ls")
+          ;; skip header line
+          1)))
+
+(defun ellama-embedding-model-p (name)
+  "Check if NAME is an embedding model."
+  (when-let ((model (llm-models-match name)))
+    (not (not (member 'embedding (llm-model-capabilities model))))))
+
+(defun ellama-get-ollama-chat-model-names ()
+  "Get ollama chat model names."
+  (cl-remove-if #'ellama-embedding-model-p (ellama-get-ollama-model-names)))
+
+(defun ellama-get-ollama-embedding-model-names ()
+  "Get ollama embedding model names."
+  (cl-remove-if-not #'ellama-embedding-model-p (ellama-get-ollama-model-names)))
+
+(defun ellama-get-first-ollama-chat-model ()
+  "Get first available ollama model."
+  (declare-function make-llm-ollama "ext:llm-ollama")
+  (when (executable-find ellama-ollama-binary)
+    (require 'llm-ollama)
+    (make-llm-ollama
+     :chat-model
+     (car (ellama-get-ollama-chat-model-names)))))
+
 (defun ellama-get-ollama-model-name ()
   "Get ollama model name from installed locally."
   (interactive)
   (completing-read
    "Select ollama model: "
-   (mapcar (lambda (s)
-            (car (split-string s)))
-          (seq-drop
-           (process-lines
-            (executable-find ellama-ollama-binary) "ls")
-           1))))
+   (ellama-get-ollama-model-names)))
 
 (defun ellama-get-ollama-local-model ()
   "Return llm provider for interactively selected ollama model."
