branch: externals/ellama
commit a630643fe9b722ca78324b0f7b817680d5b4409d
Merge: d6cdcbcf69 1d0da91105
Author: Sergey Kostyaev <s-kosty...@users.noreply.github.com>
Commit: GitHub <nore...@github.com>
    Merge pull request #226 from s-kostyaev/refactoring

    Remove default ellama provider and make it work with zero configuration
---
 README.org            | 15 ++++++------
 docs/instructions.org |  3 +++
 ellama.el             | 66 +++++++++++++++++++++++++++++++++++++--------------
 3 files changed, 58 insertions(+), 26 deletions(-)

diff --git a/README.org b/README.org
index 209f3b34e2..aa2f5a1580 100644
--- a/README.org
+++ b/README.org
@@ -18,19 +18,18 @@ Assistant". Previous sentence was written by Ellama itself.
 
 ** Installation
 
-Just ~M-x~ ~package-install~ @@html:<kbd>@@Enter@@html:</kbd>@@ ~ellama~
-@@html:<kbd>@@Enter@@html:</kbd>@@. By default it uses
-[[https://github.com/jmorganca/ollama][ollama]] provider and
-[[https://ollama.ai/library/zephyr][zephyr]] model. If you ok with it,
-you need to install [[https://github.com/jmorganca/ollama][ollama]]
-and pull [[https://ollama.ai/library/zephyr][zephyr]] like this:
+Just ~M-x~ ~package-install~ @@html:<kbd>@@Enter@@html:</kbd>@@
+~ellama~ @@html:<kbd>@@Enter@@html:</kbd>@@. By default it uses the [[https://github.com/jmorganca/ollama][ollama]]
+provider. If you're OK with that, you only need to install [[https://github.com/jmorganca/ollama][ollama]] and pull
+[[https://ollama.com/models][any ollama model]] like this:
 
 #+BEGIN_SRC shell
-  ollama pull zephyr
+  ollama pull qwen2.5:3b
 #+END_SRC
 
 You can use ~ellama~ with other model or other llm provider.
-In that case you should customize ellama configuration like this:
+Without any configuration, the first available ollama model will be used.
+You can customize ellama configuration like this:
 
 #+BEGIN_SRC emacs-lisp
   (use-package ellama
diff --git a/docs/instructions.org b/docs/instructions.org
index 0a123e3093..21b393f887 100644
--- a/docs/instructions.org
+++ b/docs/instructions.org
@@ -7,3 +7,6 @@ This project is written in emacs lisp.
 - Write simple and readable code.
 - Always use ~ert~ for tests.
 - All function names and custom variable names should start with ~ellama-~.
+- Function docstrings should contain all arguments in uppercase, like SOME-ARG.
+- In docstrings, if there are multiple sentences on one line,
+  sentences should be separated by two spaces.
diff --git a/ellama.el b/ellama.el
index 8defd63a87..dd07786a49 100644
--- a/ellama.el
+++ b/ellama.el
@@ -68,12 +68,7 @@
   :group 'ellama
   :type 'string)
 
-(defcustom ellama-provider
-  (progn
-    (declare-function make-llm-ollama "llm-ollama")
-    (require 'llm-ollama)
-    (make-llm-ollama
-     :chat-model "zephyr" :embedding-model "zephyr"))
+(defcustom ellama-provider nil
   "Backend LLM provider."
   :group 'ellama
   :type '(sexp :validate llm-standard-provider-p))
@@ -1815,7 +1810,9 @@ failure (with BUFFER current).
                           ellama--current-session))))
          (provider (if session
                        (ellama-session-provider session)
-                     (or (plist-get args :provider) ellama-provider)))
+                     (or (plist-get args :provider)
+                         ellama-provider
+                         (ellama-get-first-ollama-chat-model))))
          (buffer (or (plist-get args :buffer)
                      (when (ellama-session-p session)
                        (ellama-get-session-buffer (ellama-session-id session)))
@@ -1875,10 +1872,7 @@ failure (with BUFFER current).
              (undo-amalgamate-change-group ellama--change-group)))))
       (setq ellama--change-group (prepare-change-group))
       (activate-change-group ellama--change-group)
-      (set-marker start point)
-      (set-marker end point)
-      (set-marker-insertion-type start nil)
-      (set-marker-insertion-type end t)
+      (ellama-set-markers start end point)
       (spinner-start ellama-spinner-type)
       (let ((request (llm-chat-streaming provider
                                          llm-prompt
@@ -1923,6 +1917,13 @@
       (with-current-buffer buffer
         (setq ellama--current-request request)))))))
 
+(defun ellama-set-markers (start end point)
+  "Set markers for START and END positions at POINT."
+  (set-marker start point)
+  (set-marker end point)
+  (set-marker-insertion-type start nil)
+  (set-marker-insertion-type end t))
+
 (defun ellama-chain (initial-prompt forms &optional acc)
   "Call chain of FORMS on INITIAL-PROMPT.
 ACC will collect responses in reverse order (previous answer will be on top).
@@ -2176,7 +2177,8 @@ the full response text when the request completes (with BUFFER current)."
                                        (completing-read "Select model: " variants)
                                        providers nil nil #'string=))
                          (or (plist-get args :provider)
-                             ellama-provider)))
+                             ellama-provider
+                             (ellama-get-first-ollama-chat-model))))
          (session (or (plist-get args :session)
                       (if (or create-session
                               current-prefix-arg
@@ -2723,17 +2725,45 @@ Call CALLBACK on result list of strings.  ARGS contains keys for fine control.
    (lambda (err)
      (user-error err)))))
 
+(defun ellama-get-ollama-model-names ()
+  "Get ollama model names."
+  (mapcar (lambda (s)
+            (car (split-string s)))
+          (seq-drop
+           (process-lines
+            (executable-find ellama-ollama-binary)
+            "ls")
+           ;; skip header line
+           1)))
+
+(defun ellama-embedding-model-p (name)
+  "Check if NAME is an embedding model."
+  (when-let ((model (llm-models-match name)))
+    (not (not (member 'embedding (llm-model-capabilities model))))))
+
+(defun ellama-get-ollama-chat-model-names ()
+  "Get ollama chat model names."
+  (cl-remove-if #'ellama-embedding-model-p (ellama-get-ollama-model-names)))
+
+(defun ellama-get-ollama-embedding-model-names ()
+  "Get ollama embedding model names."
+  (cl-remove-if-not #'ellama-embedding-model-p (ellama-get-ollama-model-names)))
+
+(defun ellama-get-first-ollama-chat-model ()
+  "Get the first available ollama chat model."
+  (declare-function make-llm-ollama "ext:llm-ollama")
+  (when (executable-find ellama-ollama-binary)
+    (require 'llm-ollama)
+    (make-llm-ollama
+     :chat-model
+     (car (ellama-get-ollama-chat-model-names)))))
+
 (defun ellama-get-ollama-model-name ()
   "Get ollama model name from installed locally."
   (interactive)
   (completing-read
    "Select ollama model: "
-   (mapcar (lambda (s)
-             (car (split-string s)))
-           (seq-drop
-            (process-lines
-             (executable-find ellama-ollama-binary) "ls")
-            1))))
+   (ellama-get-ollama-model-names)))
 
 (defun ellama-get-ollama-local-model ()
   "Return llm provider for interactively selected ollama model."
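
A note on the provider fallback: both ellama-stream and the model-selection
path now resolve the provider as an explicit :provider argument first, then
the user's ellama-provider customization (nil by default after this patch),
then the first locally installed ollama chat model. The sketch below mirrors
that chain; my/ellama-resolve-provider is a hypothetical name used only for
illustration and is not part of ellama.

  ;; Illustrative sketch only: mirrors the (or ...) fallback chain that
  ;; the patch adds to `ellama-stream'.  `my/ellama-resolve-provider' is
  ;; a hypothetical name, not part of ellama.
  (require 'ellama)

  (defun my/ellama-resolve-provider (&rest args)
    "Resolve an LLM provider the way the patched `ellama-stream' does.
  ARGS is a plist.  An explicit :provider wins, then the user's
  `ellama-provider' customization, then the first local ollama chat
  model.  May return nil when ollama is not installed."
    (or (plist-get args :provider)
        ellama-provider
        (ellama-get-first-ollama-chat-model)))

  ;; With no customization and ollama installed, this yields a provider
  ;; for the first local chat model:
  ;; (my/ellama-resolve-provider)

Because ellama-get-first-ollama-chat-model returns nil when the ollama
binary cannot be found, the chain can still come up empty on machines
without ollama, where setting ellama-provider remains necessary.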
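
The extracted ellama-set-markers helper is behavior-preserving, but the
asymmetric insertion types are the important detail: a marker with insertion
type nil stays put when text is inserted at its position, while insertion
type t makes the marker advance, so streamed output always ends up bracketed
between START and END. A self-contained illustration of that stock Emacs
marker behavior (not ellama code):

  ;; Demonstrates the marker insertion types used by `ellama-set-markers'.
  ;; Plain Emacs behavior; evaluate in any Emacs session.
  (with-temp-buffer
    (insert "prompt: ")
    (let ((start (make-marker))
          (end (make-marker)))
      (set-marker start (point))
      (set-marker end (point))
      (set-marker-insertion-type start nil) ; stays before inserted text
      (set-marker-insertion-type end t)     ; advances past inserted text
      (insert "streamed answer")            ; simulate arriving LLM output
      ;; START is still at the beginning of the insertion and END has
      ;; advanced past it, so the markers bracket exactly the new text.
      (buffer-substring start end)))        ; => "streamed answer"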
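
Finally, a usage sketch for the new discovery helpers, assuming ellama with
this patch is loaded and the ollama binary is installed; the model names in
the comments are only examples:

  ;; Usage sketch for the new model-discovery helpers (assumptions: this
  ;; patch is applied, ellama is loadable, ollama is installed).
  (require 'ellama)

  (when (executable-find ellama-ollama-binary)
    ;; All locally installed models, parsed from `ollama ls' with the
    ;; header line skipped, e.g. ("qwen2.5:3b" "nomic-embed-text").
    (ellama-get-ollama-model-names)
    ;; Chat models only: `ellama-embedding-model-p' filters out models
    ;; whose `llm-model-capabilities' include 'embedding.
    (ellama-get-ollama-chat-model-names)
    ;; An llm-ollama provider built from the first chat model name.
    (ellama-get-first-ollama-chat-model))

The (not (not ...)) in ellama-embedding-model-p simply normalizes the
member result to t or nil, so the predicate returns a clean boolean.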