branch: externals/ellama
commit aad5adadc5b6937c520a8781af5986f5cf5630ea
Author: Sergey Kostyaev <sskosty...@gmail.com>
Commit: Sergey Kostyaev <sskosty...@gmail.com>

    Add ellama context header line mode
    
    Added `ellama-context-header-line-mode` and its globalized version
    `ellama-context-header-line-global-mode` to display context-specific
    information in the header line. Also introduced a new configuration
    option `ellama-spinner-enabled` to enable the spinner during text
    generation. Updated the `README.org` file with details on these changes.
    
    Fix #233
---
 README.org | 174 +++++++++++++++++++++++++++++++++++++------------------------
 ellama.el  | 118 ++++++++++++++++++++++++++++-------------
 2 files changed, 188 insertions(+), 104 deletions(-)

diff --git a/README.org b/README.org
index aa2f5a1580..1ee2cf5792 100644
--- a/README.org
+++ b/README.org
@@ -32,72 +32,74 @@ Without any configuration, the first available ollama model 
will be used.
 You can customize ellama configuration like this:
 
 #+BEGIN_SRC  emacs-lisp
-(use-package ellama
-  :ensure t
-  :bind ("C-c e" . ellama-transient-main-menu)
-  :init
-  ;; setup key bindings
-  ;; (setopt ellama-keymap-prefix "C-c e")
-  ;; language you want ellama to translate to
-  (setopt ellama-language "German")
-  ;; could be llm-openai for example
-  (require 'llm-ollama)
-  (setopt ellama-provider
-         (make-llm-ollama
-          ;; this model should be pulled to use it
-          ;; value should be the same as you print in terminal during pull
-          :chat-model "llama3:8b-instruct-q8_0"
-          :embedding-model "nomic-embed-text"
-          :default-chat-non-standard-params '(("num_ctx" . 8192))))
-  (setopt ellama-summarization-provider
-         (make-llm-ollama
-          :chat-model "qwen2.5:3b"
-          :embedding-model "nomic-embed-text"
-          :default-chat-non-standard-params '(("num_ctx" . 32768))))
-  (setopt ellama-coding-provider
-         (make-llm-ollama
-          :chat-model "qwen2.5-coder:3b"
-          :embedding-model "nomic-embed-text"
-          :default-chat-non-standard-params '(("num_ctx" . 32768))))
-  ;; Predefined llm providers for interactive switching.
-  ;; You shouldn't add ollama providers here - it can be selected interactively
-  ;; without it. It is just example.
-  (setopt ellama-providers
-         '(("zephyr" . (make-llm-ollama
-                        :chat-model "zephyr:7b-beta-q6_K"
-                        :embedding-model "zephyr:7b-beta-q6_K"))
-           ("mistral" . (make-llm-ollama
-                         :chat-model "mistral:7b-instruct-v0.2-q6_K"
-                         :embedding-model "mistral:7b-instruct-v0.2-q6_K"))
-           ("mixtral" . (make-llm-ollama
-                         :chat-model "mixtral:8x7b-instruct-v0.1-q3_K_M-4k"
-                         :embedding-model 
"mixtral:8x7b-instruct-v0.1-q3_K_M-4k"))))
-  ;; Naming new sessions with llm
-  (setopt ellama-naming-provider
-         (make-llm-ollama
-          :chat-model "llama3:8b-instruct-q8_0"
-          :embedding-model "nomic-embed-text"
-          :default-chat-non-standard-params '(("stop" . ("\n")))))
-  (setopt ellama-naming-scheme 'ellama-generate-name-by-llm)
-  ;; Translation llm provider
-  (setopt ellama-translation-provider
-         (make-llm-ollama
-          :chat-model "qwen2.5:3b"
-          :embedding-model "nomic-embed-text"
-          :default-chat-non-standard-params
-          '(("num_ctx" . 32768))))
-  (setopt ellama-extraction-provider (make-llm-ollama
-                                     :chat-model 
"qwen2.5-coder:7b-instruct-q8_0"
-                                     :embedding-model "nomic-embed-text"
-                                     :default-chat-non-standard-params
-                                     '(("num_ctx" . 32768))))
-  ;; customize display buffer behaviour
-  ;; see ~(info "(elisp) Buffer Display Action Functions")~
-  (setopt ellama-chat-display-action-function #'display-buffer-full-frame)
-  (setopt ellama-instant-display-action-function #'display-buffer-at-bottom)
-  :config
-  ;; send last message in chat buffer with C-c C-c
-  (add-hook 'org-ctrl-c-ctrl-c-hook #'ellama-chat-send-last-message))
+  (use-package ellama
+    :ensure t
+    :bind ("C-c e" . ellama-transient-main-menu)
+    :init
+    ;; setup key bindings
+    ;; (setopt ellama-keymap-prefix "C-c e")
+    ;; language you want ellama to translate to
+    (setopt ellama-language "German")
+    ;; could be llm-openai for example
+    (require 'llm-ollama)
+    (setopt ellama-provider
+         (make-llm-ollama
+          ;; this model should be pulled to use it
+          ;; value should be the same as you print in terminal during pull
+          :chat-model "llama3:8b-instruct-q8_0"
+          :embedding-model "nomic-embed-text"
+          :default-chat-non-standard-params '(("num_ctx" . 8192))))
+    (setopt ellama-summarization-provider
+         (make-llm-ollama
+          :chat-model "qwen2.5:3b"
+          :embedding-model "nomic-embed-text"
+          :default-chat-non-standard-params '(("num_ctx" . 32768))))
+    (setopt ellama-coding-provider
+         (make-llm-ollama
+          :chat-model "qwen2.5-coder:3b"
+          :embedding-model "nomic-embed-text"
+          :default-chat-non-standard-params '(("num_ctx" . 32768))))
+    ;; Predefined llm providers for interactive switching.
+    ;; You shouldn't add ollama providers here - it can be selected 
interactively
+    ;; without it. It is just example.
+    (setopt ellama-providers
+         '(("zephyr" . (make-llm-ollama
+                        :chat-model "zephyr:7b-beta-q6_K"
+                        :embedding-model "zephyr:7b-beta-q6_K"))
+           ("mistral" . (make-llm-ollama
+                         :chat-model "mistral:7b-instruct-v0.2-q6_K"
+                         :embedding-model "mistral:7b-instruct-v0.2-q6_K"))
+           ("mixtral" . (make-llm-ollama
+                         :chat-model "mixtral:8x7b-instruct-v0.1-q3_K_M-4k"
+                         :embedding-model 
"mixtral:8x7b-instruct-v0.1-q3_K_M-4k"))))
+    ;; Naming new sessions with llm
+    (setopt ellama-naming-provider
+         (make-llm-ollama
+          :chat-model "llama3:8b-instruct-q8_0"
+          :embedding-model "nomic-embed-text"
+          :default-chat-non-standard-params '(("stop" . ("\n")))))
+    (setopt ellama-naming-scheme 'ellama-generate-name-by-llm)
+    ;; Translation llm provider
+    (setopt ellama-translation-provider
+         (make-llm-ollama
+          :chat-model "qwen2.5:3b"
+          :embedding-model "nomic-embed-text"
+          :default-chat-non-standard-params
+          '(("num_ctx" . 32768))))
+    (setopt ellama-extraction-provider (make-llm-ollama
+                                     :chat-model 
"qwen2.5-coder:7b-instruct-q8_0"
+                                     :embedding-model "nomic-embed-text"
+                                     :default-chat-non-standard-params
+                                     '(("num_ctx" . 32768))))
+    ;; customize display buffer behaviour
+    ;; see ~(info "(elisp) Buffer Display Action Functions")~
+    (setopt ellama-chat-display-action-function #'display-buffer-full-frame)
+    (setopt ellama-instant-display-action-function #'display-buffer-at-bottom)
+    :config
+    ;; show ellama context in header line in all buffers
+    (ellama-context-header-line-global-mode +1)
+    ;; send last message in chat buffer with C-c C-c
+    (add-hook 'org-ctrl-c-ctrl-c-hook #'ellama-chat-send-last-message))
 #+END_SRC
 
 ** Commands
@@ -383,6 +385,7 @@ There are many supported providers: ~ollama~, ~open ai~, 
~vertex~,
 ~GPT4All~. For more information see 
[[https://elpa.gnu.org/packages/llm.html][llm documentation]].
 - ~ellama-providers~: association list of model llm providers with
   name as key.
+- ~ellama-spinner-enabled~: Enable spinner during text generation.
 - ~ellama-spinner-type~: Spinner type for ellama. Default type is
 ~progress-bar~.
 - ~ellama-ollama-binary~: Path to ollama binary.
@@ -427,7 +430,6 @@ argument generated text string.
 - ~ellama-context-poshandler~: Position handler for displaying context buffer.
   ~posframe-poshandler-frame-top-center~ will be used if not set.
 - ~ellama-context-border-width~: Border width for the context buffer.
-- ~ellama-context-element-padding-size~: Padding size for context elements.
 - ~ellama-session-remove-reasoning~: Remove internal reasoning from
   the session after ellama provide an answer. This can improve
   long-term communication with reasoning models. Enabled by default.
@@ -439,13 +441,49 @@ argument generated text string.
   ellama output to enhance the versatility of reasoning models across
   diverse applications.
 - ~ellama-context-posframe-enabled~: Enable showing posframe with
-  ellama context. Enabled by default.
+  ellama context.
 - ~ellama-manage-context-display-action-function~: Display action
   function for ~ellama-render-context~. Default value
   ~display-buffer-same-window~.
 - ~ellama-preview-context-element-display-action-function~: Display
   action function for ~ellama-preview-context-element~.
 
+** Minor modes
+
+*** ellama-context-header-line-mode
+
+*Description:*
+Toggle the Ellama Context header line mode. This minor mode updates the header 
line to display
+context-specific information.
+
+*Usage:*
+To enable or disable ~ellama-context-header-line-mode~, use the command:
+#+BEGIN_SRC emacs-lisp
+M-x ellama-context-header-line-mode
+#+END_SRC
+
+When enabled, this mode adds a hook to ~window-state-change-hook~ to update 
the header line whenever
+the window state changes. It also calls ~ellama-context-update-header-line~ to 
initialize the header
+line with context-specific information.
+
+When disabled, it removes the evaluation of ~(:eval (ellama-context-line))~ 
from
+~header-line-format~.
+
+*** ellama-context-header-line-global-mode
+
+*Description:*
+Globalized version of ~ellama-context-header-line-mode~. This mode ensures that
+~ellama-context-header-line-mode~ is enabled in all buffers.
+
+*Usage:*
+To enable or disable ~ellama-context-header-line-global-mode~, use the command:
+#+BEGIN_SRC emacs-lisp
+M-x ellama-context-header-line-global-mode
+#+END_SRC
+
+This globalized minor mode provides a convenient way to ensure that 
context-specific header line
+information is always available, regardless of the buffer being edited.
+
 ** Acknowledgments
 
 Thanks [[https://github.com/jmorganca][Jeffrey Morgan]] for excellent project 
[[https://github.com/jmorganca/ollama][ollama]]. This project
diff --git a/ellama.el b/ellama.el
index 20c92023ed..6ae9b28aab 100644
--- a/ellama.el
+++ b/ellama.el
@@ -5,7 +5,7 @@
 ;; Author: Sergey Kostyaev <sskosty...@gmail.com>
 ;; URL: http://github.com/s-kostyaev/ellama
 ;; Keywords: help local tools
-;; Package-Requires: ((emacs "28.1") (llm "0.22.0") (spinner "1.7.4") 
(transient "0.7") (compat "29.1") (posframe "1.4.0"))
+;; Package-Requires: ((emacs "28.1") (llm "0.22.0") (transient "0.7") (compat 
"29.1"))
 ;; Version: 1.2.5
 ;; SPDX-License-Identifier: GPL-3.0-or-later
 ;; Created: 8th Oct 2023
@@ -38,10 +38,8 @@
 (require 'eieio)
 (require 'llm)
 (require 'llm-provider-utils)
-(require 'spinner)
 (require 'transient)
 (require 'compat)
-(require 'posframe)
 (eval-when-compile (require 'rx))
 
 (defgroup ellama nil
@@ -116,13 +114,22 @@ Make reasoning models more useful for many cases."
   :type '(alist :key-type string
                :value-type (sexp :validate llm-standard-provider-p)))
 
+(defvar spinner-types)
+
 (defcustom ellama-spinner-type 'progress-bar
   "Spinner type for ellama."
   :group 'ellama
-  :type `(choice ,@(mapcar
-                   (lambda (type)
-                     `(const ,(car type)))
-                   spinner-types)))
+  :type `(choice ,@(if (boundp 'spinner-types)
+                      (mapcar
+                       (lambda (type)
+                         `(const ,(car type)))
+                       spinner-types)
+                    '(const progress-bar))))
+
+(defcustom ellama-spinner-enabled nil
+  "Enable spinner during text generation."
+  :group 'ellama
+  :type 'boolean)
 
 (defcustom ellama-command-map
   (let ((map (make-sparse-keymap)))
@@ -494,6 +501,7 @@ Too low value can break generated code by splitting long 
comment lines."
 (define-minor-mode ellama-request-mode
   "Minor mode for ellama buffers with active request to llm."
   :interactive nil
+  :lighter " ellama:generating"
   :keymap '(([remap keyboard-quit] . ellama--cancel-current-request-and-quit))
   (if ellama-request-mode
       (add-hook 'kill-buffer-hook 'ellama--cancel-current-request nil t)
@@ -823,9 +831,12 @@ If EPHEMERAL non nil new session will not be associated 
with any file."
 
 (defun ellama--cancel-current-request ()
   "Cancel current running request."
+  (declare-function spinner-stop "ext:spinner")
   (when ellama--current-request
     (llm-cancel-request ellama--current-request)
-    (spinner-stop)
+    (when ellama-spinner-enabled
+      (require 'spinner)
+      (spinner-stop))
     (setq ellama--current-request nil)))
 
 (defun ellama--cancel-current-request-and-quit ()
@@ -1028,7 +1039,7 @@ If EPHEMERAL non nil new session will not be associated 
with any file."
 
 (defvar ellama--context-buffer " *ellama-context*")
 
-(defcustom ellama-context-posframe-enabled t
+(defcustom ellama-context-posframe-enabled nil
   "Enable showing posframe with ellama context."
   :group 'ellama
   :type 'boolean)
@@ -1040,8 +1051,7 @@ If EPHEMERAL non nil new session will not be associated 
with any file."
   (setq ellama--global-context nil)
   (with-current-buffer ellama--context-buffer
     (erase-buffer))
-  (when ellama-context-posframe-enabled
-    (posframe-hide ellama--context-buffer)))
+  (ellama-update-context-show))
 
 ;; Context elements
 
@@ -1070,32 +1080,62 @@ If EPHEMERAL non nil new session will not be associated 
with any file."
   :group 'ellama
   :type 'integer)
 
-(defcustom ellama-context-element-padding-size 20
-  "Padding size for context elements."
-  :group 'ellama
-  :type 'integer)
-
-(defun ellama-update-context-posframe-show ()
-  "Update and show context posframe."
+(defun ellama-update-context-show ()
+  "Update and show context in posframe of header line."
+  (declare-function posframe-show "ext:posframe")
+  (declare-function posframe-hide "ext:posframe")
+  (with-current-buffer ellama--context-buffer
+    (erase-buffer)
+    (when ellama--global-context
+      (insert (format
+              " ellama ctx: %s"
+              (string-join
+               (mapcar
+                (lambda (el)
+                  (ellama-context-element-display el))
+                ellama--global-context)
+               "  ")))))
   (when ellama-context-posframe-enabled
-    (with-current-buffer ellama--context-buffer
-      (erase-buffer)
-      (when ellama--global-context
-       (insert (format
-                "context: %s"
-                (string-join
-                 (mapcar
-                  (lambda (el)
-                    (string-pad
-                     (ellama-context-element-display el) 
ellama-context-element-padding-size))
-                  ellama--global-context)
-                 "  ")))))
+    (require 'posframe)
     (if ellama--global-context
        (posframe-show
         ellama--context-buffer
         :poshandler ellama-context-poshandler
         :internal-border-width ellama-context-border-width)
-      (posframe-hide ellama--context-buffer))))
+      (posframe-hide ellama--context-buffer)))
+  (ellama-context-update-header-line))
+
+(defun ellama-context-line ()
+  "Return current global context line."
+  (with-current-buffer ellama--context-buffer
+    (buffer-substring-no-properties
+     (point-min) (point-max))))
+
+;;;###autoload
+(define-minor-mode ellama-context-header-line-mode
+  "Toggle Ellama Context header line mode."
+  :group 'ellama
+  (add-hook 'window-state-change-hook #'ellama-context-update-header-line)
+  (if ellama-context-header-line-mode
+      (ellama-context-update-header-line)
+    (setq header-line-format (delete '(:eval (ellama-context-line)) 
header-line-format))))
+
+;;;###autoload
+(define-globalized-minor-mode ellama-context-header-line-global-mode
+  ellama-context-header-line-mode
+  ellama-context-header-line-mode)
+
+(defun ellama-context-turn-on-header-line-mode ()
+  "Turn on `ellama-context-header-line-mode' if appropriate."
+  (when (or (eq major-mode 'text-mode)
+            (derived-mode-p 'text-mode))
+    (ellama-context-header-line-mode 1)))
+
+(defun ellama-context-update-header-line ()
+  "Update and display context information in the header line."
+  (if (and ellama-context-header-line-mode ellama--global-context)
+      (add-to-list 'header-line-format '(:eval (ellama-context-line)) t)
+    (setq header-line-format (delete '(:eval (ellama-context-line)) 
header-line-format))))
 
 (cl-defmethod ellama-context-element-add ((element ellama-context-element))
   "Add the ELEMENT to the Ellama context."
@@ -1104,7 +1144,7 @@ If EPHEMERAL non nil new session will not be associated 
with any file."
              :test #'equal-including-properties)
   (setf ellama--global-context (nreverse ellama--global-context))
   (get-buffer-create ellama--context-buffer t)
-  (ellama-update-context-posframe-show))
+  (ellama-update-context-show))
 
 (defcustom ellama-manage-context-display-action-function 
#'display-buffer-same-window
   "Display action function for `ellama-render-context'."
@@ -1203,7 +1243,7 @@ If EPHEMERAL non nil new session will not be associated 
with any file."
   (when-let ((elt (get-text-property (point) 'context-element)))
     (ellama-remove-context-element elt)
     (ellama-manage-context)
-    (ellama-update-context-posframe-show)))
+    (ellama-update-context-show)))
 
 ;; Buffer context element
 
@@ -1803,6 +1843,8 @@ failure (with BUFFER current).
 
 :on-done ON-DONE -- ON-DONE a function or list of functions that's called with
  the full response text when the request completes (with BUFFER current)."
+  (declare-function spinner-start "ext:spinner")
+  (declare-function spinner-stop "ext:spinner")
   (let* ((session-id (plist-get args :session-id))
         (session (or (plist-get args :session)
                      (when session-id
@@ -1869,7 +1911,9 @@ failure (with BUFFER current).
        (setq ellama--change-group (prepare-change-group))
        (activate-change-group ellama--change-group)
        (ellama-set-markers start end point)
-       (spinner-start ellama-spinner-type)
+       (when ellama-spinner-enabled
+         (require 'spinner)
+         (spinner-start ellama-spinner-type))
        (let ((request (llm-chat-streaming
                        provider
                        llm-prompt
@@ -1883,7 +1927,8 @@ failure (with BUFFER current).
                                      text)))
                          (with-current-buffer buffer
                            (accept-change-group ellama--change-group)
-                           (spinner-stop)
+                           (when ellama-spinner-enabled
+                             (spinner-stop))
                            (if (and (listp donecb)
                                     (functionp (car donecb)))
                                (mapc (lambda (fn) (funcall fn text))
@@ -1907,7 +1952,8 @@ failure (with BUFFER current).
                        (lambda (_ msg)
                          (with-current-buffer buffer
                            (cancel-change-group ellama--change-group)
-                           (spinner-stop)
+                           (when ellama-spinner-enabled
+                             (spinner-stop))
                            (funcall errcb msg)
                            (setq ellama--current-request nil)
                            (ellama-request-mode -1))))))

Reply via email to