branch: externals/llm
commit 59fc3d7d29e1ce9f5dff1ee950aae7b25ee7c4e7
Author: Andrew Hyatt <ahy...@gmail.com>
Commit: Andrew Hyatt <ahy...@gmail.com>
    Ensure that all callbacks are in the original buffer

    If the original buffer has been killed, use a temporary buffer. This
    will fix one of the issues noticed in
    https://github.com/ahyatt/llm/issues/3.
---
 llm-ollama.el  | 11 ++++++----
 llm-openai.el  | 46 +++++++++++++++++++++-------------------
 llm-request.el |  9 ++++++++
 llm-tester.el  | 67 ++++++++++++++++++++++++++++++++++++----------------------
 llm-vertex.el  | 20 ++++++++++--------
 5 files changed, 93 insertions(+), 60 deletions(-)

diff --git a/llm-ollama.el b/llm-ollama.el
index 73de3d5b76..26f859af4e 100644
--- a/llm-ollama.el
+++ b/llm-ollama.el
@@ -178,19 +178,22 @@ STREAMING if non-nil, turn on response streaming."
   (llm-chat-streaming provider prompt (lambda (_)) response-callback error-callback))
 
 (cl-defmethod llm-chat-streaming ((provider llm-ollama) prompt partial-callback response-callback error-callback)
-  (llm-request-async (llm-ollama--url provider "generate")
+  (let ((buf (current-buffer)))
+    (llm-request-async (llm-ollama--url provider "generate")
     :data (llm-ollama--chat-request provider prompt)
     :on-success-raw (lambda (response)
                       (setf (llm-chat-prompt-interactions prompt)
                             (list (assoc-default 'context
                                                  (llm-ollama--get-final-response response))))
-                      (funcall response-callback (llm-ollama--get-partial-chat-response response)))
+                      (llm-request-callback-in-buffer
+                       buf response-callback
+                       (llm-ollama--get-partial-chat-response response)))
     :on-partial (lambda (data)
                   (when-let ((response (llm-ollama--get-partial-chat-response data)))
-                    (funcall partial-callback response)))
+                    (llm-request-callback-in-buffer buf partial-callback response)))
     :on-error (lambda (_ _)
                 ;; The problem with ollama is that it doesn't
                 ;; seem to have an error response.
-                (funcall error-callback 'error "Unknown error calling ollama"))))
+                (llm-request-callback-in-buffer buf error-callback 'error "Unknown error calling ollama")))))
 
 (provide 'llm-ollama)
diff --git a/llm-openai.el b/llm-openai.el
index e63389cf0d..6165ad1af6 100644
--- a/llm-openai.el
+++ b/llm-openai.el
@@ -150,7 +150,8 @@ STREAMING if non-nil, turn on response streaming."
 (cl-defmethod llm-chat-async ((provider llm-openai) prompt response-callback error-callback)
   (unless (llm-openai-key provider)
     (error "To call Open AI API, the key must have been set"))
-  (llm-request-async "https://api.openai.com/v1/chat/completions"
+  (let ((buf (current-buffer)))
+    (llm-request-async "https://api.openai.com/v1/chat/completions"
     :headers `(("Authorization" . ,(format "Bearer %s" (llm-openai-key provider))))
     :data (llm-openai--chat-request provider prompt)
     :on-success (lambda (data)
@@ -158,13 +159,13 @@ STREAMING if non-nil, turn on response streaming."
                   (setf (llm-chat-prompt-interactions prompt)
                         (append (llm-chat-prompt-interactions prompt)
                                 (list (make-llm-chat-prompt-interaction :role 'assistant :content response))))
-                  (funcall response-callback response)))
+                  (llm-request-callback-in-buffer buf response-callback response)))
    :on-error (lambda (_ data)
                (let ((errdata (cdr (assoc 'error data))))
-                 (funcall error-callback 'error
+                 (llm-request-callback-in-buffer buf error-callback 'error
                           (format "Problem calling Open AI: %s message: %s"
                                   (cdr (assoc 'type errdata))
-                                  (cdr (assoc 'message errdata))))))))
+                                  (cdr (assoc 'message errdata)))))))))
 
 (cl-defmethod llm-chat ((provider llm-openai) prompt)
   (unless (llm-openai-key provider)
@@ -206,24 +207,25 @@ STREAMING if non-nil, turn on response streaming."
 (cl-defmethod llm-chat-streaming ((provider llm-openai) prompt partial-callback response-callback error-callback)
   (unless (llm-openai-key provider)
     (error "To call Open AI API, the key must have been set"))
-  (llm-request-async "https://api.openai.com/v1/chat/completions"
-    :headers `(("Authorization" . ,(format "Bearer %s" (llm-openai-key provider))))
-    :data (llm-openai--chat-request provider prompt nil t)
-    :on-error (lambda (_ data)
-                (let ((errdata (cdr (assoc 'error data))))
-                  (funcall error-callback 'error
-                           (format "Problem calling Open AI: %s message: %s"
-                                   (cdr (assoc 'type errdata))
-                                   (cdr (assoc 'message errdata))))))
-    :on-partial (lambda (data)
-                  (when-let ((response (llm-openai--get-partial-chat-response data)))
-                    (funcall partial-callback response)))
-    :on-success-raw (lambda (data)
-                      (let ((response (llm-openai--get-partial-chat-response data)))
-                        (setf (llm-chat-prompt-interactions prompt)
-                              (append (llm-chat-prompt-interactions prompt)
-                                      (list (make-llm-chat-prompt-interaction :role 'assistant :content response))))
-                        (funcall response-callback response)))))
+  (let ((buf (current-buffer)))
+    (llm-request-async "https://api.openai.com/v1/chat/completions"
+      :headers `(("Authorization" . ,(format "Bearer %s" (llm-openai-key provider))))
+      :data (llm-openai--chat-request provider prompt nil t)
+      :on-error (lambda (_ data)
+                  (let ((errdata (cdr (assoc 'error data))))
+                    (llm-request-callback-in-buffer buf error-callback 'error
+                                                    (format "Problem calling Open AI: %s message: %s"
+                                                            (cdr (assoc 'type errdata))
+                                                            (cdr (assoc 'message errdata))))))
+      :on-partial (lambda (data)
+                    (when-let ((response (llm-openai--get-partial-chat-response data)))
+                      (llm-request-callback-in-buffer buf partial-callback response)))
+      :on-success-raw (lambda (data)
+                        (let ((response (llm-openai--get-partial-chat-response data)))
+                          (setf (llm-chat-prompt-interactions prompt)
+                                (append (llm-chat-prompt-interactions prompt)
+                                        (list (make-llm-chat-prompt-interaction :role 'assistant :content response))))
+                          (llm-request-callback-in-buffer buf response-callback response))))))
 
 (provide 'llm-openai)
diff --git a/llm-request.el b/llm-request.el
index a1b24756e8..f711f51aaf 100644
--- a/llm-request.el
+++ b/llm-request.el
@@ -138,5 +138,14 @@ the buffer is turned into JSON and passed to ON-SUCCESS."
                                #'llm-request--handle-new-content
                                nil t))))))
 
+;; This is a useful function for getting out of the request buffer when it's
+;; time to make callbacks.
+(defun llm-request-callback-in-buffer (buf f &rest args)
+  "Run F with ARGS in the context of BUF.
+But if BUF has been killed, use a temporary buffer instead."
+  (if (buffer-live-p buf)
+      (with-current-buffer buf (apply f args))
+    (with-temp-buffer (apply f args))))
+
 (provide 'llm-request)
 ;;; llm-request.el ends here
diff --git a/llm-tester.el b/llm-tester.el
index 8b00851df7..37290d37c8 100644
--- a/llm-tester.el
+++ b/llm-tester.el
@@ -66,26 +66,29 @@
 (defun llm-tester-chat-async (provider)
   "Test that PROVIDER can interact with the LLM chat."
   (message "Testing provider %s for chat" (type-of provider))
-  (llm-chat-async
-   provider
-   (make-llm-chat-prompt
-    :interactions (list
-                   (make-llm-chat-prompt-interaction
-                    :role 'user
-                    :content "Tell me a random cool feature of emacs."))
-    :context "You must answer all questions as if you were the butler Jeeves from Jeeves and Wooster. Start all interactions with the phrase, 'Very good, sir.'"
-    :examples '(("Tell me the capital of France." . "Very good, sir. The capital of France is Paris, which I expect you to be familiar with, since you were just there last week with your Aunt Agatha.")
-                ("Could you take me to my favorite place?" . "Very good, sir. I believe you are referring to the Drone's Club, which I will take you to after you put on your evening attire."))
-    :temperature 0.5
-    :max-tokens 100)
-   (lambda (response)
-     (if response
-         (if (> (length response) 0)
-             (message "SUCCESS: Provider %s provided a response %s" (type-of provider) response)
-           (message "ERROR: Provider %s returned an empty response" (type-of provider)))
-       (message "ERROR: Provider %s did not return any response" (type-of provider))))
-   (lambda (type message)
-     (message "ERROR: Provider %s returned an error of type %s with message %s" (type-of provider) type message))))
+  (let ((buf (current-buffer)))
+    (llm-chat-async
+     provider
+     (make-llm-chat-prompt
+      :interactions (list
+                     (make-llm-chat-prompt-interaction
+                      :role 'user
+                      :content "Tell me a random cool feature of emacs."))
+      :context "You must answer all questions as if you were the butler Jeeves from Jeeves and Wooster. Start all interactions with the phrase, 'Very good, sir.'"
+      :examples '(("Tell me the capital of France." . "Very good, sir. The capital of France is Paris, which I expect you to be familiar with, since you were just there last week with your Aunt Agatha.")
+                  ("Could you take me to my favorite place?" . "Very good, sir. I believe you are referring to the Drone's Club, which I will take you to after you put on your evening attire."))
+      :temperature 0.5
+      :max-tokens 100)
+     (lambda (response)
+       (unless (eq buf (current-buffer))
+         (message "ERROR: Provider %s returned a response not in the original buffer" (type-of provider)))
+       (if response
+           (if (> (length response) 0)
+               (message "SUCCESS: Provider %s provided a response %s" (type-of provider) response)
+             (message "ERROR: Provider %s returned an empty response" (type-of provider)))
+         (message "ERROR: Provider %s did not return any response" (type-of provider))))
+     (lambda (type message)
+       (message "ERROR: Provider %s returned an error of type %s with message %s" (type-of provider) type message)))))
 
 (defun llm-tester-chat-sync (provider)
   "Test that PROVIDER can interact with the LLM chat."
@@ -112,7 +115,8 @@
   "Test that PROVIDER can stream back LLM chat responses."
   (message "Testing provider %s for streaming chat" (type-of provider))
   (let ((streamed)
-        (counter 0))
+        (counter 0)
+        (buf (current-buffer)))
     (llm-chat-streaming
      provider
      (make-llm-chat-prompt
@@ -123,13 +127,19 @@
       :temperature 0.5
       :max-tokens 200)
      (lambda (text)
+       (unless (eq buf (current-buffer))
+         (message "ERROR: Provider %s returned a response not in the original buffer" (type-of provider)))
        (cl-incf counter)
        (setq streamed text))
      (lambda (text)
+       (unless (eq buf (current-buffer))
+         (message "ERROR: Provider %s returned a response not in the original buffer" (type-of provider)))
        (message "SUCCESS: Provider %s provided a streamed response %s in %d parts, complete text is: %s" (type-of provider) streamed counter text)
        (if (= 0 counter)
            (message "ERROR: Provider %s streaming request never happened!"
                     (type-of provider))))
      (lambda (type message)
+       (unless (eq buf (current-buffer))
+         (message "ERROR: Provider %s returned a response not in the original buffer" (type-of provider)))
        (message "ERROR: Provider %s returned an error of type %s with message %s" (type-of provider) type message)))))
 
 (defun llm-tester-chat-conversation (provider chat-func)
@@ -166,13 +176,16 @@ CHAT-FUNC should insert the chat response to the buffer."
   (message "Testing provider %s for conversation" (type-of provider))
   (let ((prompt (llm-make-simple-chat-prompt
                  "I'm currently testing conversational abilities. Please respond to each message with the ordinal number of your response, so just '1' for the first response, '2' for the second, and so on. It's important that I can verify that you are working with the full conversation history, so please let me know if you seem to be missing anything."))
-        (outputs nil))
+        (outputs nil)
+        (buf (current-buffer)))
     (llm-chat-async provider prompt
                     (lambda (response)
                       (push response outputs)
                       (llm-chat-prompt-append-response prompt "This is the second message.")
                       (llm-chat-async provider prompt
                                       (lambda (response)
+                                        (unless (eq buf (current-buffer))
+                                          (message "ERROR: Provider %s returned a response not in the original buffer" (type-of provider)))
                                         (push response outputs)
                                         (llm-chat-prompt-append-response prompt "This is the third message.")
                                         (llm-chat-async provider prompt
@@ -182,8 +195,12 @@ CHAT-FUNC should insert the chat response to the buffer."
                                                         (lambda (type message)
                                                           (message "ERROR: Provider %s returned an error of type %s with message %s" (type-of provider) type message))))
                                       (lambda (type message)
+                                        (unless (eq buf (current-buffer))
+                                          (message "ERROR: Provider %s returned a response not in the original buffer" (type-of provider)))
                                         (message "ERROR: Provider %s returned an error of type %s with message %s" (type-of provider) type message))))
                     (lambda (type message)
+                      (unless (eq buf (current-buffer))
+                        (message "ERROR: Provider %s returned a response not in the original buffer" (type-of provider)))
                       (message "ERROR: Provider %s returned an error of type %s with message %s" (type-of provider) type message)))))
 
 (defun llm-tester-chat-conversation-streaming (provider)
@@ -195,18 +212,18 @@ CHAT-FUNC should insert the chat response to the buffer."
     (llm-chat-streaming-to-point
      provider prompt buf (with-current-buffer buf (point-max))
      (lambda ()
-       (with-current-buffer buf (goto-char (point-max)) (insert "\n"))
+       (goto-char (point-max)) (insert "\n")
        (llm-chat-prompt-append-response prompt "This is the second message.")
        (llm-chat-streaming-to-point
         provider prompt buf (with-current-buffer buf (point-max))
         (lambda ()
-          (with-current-buffer buf (goto-char (point-max)) (insert "\n"))
+          (goto-char (point-max)) (insert "\n")
          (llm-chat-prompt-append-response prompt "This is the third message.")
          (llm-chat-streaming-to-point
           provider prompt buf (with-current-buffer buf (point-max))
           (lambda ()
-            (message "SUCCESS: Provider %s provided a conversation with responses %s" (type-of provider) (with-current-buffer buf (buffer-string)))
+            (message "SUCCESS: Provider %s provided a conversation with responses %s" (type-of provider) (buffer-string))
             (kill-buffer buf))))))))))
 
 (defun llm-tester-all (provider)
diff --git a/llm-vertex.el b/llm-vertex.el
index dbe88598cd..6765c73735 100644
--- a/llm-vertex.el
+++ b/llm-vertex.el
@@ -265,7 +265,8 @@ If STREAMING is non-nil, use the URL for the streaming API."
 (cl-defmethod llm-chat-async ((provider llm-vertex) prompt response-callback error-callback)
   (llm-vertex-refresh-key provider)
-  (llm-request-async (llm-vertex--chat-url provider nil)
+  (let ((buf (current-buffer)))
+    (llm-request-async (llm-vertex--chat-url provider nil)
     :headers `(("Authorization" . ,(format "Bearer %s" (llm-vertex-key provider))))
     :data (llm-vertex--chat-request-v1 prompt)
     :on-success (lambda (data)
@@ -273,10 +274,10 @@ If STREAMING is non-nil, use the URL for the streaming API."
                   (setf (llm-chat-prompt-interactions prompt)
                         (append (llm-chat-prompt-interactions prompt)
                                 (list (make-llm-chat-prompt-interaction :role 'assistant :content response))))
-                  (funcall response-callback response)))
+                  (llm-request-callback-in-buffer buf response-callback response)))
    :on-error (lambda (_ data)
-               (funcall error-callback 'error
-                        (llm-vertex--error-message data)))))
+               (llm-request-callback-in-buffer buf error-callback 'error
+                                               (llm-vertex--error-message data))))))
 
 (cl-defmethod llm-chat ((provider llm-vertex) prompt)
   (llm-vertex-refresh-key provider)
@@ -293,21 +294,22 @@ If STREAMING is non-nil, use the URL for the streaming API."
 
 (cl-defmethod llm-chat-streaming ((provider llm-vertex) prompt partial-callback response-callback error-callback)
   (llm-vertex-refresh-key provider)
-  (llm-request-async (llm-vertex--chat-url provider t)
+  (let ((buf (current-buffer)))
+    (llm-request-async (llm-vertex--chat-url provider t)
     :headers `(("Authorization" . ,(format "Bearer %s" (llm-vertex-key provider))))
     :data (llm-vertex--chat-request-ui prompt)
     :on-partial (lambda (partial)
                   (when-let ((response (llm--vertex--get-partial-chat-ui-repsonse partial)))
-                    (funcall partial-callback response)))
+                    (llm-request-callback-in-buffer buf partial-callback response)))
    :on-success (lambda (data)
                  (let ((response (llm-vertex--get-chat-response-ui data)))
                    (setf (llm-chat-prompt-interactions prompt)
                          (append (llm-chat-prompt-interactions prompt)
                                  (list (make-llm-chat-prompt-interaction :role 'assistant :content response))))
-                   (funcall response-callback response)))
+                   (llm-request-callback-in-buffer buf response-callback response)))
    :on-error (lambda (_ data)
-               (funcall error-callback 'error
-                        (llm-vertex--error-message data)))))
+               (llm-request-callback-in-buffer buf error-callback 'error
+                                               (llm-vertex--error-message data))))))
 
 (provide 'llm-vertex)
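
For readers following along, the pattern this commit applies is the same in every
provider: capture the caller's buffer before the async request starts, then route
every callback through the new helper so it runs back in that buffer, or in a
temporary buffer if the original was killed. Below is a minimal, self-contained
sketch of that pattern. `llm-request-callback-in-buffer' is exactly the helper
added in llm-request.el above; `my-fake-async-call' and `my-chat-async' are
hypothetical stand-ins for `llm-request-async' and a provider method, not part
of the commit.

;; The helper from llm-request.el: run F with ARGS in BUF if it is still
;; live, otherwise in a throwaway temp buffer.
(defun llm-request-callback-in-buffer (buf f &rest args)
  "Run F with ARGS in the context of BUF.
But if BUF has been killed, use a temporary buffer instead."
  (if (buffer-live-p buf)
      (with-current-buffer buf (apply f args))
    (with-temp-buffer (apply f args))))

;; Hypothetical stand-in for `llm-request-async': invokes CALLBACK later,
;; from an arbitrary buffer (here a temp buffer), just as a process
;; sentinel or url-retrieve callback would.
(defun my-fake-async-call (callback)
  (run-at-time 0.1 nil
               (lambda ()
                 (with-temp-buffer (funcall callback "response text")))))

;; The pattern: capture the caller's buffer up front, then wrap the user's
;; callback so it fires in that buffer rather than the request buffer.
(defun my-chat-async (response-callback)
  (let ((buf (current-buffer)))
    (my-fake-async-call
     (lambda (response)
       (llm-request-callback-in-buffer buf response-callback response)))))

;; Example use (hypothetical): the message reports the caller's buffer,
;; not whatever buffer the async machinery happened to be in.
;; (with-current-buffer (get-buffer-create "*scratch*")
;;   (my-chat-async (lambda (resp)
;;                    (message "Got %s in %s" resp (buffer-name)))))

The temp-buffer fallback matters because callbacks may run buffer-local code;
calling them inside a dead buffer would signal an error, while a temp buffer
lets them complete harmlessly.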