branch: externals/llm
commit abbff2aa9d8c1df46c9b3e44d6b2e96861f3fd50
Author: Andrew Hyatt <ahy...@gmail.com>
Commit: Andrew Hyatt <ahy...@gmail.com>

    Change method name to llm-chat (without "-response"), update README
---
 README.org    |  5 ++++-
 llm-fake.el   |  6 +++---
 llm-openai.el | 10 +++++-----
 llm-test.el   |  8 ++++----
 llm-tester.el |  4 ++--
 llm-vertex.el | 12 ++++++------
 llm.el        |  8 ++++----
 7 files changed, 28 insertions(+), 25 deletions(-)

diff --git a/README.org b/README.org
index dea73f1a66..7856b6ef49 100644
--- a/README.org
+++ b/README.org
@@ -12,13 +12,16 @@ Clients should require the module, =llm=, and code against it.  Most functions a
 
 A list of all the functions:
 
-- ~llm-chat-response provider prompt~:  With user-chosen ~provider~ , and a ~llm-chat-prompt~ structure (containing context, examples, interactions, and parameters such as temperature and max tokens), send that prompt to the LLM and wait for the string output.
+- ~llm-chat provider prompt~:  With user-chosen ~provider~ , and a ~llm-chat-prompt~ structure (containing context, examples, interactions, and parameters such as temperature and max tokens), send that prompt to the LLM and wait for the string output.
+- ~llm-chat-async provider prompt response-callback error-callback~: Same as ~llm-chat~, but executes in the background.  Takes a ~response-callback~ which will be called with the text response.  The ~error-callback~ will be called in case of error, with the error symbol and an error message.
 - ~llm-embedding provider string~: With the user-chosen ~provider~, send a string and get an embedding, which is a large vector of floating point values.  The embedding represents the semantic meaning of the string, and the vector can be compared against other vectors, where smaller distances between the vectors represent greater semantic similarity.
+- ~llm-embedding-async provider string vector-callback error-callback~: Same as ~llm-embedding~ but this is processed asynchronously. ~vector-callback~ is called with the vector embedding, and, in case of error, ~error-callback~ is called with the same arguments as in ~llm-chat-async~.
 
 All of the providers currently implemented.
 
 - =llm-openai=.  This is the interface to Open AI's Chat GPT.  The user must set their key, and select their preferred chat and embedding model.
 - =llm-vertex=.  This is the interface to Google Cloud's Vertex API.  The user needs to set their project number.  In addition, to get authenticated, the user must have logged in initially, and have a valid path in ~llm-vertex-gcloud-binary~.  Users can also configure ~llm-vertex-gcloud-region~ for using a region closer to their location.  It defaults to ="us-central1"=  The provider can also contain the user's chosen embedding and chat model.
+- =llm-fake=.  This is a provider that is useful for developers using this library, to be able to understand what is being sent to the =llm= library without actually sending anything over the wire.
 
 If you are interested in creating a provider, please send a pull request, or open a bug.
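
For readers of this patch, a minimal sketch of the renamed entry points described in the README hunk above.  The provider construction is an assumption for illustration: the ~make-llm-openai~ keyword arguments and the key value are placeholders, only ~llm-chat~, ~llm-chat-async~, ~llm-embedding~, and the prompt constructors come from this patch.

    ;; Illustrative only: the provider keyword arguments and the key value
    ;; are placeholders, not prescriptions.
    (require 'llm)
    (require 'llm-openai)

    (let ((provider (make-llm-openai :key "sk-placeholder"))
          (prompt (make-llm-chat-prompt
                   :interactions (list (make-llm-chat-prompt-interaction
                                        :role 'user :content "Hello there")))))
      ;; Synchronous: blocks until the LLM returns a string.
      (message "reply: %s" (llm-chat provider prompt))
      ;; Asynchronous: returns immediately; the callbacks run later.
      (llm-chat-async provider prompt
                      (lambda (response) (message "reply: %s" response))
                      (lambda (err msg) (message "error %s: %s" err msg)))
      ;; Embedding: a vector of floats representing the string's meaning.
      (length (llm-embedding provider "Hello there")))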
 
diff --git a/llm-fake.el b/llm-fake.el
index 93b0b210d0..95aea76400 100644
--- a/llm-fake.el
+++ b/llm-fake.el
@@ -52,11 +52,11 @@ message cons. If nil, the response will be a simple vector."
     (t (funcall error-callback (car err) (cdr err))))
   nil)
 
-(cl-defmethod llm-chat-response ((provider llm-fake) prompt)
+(cl-defmethod llm-chat ((provider llm-fake) prompt)
   (when (llm-fake-output-to-buffer provider)
+    (with-current-buffer (get-buffer-create (llm-fake-output-to-buffer provider))
       (goto-char (point-max))
-      (insert "\nCall to llm-chat-response\n"  (llm-chat-prompt-to-text prompt) "\n")))
+      (insert "\nCall to llm-chat\n"  (llm-chat-prompt-to-text prompt) "\n")))
   (if (llm-fake-chat-action-func provider)
       (let* ((f (llm-fake-chat-action-func provider))
              (result (funcall f)))
@@ -64,7 +64,7 @@ message cons. If nil, the response will be a simple vector."
                 ('string result)
                 ('cons (signal (car result) (cdr result)))
                 (_ (error "Incorrect type found in `chat-action-func': %s" (type-of result)))))
-    "Sample response from `llm-chat-response-async'"))
+    "Sample response from `llm-chat-async'"))
 
 (cl-defmethod llm-embedding ((provider llm-fake) string)
   (when (llm-fake-output-to-buffer provider)
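
The =llm-fake= provider above is also the easiest way to see the rename in action with no network traffic.  A small sketch in the spirit of llm-test.el; the ~:output-to-buffer~ keyword is an assumption inferred from the ~llm-fake-output-to-buffer~ accessor in this hunk, and the buffer name is arbitrary.

    ;; No network involved: the fake provider returns whatever its
    ;; chat-action-func produces, or signals it if it is an error cons.
    (require 'llm)
    (require 'llm-fake)

    ;; Returns the canned string "Hi" through the renamed `llm-chat'.
    (llm-chat (make-llm-fake :chat-action-func (lambda () "Hi"))
              (make-llm-chat-prompt))

    ;; Logs every prompt to a buffer, useful for inspecting what an
    ;; application would have sent to a real provider.
    (llm-chat (make-llm-fake :output-to-buffer "*llm log*"
                             :chat-action-func (lambda () "Hi"))
              (make-llm-chat-prompt))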
diff --git a/llm-openai.el b/llm-openai.el
index a0e0c4fe56..76d6ab45cd 100644
--- a/llm-openai.el
+++ b/llm-openai.el
@@ -89,7 +89,7 @@ should wait until the response is received."
                                         (lambda (_ error-message) (error error-message)) t)
     response))
 
-(defun llm-openai--chat-response (provider prompt response-callback error-callback &optional return-json-spec sync)
+(defun llm-openai--chat (provider prompt response-callback error-callback &optional return-json-spec sync)
   "Main method to send a PROMPT as a chat prompt to Open AI.
 RETURN-JSON-SPEC, if specified, is a JSON spec to return from the
 Open AI API.
@@ -160,12 +160,12 @@ SYNC is non-nil when the request should wait until the response is received."
                                              (assoc-default 'type (cdar data))
                                              (assoc-default 'message (cdar data)))))))))
 
-(cl-defmethod llm-chat-response-async ((provider llm-openai) prompt response-callback error-callback)
-  (llm-openai--chat-response provider prompt response-callback error-callback))
+(cl-defmethod llm-chat-async ((provider llm-openai) prompt response-callback error-callback)
+  (llm-openai--chat provider prompt response-callback error-callback))
 
-(cl-defmethod llm-chat-response ((provider llm-openai) prompt)
+(cl-defmethod llm-chat ((provider llm-openai) prompt)
   (let ((response))
-    (llm-openai--chat-response provider prompt
+    (llm-openai--chat provider prompt
                                (lambda (result) (setq response result))
                                (lambda (_ msg) (error msg))
                                nil t)
diff --git a/llm-test.el b/llm-test.el
index e7a87676ad..e394c5895c 100644
--- a/llm-test.el
+++ b/llm-test.el
@@ -42,15 +42,15 @@
                           "Test"))))
 
 (ert-deftest llm-test-chat ()
-  (should-error (llm-chat-response nil "Test"))
-  (should-error (llm-chat-response-async nil "Test"))
-  (should-error (llm-chat-response
+  (should-error (llm-chat nil "Test"))
+  (should-error (llm-chat-async nil "Test"))
+  (should-error (llm-chat
                  (make-llm-fake
                   :chat-action-func (lambda () (cons 'error "my message")))
                  (make-llm-chat-prompt)))
   (should (equal
            "Response"
-           (llm-chat-response (make-llm-fake :chat-action-func (lambda () "Response"))
+           (llm-chat (make-llm-fake :chat-action-func (lambda () "Response"))
                               (make-llm-chat-prompt)))))
 
 ;;; llm-test.el ends here
diff --git a/llm-tester.el b/llm-tester.el
index c6e09c9e09..0bc9723649 100644
--- a/llm-tester.el
+++ b/llm-tester.el
@@ -66,7 +66,7 @@
 (defun llm-tester-chat-async (provider)
   "Test that PROVIDER can interact with the LLM chat."
   (message "Testing provider %s for chat" (type-of provider))
-  (llm-chat-response-async
+  (llm-chat-async
    provider
    (make-llm-chat-prompt
     :interactions (list
@@ -90,7 +90,7 @@
 (defun llm-tester-chat-sync (provider)
   "Test that PROVIDER can interact with the LLM chat."
   (message "Testing provider %s for chat" (type-of provider))
-  (let ((response (llm-chat-response
+  (let ((response (llm-chat
                    provider
                    (make-llm-chat-prompt
                     :interactions (list
diff --git a/llm-vertex.el b/llm-vertex.el
index e072ee533a..3fdd50c245 100644
--- a/llm-vertex.el
+++ b/llm-vertex.el
@@ -110,10 +110,10 @@ response is available to return."
                            (lambda (_ error-message) (error error-message)) t)
     response))
 
-(defun llm-vertex--chat-response (provider prompt response-callback error-callback sync)
+(defun llm-vertex--chat (provider prompt response-callback error-callback sync)
   "Get the chat response for PROMPT.
 PROVIDER, RESPONSE-CALLBACK, ERROR-CALLBACK are all the same as
-`llm-chat-response-async'. SYNC, when non-nil, will wait until
+`llm-chat-async'. SYNC, when non-nil, will wait until
 the response is available to return."
   (llm-vertex-refresh-key provider)
   (llm-vertex-maybe-warn)
@@ -163,12 +163,12 @@ the response is available to return."
                                                     (assoc-default 'message (assoc-default 'error data))
                                                     data))))))))
 
-(cl-defmethod llm-chat-response-async ((provider llm-vertex) prompt response-callback error-callback)
-  (llm-vertex--chat-response provider prompt response-callback error-callback nil))
+(cl-defmethod llm-chat-async ((provider llm-vertex) prompt response-callback error-callback)
+  (llm-vertex--chat provider prompt response-callback error-callback nil))
 
-(cl-defmethod llm-chat-response ((provider llm-vertex) prompt)
+(cl-defmethod llm-chat ((provider llm-vertex) prompt)
   (let ((response))
-    (llm-vertex--chat-response provider prompt
+    (llm-vertex--chat provider prompt
                                (lambda (result) (setq response result))
                               (lambda (_ error-message) (error error-message)) t)
     response))
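
Both llm-openai.el and llm-vertex.el above implement the synchronous ~llm-chat~ by reusing their callback-based helper, capturing the result in a closure and passing a SYNC flag.  A stripped-down sketch of that pattern; the ~my-~ names are hypothetical stand-ins for the real helpers, and the toy helper replies immediately instead of making an HTTP request.

    ;; Hypothetical names (`my-provider', `my--chat'); only the shape of the
    ;; sync-over-async pattern mirrors the real code above.
    (require 'cl-lib)
    (require 'llm)
    (cl-defstruct my-provider)

    (defun my--chat (provider prompt response-callback error-callback sync)
      "Toy helper: call RESPONSE-CALLBACK at once instead of hitting an API."
      (ignore provider error-callback sync)
      (funcall response-callback
               (concat "echo: " (llm-chat-prompt-to-text prompt))))

    (cl-defmethod llm-chat ((provider my-provider) prompt)
      (let ((response))
        (my--chat provider prompt
                  (lambda (result) (setq response result))
                  (lambda (_ msg) (error msg))
                  ;; SYNC = t: in the real providers this makes the HTTP
                  ;; request block until the callbacks have already run.
                  t)
        response))

The design choice is that each provider only writes one request path; the synchronous method is a thin wrapper over the asynchronous plumbing.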
diff --git a/llm.el b/llm.el
index 408a5ab17b..e7c05c18a4 100644
--- a/llm.el
+++ b/llm.el
@@ -90,16 +90,16 @@ an LLM, and don't need the more advanced features that the
 `llm-chat-prompt' struct makes available."
   (make-llm-chat-prompt :interactions (list (make-llm-chat-prompt-interaction :role 'user :content text))))
 
-(cl-defgeneric llm-chat-response (provider prompt)
+(cl-defgeneric llm-chat (provider prompt)
   "Return a response to PROMPT from PROVIDER.
 PROMPT is a `llm-chat-prompt'. The response is a string."
   (ignore provider prompt)
   (signal 'not-implemented nil))
 
-(cl-defmethod llm-chat-response ((_ (eql nil)) _)
+(cl-defmethod llm-chat ((_ (eql nil)) _)
   (error "LLM provider was nil.  Please set the provider in the application 
you are using."))
 
-(cl-defgeneric llm-chat-response-async (provider prompt response-callback error-callback)
+(cl-defgeneric llm-chat-async (provider prompt response-callback error-callback)
   "Return a response to PROMPT from PROVIDER.
 PROMPT is a `llm-chat-prompt'.
 RESPONSE-CALLBACK receives the string response.
@@ -107,7 +107,7 @@ ERROR-CALLBACK receives the error response."
   (ignore provider prompt response-callback error-callback)
   (signal 'not-implemented nil))
 
-(cl-defmethod llm-chat-response-async ((_ (eql nil)) _ _ _)
+(cl-defmethod llm-chat-async ((_ (eql nil)) _ _ _)
   (error "LLM provider was nil.  Please set the provider in the application 
you are using."))
 
 (cl-defgeneric llm-embedding (provider string)
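
For anyone following the README's invitation to write a provider, this is roughly what an implementation against the renamed generic looks like.  ~my-echo~ and its body are hypothetical; only the method signature of ~llm-chat-async~ and the error-callback convention (error symbol plus message) come from llm.el and the README hunk above.

    ;; Hypothetical provider; only the generic's signature and the
    ;; (SYMBOL MESSAGE) error-callback convention come from llm.el.
    (require 'cl-lib)
    (require 'llm)
    (cl-defstruct my-echo)

    (cl-defmethod llm-chat-async ((provider my-echo) prompt
                                  response-callback error-callback)
      (ignore provider)
      (condition-case err
          (funcall response-callback
                   (concat "echo: " (llm-chat-prompt-to-text prompt)))
        (error (funcall error-callback (car err) (error-message-string err)))))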
