branch: externals/llm
commit 68af88f63db1fa872f250a6bd7c58f5e111022e6
Author: Andrew Hyatt <ahy...@gmail.com>
Commit: GitHub <nore...@github.com>

    Upgrade the default chat models for Claude and Open AI (#65)
    
    * Upgrade the default chat models for Claude and Open AI
    
    * Add information about change to NEWS.org
---
 NEWS.org      | 1 +
 llm-claude.el | 2 +-
 llm-openai.el | 3 +--
 llm-vertex.el | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)
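
Note: because this changes the shipped defaults, users who want to stay
on a previous model can pin it explicitly when constructing the
provider. A minimal sketch, assuming the `make-llm-claude' constructor
generated by the cl-defstruct in the diff below (the key is a
placeholder):

    (require 'llm-claude)
    ;; Pin the old default rather than inheriting the new one.
    (defvar my-claude-provider
      (make-llm-claude :key "sk-ant-..."  ; placeholder API key
                       :chat-model "claude-3-opus-20240229"))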

diff --git a/NEWS.org b/NEWS.org
index 5fbde80c16..9771a8111b 100644
--- a/NEWS.org
+++ b/NEWS.org
@@ -2,6 +2,7 @@
 - Fix compiled functions not being evaluated in =llm-prompt=.
 - Use Ollama's new =embed= API instead of the obsolete one.
 - Fix issue in Open AI streaming function calling.
+- Update Open AI and Claude default chat models to the latest models.
 * Version 0.17.1
 - Support Ollama function calling, for models which support it.
 - Make sure every model, even unknown models, return some value for ~llm-chat-token-limit~.
diff --git a/llm-claude.el b/llm-claude.el
index 9a9c5d07f1..fc934557bf 100644
--- a/llm-claude.el
+++ b/llm-claude.el
@@ -33,7 +33,7 @@
 ;; Models defined at https://docs.anthropic.com/claude/docs/models-overview
 (cl-defstruct (llm-claude (:include llm-standard-chat-provider))
   (key nil :read-only t)
-  (chat-model "claude-3-opus-20240229" :read-only t))
+  (chat-model "claude-3-5-sonnet-20240620" :read-only t))
 
 (cl-defmethod llm-nonfree-message-info ((_ llm-claude))
   "Return Claude's nonfree ToS."
diff --git a/llm-openai.el b/llm-openai.el
index 27a126bd01..4c94189ce9 100644
--- a/llm-openai.el
+++ b/llm-openai.el
@@ -159,8 +159,7 @@ STREAMING if non-nil, turn on response streaming."
                            `(("name" . ,(llm-chat-prompt-function-call-result-function-name fc)))))))
                      (llm-chat-prompt-interactions prompt)))
           request-alist)
-    (push `("model" . ,(or (llm-openai-chat-model provider)
-                           "gpt-3.5-turbo-0613")) request-alist)
+    (push `("model" . ,(or (llm-openai-chat-model provider) "gpt-4o")) request-alist)
     (when (llm-chat-prompt-temperature prompt)
      (push `("temperature" . ,(* (llm-chat-prompt-temperature prompt) 2.0)) request-alist))
     (when (llm-chat-prompt-max-tokens prompt)
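
With this hunk, a provider created without an explicit :chat-model now
sends "gpt-4o" in the request body. A minimal sketch, assuming the
`make-llm-openai' constructor from the same file (the key is a
placeholder):

    (require 'llm-openai)
    ;; No :chat-model given, so the `or' fallback above supplies "gpt-4o".
    (defvar my-openai-provider (make-llm-openai :key "sk-..."))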
diff --git a/llm-vertex.el b/llm-vertex.el
index 0cecb86a32..33d38723d9 100644
--- a/llm-vertex.el
+++ b/llm-vertex.el
@@ -276,7 +276,7 @@ If STREAMING is non-nil, use the URL for the streaming API."
 
 (cl-defmethod llm-name ((_ llm-vertex))
   "Return the name of the provider."
-  "Gemini")
+  "Vertex Gemini")
 
 (defun llm-vertex--chat-token-limit (model)
   "Get token limit for MODEL."

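The Vertex hunk only changes the user-visible provider name, which
distinguishes Vertex from the standalone Gemini provider. A minimal
sketch, assuming a provider built with `make-llm-vertex' and a
placeholder project:

    (require 'llm-vertex)
    ;; `llm-name' now reports "Vertex Gemini" instead of "Gemini".
    (llm-name (make-llm-vertex :project "my-gcp-project"))
    ;; => "Vertex Gemini"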