branch: externals/llm
commit 366d365ecf8725be2593501bc33fe8ef762c8d0b
Author: Andrew Hyatt <ahy...@gmail.com>
Commit: GitHub <nore...@github.com>

    Add Open AI's GPT-4.1 models (#186)
---
 NEWS.org      |  1 +
 llm-models.el | 28 +++++++++++++++++++++++++++-
 llm-openai.el |  9 +++++----
 3 files changed, 33 insertions(+), 5 deletions(-)

diff --git a/NEWS.org b/NEWS.org
index 0f3b7cd859..d7970e0278 100644
--- a/NEWS.org
+++ b/NEWS.org
@@ -2,6 +2,7 @@
 - Add =llm-ollama-authed= provider, which is like Ollama but takes a key.
 - Set Gemini 2.5 Pro to be the default Gemini model
 - Fix =llm-batch-embeddings-async= so it returns all embeddings
+- Add Open AI 4.1, o3, Gemini 2.5 Flash
 * Version 0.24.2
 - Fix issue with some Open AI compatible providers needing models to be passed by giving a non-nil default.
 - Add Gemini 2.5 Pro
diff --git a/llm-models.el b/llm-models.el
index dbedeced80..be81782bf5 100644
--- a/llm-models.el
+++ b/llm-models.el
@@ -80,6 +80,26 @@ REGEX is a regular expression that can be used to identify the model, uniquely (
     :capabilities '(generation)
     :context-length 30000
     :regex "o3-mini")
+   (make-llm-model
+    :name "o4 Mini" :symbol 'o4-mini
+    :capabilities '(generation tool-use image-input json-response)
+    :context-length 30000
+    :regex "o4-mini")
+   (make-llm-model
+    :name "o3" :symbol 'o3
+    :capabilities '(generation tool-use image-input json-response)
+    :context-length 30000
+    :regex "o3\\'")
+   (make-llm-model
+    :name "GPT-4.1" :symbol 'gpt-4.1
+    :capabilities '(generation tool-use image-input json-response)
+    :context-length 30000
+    :regex "gpt-4\\.1$")
+   (make-llm-model
+    :name "GPT-4.1 Nano" :symbol 'gpt-4.1-nano
+    :capabilities '(generation tool-use image-input json-response)
+    :context-length 30000
+    :regex "gpt-4\\.1-nano")
    (make-llm-model
     :name "GPT-4 Turbo" :symbol 'gpt-4-turbo
     :capabilities '(generation tool-use image-input)
@@ -144,9 +164,15 @@ REGEX is a regular expression that can be used to identify the model, uniquely (
     :regex "gemini-2\\.0-flash-thinking")
    (make-llm-model
     :name "Gemini 2.5 Pro" :symbol 'gemini-2.5-pro
-    :capabilities '(generation tool-use image-input audio-input video-input)
+    :capabilities '(generation tool-use image-input audio-input video-input json-response)
     :context-length 1048576
     :regex "gemini-2\\.5-pro")
+   (make-llm-model
+    :name "Gemini 2.5 Flash" :symbol 'gemini-2.5-flash
+    :capabilities '(generation tool-use image-input audio-input video-input json-response
+                               pdf-input caching)
+    :context-length 1048576
+    :regex "gemini-2\\.5-flash")
    (make-llm-model
     :name "Gemini 2.0 Pro" :symbol 'gemini-2.0-pro
     :capabilities '(generation tool-use image-input audio-input video-input)
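
The new regexes are anchored so the short names do not absorb their longer variants: "o3\\'" only matches a model name ending in "o3", and "gpt-4\\.1$" leaves "gpt-4.1-nano" to its own entry. A minimal sketch of the intended lookups (assuming llm-models-match returns the entry whose :regex matches the supplied model name, as the code in llm-openai.el below suggests); the results are illustrative, not output from a run:

(require 'llm-models)

;; Anchored regexes keep the base names from shadowing the variants.
(llm-model-name (llm-models-match "gpt-4.1"))       ; expected: "GPT-4.1"
(llm-model-name (llm-models-match "gpt-4.1-nano"))  ; expected: "GPT-4.1 Nano"
(llm-model-name (llm-models-match "o3"))            ; expected: "o3"
;; "o3-mini" should still resolve to the pre-existing o3-mini entry,
;; because "o3\\'" requires the name to end in "o3".
(llm-models-match "o3-mini")
;; The new Gemini 2.5 Flash entry advertises json-response, pdf-input
;; and caching among its capabilities.
(llm-model-capabilities (llm-models-match "gemini-2.5-flash"))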
diff --git a/llm-openai.el b/llm-openai.el
index d08139e884..6339fedc78 100644
--- a/llm-openai.el
+++ b/llm-openai.el
@@ -370,10 +370,11 @@ RESPONSE can be nil if the response is complete."
   (llm-provider-utils-model-token-limit (llm-openai-chat-model provider)))
 
 (cl-defmethod llm-capabilities ((provider llm-openai))
-  (append '(streaming embeddings tool-use streaming-tool-use json-response model-list)
-          (when-let ((model (llm-models-match (llm-openai-chat-model provider))))
-            (seq-intersection (llm-model-capabilities model)
-                              '(image-input)))))
+  (seq-uniq
+   (append '(streaming embeddings tool-use streaming-tool-use json-response model-list)
+           (when-let ((model (llm-models-match (llm-openai-chat-model provider))))
+             (seq-intersection (llm-model-capabilities model)
+                               '(image-input))))))
 
 (cl-defmethod llm-capabilities ((provider llm-openai-compatible))
   (append '(streaming model-list)
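
The seq-uniq wrap keeps the reported capability list duplicate-free should the model-derived capabilities ever overlap the static list. A minimal sketch of the expected effect for one of the new models, assuming make-llm-openai with :key and :chat-model is the usual way to construct a provider in this package; the key is a placeholder and the result is illustrative:

(require 'llm-openai)

;; Hypothetical provider; "sk-..." stands in for a real API key.
(let ((provider (make-llm-openai :key "sk-..." :chat-model "gpt-4.1")))
  (llm-capabilities provider))
;; expected to include streaming, embeddings, tool-use,
;; streaming-tool-use, json-response, model-list and image-input
;; (the last contributed by the GPT-4.1 model entry), each at most once.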
