branch: elpa/gptel
commit 1e27322633b97145cef3c6f432e1d71ef5db518f
Author: Karthik Chikmagalur <karthikchikmaga...@gmail.com>
Commit: Karthik Chikmagalur <karthikchikmaga...@gmail.com>

    gptel-openai: Update tool call options for reasoning models
    
    * gptel-openai.el (gptel--request-data): When using the o* series
    of "reasoning" OpenAI models, ensure that the temperature and
    parallel tool calling options are disabled.  This is in addition
    to the previously added handling that disables streaming for
    these models.
---
 gptel-openai.el | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/gptel-openai.el b/gptel-openai.el
index d5746f6ec8..5126941de3 100644
--- a/gptel-openai.el
+++ b/gptel-openai.el
@@ -270,8 +270,10 @@ Mutate state INFO with response metadata."
   (let ((prompts-plist
          `(:model ,(gptel--model-name gptel-model)
            :messages [,@prompts]
-           :stream ,(or gptel-stream :json-false))))
-    (when gptel-temperature
+           :stream ,(or gptel-stream :json-false)))
+        (reasoning-model-p ; TODO: Embed this capability in the model's properties
+         (memq gptel-model '(o1 o1-preview o1-mini o3-mini o3))))
+    (when (and gptel-temperature (not reasoning-model-p))
       (plist-put prompts-plist :temperature gptel-temperature))
     (when gptel-use-tools
       (when (eq gptel-use-tools 'force)
@@ -279,12 +281,13 @@ Mutate state INFO with response metadata."
       (when gptel-tools
         (plist-put prompts-plist :tools
                    (gptel--parse-tools backend gptel-tools))
-        (plist-put prompts-plist :parallel_tool_calls t)))
+        (unless reasoning-model-p
+          (plist-put prompts-plist :parallel_tool_calls t))))
     (when gptel-max-tokens
       ;; HACK: The OpenAI API has deprecated max_tokens, but we still need it
       ;; for OpenAI-compatible APIs like GPT4All (#485)
-      (plist-put prompts-plist (if (memq gptel-model '(o1 o1-preview o1-mini o3-mini))
-                                   :max_completion_tokens :max_tokens)
+      (plist-put prompts-plist
+                 (if reasoning-model-p :max_completion_tokens :max_tokens)
                  gptel-max-tokens))
     ;; Merge request params with model and backend params.
     (gptel--merge-plists

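For reference, here is a minimal standalone sketch of the gating logic
introduced above.  The helper name and the example values are
hypothetical and not part of gptel; it only mirrors the idea that
reasoning models drop :temperature and :parallel_tool_calls and take
:max_completion_tokens instead of :max_tokens.

;; Hypothetical helper illustrating the reasoning-model gating:
(defun my/openai-extra-options (model temperature max-tokens)
  "Return extra request options for MODEL as a plist.
TEMPERATURE and MAX-TOKENS may be nil to omit them."
  (let ((reasoning-model-p
         (memq model '(o1 o1-preview o1-mini o3-mini o3)))
        (opts nil))
    ;; Reasoning models do not accept a temperature setting.
    (when (and temperature (not reasoning-model-p))
      (setq opts (plist-put opts :temperature temperature)))
    ;; Reasoning models expect max_completion_tokens, not max_tokens.
    (when max-tokens
      (setq opts (plist-put opts
                            (if reasoning-model-p
                                :max_completion_tokens :max_tokens)
                            max-tokens)))
    opts))

;; (my/openai-extra-options 'o3-mini 0.7 1024)
;;   => (:max_completion_tokens 1024)
;; (my/openai-extra-options 'gpt-4o 0.7 1024)
;;   => (:temperature 0.7 :max_tokens 1024)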