branch: externals/llm
commit 886e20c51969a4e28ab62a4fcf9ff31d8e4a42bd
Author: Andrew Hyatt <ahy...@gmail.com>
Commit: Andrew Hyatt <ahy...@gmail.com>
    Simplify and stop making assumptions about Open AI streaming

    Other Open AI-compatible services may differ slightly, making my
    regexes not work.  We still use regexes, but they are now much simpler
    and should conform to the event-stream spec.

    This should fix https://github.com/ahyatt/llm/issues/32
---
 NEWS.org      | 1 +
 llm-openai.el | 6 ++++--
 2 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/NEWS.org b/NEWS.org
index 398a4f9b02..4c86b2bcf2 100644
--- a/NEWS.org
+++ b/NEWS.org
@@ -1,5 +1,6 @@
 * Version 0.12.1
 - Fix issue in =llm-ollama= with not using provider host for sync embeddings.
+- Fix issue in =llm-openai= where we were incompatible with some Open AI-compatible backends due to assumptions about inconsequential JSON details.
 * Version 0.12.0
 - Add provider =llm-claude=, for Anthropic's Claude.
 * Version 0.11.0
diff --git a/llm-openai.el b/llm-openai.el
index 5927bf60b4..778dba8cf1 100644
--- a/llm-openai.el
+++ b/llm-openai.el
@@ -265,7 +265,7 @@ them from 1 to however many are sent.")
         (last-response llm-openai-last-response))
     (with-temp-buffer
       (insert response)
-      (let* ((complete-rx (rx (seq "finish_reason\":" (1+ (or ?\[ ?\] alpha)) "}]}" line-end)))
+      (let* ((complete-rx (rx (seq line-start "data: ")))
             (end-pos (save-excursion (goto-char (point-max))
                                      (when (search-backward-regexp
                                             complete-rx
@@ -273,7 +273,9 @@ them from 1 to however many are sent.")
                                        (line-end-position)))))
        (when end-pos
          (let* ((all-lines (seq-filter
-                            (lambda (line) (string-match-p complete-rx line))
+                            (lambda (line) (and (string-match-p complete-rx line)
+                                                (not (string-match-p (rx (seq line-start "data: [DONE]"))
+                                                                     line))))
                             (split-string (buffer-substring-no-properties 1 end-pos) "\n")))
                (processed-lines (mapcar (lambda (line)
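
For context, the new approach keeps only event-stream "data: " lines and drops
the terminal "data: [DONE]" sentinel before any JSON parsing.  Below is a
minimal Emacs Lisp sketch of that filtering step, separate from the patch
itself; the function name llm-sse-data-lines is hypothetical and not part of
llm-openai.el.

  ;; Minimal sketch (assumed helper, not in the patch): reduce a raw
  ;; server-sent-events response to its "data: " payload lines, skipping
  ;; the "data: [DONE]" sentinel.
  (require 'seq)
  (defun llm-sse-data-lines (response)
    "Return the \"data: \" lines in RESPONSE, excluding the [DONE] sentinel."
    (seq-filter
     (lambda (line)
       (and (string-match-p (rx line-start "data: ") line)
            (not (string-match-p (rx line-start "data: [DONE]") line))))
     (split-string response "\n")))

  ;; Example:
  ;; (llm-sse-data-lines "data: {\"choices\":[]}\n\ndata: [DONE]\n")
  ;;   => ("data: {\"choices\":[]}")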