This is an automated email from the ASF dual-hosted git repository.

baoyuan pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/apisix.git


The following commit(s) were added to refs/heads/master by this push:
     new 6dd766418 fix(ai-proxy): set llm variables default value to 0 (#12549)
6dd766418 is described below

commit 6dd766418b2168751f57233afaf1bfc6f0ceb131
Author: Ashish Tiwari <[email protected]>
AuthorDate: Fri Aug 29 06:54:25 2025 +0530

    fix(ai-proxy): set llm variables default value to 0 (#12549)
---
 apisix/cli/ngx_tpl.lua                    |   6 +-
 apisix/plugins/ai-drivers/openai-base.lua |   2 +-
 t/APISIX.pm                               |   6 +-
 t/cli/test_access_log.sh                  |  26 ++++++++
 t/plugin/ai-proxy3.t                      | 105 ++++++++++++++++++++++++++++++
 5 files changed, 138 insertions(+), 7 deletions(-)

diff --git a/apisix/cli/ngx_tpl.lua b/apisix/cli/ngx_tpl.lua
index 296a651a5..33be2ea75 100644
--- a/apisix/cli/ngx_tpl.lua
+++ b/apisix/cli/ngx_tpl.lua
@@ -809,10 +809,10 @@ http {
             set $llm_content_risk_level         '';
             set $request_type               'traditional_http';
 
-            set $llm_time_to_first_token        '';
+            set $llm_time_to_first_token        '0';
             set $llm_model                      '';
-            set $llm_prompt_tokens              '';
-            set $llm_completion_tokens          '';
+            set $llm_prompt_tokens              '0';
+            set $llm_completion_tokens          '0';
 
 
             access_by_lua_block {
diff --git a/apisix/plugins/ai-drivers/openai-base.lua b/apisix/plugins/ai-drivers/openai-base.lua
index 0b105d729..6ed0bc404 100644
--- a/apisix/plugins/ai-drivers/openai-base.lua
+++ b/apisix/plugins/ai-drivers/openai-base.lua
@@ -97,7 +97,7 @@ local function read_response(ctx, res)
                 return
             end
 
-            if ctx.var.llm_time_to_first_token == "" then
+            if ctx.var.llm_time_to_first_token == "0" then
                 ctx.var.llm_time_to_first_token = math.floor(
                                                 (ngx_now() - ctx.llm_request_start_time) * 1000)
             end
diff --git a/t/APISIX.pm b/t/APISIX.pm
index 3c82110a3..57ae00f80 100644
--- a/t/APISIX.pm
+++ b/t/APISIX.pm
@@ -860,10 +860,10 @@ _EOC_
             set \$llm_content_risk_level         '';
             set \$request_type               'traditional_http';
 
-            set \$llm_time_to_first_token        '';
+            set \$llm_time_to_first_token        '0';
             set \$llm_model                      '';
-            set \$llm_prompt_tokens              '';
-            set \$llm_completion_tokens          '';
+            set \$llm_prompt_tokens              '0';
+            set \$llm_completion_tokens          '0';
 
             access_log $apisix_home/t/servroot/logs/access.log main;
 
diff --git a/t/cli/test_access_log.sh b/t/cli/test_access_log.sh
index 736b98775..ce08e1dbb 100755
--- a/t/cli/test_access_log.sh
+++ b/t/cli/test_access_log.sh
@@ -138,6 +138,32 @@ make stop
 
 echo "passed: access log with JSON format"
 
+# access log with unset llm_token JSON format
+
+echo '
+nginx_config:
+  http:
+    access_log_format: |-
+      {"@timestamp": "$time_iso8601", "client_ip": "$remote_addr", "status": "$status", "llm_prompt_tokens": $llm_prompt_tokens}
+    access_log_format_escape: json
+' > conf/config.yaml
+
+make init
+make run
+sleep 0.1
+curl http://127.0.0.1:9080/hello2
+sleep 4
+tail -n 1 logs/access.log > output.log
+
+if [ `grep -c '"llm_prompt_tokens": 0' output.log` -eq '0' ]; then
+    echo "failed: invalid JSON log in access log"
+    exit 1
+fi
+
+make stop
+
+echo "passed: access log with JSON format and llm_prompt_tokens"
+
 # check uninitialized variable in access log when access admin
 git checkout conf/config.yaml
 
diff --git a/t/plugin/ai-proxy3.t b/t/plugin/ai-proxy3.t
index ec771f690..e05ce01c0 100644
--- a/t/plugin/ai-proxy3.t
+++ b/t/plugin/ai-proxy3.t
@@ -224,3 +224,108 @@ POST /anything
 --- response_body eval
 qr/.*assistant.*/
 --- no_error_log
+
+
+
+=== TEST 5: create a ai-proxy-multi route with delay streaming ai endpoint(every event delay 200ms)
+--- config
+    location /t {
+        content_by_lua_block {
+            local t = require("lib.test_admin").test
+            local code, body = t('/apisix/admin/routes/1',
+                 ngx.HTTP_PUT,
+                 [[{
+                    "uri": "/anything",
+                    "plugins": {
+                        "ai-proxy-multi": {
+                            "instances": [
+                                {
+                                    "name": "self-hosted",
+                                    "provider": "openai-compatible",
+                                    "weight": 1,
+                                    "auth": {
+                                        "header": {
+                                            "Authorization": "Bearer token"
+                                        }
+                                    },
+                                    "options": {
+                                        "model": "gpt-3.5-turbo",
+                                        "stream": true
+                                    },
+                                    "override": {
+                                        "endpoint": "http://localhost:7737/v1/chat/completions?delay=true"
+                                    }
+                                }
+                            ],
+                            "ssl_verify": false
+                        }
+                    }
+                 }]]
+            )
+            if code >= 300 then
+                ngx.status = code
+            end
+            ngx.say(body)
+        }
+    }
+--- response_body
+passed
+
+
+
+=== TEST 6: assert access log contains right llm variable
+--- config
+    location /t {
+        content_by_lua_block {
+            local http = require("resty.http")
+            local httpc = http.new()
+            local core = require("apisix.core")
+            local ok, err = httpc:connect({
+                scheme = "http",
+                host = "localhost",
+                port = ngx.var.server_port,
+            })
+            if not ok then
+                ngx.status = 500
+                ngx.say(err)
+                return
+            end
+            local params = {
+                method = "POST",
+                headers = {
+                    ["Content-Type"] = "application/json",
+                },
+                path = "/anything",
+                body = [[{
+                    "messages": [
+                        { "role": "system", "content": "some content" }
+                    ],
+                    "model": "gpt-4"
+                }]],
+            }
+            local res, err = httpc:request(params)
+            if not res then
+                ngx.status = 500
+                ngx.say(err)
+                return
+            end
+            local final_res = {}
+            local inspect = require("inspect")
+            while true do
+                local chunk, err = res.body_reader() -- will read chunk by chunk
+                if err then
+                    core.log.error("failed to read response chunk: ", err)
+                    break
+                end
+                if not chunk then
+                    break
+                end
+                core.table.insert_tail(final_res, chunk)
+            end
+            ngx.print(#final_res .. final_res[6])
+        }
+    }
+--- response_body_like eval
+qr/6data: \[DONE\]\n\n/
+--- access_log eval
+qr/.*gpt-3.5-turbo 2\d\d 15 20.*/

Reply via email to