bneradt commented on code in PR #12852:
URL: https://github.com/apache/trafficserver/pull/12852#discussion_r2806314761


##########
src/proxy/http/HttpTransact.cc:
##########
@@ -7300,9 +7456,19 @@ HttpTransact::what_is_document_freshness(State *s, 
HTTPHdr *client_request, HTTP
   uint32_t   cc_mask, cooked_cc_mask;
   uint32_t   os_specifies_revalidate;
 
-  if (s->cache_open_write_fail_action & 
static_cast<MgmtByte>(CacheOpenWriteFailAction_t::STALE_ON_REVALIDATE)) {
+  // This check works for STALE_ON_REVALIDATE(0x2), 
ERROR_ON_MISS_STALE_ON_REVALIDATE(0x3), and
+  // READ_RETRY_STALE_ON_REVALIDATE(0x6).
+  // We return FRESH (not STALE) intentionally to bypass the revalidation code 
path in
+  // HandleCacheOpenReadHit. Returning STALE would trigger origin server 
contact for revalidation,
+  // but for write lock failure scenarios we want to serve the stale content 
directly without
+  // revalidation. The serving_stale_due_to_write_lock flag tracks that we're 
actually serving
+  // stale content, so VIA strings and statistics can be correctly attributed.
+  // When evaluate_actual_freshness is true, skip this short-circuit to get 
the real freshness.
+  if (!evaluate_actual_freshness &&
+      (s->cache_open_write_fail_action & 
static_cast<MgmtByte>(CacheOpenWriteFailAction_t::STALE_ON_REVALIDATE))) {
     if (is_stale_cache_response_returnable(s)) {
-      TxnDbg(dbg_ctl_http_match, "cache_serve_stale_on_write_lock_fail, return 
FRESH");
+      TxnDbg(dbg_ctl_http_match, "cache_serve_stale_on_write_lock_fail, return 
FRESH to bypass revalidation");
+      s->serving_stale_due_to_write_lock = true;

Review Comment:
   This looks like an unexpected mutation of state inside a function that otherwise 
appears to be read-only. Perhaps move this side effect out of the function and into 
the caller?



##########
src/proxy/http/HttpTransact.cc:
##########
@@ -3207,21 +3234,102 @@ HttpTransact::handle_cache_write_lock(State *s)
   }
 
   if (s->cache_info.write_lock_state == CacheWriteLock_t::READ_RETRY) {
-    TxnDbg(dbg_ctl_http_error, "calling hdr_info.server_request.destroy");
-    s->hdr_info.server_request.destroy();
-    HandleCacheOpenReadHitFreshness(s);
-  } else {
-    StateMachineAction_t next;
-    next = how_to_open_connection(s);
-    if (next == StateMachineAction_t::ORIGIN_SERVER_OPEN || next == 
StateMachineAction_t::ORIGIN_SERVER_RAW_OPEN) {
-      s->next_action = next;
-      TRANSACT_RETURN(next, nullptr);
+    // For READ_RETRY with cached object, evaluate actual freshness to decide 
the path.
+    // If the initial lookup was a MISS, cache_lookup_complete_deferred is 
true and we
+    // need to fire the hook here with the final result. If the initial lookup 
was a
+    // HIT_STALE (revalidation case), the hook already fired and deferred is 
false.
+    CacheHTTPInfo *obj = s->cache_info.object_read;
+    if (obj != nullptr) {
+      // Restore request/response times from cached object for freshness 
calculations and Age header.
+      // Similar to HandleCacheOpenReadHitFreshness, handle clock skew by 
capping times.
+      s->request_sent_time      = obj->request_sent_time_get();
+      s->response_received_time = obj->response_received_time_get();
+      s->request_sent_time      = std::min(s->client_request_time, 
s->request_sent_time);
+      s->response_received_time = std::min(s->client_request_time, 
s->response_received_time);
+
+      // Evaluate actual document freshness. Pass true to skip the 
STALE_ON_REVALIDATE
+      // short-circuit so we get the real freshness, not the "return FRESH to 
bypass revalidation" result.
+      Freshness_t freshness = what_is_document_freshness(s, 
&s->hdr_info.client_request, obj->response_get(), true);
+
+      if (freshness == Freshness_t::FRESH || freshness == 
Freshness_t::WARNING) {
+        // Object is fresh - serve it from cache for both action 5 and 6.
+        // This is the main benefit of request collapsing: we found a valid 
cached object.
+        // Clear stale-related state in case it was set during initial stale 
short-circuit.
+        s->serving_stale_due_to_write_lock = false;
+        s->cache_info.stale_fallback       = nullptr;
+
+        // Destroy server_request since we're serving from cache.
+        TxnDbg(dbg_ctl_http_trans, "READ_RETRY: found fresh object, serving 
from cache");
+        s->hdr_info.server_request.destroy();
+        s->cache_lookup_result =
+          (freshness == Freshness_t::FRESH) ? CacheLookupResult_t::HIT_FRESH : 
CacheLookupResult_t::HIT_WARNING;
+
+        if (s->cache_lookup_complete_deferred) {
+          s->cache_lookup_complete_deferred = false;
+          TRANSACT_RETURN(StateMachineAction_t::API_CACHE_LOOKUP_COMPLETE, 
HandleCacheOpenReadHit);
+        }
+        HandleCacheOpenReadHit(s);
+      } else {
+        // Object is stale. Save it as potential fallback, then trigger actual 
cache retry.
+        // HandleCacheOpenReadMiss will serve stale fallback (action 6) or go 
to origin (action 5).
+        if (is_stale_cache_response_returnable(s)) {
+          s->cache_info.stale_fallback = s->cache_info.object_read;

Review Comment:
   Claude flags this as a potential use-after-free:
   
   object_read is a non-owning pointer to CacheVC::alternate, memory owned by 
the CacheVC. When the new CACHE_LOOKUP is triggered, the old CacheVC can be 
destroyed (e.g., in HttpCacheSM::state_cache_open_read, the old cache_read_vc 
is overwritten without being explicitly closed in the non-redirect path). At 
that point, stale_fallback becomes a dangling pointer. Later, in 
HandleCacheOpenReadMiss, the code does:
   
   ```cpp
   s->cache_info.object_read = s->cache_info.stale_fallback;
   ```
   
   This would dereference freed memory. The fix should deep-copy the stale 
object into owned storage (e.g., HTTPInfo stale_fallback_store as a value 
member, not a pointer, and copy via CacheHTTPInfo::copy()) before triggering 
the new lookup.
   



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to