aminghadersohi commented on code in PR #37200:
URL: https://github.com/apache/superset/pull/37200#discussion_r2770698314
##########
superset/mcp_service/middleware.py:
##########
@@ -755,3 +759,167 @@ def _filter_response(self, response: Any, object_type:
str, user: Any) -> Any:
"Unknown response type for field filtering: %s", type(response)
)
return response
+
+
class ResponseSizeGuardMiddleware(Middleware):
    """
    Middleware that prevents oversized responses from overwhelming LLM clients.

    When a tool response exceeds the configured token limit, this middleware
    intercepts it and raises a ToolError whose message includes suggestions
    for reducing the response size.

    This is critical for protecting LLM clients like Claude Desktop which can
    crash or become unresponsive when receiving extremely large responses.

    Configuration via MCP_RESPONSE_SIZE_CONFIG in superset_config.py:
    - enabled: Toggle the guard on/off (default: True)
    - token_limit: Maximum estimated tokens per response (default: 25,000)
    - warn_threshold_pct: Log warnings above this % of limit (default: 80%)
    - excluded_tools: Tools to skip checking
    """

    def __init__(
        self,
        token_limit: int = DEFAULT_TOKEN_LIMIT,
        warn_threshold_pct: int = DEFAULT_WARN_THRESHOLD_PCT,
        excluded_tools: list[str] | None = None,
    ) -> None:
        """
        Args:
            token_limit: Maximum estimated tokens allowed per response.
            warn_threshold_pct: Percentage of ``token_limit`` above which a
                warning is logged (but the response is still allowed through).
            excluded_tools: Tool names that are never size-checked.
        """
        # Coerce defensively: values sourced from superset_config.py may
        # arrive as strings or floats, which would break the comparisons
        # in on_call_tool (same class of bug fixed with int() in the
        # create_response_size_guard_middleware factory).
        self.token_limit = int(token_limit)
        self.warn_threshold_pct = int(warn_threshold_pct)
        self.warn_threshold = int(self.token_limit * self.warn_threshold_pct / 100)
        self.excluded_tools = set(excluded_tools or [])

    async def on_call_tool(
        self,
        context: MiddlewareContext,
        call_next: Callable[[MiddlewareContext], Awaitable[Any]],
    ) -> Any:
        """Check response size after tool execution.

        Runs the tool, estimates the token size of its response, logs a
        warning when the size approaches the limit, and raises a ToolError
        (with reduction suggestions) when the limit is exceeded.

        Raises:
            ToolError: If the estimated response size exceeds the limit.
        """
        tool_name = getattr(context.message, "name", "unknown")

        # Skip excluded tools
        if tool_name in self.excluded_tools:
            return await call_next(context)

        # Execute the tool
        response = await call_next(context)

        # Local import keeps the estimation utilities off the module import
        # path (estimate only when the guard is active for this tool).
        from superset.mcp_service.utils.token_utils import (
            estimate_response_tokens,
            format_size_limit_error,
        )

        # Estimate response token count (guard against huge responses
        # causing OOM during serialization).
        try:
            estimated_tokens = estimate_response_tokens(response)
        except MemoryError as me:
            logger.warning(
                "MemoryError while estimating tokens for %s: %s", tool_name, me
            )
            # Treat as over limit to avoid further serialization
            estimated_tokens = self.token_limit + 1
        except Exception as e:  # noqa: BLE001
            logger.warning(
                "Failed to estimate response tokens for %s: %s", tool_name, e
            )
            # Conservative fallback: block rather than risk OOM
            estimated_tokens = self.token_limit + 1

        # Log warning if approaching limit
        if estimated_tokens > self.warn_threshold:
            logger.warning(
                "Response size warning for %s: ~%d tokens (%.0f%% of %d limit)",
                tool_name,
                estimated_tokens,
                (estimated_tokens / self.token_limit) * 100,
                self.token_limit,
            )

        # Block if over limit
        if estimated_tokens > self.token_limit:
            # Extract params for smart suggestions
            params = getattr(context.message, "params", {}) or {}

            # Log the blocked response
            logger.error(
                "Response blocked for %s: ~%d tokens exceeds limit of %d",
                tool_name,
                estimated_tokens,
                self.token_limit,
            )

            # Log to event logger for monitoring; failures here must not
            # mask the size-limit error raised below.
            try:
                user_id = get_user_id()
                event_logger.log(
                    user_id=user_id,
                    action="mcp_response_size_exceeded",
                    curated_payload={
                        "tool": tool_name,
                        "estimated_tokens": estimated_tokens,
                        "token_limit": self.token_limit,
                        "params": params,
                    },
                )
            except Exception as log_error:  # noqa: BLE001
                logger.warning("Failed to log size exceeded event: %s", log_error)

            # Generate helpful error message with suggestions.
            # Avoid passing the full `response` (which may be huge) into the
            # formatter to prevent large-memory operations during error
            # formatting.
            error_message = format_size_limit_error(
                tool_name=tool_name,
                params=params,
                estimated_tokens=estimated_tokens,
                token_limit=self.token_limit,
                response=None,
            )

            raise ToolError(error_message)

        return response
+
+
+def create_response_size_guard_middleware() -> ResponseSizeGuardMiddleware |
None:
+ """
+ Factory function to create ResponseSizeGuardMiddleware from config.
+
+ Reads configuration from Flask app's MCP_RESPONSE_SIZE_CONFIG.
+ Returns None if the guard is disabled.
+
+ Returns:
+ ResponseSizeGuardMiddleware instance or None if disabled
+ """
+ try:
+ from superset.mcp_service.flask_singleton import get_flask_app
+ from superset.mcp_service.mcp_config import MCP_RESPONSE_SIZE_CONFIG
+
+ flask_app = get_flask_app()
+
+ # Get config from Flask app, falling back to defaults
+ config = flask_app.config.get(
+ "MCP_RESPONSE_SIZE_CONFIG", MCP_RESPONSE_SIZE_CONFIG
+ )
+
+ if not config.get("enabled", True):
+ logger.info("Response size guard is disabled")
+ return None
+
+ middleware = ResponseSizeGuardMiddleware(
+ token_limit=config.get("token_limit", DEFAULT_TOKEN_LIMIT),
+ warn_threshold_pct=config.get(
+ "warn_threshold_pct", DEFAULT_WARN_THRESHOLD_PCT
+ ),
Review Comment:
Fixed. The factory function now uses `int()` coercion:
`token_limit=int(config.get("token_limit", DEFAULT_TOKEN_LIMIT))` and same for
`warn_threshold_pct`.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]