Changed code to support older Python versions

Malasaur 2025-12-01 23:27:09 +01:00
parent eb92d2d36f
commit 582458cdd0
5027 changed files with 794942 additions and 4 deletions

View file

@@ -0,0 +1,47 @@
from sentry_sdk.integrations import DidNotEnable, Integration
try:
import pydantic_ai # type: ignore
except ImportError:
raise DidNotEnable("pydantic-ai not installed")
from .patches import (
_patch_agent_run,
_patch_graph_nodes,
_patch_model_request,
_patch_tool_execution,
)
class PydanticAIIntegration(Integration):
identifier = "pydantic_ai"
origin = f"auto.ai.{identifier}"
def __init__(self, include_prompts=True):
# type: (bool) -> None
"""
Initialize the Pydantic AI integration.
Args:
include_prompts: Whether to include prompts and messages in span data.
Requires send_default_pii=True. Defaults to True.
"""
self.include_prompts = include_prompts
@staticmethod
def setup_once():
# type: () -> None
"""
Set up the pydantic-ai integration.
This patches the key methods in pydantic-ai to create Sentry spans for:
- Agent invocations (Agent.run methods)
- Model requests (AI client calls)
- Tool executions
"""
_patch_agent_run()
_patch_graph_nodes()
_patch_model_request()
_patch_tool_execution()
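
Usage is a one-liner at init time. A minimal sketch, assuming the package lives at sentry_sdk.integrations.pydantic_ai as the relative imports suggest (the DSN below is a placeholder):

import sentry_sdk
from sentry_sdk.integrations.pydantic_ai import PydanticAIIntegration  # assumed module path

sentry_sdk.init(
    dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
    send_default_pii=True,  # include_prompts has no effect without this
    integrations=[PydanticAIIntegration(include_prompts=True)],
)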

View file

@@ -0,0 +1 @@
SPAN_ORIGIN = "auto.ai.pydantic_ai"

View file

@@ -0,0 +1,4 @@
from .agent_run import _patch_agent_run # noqa: F401
from .graph_nodes import _patch_graph_nodes # noqa: F401
from .model_request import _patch_model_request # noqa: F401
from .tools import _patch_tool_execution # noqa: F401

View file

@@ -0,0 +1,215 @@
from functools import wraps
import sentry_sdk
from sentry_sdk.integrations import DidNotEnable
from ..spans import invoke_agent_span, update_invoke_agent_span
from ..utils import _capture_exception, pop_agent, push_agent
from typing import TYPE_CHECKING
try:
from pydantic_ai.agent import Agent # type: ignore
except ImportError:
raise DidNotEnable("pydantic-ai not installed")
if TYPE_CHECKING:
from typing import Any, Callable, Optional
class _StreamingContextManagerWrapper:
"""Wrapper for streaming methods that return async context managers."""
def __init__(
self,
agent,
original_ctx_manager,
user_prompt,
model,
model_settings,
is_streaming=True,
):
# type: (Any, Any, Any, Any, Any, bool) -> None
self.agent = agent
self.original_ctx_manager = original_ctx_manager
self.user_prompt = user_prompt
self.model = model
self.model_settings = model_settings
self.is_streaming = is_streaming
self._isolation_scope = None # type: Any
self._span = None # type: Optional[sentry_sdk.tracing.Span]
self._result = None # type: Any
async def __aenter__(self):
# type: () -> Any
# Set up isolation scope and invoke_agent span
self._isolation_scope = sentry_sdk.isolation_scope()
self._isolation_scope.__enter__()
# Create invoke_agent span (will be closed in __aexit__)
self._span = invoke_agent_span(
self.user_prompt,
self.agent,
self.model,
self.model_settings,
self.is_streaming,
)
self._span.__enter__()
# Push agent to contextvar stack after span is successfully created and entered
# This ensures proper pairing with pop_agent() in __aexit__ even if exceptions occur
push_agent(self.agent, self.is_streaming)
# Enter the original context manager
result = await self.original_ctx_manager.__aenter__()
self._result = result
return result
async def __aexit__(self, exc_type, exc_val, exc_tb):
# type: (Any, Any, Any) -> None
try:
# Exit the original context manager first
await self.original_ctx_manager.__aexit__(exc_type, exc_val, exc_tb)
            # Update span with output if successful
            if exc_type is None and self._result and hasattr(self._result, "output"):
                if self._span is not None:
                    update_invoke_agent_span(self._span, self._result.output)
finally:
# Pop agent from contextvar stack
pop_agent()
# Clean up invoke span
if self._span:
self._span.__exit__(exc_type, exc_val, exc_tb)
# Clean up isolation scope
if self._isolation_scope:
self._isolation_scope.__exit__(exc_type, exc_val, exc_tb)
def _create_run_wrapper(original_func, is_streaming=False):
# type: (Callable[..., Any], bool) -> Callable[..., Any]
"""
Wraps the Agent.run method to create an invoke_agent span.
Args:
original_func: The original run method
        is_streaming: Whether this is a streaming method
"""
@wraps(original_func)
async def wrapper(self, *args, **kwargs):
# type: (Any, *Any, **Any) -> Any
# Isolate each workflow so that when agents are run in asyncio tasks they
# don't touch each other's scopes
with sentry_sdk.isolation_scope():
# Extract parameters for the span
user_prompt = kwargs.get("user_prompt") or (args[0] if args else None)
model = kwargs.get("model")
model_settings = kwargs.get("model_settings")
# Create invoke_agent span
with invoke_agent_span(
user_prompt, self, model, model_settings, is_streaming
) as span:
# Push agent to contextvar stack after span is successfully created and entered
# This ensures proper pairing with pop_agent() in finally even if exceptions occur
push_agent(self, is_streaming)
try:
result = await original_func(self, *args, **kwargs)
# Update span with output
output = result.output if hasattr(result, "output") else None
update_invoke_agent_span(span, output)
return result
except Exception as exc:
_capture_exception(exc)
raise exc from None
finally:
# Pop agent from contextvar stack
pop_agent()
return wrapper
def _create_streaming_wrapper(original_func):
# type: (Callable[..., Any]) -> Callable[..., Any]
"""
Wraps run_stream method that returns an async context manager.
"""
@wraps(original_func)
def wrapper(self, *args, **kwargs):
# type: (Any, *Any, **Any) -> Any
# Extract parameters for the span
user_prompt = kwargs.get("user_prompt") or (args[0] if args else None)
model = kwargs.get("model")
model_settings = kwargs.get("model_settings")
# Call original function to get the context manager
original_ctx_manager = original_func(self, *args, **kwargs)
# Wrap it with our instrumentation
return _StreamingContextManagerWrapper(
agent=self,
original_ctx_manager=original_ctx_manager,
user_prompt=user_prompt,
model=model,
model_settings=model_settings,
is_streaming=True,
)
return wrapper
def _create_streaming_events_wrapper(original_func):
# type: (Callable[..., Any]) -> Callable[..., Any]
"""
    Wraps the run_stream_events method; no span is needed here because it
    delegates to run().
Note: run_stream_events internally calls self.run() with an event_stream_handler,
so the invoke_agent span will be created by the run() wrapper.
"""
@wraps(original_func)
async def wrapper(self, *args, **kwargs):
# type: (Any, *Any, **Any) -> Any
# Just call the original generator - it will call run() which has the instrumentation
try:
async for event in original_func(self, *args, **kwargs):
yield event
except Exception as exc:
_capture_exception(exc)
raise exc from None
return wrapper
def _patch_agent_run():
# type: () -> None
"""
Patches the Agent run methods to create spans for agent execution.
    This patches run (which run_sync delegates to) and the streaming
    methods (run_stream, run_stream_events).
"""
# Store original methods
original_run = Agent.run
original_run_stream = Agent.run_stream
original_run_stream_events = Agent.run_stream_events
# Wrap and apply patches for non-streaming methods
Agent.run = _create_run_wrapper(original_run, is_streaming=False)
# Wrap and apply patches for streaming methods
Agent.run_stream = _create_streaming_wrapper(original_run_stream)
Agent.run_stream_events = _create_streaming_events_wrapper(
original_run_stream_events
)
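
To make the wrappers concrete, a hedged usage sketch against pydantic-ai's public Agent API (the model string and agent name are placeholders):

import asyncio

from pydantic_ai import Agent

agent = Agent("openai:gpt-4o", name="support-agent")  # placeholder model/name

async def main():
    # Agent.run is now the wrapper above: it opens an invoke_agent span inside
    # a fresh isolation scope, pushes the agent onto the contextvar stack, and
    # records result.output on the span before unwinding in reverse order.
    result = await agent.run("What is the capital of France?")
    print(result.output)

asyncio.run(main())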

View file

@@ -0,0 +1,110 @@
from contextlib import asynccontextmanager
from functools import wraps
import sentry_sdk
from sentry_sdk.integrations import DidNotEnable
from ..spans import (
ai_client_span,
update_ai_client_span,
)
try:
from pydantic_ai._agent_graph import ModelRequestNode # type: ignore
except ImportError:
raise DidNotEnable("pydantic-ai not installed")
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Any, Callable
def _extract_span_data(node, ctx):
# type: (Any, Any) -> tuple[list[Any], Any, Any]
"""Extract common data needed for creating chat spans.
Returns:
Tuple of (messages, model, model_settings)
"""
# Extract model and settings from context
model = None
model_settings = None
if hasattr(ctx, "deps"):
model = getattr(ctx.deps, "model", None)
model_settings = getattr(ctx.deps, "model_settings", None)
# Build full message list: history + current request
messages = []
if hasattr(ctx, "state") and hasattr(ctx.state, "message_history"):
messages.extend(ctx.state.message_history)
current_request = getattr(node, "request", None)
if current_request:
messages.append(current_request)
return messages, model, model_settings
def _patch_graph_nodes():
# type: () -> None
"""
Patches the graph node execution to create appropriate spans.
ModelRequestNode -> Creates ai_client span for model requests
CallToolsNode -> Handles tool calls (spans created in tool patching)
"""
# Patch ModelRequestNode to create ai_client spans
original_model_request_run = ModelRequestNode.run
@wraps(original_model_request_run)
async def wrapped_model_request_run(self, ctx):
# type: (Any, Any) -> Any
messages, model, model_settings = _extract_span_data(self, ctx)
with ai_client_span(messages, None, model, model_settings) as span:
result = await original_model_request_run(self, ctx)
# Extract response from result if available
model_response = None
if hasattr(result, "model_response"):
model_response = result.model_response
update_ai_client_span(span, model_response)
return result
ModelRequestNode.run = wrapped_model_request_run
# Patch ModelRequestNode.stream for streaming requests
original_model_request_stream = ModelRequestNode.stream
def create_wrapped_stream(original_stream_method):
# type: (Callable[..., Any]) -> Callable[..., Any]
"""Create a wrapper for ModelRequestNode.stream that creates chat spans."""
@asynccontextmanager
@wraps(original_stream_method)
async def wrapped_model_request_stream(self, ctx):
# type: (Any, Any) -> Any
messages, model, model_settings = _extract_span_data(self, ctx)
# Create chat span for streaming request
with ai_client_span(messages, None, model, model_settings) as span:
# Call the original stream method
async with original_stream_method(self, ctx) as stream:
yield stream
# After streaming completes, update span with response data
# The ModelRequestNode stores the final response in _result
model_response = None
if hasattr(self, "_result") and self._result is not None:
# _result is a NextNode containing the model_response
if hasattr(self._result, "model_response"):
model_response = self._result.model_response
update_ai_client_span(span, model_response)
return wrapped_model_request_stream
ModelRequestNode.stream = create_wrapped_stream(original_model_request_stream)
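
The decorator stacking in create_wrapped_stream is the load-bearing detail: @wraps is applied first, then @asynccontextmanager, so the result preserves the original method's metadata while the chat span stays open until the caller leaves its async-with block. A standalone sketch of the same pattern (every name here is illustrative, not pydantic-ai API):

from contextlib import asynccontextmanager
from functools import wraps

def instrument_stream(original):
    # `original` is any function returning an async context manager
    @asynccontextmanager
    @wraps(original)
    async def wrapper(*args, **kwargs):
        print("span opened")              # stands in for ai_client_span(...)
        async with original(*args, **kwargs) as stream:
            yield stream                  # the caller consumes the stream here
        print("span updated and closed")  # runs only after the caller's async-with exits
    return wrapper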

View file

@@ -0,0 +1,40 @@
from functools import wraps
from typing import TYPE_CHECKING
from sentry_sdk.integrations import DidNotEnable
try:
from pydantic_ai import models # type: ignore
except ImportError:
raise DidNotEnable("pydantic-ai not installed")
from ..spans import ai_client_span, update_ai_client_span
if TYPE_CHECKING:
from typing import Any
def _patch_model_request():
# type: () -> None
"""
Patches model request execution to create AI client spans.
In pydantic-ai, model requests are handled through the Model interface.
We need to patch the request method on models to create spans.
"""
# Patch the base Model class's request method
if hasattr(models, "Model"):
original_request = models.Model.request
@wraps(original_request)
async def wrapped_request(self, messages, *args, **kwargs):
# type: (Any, Any, *Any, **Any) -> Any
# Pass all messages (full conversation history)
with ai_client_span(messages, None, self, None) as span:
result = await original_request(self, messages, *args, **kwargs)
update_ai_client_span(span, result)
return result
models.Model.request = wrapped_request

View file

@@ -0,0 +1,98 @@
from functools import wraps
from sentry_sdk.integrations import DidNotEnable
import sentry_sdk
from ..spans import execute_tool_span, update_execute_tool_span
from ..utils import (
_capture_exception,
get_current_agent,
)
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Any
try:
from pydantic_ai.mcp import MCPServer # type: ignore
HAS_MCP = True
except ImportError:
HAS_MCP = False
try:
from pydantic_ai._tool_manager import ToolManager # type: ignore
except ImportError:
raise DidNotEnable("pydantic-ai not installed")
def _patch_tool_execution():
# type: () -> None
"""
Patch ToolManager._call_tool to create execute_tool spans.
    This is the single point that all tool calls in pydantic_ai flow through,
    regardless of toolset type (function, MCP, combined, wrapper, etc.).
By patching here, we avoid:
- Patching multiple toolset classes
- Dealing with signature mismatches from instrumented MCP servers
- Complex nested toolset handling
"""
original_call_tool = ToolManager._call_tool
@wraps(original_call_tool)
async def wrapped_call_tool(self, call, *args, **kwargs):
# type: (Any, Any, *Any, **Any) -> Any
# Extract tool info before calling original
name = call.tool_name
tool = self.tools.get(name) if self.tools else None
# Determine tool type by checking tool.toolset
tool_type = "function" # default
if tool and HAS_MCP and isinstance(tool.toolset, MCPServer):
tool_type = "mcp"
# Get agent from contextvar
agent = get_current_agent()
if agent and tool:
try:
args_dict = call.args_as_dict()
except Exception:
args_dict = call.args if isinstance(call.args, dict) else {}
# Create execute_tool span
# Nesting is handled by isolation_scope() to ensure proper parent-child relationships
with sentry_sdk.isolation_scope():
with execute_tool_span(
name,
args_dict,
agent,
tool_type=tool_type,
) as span:
try:
result = await original_call_tool(
self,
call,
*args,
**kwargs,
)
update_execute_tool_span(span, result)
return result
except Exception as exc:
_capture_exception(exc)
raise exc from None
# No span context - just call original
return await original_call_tool(
self,
call,
*args,
**kwargs,
)
ToolManager._call_tool = wrapped_call_tool
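
A hedged sketch of the path this patch instruments, using pydantic-ai's public tool decorator (model string, agent name, and tool body are placeholders):

from pydantic_ai import Agent

agent = Agent("openai:gpt-4o", name="weather-agent")  # placeholder model/name

@agent.tool_plain
def get_weather(city: str) -> str:
    """Return a canned forecast for the given city."""
    return f"18C and clear in {city}"

# When the model asks for get_weather, the call reaches ToolManager._call_tool,
# i.e. the wrapped version above: an execute_tool span wraps the invocation with
# tool_type "function" (it would be "mcp" for tools backed by an MCPServer).
result = agent.run_sync("What's the weather in Paris?")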

View file

@@ -0,0 +1,3 @@
from .ai_client import ai_client_span, update_ai_client_span # noqa: F401
from .execute_tool import execute_tool_span, update_execute_tool_span # noqa: F401
from .invoke_agent import invoke_agent_span, update_invoke_agent_span # noqa: F401

View file

@@ -0,0 +1,246 @@
import sentry_sdk
from sentry_sdk.ai.utils import set_data_normalized
from sentry_sdk.consts import OP, SPANDATA
from sentry_sdk.utils import safe_serialize
from ..consts import SPAN_ORIGIN
from ..utils import (
_set_agent_data,
_set_available_tools,
_set_model_data,
_should_send_prompts,
_get_model_name,
get_current_agent,
get_is_streaming,
)
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Any, List, Dict
from pydantic_ai.usage import RequestUsage # type: ignore
try:
from pydantic_ai.messages import ( # type: ignore
BaseToolCallPart,
BaseToolReturnPart,
SystemPromptPart,
UserPromptPart,
TextPart,
ThinkingPart,
)
except ImportError:
# Fallback if these classes are not available
BaseToolCallPart = None
BaseToolReturnPart = None
SystemPromptPart = None
UserPromptPart = None
TextPart = None
ThinkingPart = None
def _set_usage_data(span, usage):
# type: (sentry_sdk.tracing.Span, RequestUsage) -> None
"""Set token usage data on a span."""
if usage is None:
return
if hasattr(usage, "input_tokens") and usage.input_tokens is not None:
span.set_data(SPANDATA.GEN_AI_USAGE_INPUT_TOKENS, usage.input_tokens)
if hasattr(usage, "output_tokens") and usage.output_tokens is not None:
span.set_data(SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS, usage.output_tokens)
if hasattr(usage, "total_tokens") and usage.total_tokens is not None:
span.set_data(SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS, usage.total_tokens)
def _set_input_messages(span, messages):
# type: (sentry_sdk.tracing.Span, Any) -> None
"""Set input messages data on a span."""
if not _should_send_prompts():
return
if not messages:
return
try:
formatted_messages = []
system_prompt = None
# Extract system prompt from any ModelRequest with instructions
for msg in messages:
if hasattr(msg, "instructions") and msg.instructions:
system_prompt = msg.instructions
break
# Add system prompt as first message if present
if system_prompt:
formatted_messages.append(
{"role": "system", "content": [{"type": "text", "text": system_prompt}]}
)
for msg in messages:
if hasattr(msg, "parts"):
for part in msg.parts:
role = "user"
# Use isinstance checks with proper base classes
if SystemPromptPart and isinstance(part, SystemPromptPart):
role = "system"
elif (
(TextPart and isinstance(part, TextPart))
or (ThinkingPart and isinstance(part, ThinkingPart))
or (BaseToolCallPart and isinstance(part, BaseToolCallPart))
):
role = "assistant"
elif BaseToolReturnPart and isinstance(part, BaseToolReturnPart):
role = "tool"
content = [] # type: List[Dict[str, Any] | str]
tool_calls = None
tool_call_id = None
# Handle ToolCallPart (assistant requesting tool use)
if BaseToolCallPart and isinstance(part, BaseToolCallPart):
tool_call_data = {}
if hasattr(part, "tool_name"):
tool_call_data["name"] = part.tool_name
if hasattr(part, "args"):
tool_call_data["arguments"] = safe_serialize(part.args)
if tool_call_data:
tool_calls = [tool_call_data]
# Handle ToolReturnPart (tool result)
elif BaseToolReturnPart and isinstance(part, BaseToolReturnPart):
if hasattr(part, "tool_name"):
tool_call_id = part.tool_name
if hasattr(part, "content"):
content.append({"type": "text", "text": str(part.content)})
# Handle regular content
elif hasattr(part, "content"):
if isinstance(part.content, str):
content.append({"type": "text", "text": part.content})
elif isinstance(part.content, list):
for item in part.content:
if isinstance(item, str):
content.append({"type": "text", "text": item})
else:
content.append(safe_serialize(item))
else:
content.append({"type": "text", "text": str(part.content)})
# Add message if we have content or tool calls
if content or tool_calls:
message = {"role": role} # type: Dict[str, Any]
if content:
message["content"] = content
if tool_calls:
message["tool_calls"] = tool_calls
if tool_call_id:
message["tool_call_id"] = tool_call_id
formatted_messages.append(message)
if formatted_messages:
set_data_normalized(
span, SPANDATA.GEN_AI_REQUEST_MESSAGES, formatted_messages, unpack=False
)
except Exception:
# If we fail to format messages, just skip it
pass
def _set_output_data(span, response):
# type: (sentry_sdk.tracing.Span, Any) -> None
"""Set output data on a span."""
if not _should_send_prompts():
return
if not response:
return
    if hasattr(response, "model_name") and response.model_name:
        set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_MODEL, response.model_name)
try:
# Extract text from ModelResponse
if hasattr(response, "parts"):
texts = []
tool_calls = []
for part in response.parts:
if TextPart and isinstance(part, TextPart) and hasattr(part, "content"):
texts.append(part.content)
elif BaseToolCallPart and isinstance(part, BaseToolCallPart):
tool_call_data = {
"type": "function",
}
if hasattr(part, "tool_name"):
tool_call_data["name"] = part.tool_name
if hasattr(part, "args"):
tool_call_data["arguments"] = safe_serialize(part.args)
tool_calls.append(tool_call_data)
if texts:
set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, texts)
if tool_calls:
span.set_data(
SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS, safe_serialize(tool_calls)
)
except Exception:
# If we fail to format output, just skip it
pass
def ai_client_span(messages, agent, model, model_settings):
# type: (Any, Any, Any, Any) -> sentry_sdk.tracing.Span
"""Create a span for an AI client call (model request).
Args:
messages: Full conversation history (list of messages)
agent: Agent object
model: Model object
model_settings: Model settings
"""
# Determine model name for span name
model_obj = model
if agent and hasattr(agent, "model"):
model_obj = agent.model
model_name = _get_model_name(model_obj) or "unknown"
span = sentry_sdk.start_span(
op=OP.GEN_AI_CHAT,
name=f"chat {model_name}",
origin=SPAN_ORIGIN,
)
span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "chat")
_set_agent_data(span, agent)
_set_model_data(span, model, model_settings)
# Set streaming flag from contextvar
span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, get_is_streaming())
# Add available tools if agent is available
agent_obj = agent or get_current_agent()
_set_available_tools(span, agent_obj)
# Set input messages (full conversation history)
if messages:
_set_input_messages(span, messages)
return span
def update_ai_client_span(span, model_response):
# type: (sentry_sdk.tracing.Span, Any) -> None
"""Update the AI client span with response data."""
if not span:
return
# Set usage data if available
if model_response and hasattr(model_response, "usage"):
_set_usage_data(span, model_response.usage)
# Set output data
_set_output_data(span, model_response)
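
For orientation, the message shape _set_input_messages writes to GEN_AI_REQUEST_MESSAGES looks roughly like this (a hedged example; all values are invented):

formatted_messages = [
    {"role": "system", "content": [{"type": "text", "text": "You are terse."}]},
    {"role": "user", "content": [{"type": "text", "text": "Weather in Paris?"}]},
    {"role": "assistant", "tool_calls": [{"name": "get_weather", "arguments": '{"city": "Paris"}'}]},
    {"role": "tool", "tool_call_id": "get_weather",
     "content": [{"type": "text", "text": "18C, clear"}]},
]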

View file

@@ -0,0 +1,49 @@
import sentry_sdk
from sentry_sdk.consts import OP, SPANDATA
from sentry_sdk.utils import safe_serialize
from ..consts import SPAN_ORIGIN
from ..utils import _set_agent_data, _should_send_prompts
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Any, Optional
def execute_tool_span(tool_name, tool_args, agent, tool_type="function"):
# type: (str, Any, Any, str) -> sentry_sdk.tracing.Span
"""Create a span for tool execution.
Args:
tool_name: The name of the tool being executed
tool_args: The arguments passed to the tool
agent: The agent executing the tool
        tool_type: The type of tool ("function" for regular tools, "mcp" for MCP server tools)
"""
span = sentry_sdk.start_span(
op=OP.GEN_AI_EXECUTE_TOOL,
name=f"execute_tool {tool_name}",
origin=SPAN_ORIGIN,
)
span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "execute_tool")
span.set_data(SPANDATA.GEN_AI_TOOL_TYPE, tool_type)
span.set_data(SPANDATA.GEN_AI_TOOL_NAME, tool_name)
_set_agent_data(span, agent)
if _should_send_prompts() and tool_args is not None:
span.set_data(SPANDATA.GEN_AI_TOOL_INPUT, safe_serialize(tool_args))
return span
def update_execute_tool_span(span, result):
# type: (sentry_sdk.tracing.Span, Any) -> None
"""Update the execute tool span with the result."""
if not span:
return
if _should_send_prompts() and result is not None:
span.set_data(SPANDATA.GEN_AI_TOOL_OUTPUT, safe_serialize(result))
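
Put together, executing a hypothetical get_weather tool yields a span shaped roughly as follows (constant names as used above; the rendering is illustrative):

# op:   OP.GEN_AI_EXECUTE_TOOL
# name: "execute_tool get_weather"
# data: SPANDATA.GEN_AI_OPERATION_NAME -> "execute_tool"
#       SPANDATA.GEN_AI_TOOL_TYPE      -> "function" (or "mcp")
#       SPANDATA.GEN_AI_TOOL_NAME      -> "get_weather"
#       SPANDATA.GEN_AI_TOOL_INPUT / GEN_AI_TOOL_OUTPUT -> serialized args/result,
#           set only when _should_send_prompts() allows it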

View file

@@ -0,0 +1,112 @@
import sentry_sdk
from sentry_sdk.ai.utils import get_start_span_function, set_data_normalized
from sentry_sdk.consts import OP, SPANDATA
from ..consts import SPAN_ORIGIN
from ..utils import (
_set_agent_data,
_set_available_tools,
_set_model_data,
_should_send_prompts,
)
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Any
def invoke_agent_span(user_prompt, agent, model, model_settings, is_streaming=False):
# type: (Any, Any, Any, Any, bool) -> sentry_sdk.tracing.Span
"""Create a span for invoking the agent."""
# Determine agent name for span
name = "agent"
if agent and getattr(agent, "name", None):
name = agent.name
span = get_start_span_function()(
op=OP.GEN_AI_INVOKE_AGENT,
name=f"invoke_agent {name}",
origin=SPAN_ORIGIN,
)
span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
_set_agent_data(span, agent)
_set_model_data(span, model, model_settings)
_set_available_tools(span, agent)
# Add user prompt and system prompts if available and prompts are enabled
if _should_send_prompts():
messages = []
# Add system prompts (both instructions and system_prompt)
system_texts = []
if agent:
# Check for system_prompt
system_prompts = getattr(agent, "_system_prompts", None) or []
for prompt in system_prompts:
if isinstance(prompt, str):
system_texts.append(prompt)
# Check for instructions (stored in _instructions)
instructions = getattr(agent, "_instructions", None)
if instructions:
if isinstance(instructions, str):
system_texts.append(instructions)
elif isinstance(instructions, (list, tuple)):
for instr in instructions:
if isinstance(instr, str):
system_texts.append(instr)
elif callable(instr):
# Skip dynamic/callable instructions
pass
# Add all system texts as system messages
for system_text in system_texts:
messages.append(
{
"content": [{"text": system_text, "type": "text"}],
"role": "system",
}
)
# Add user prompt
if user_prompt:
if isinstance(user_prompt, str):
messages.append(
{
"content": [{"text": user_prompt, "type": "text"}],
"role": "user",
}
)
elif isinstance(user_prompt, list):
# Handle list of user content
content = []
for item in user_prompt:
if isinstance(item, str):
content.append({"text": item, "type": "text"})
if content:
messages.append(
{
"content": content,
"role": "user",
}
)
if messages:
set_data_normalized(
span, SPANDATA.GEN_AI_REQUEST_MESSAGES, messages, unpack=False
)
return span
def update_invoke_agent_span(span, output):
# type: (sentry_sdk.tracing.Span, Any) -> None
"""Update and close the invoke agent span."""
if span and _should_send_prompts() and output:
set_data_normalized(
span, SPANDATA.GEN_AI_RESPONSE_TEXT, str(output), unpack=False
)

View file

@@ -0,0 +1,223 @@
import sentry_sdk
from contextvars import ContextVar
from sentry_sdk.consts import SPANDATA
from sentry_sdk.scope import should_send_default_pii
from sentry_sdk.tracing_utils import set_span_errored
from sentry_sdk.utils import event_from_exception, safe_serialize
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Any, Optional
# Store the current agent context in a contextvar for re-entrant safety
# Using a list as a stack to support nested agent calls
_agent_context_stack = ContextVar("pydantic_ai_agent_context_stack", default=[]) # type: ContextVar[list[dict[str, Any]]]
def push_agent(agent, is_streaming=False):
# type: (Any, bool) -> None
"""Push an agent context onto the stack along with its streaming flag."""
stack = _agent_context_stack.get().copy()
stack.append({"agent": agent, "is_streaming": is_streaming})
_agent_context_stack.set(stack)
def pop_agent():
# type: () -> None
"""Pop an agent context from the stack."""
stack = _agent_context_stack.get().copy()
if stack:
stack.pop()
_agent_context_stack.set(stack)
def get_current_agent():
# type: () -> Any
"""Get the current agent from the contextvar stack."""
stack = _agent_context_stack.get()
if stack:
return stack[-1]["agent"]
return None
def get_is_streaming():
# type: () -> bool
"""Get the streaming flag from the contextvar stack."""
stack = _agent_context_stack.get()
if stack:
return stack[-1].get("is_streaming", False)
return False
def _should_send_prompts():
# type: () -> bool
"""
Check if prompts should be sent to Sentry.
This checks both send_default_pii and the include_prompts integration setting.
"""
if not should_send_default_pii():
return False
from . import PydanticAIIntegration
# Get the integration instance from the client
integration = sentry_sdk.get_client().get_integration(PydanticAIIntegration)
if integration is None:
return False
return getattr(integration, "include_prompts", False)
def _set_agent_data(span, agent):
# type: (sentry_sdk.tracing.Span, Any) -> None
"""Set agent-related data on a span.
Args:
span: The span to set data on
agent: Agent object (can be None, will try to get from contextvar if not provided)
"""
# Extract agent name from agent object or contextvar
agent_obj = agent
if not agent_obj:
# Try to get from contextvar
agent_obj = get_current_agent()
if agent_obj and hasattr(agent_obj, "name") and agent_obj.name:
span.set_data(SPANDATA.GEN_AI_AGENT_NAME, agent_obj.name)
def _get_model_name(model_obj):
# type: (Any) -> Optional[str]
"""Extract model name from a model object.
Args:
model_obj: Model object to extract name from
Returns:
Model name string or None if not found
"""
if not model_obj:
return None
if hasattr(model_obj, "model_name"):
return model_obj.model_name
elif hasattr(model_obj, "name"):
try:
return model_obj.name()
except Exception:
return str(model_obj)
elif isinstance(model_obj, str):
return model_obj
else:
return str(model_obj)
def _set_model_data(span, model, model_settings):
# type: (sentry_sdk.tracing.Span, Any, Any) -> None
"""Set model-related data on a span.
Args:
span: The span to set data on
model: Model object (can be None, will try to get from agent if not provided)
model_settings: Model settings (can be None, will try to get from agent if not provided)
"""
# Try to get agent from contextvar if we need it
agent_obj = get_current_agent()
# Extract model information
model_obj = model
if not model_obj and agent_obj and hasattr(agent_obj, "model"):
model_obj = agent_obj.model
if model_obj:
# Set system from model
if hasattr(model_obj, "system"):
span.set_data(SPANDATA.GEN_AI_SYSTEM, model_obj.system)
# Set model name
model_name = _get_model_name(model_obj)
if model_name:
span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model_name)
# Extract model settings
settings = model_settings
if not settings and agent_obj and hasattr(agent_obj, "model_settings"):
settings = agent_obj.model_settings
if settings:
settings_map = {
"max_tokens": SPANDATA.GEN_AI_REQUEST_MAX_TOKENS,
"temperature": SPANDATA.GEN_AI_REQUEST_TEMPERATURE,
"top_p": SPANDATA.GEN_AI_REQUEST_TOP_P,
"frequency_penalty": SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY,
"presence_penalty": SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY,
}
# ModelSettings is a TypedDict (dict at runtime), so use dict access
if isinstance(settings, dict):
for setting_name, spandata_key in settings_map.items():
value = settings.get(setting_name)
if value is not None:
span.set_data(spandata_key, value)
else:
# Fallback for object-style settings
for setting_name, spandata_key in settings_map.items():
if hasattr(settings, setting_name):
value = getattr(settings, setting_name)
if value is not None:
span.set_data(spandata_key, value)
def _set_available_tools(span, agent):
# type: (sentry_sdk.tracing.Span, Any) -> None
"""Set available tools data on a span from an agent's function toolset.
Args:
span: The span to set data on
agent: Agent object with _function_toolset attribute
"""
if not agent or not hasattr(agent, "_function_toolset"):
return
try:
tools = []
# Get tools from the function toolset
if hasattr(agent._function_toolset, "tools"):
for tool_name, tool in agent._function_toolset.tools.items():
tool_info = {"name": tool_name}
# Add description from function_schema if available
if hasattr(tool, "function_schema"):
schema = tool.function_schema
if getattr(schema, "description", None):
tool_info["description"] = schema.description
# Add parameters from json_schema
if getattr(schema, "json_schema", None):
tool_info["parameters"] = schema.json_schema
tools.append(tool_info)
if tools:
span.set_data(
SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS, safe_serialize(tools)
)
except Exception:
# If we can't extract tools, just skip it
pass
def _capture_exception(exc):
# type: (Any) -> None
set_span_errored()
event, hint = event_from_exception(
exc,
client_options=sentry_sdk.get_client().options,
mechanism={"type": "pydantic_ai", "handled": False},
)
sentry_sdk.capture_event(event, hint=hint)
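
A quick sketch of the stack semantics these helpers implement (plain strings stand in for Agent objects):

push_agent("outer")                     # outer agent starts
push_agent("inner", is_streaming=True)  # nested agent starts
assert get_current_agent() == "inner"
assert get_is_streaming() is True
pop_agent()                             # nested agent finishes
assert get_current_agent() == "outer"
assert get_is_streaming() is False
pop_agent()
assert get_current_agent() is None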