Changed code to support older Python versions

This commit is contained in:
Malasaur 2025-12-01 23:27:09 +01:00
parent eb92d2d36f
commit 582458cdd0
5027 changed files with 794942 additions and 4 deletions

View file

@ -0,0 +1,61 @@
from sentry_sdk.integrations import DidNotEnable, Integration
from .patches import (
_create_get_model_wrapper,
_create_get_all_tools_wrapper,
_create_run_wrapper,
_patch_agent_run,
_patch_error_tracing,
)
try:
# "agents" is too generic. If someone has an agents.py file in their project
# or another package that's importable via "agents", no ImportError would
# be thrown and the integration would enable itself even if openai-agents is
# not installed. That's why we're adding the second, more specific import
# after it, even if we don't use it.
import agents
from agents.run import DEFAULT_AGENT_RUNNER
except ImportError:
raise DidNotEnable("OpenAI Agents not installed")
def _patch_runner():
    # type: () -> None
    """Monkey patch the default agent runner so each run gets a root span."""
    # Create the root span for one full agent run (including eventual handoffs)
    # Note agents.run.DEFAULT_AGENT_RUNNER.run_sync is a wrapper around
    # agents.run.DEFAULT_AGENT_RUNNER.run. It does not need to be wrapped separately.
    # TODO-anton: Also patch streaming runner: agents.Runner.run_streamed
    agents.run.DEFAULT_AGENT_RUNNER.run = _create_run_wrapper(
        agents.run.DEFAULT_AGENT_RUNNER.run
    )
    # Creating the actual spans for each agent run.
    _patch_agent_run()
def _patch_model():
    # type: () -> None
    """Wrap AgentRunner._get_model so returned models create AI client spans."""
    # _get_model is a classmethod, so the wrapped function has to be
    # re-wrapped in classmethod() when it is patched back in.
    agents.run.AgentRunner._get_model = classmethod(
        _create_get_model_wrapper(agents.run.AgentRunner._get_model),
    )
def _patch_tools():
    # type: () -> None
    """Wrap AgentRunner._get_all_tools so function tools get execute_tool spans."""
    # _get_all_tools is a classmethod, so the wrapped function has to be
    # re-wrapped in classmethod() when it is patched back in.
    agents.run.AgentRunner._get_all_tools = classmethod(
        _create_get_all_tools_wrapper(agents.run.AgentRunner._get_all_tools),
    )
class OpenAIAgentsIntegration(Integration):
    """Sentry integration for the openai-agents library (monkey-patch based)."""

    identifier = "openai_agents"

    @staticmethod
    def setup_once():
        # type: () -> None
        """Install all monkey patches; invoked once per process by the SDK."""
        _patch_error_tracing()
        _patch_tools()
        _patch_model()
        _patch_runner()

View file

@ -0,0 +1 @@
# Trace origin tag attached to every span created by this integration.
SPAN_ORIGIN = "auto.ai.openai_agents"

View file

@ -0,0 +1,5 @@
from .models import _create_get_model_wrapper # noqa: F401
from .tools import _create_get_all_tools_wrapper # noqa: F401
from .runner import _create_run_wrapper # noqa: F401
from .agent_run import _patch_agent_run # noqa: F401
from .error_tracing import _patch_error_tracing # noqa: F401

View file

@ -0,0 +1,140 @@
from functools import wraps
from sentry_sdk.integrations import DidNotEnable
from ..spans import invoke_agent_span, update_invoke_agent_span, handoff_span
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Any, Optional
try:
import agents
except ImportError:
raise DidNotEnable("OpenAI Agents not installed")
def _patch_agent_run():
    # type: () -> None
    """
    Patches AgentRunner methods to create agent invocation spans.
    This directly patches the execution flow to track when agents start and stop.
    """

    # Store original methods so the patched versions can delegate to them.
    original_run_single_turn = agents.run.AgentRunner._run_single_turn
    original_execute_handoffs = agents._run_impl.RunImpl.execute_handoffs
    original_execute_final_output = agents._run_impl.RunImpl.execute_final_output

    def _start_invoke_agent_span(context_wrapper, agent, kwargs):
        # type: (agents.RunContextWrapper, agents.Agent, dict[str, Any]) -> None
        """Start an agent invocation span"""
        # Store the agent on the context wrapper so we can access it later
        context_wrapper._sentry_current_agent = agent
        invoke_agent_span(context_wrapper, agent, kwargs)

    def _end_invoke_agent_span(context_wrapper, agent, output=None):
        # type: (agents.RunContextWrapper, agents.Agent, Optional[Any]) -> None
        """End the agent invocation span"""
        # Clear the stored agent
        if hasattr(context_wrapper, "_sentry_current_agent"):
            delattr(context_wrapper, "_sentry_current_agent")
        update_invoke_agent_span(context_wrapper, agent, output)

    def _has_active_agent_span(context_wrapper):
        # type: (agents.RunContextWrapper) -> bool
        """Check if there's an active agent span for this context"""
        return getattr(context_wrapper, "_sentry_current_agent", None) is not None

    def _get_current_agent(context_wrapper):
        # type: (agents.RunContextWrapper) -> Optional[agents.Agent]
        """Get the current agent from context wrapper"""
        return getattr(context_wrapper, "_sentry_current_agent", None)

    # NOTE: the originals may be (bound) classmethods; @wraps needs the
    # underlying plain function in that case.
    @wraps(
        original_run_single_turn.__func__
        if hasattr(original_run_single_turn, "__func__")
        else original_run_single_turn
    )
    async def patched_run_single_turn(cls, *args, **kwargs):
        # type: (agents.Runner, *Any, **Any) -> Any
        """Patched _run_single_turn that creates agent invocation spans"""
        agent = kwargs.get("agent")
        context_wrapper = kwargs.get("context_wrapper")
        should_run_agent_start_hooks = kwargs.get("should_run_agent_start_hooks")
        # Start agent span when agent starts (but only once per agent)
        if should_run_agent_start_hooks and agent and context_wrapper:
            # End any existing span for a different agent
            if _has_active_agent_span(context_wrapper):
                current_agent = _get_current_agent(context_wrapper)
                if current_agent and current_agent != agent:
                    _end_invoke_agent_span(context_wrapper, current_agent)
            _start_invoke_agent_span(context_wrapper, agent, kwargs)
        # Call original method with all the correct parameters
        result = await original_run_single_turn(*args, **kwargs)
        return result

    @wraps(
        original_execute_handoffs.__func__
        if hasattr(original_execute_handoffs, "__func__")
        else original_execute_handoffs
    )
    async def patched_execute_handoffs(cls, *args, **kwargs):
        # type: (agents.Runner, *Any, **Any) -> Any
        """Patched execute_handoffs that creates handoff spans and ends agent span for handoffs"""
        context_wrapper = kwargs.get("context_wrapper")
        run_handoffs = kwargs.get("run_handoffs")
        agent = kwargs.get("agent")
        # Create Sentry handoff span for the first handoff (agents library only processes the first one)
        if run_handoffs:
            first_handoff = run_handoffs[0]
            handoff_agent_name = first_handoff.handoff.agent_name
            handoff_span(context_wrapper, agent, handoff_agent_name)
        # Call original method with all parameters
        try:
            result = await original_execute_handoffs(*args, **kwargs)
        finally:
            # End span for current agent after handoff processing is complete
            if agent and context_wrapper and _has_active_agent_span(context_wrapper):
                _end_invoke_agent_span(context_wrapper, agent)
        return result

    @wraps(
        original_execute_final_output.__func__
        if hasattr(original_execute_final_output, "__func__")
        else original_execute_final_output
    )
    async def patched_execute_final_output(cls, *args, **kwargs):
        # type: (agents.Runner, *Any, **Any) -> Any
        """Patched execute_final_output that ends agent span for final outputs"""
        agent = kwargs.get("agent")
        context_wrapper = kwargs.get("context_wrapper")
        final_output = kwargs.get("final_output")
        # Call original method with all parameters
        try:
            result = await original_execute_final_output(*args, **kwargs)
        finally:
            # End span for current agent after final output processing is complete
            if agent and context_wrapper and _has_active_agent_span(context_wrapper):
                _end_invoke_agent_span(context_wrapper, agent, final_output)
        return result

    # Apply patches (re-wrapped as classmethods, matching the originals).
    agents.run.AgentRunner._run_single_turn = classmethod(patched_run_single_turn)
    agents._run_impl.RunImpl.execute_handoffs = classmethod(patched_execute_handoffs)
    agents._run_impl.RunImpl.execute_final_output = classmethod(
        patched_execute_final_output
    )

View file

@ -0,0 +1,77 @@
from functools import wraps
import sentry_sdk
from sentry_sdk.consts import SPANSTATUS
from sentry_sdk.tracing_utils import set_span_errored
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Any, Callable, Optional
def _patch_error_tracing():
    # type: () -> None
    """
    Patches agents error tracing function to inject our span error logic
    when a tool execution fails.

    In newer versions, the function is at: agents.util._error_tracing.attach_error_to_current_span
    In older versions, it was at: agents._utils.attach_error_to_current_span

    This works even when the module or function doesn't exist.
    """
    error_tracing_module = None

    # Try newer location first (agents.util._error_tracing)
    try:
        from agents.util import _error_tracing

        error_tracing_module = _error_tracing
    except (ImportError, AttributeError):
        pass

    # Try older location (agents._utils)
    if error_tracing_module is None:
        try:
            import agents._utils

            error_tracing_module = agents._utils
        except (ImportError, AttributeError):
            # Module doesn't exist in either location, nothing to patch
            return

    # Check if the function exists
    if not hasattr(error_tracing_module, "attach_error_to_current_span"):
        return

    original_attach_error = error_tracing_module.attach_error_to_current_span

    @wraps(original_attach_error)
    def sentry_attach_error_to_current_span(error, *args, **kwargs):
        # type: (Any, *Any, **Any) -> Any
        """
        Wraps agents' error attachment to also set Sentry span status to error.
        This allows us to properly track tool execution errors even though
        the agents library swallows exceptions.
        """
        # Set the current Sentry span to errored
        current_span = sentry_sdk.get_current_span()
        if current_span is not None:
            set_span_errored(current_span)
            current_span.set_data("span.status", "error")

            # Capture the error details. Every object has __class__ and a
            # str() representation, so no hasattr() guards are needed here.
            current_span.set_data("error.type", error.__class__.__name__)
            error_message = str(error)
            if error_message:
                current_span.set_data("error.message", error_message)

        # Call the original function
        return original_attach_error(error, *args, **kwargs)

    error_tracing_module.attach_error_to_current_span = (
        sentry_attach_error_to_current_span
    )

View file

@ -0,0 +1,50 @@
from functools import wraps
from sentry_sdk.integrations import DidNotEnable
from ..spans import ai_client_span, update_ai_client_span
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Any, Callable
try:
import agents
except ImportError:
raise DidNotEnable("OpenAI Agents not installed")
def _create_get_model_wrapper(original_get_model):
    # type: (Callable[..., Any]) -> Callable[..., Any]
    """
    Wrap ``AgentRunner._get_model`` so that every model it returns has its
    ``get_response`` method instrumented with a Sentry AI client span.
    """

    # `original_get_model` may be a (bound) classmethod; @wraps needs the
    # underlying plain function in that case.
    @wraps(
        original_get_model.__func__
        if hasattr(original_get_model, "__func__")
        else original_get_model
    )
    def wrapped_get_model(cls, agent, run_config):
        # type: (agents.Runner, agents.Agent, agents.RunConfig) -> agents.Model
        model = original_get_model(agent, run_config)
        unwrapped_get_response = model.get_response

        @wraps(unwrapped_get_response)
        async def instrumented_get_response(*args, **kwargs):
            # type: (*Any, **Any) -> Any
            # Open an AI client span around the model call and enrich it
            # with the call's result before returning.
            with ai_client_span(agent, kwargs) as span:
                response = await unwrapped_get_response(*args, **kwargs)
                update_ai_client_span(span, agent, kwargs, response)
                return response

        model.get_response = instrumented_get_response
        return model

    return wrapped_get_model

View file

@ -0,0 +1,45 @@
from functools import wraps
import sentry_sdk
from ..spans import agent_workflow_span
from ..utils import _capture_exception
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Any, Callable
def _create_run_wrapper(original_func):
    # type: (Callable[..., Any]) -> Callable[..., Any]
    """
    Wrap ``agents.Runner.run`` to open a root span around the whole agent
    workflow run.

    ``agents.Runner.run_sync()`` delegates to ``agents.Runner.run()``, so
    wrapping the async variant covers both entry points.
    """

    @wraps(original_func)
    async def wrapper(*args, **kwargs):
        # type: (*Any, **Any) -> Any
        # Run every workflow in its own isolation scope so concurrent agent
        # runs (e.g. in asyncio tasks) cannot leak data into each other.
        with sentry_sdk.isolation_scope():
            starting_agent = args[0]
            with agent_workflow_span(starting_agent):
                try:
                    return await original_func(*args, **kwargs)
                except Exception as exc:
                    _capture_exception(exc)

                    # An "invoke agent" span may still be open (it is entered
                    # manually elsewhere); close it so it is not left dangling.
                    open_span = sentry_sdk.get_current_span()
                    if open_span is not None and open_span.timestamp is None:
                        open_span.__exit__(None, None, None)

                    raise exc from None

    return wrapper

View file

@ -0,0 +1,77 @@
from functools import wraps
from sentry_sdk.integrations import DidNotEnable
from ..spans import execute_tool_span, update_execute_tool_span
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Any, Callable
try:
import agents
except ImportError:
raise DidNotEnable("OpenAI Agents not installed")
def _create_get_all_tools_wrapper(original_get_all_tools):
    # type: (Callable[..., Any]) -> Callable[..., Any]
    """
    Wraps the agents.Runner._get_all_tools method of the Runner class to wrap all function tools with Sentry instrumentation.
    """

    # `original_get_all_tools` may be a (bound) classmethod; @wraps needs
    # the underlying plain function in that case.
    @wraps(
        original_get_all_tools.__func__
        if hasattr(original_get_all_tools, "__func__")
        else original_get_all_tools
    )
    async def wrapped_get_all_tools(cls, agent, context_wrapper):
        # type: (agents.Runner, agents.Agent, agents.RunContextWrapper) -> list[agents.Tool]
        # Get the original tools
        tools = await original_get_all_tools(agent, context_wrapper)
        wrapped_tools = []
        for tool in tools:
            # Wrap only the function tools (for now)
            if tool.__class__.__name__ != "FunctionTool":
                wrapped_tools.append(tool)
                continue
            # Create a new FunctionTool with our wrapped invoke method
            original_on_invoke = tool.on_invoke_tool

            # Factory so each wrapper closes over its own tool/callback pair
            # instead of the loop variables (late-binding closure pitfall).
            def create_wrapped_invoke(current_tool, current_on_invoke):
                # type: (agents.Tool, Callable[..., Any]) -> Callable[..., Any]
                @wraps(current_on_invoke)
                async def sentry_wrapped_on_invoke_tool(*args, **kwargs):
                    # type: (*Any, **Any) -> Any
                    with execute_tool_span(current_tool, *args, **kwargs) as span:
                        # We can not capture exceptions in tool execution here because
                        # `_on_invoke_tool` is swallowing the exception here:
                        # https://github.com/openai/openai-agents-python/blob/main/src/agents/tool.py#L409-L422
                        # And because function_tool is a decorator with `default_tool_error_function` set as a default parameter
                        # I was unable to monkey patch it because those are evaluated at module import time
                        # and the SDK is too late to patch it. I was also unable to patch `_on_invoke_tool_impl`
                        # because it is nested inside this import time code. As if they made it hard to patch on purpose...
                        result = await current_on_invoke(*args, **kwargs)
                        update_execute_tool_span(span, agent, current_tool, result)
                        return result

                return sentry_wrapped_on_invoke_tool

            # Rebuild the FunctionTool so the instrumented callback is used.
            wrapped_tool = agents.FunctionTool(
                name=tool.name,
                description=tool.description,
                params_json_schema=tool.params_json_schema,
                on_invoke_tool=create_wrapped_invoke(tool, original_on_invoke),
                strict_json_schema=tool.strict_json_schema,
                is_enabled=tool.is_enabled,
            )
            wrapped_tools.append(wrapped_tool)
        return wrapped_tools

    return wrapped_get_all_tools

View file

@ -0,0 +1,5 @@
from .agent_workflow import agent_workflow_span # noqa: F401
from .ai_client import ai_client_span, update_ai_client_span # noqa: F401
from .execute_tool import execute_tool_span, update_execute_tool_span # noqa: F401
from .handoff import handoff_span # noqa: F401
from .invoke_agent import invoke_agent_span, update_invoke_agent_span # noqa: F401

View file

@ -0,0 +1,21 @@
import sentry_sdk
from sentry_sdk.ai.utils import get_start_span_function
from ..consts import SPAN_ORIGIN
from typing import TYPE_CHECKING
if TYPE_CHECKING:
import agents
def agent_workflow_span(agent):
    # type: (agents.Agent) -> sentry_sdk.tracing.Span
    """Return the root span for one full agent workflow run.

    Creates a transaction, or a child span when a transaction is already
    active (decided by ``get_start_span_function``).
    """
    start_span = get_start_span_function()
    return start_span(name=f"{agent.name} workflow", origin=SPAN_ORIGIN)

View file

@ -0,0 +1,42 @@
import sentry_sdk
from sentry_sdk.consts import OP, SPANDATA
from ..consts import SPAN_ORIGIN
from ..utils import (
_set_agent_data,
_set_input_data,
_set_output_data,
_set_usage_data,
_create_mcp_execute_tool_spans,
)
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from agents import Agent
from typing import Any
def ai_client_span(agent, get_response_kwargs):
    # type: (Agent, dict[str, Any]) -> sentry_sdk.tracing.Span
    """Create (but do not enter) the span for a single LLM client call."""
    # TODO-anton: implement other types of operations. Now "chat" is hardcoded.
    # agent.model can be a Model object or a plain model-name string.
    if hasattr(agent.model, "model"):
        model_name = agent.model.model
    else:
        model_name = agent.model

    span = sentry_sdk.start_span(
        op=OP.GEN_AI_CHAT,
        description=f"chat {model_name}",
        origin=SPAN_ORIGIN,
    )
    # TODO-anton: remove hardcoded stuff and replace something that also works for embedding and so on
    span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "chat")

    _set_agent_data(span, agent)
    _set_input_data(span, get_response_kwargs)

    return span
def update_ai_client_span(span, agent, get_response_kwargs, result):
    # type: (sentry_sdk.tracing.Span, Agent, dict[str, Any], Any) -> None
    """Enrich an AI client span with usage, output and MCP tool-call data."""
    usage = result.usage
    _set_usage_data(span, usage)
    _set_output_data(span, result)
    _create_mcp_execute_tool_spans(span, result)

View file

@ -0,0 +1,48 @@
import sentry_sdk
from sentry_sdk.consts import OP, SPANDATA, SPANSTATUS
from sentry_sdk.scope import should_send_default_pii
from ..consts import SPAN_ORIGIN
from ..utils import _set_agent_data
from typing import TYPE_CHECKING
if TYPE_CHECKING:
import agents
from typing import Any
def execute_tool_span(tool, *args, **kwargs):
    # type: (agents.Tool, *Any, **Any) -> sentry_sdk.tracing.Span
    """Create the span for one tool execution.

    ``args``/``kwargs`` are the arguments of the tool's ``on_invoke_tool``
    callback; ``args[1]`` is assumed to carry the tool input (confirm
    against the on_invoke_tool signature for the supported versions).
    """
    span = sentry_sdk.start_span(
        op=OP.GEN_AI_EXECUTE_TOOL,
        name=f"execute_tool {tool.name}",
        origin=SPAN_ORIGIN,
    )
    span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "execute_tool")

    if tool.__class__.__name__ == "FunctionTool":
        span.set_data(SPANDATA.GEN_AI_TOOL_TYPE, "function")

    span.set_data(SPANDATA.GEN_AI_TOOL_NAME, tool.name)
    span.set_data(SPANDATA.GEN_AI_TOOL_DESCRIPTION, tool.description)

    if should_send_default_pii():
        # Renamed from `input` so the builtin is not shadowed.
        tool_input = args[1]
        span.set_data(SPANDATA.GEN_AI_TOOL_INPUT, tool_input)

    return span
def update_execute_tool_span(span, agent, tool, result):
    # type: (sentry_sdk.tracing.Span, agents.Agent, agents.Tool, Any) -> None
    """Finalize a tool-execution span with agent data and the tool's result."""
    _set_agent_data(span, agent)

    # The agents library swallows tool exceptions and returns an error string
    # instead, so failures are detected by inspecting the result text.
    tool_failed = isinstance(result, str) and result.startswith(
        "An error occurred while running the tool"
    )
    if tool_failed:
        span.set_status(SPANSTATUS.ERROR)

    if should_send_default_pii():
        span.set_data(SPANDATA.GEN_AI_TOOL_OUTPUT, result)

View file

@ -0,0 +1,19 @@
import sentry_sdk
from sentry_sdk.consts import OP, SPANDATA
from ..consts import SPAN_ORIGIN
from typing import TYPE_CHECKING
if TYPE_CHECKING:
import agents
def handoff_span(context, from_agent, to_agent_name):
    # type: (agents.RunContextWrapper, agents.Agent, str) -> None
    """Emit an (immediately closed) span recording a handoff between agents.

    ``context`` is currently unused but kept for signature symmetry with the
    other span helpers.
    """
    with sentry_sdk.start_span(
        op=OP.GEN_AI_HANDOFF,
        name=f"handoff from {from_agent.name} to {to_agent_name}",
        origin=SPAN_ORIGIN,
    ) as span:
        span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "handoff")

View file

@ -0,0 +1,86 @@
import sentry_sdk
from sentry_sdk.ai.utils import (
get_start_span_function,
set_data_normalized,
normalize_message_roles,
)
from sentry_sdk.consts import OP, SPANDATA
from sentry_sdk.scope import should_send_default_pii
from sentry_sdk.utils import safe_serialize
from ..consts import SPAN_ORIGIN
from ..utils import _set_agent_data
from typing import TYPE_CHECKING
if TYPE_CHECKING:
import agents
from typing import Any
def invoke_agent_span(context, agent, kwargs):
    # type: (agents.RunContextWrapper, agents.Agent, dict[str, Any]) -> sentry_sdk.tracing.Span
    """Start and manually enter the span covering one agent invocation.

    The matching ``__exit__`` happens later in ``update_invoke_agent_span``,
    which is why the span is entered by hand instead of via ``with``.
    """
    start_span_function = get_start_span_function()
    span = start_span_function(
        op=OP.GEN_AI_INVOKE_AGENT,
        name=f"invoke_agent {agent.name}",
        origin=SPAN_ORIGIN,
    )
    # Enter manually; closed in update_invoke_agent_span.
    span.__enter__()
    span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
    if should_send_default_pii():
        # Reconstruct the request messages (system instructions + original
        # user input) only when sending PII is allowed.
        messages = []
        if agent.instructions:
            message = (
                agent.instructions
                if isinstance(agent.instructions, str)
                else safe_serialize(agent.instructions)
            )
            messages.append(
                {
                    "content": [{"text": message, "type": "text"}],
                    "role": "system",
                }
            )
        original_input = kwargs.get("original_input")
        if original_input is not None:
            message = (
                original_input
                if isinstance(original_input, str)
                else safe_serialize(original_input)
            )
            messages.append(
                {
                    "content": [{"text": message, "type": "text"}],
                    "role": "user",
                }
            )
        if len(messages) > 0:
            # Normalize roles to the allowed set before attaching.
            normalized_messages = normalize_message_roles(messages)
            set_data_normalized(
                span,
                SPANDATA.GEN_AI_REQUEST_MESSAGES,
                normalized_messages,
                unpack=False,
            )
    _set_agent_data(span, agent)
    return span
def update_invoke_agent_span(context, agent, output):
    # type: (agents.RunContextWrapper, agents.Agent, Any) -> None
    """Record the agent's output on the current span and close it.

    Counterpart to ``invoke_agent_span``, which entered the span manually.
    """
    span = sentry_sdk.get_current_span()
    if not span:
        return

    if should_send_default_pii():
        set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, output, unpack=False)

    span.__exit__(None, None, None)

View file

@ -0,0 +1,199 @@
import sentry_sdk
from sentry_sdk.ai.utils import (
GEN_AI_ALLOWED_MESSAGE_ROLES,
normalize_message_roles,
set_data_normalized,
normalize_message_role,
)
from sentry_sdk.consts import SPANDATA, SPANSTATUS, OP
from sentry_sdk.integrations import DidNotEnable
from sentry_sdk.scope import should_send_default_pii
from sentry_sdk.tracing_utils import set_span_errored
from sentry_sdk.utils import event_from_exception, safe_serialize
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Any
from agents import Usage
try:
import agents
except ImportError:
raise DidNotEnable("OpenAI Agents not installed")
def _capture_exception(exc):
    # type: (Any) -> None
    """Mark the current span as errored and report *exc* to Sentry."""
    set_span_errored()

    client = sentry_sdk.get_client()
    event, hint = event_from_exception(
        exc,
        client_options=client.options,
        mechanism={"type": "openai_agents", "handled": False},
    )
    sentry_sdk.capture_event(event, hint=hint)
def _set_agent_data(span, agent):
    # type: (sentry_sdk.tracing.Span, agents.Agent) -> None
    """Attach agent metadata (model, sampling settings, tools) to *span*."""
    span.set_data(
        SPANDATA.GEN_AI_SYSTEM, "openai"
    )  # See footnote for https://opentelemetry.io/docs/specs/semconv/registry/attributes/gen-ai/#gen-ai-system for explanation why.
    span.set_data(SPANDATA.GEN_AI_AGENT_NAME, agent.name)
    # Model settings are only recorded when set (truthy); note this also
    # skips explicit zero values such as temperature=0.
    if agent.model_settings.max_tokens:
        span.set_data(
            SPANDATA.GEN_AI_REQUEST_MAX_TOKENS, agent.model_settings.max_tokens
        )
    if agent.model:
        # agent.model can be a Model object or a plain model-name string.
        model_name = agent.model.model if hasattr(agent.model, "model") else agent.model
        span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model_name)
    if agent.model_settings.presence_penalty:
        span.set_data(
            SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY,
            agent.model_settings.presence_penalty,
        )
    if agent.model_settings.temperature:
        span.set_data(
            SPANDATA.GEN_AI_REQUEST_TEMPERATURE, agent.model_settings.temperature
        )
    if agent.model_settings.top_p:
        span.set_data(SPANDATA.GEN_AI_REQUEST_TOP_P, agent.model_settings.top_p)
    if agent.model_settings.frequency_penalty:
        span.set_data(
            SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY,
            agent.model_settings.frequency_penalty,
        )
    if len(agent.tools) > 0:
        span.set_data(
            SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS,
            safe_serialize([vars(tool) for tool in agent.tools]),
        )
def _set_usage_data(span, usage):
    # type: (sentry_sdk.tracing.Span, Usage) -> None
    """Copy token-usage counters from an agents ``Usage`` object onto *span*."""
    usage_data = (
        (SPANDATA.GEN_AI_USAGE_INPUT_TOKENS, usage.input_tokens),
        (
            SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED,
            usage.input_tokens_details.cached_tokens,
        ),
        (SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS, usage.output_tokens),
        (
            SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS_REASONING,
            usage.output_tokens_details.reasoning_tokens,
        ),
        (SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS, usage.total_tokens),
    )
    for key, value in usage_data:
        span.set_data(key, value)
def _set_input_data(span, get_response_kwargs):
    # type: (sentry_sdk.tracing.Span, dict[str, Any]) -> None
    """Attach the request messages from *get_response_kwargs* to *span*.

    No-op unless sending default PII is enabled.
    """
    if not should_send_default_pii():
        return
    request_messages = []
    system_instructions = get_response_kwargs.get("system_instructions")
    if system_instructions:
        request_messages.append(
            {
                "role": GEN_AI_ALLOWED_MESSAGE_ROLES.SYSTEM,
                "content": [{"type": "text", "text": system_instructions}],
            }
        )
    for message in get_response_kwargs.get("input", []):
        if "role" in message:
            # Plain chat message: normalize its role to the allowed set.
            normalized_role = normalize_message_role(message.get("role"))
            request_messages.append(
                {
                    "role": normalized_role,
                    "content": [{"type": "text", "text": message.get("content")}],
                }
            )
        else:
            # Tool-call items carry a "type" instead of a "role"; map them to
            # assistant/tool roles and keep the raw item as content.
            if message.get("type") == "function_call":
                request_messages.append(
                    {
                        "role": GEN_AI_ALLOWED_MESSAGE_ROLES.ASSISTANT,
                        "content": [message],
                    }
                )
            elif message.get("type") == "function_call_output":
                request_messages.append(
                    {
                        "role": GEN_AI_ALLOWED_MESSAGE_ROLES.TOOL,
                        "content": [message],
                    }
                )
    set_data_normalized(
        span,
        SPANDATA.GEN_AI_REQUEST_MESSAGES,
        normalize_message_roles(request_messages),
        unpack=False,
    )
def _set_output_data(span, result):
    # type: (sentry_sdk.tracing.Span, Any) -> None
    """Attach response text and tool calls from a model *result* to *span*.

    No-op unless sending default PII is enabled.
    """
    if not should_send_default_pii():
        return
    output_messages = {
        "response": [],
        "tool": [],
    }  # type: dict[str, list[Any]]
    for output in result.output:
        if output.type == "function_call":
            output_messages["tool"].append(output.dict())
        elif output.type == "message":
            for output_message in output.content:
                try:
                    output_messages["response"].append(output_message.text)
                except AttributeError:
                    # Unknown output message type, just return the json
                    output_messages["response"].append(output_message.dict())
    if len(output_messages["tool"]) > 0:
        span.set_data(
            SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS, safe_serialize(output_messages["tool"])
        )
    if len(output_messages["response"]) > 0:
        set_data_normalized(
            span, SPANDATA.GEN_AI_RESPONSE_TEXT, output_messages["response"]
        )
def _create_mcp_execute_tool_spans(span, result):
    # type: (sentry_sdk.tracing.Span, agents.Result) -> None
    """Create execute_tool spans for MCP tool calls found in *result*.

    MCP calls happen inside the model request, so each span is given the AI
    client span's start timestamp.
    """
    # Imported locally to keep this module's import block unchanged.
    from .consts import SPAN_ORIGIN

    for output in result.output:
        if output.__class__.__name__ != "McpCall":
            continue

        with sentry_sdk.start_span(
            op=OP.GEN_AI_EXECUTE_TOOL,
            description=f"execute_tool {output.name}",
            start_timestamp=span.start_timestamp,
            # Consistency fix: every other span in this integration sets the
            # integration's trace origin; this one previously did not.
            origin=SPAN_ORIGIN,
        ) as execute_tool_span:
            set_data_normalized(execute_tool_span, SPANDATA.GEN_AI_TOOL_TYPE, "mcp")
            set_data_normalized(execute_tool_span, SPANDATA.GEN_AI_TOOL_NAME, output.name)

            if should_send_default_pii():
                execute_tool_span.set_data(SPANDATA.GEN_AI_TOOL_INPUT, output.arguments)
                execute_tool_span.set_data(SPANDATA.GEN_AI_TOOL_OUTPUT, output.output)

            if output.error:
                execute_tool_span.set_status(SPANSTATUS.ERROR)