monocle-apptrace 0.5.0b1__py3-none-any.whl → 0.5.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of monocle-apptrace might be problematic.
- monocle_apptrace/exporters/file_exporter.py +2 -1
- monocle_apptrace/instrumentation/common/__init__.py +7 -5
- monocle_apptrace/instrumentation/common/constants.py +103 -12
- monocle_apptrace/instrumentation/common/instrumentor.py +1 -6
- monocle_apptrace/instrumentation/common/method_wrappers.py +10 -125
- monocle_apptrace/instrumentation/common/scope_wrapper.py +126 -0
- monocle_apptrace/instrumentation/common/span_handler.py +32 -8
- monocle_apptrace/instrumentation/common/utils.py +34 -3
- monocle_apptrace/instrumentation/common/wrapper.py +208 -41
- monocle_apptrace/instrumentation/common/wrapper_method.py +9 -1
- monocle_apptrace/instrumentation/metamodel/a2a/entities/inference.py +3 -1
- monocle_apptrace/instrumentation/metamodel/adk/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/adk/_helper.py +206 -0
- monocle_apptrace/instrumentation/metamodel/adk/entities/agent.py +111 -0
- monocle_apptrace/instrumentation/metamodel/adk/entities/tool.py +59 -0
- monocle_apptrace/instrumentation/metamodel/adk/methods.py +31 -0
- monocle_apptrace/instrumentation/metamodel/agents/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/agents/_helper.py +225 -0
- monocle_apptrace/instrumentation/metamodel/agents/agents_processor.py +174 -0
- monocle_apptrace/instrumentation/metamodel/agents/entities/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/agents/entities/inference.py +196 -0
- monocle_apptrace/instrumentation/metamodel/agents/methods.py +55 -0
- monocle_apptrace/instrumentation/metamodel/aiohttp/entities/http.py +2 -1
- monocle_apptrace/instrumentation/metamodel/anthropic/_helper.py +82 -5
- monocle_apptrace/instrumentation/metamodel/anthropic/entities/inference.py +6 -1
- monocle_apptrace/instrumentation/metamodel/azfunc/entities/http.py +2 -1
- monocle_apptrace/instrumentation/metamodel/azureaiinference/entities/inference.py +2 -1
- monocle_apptrace/instrumentation/metamodel/botocore/entities/inference.py +2 -1
- monocle_apptrace/instrumentation/metamodel/fastapi/entities/http.py +2 -1
- monocle_apptrace/instrumentation/metamodel/fastapi/methods.py +18 -18
- monocle_apptrace/instrumentation/metamodel/finish_types.py +79 -1
- monocle_apptrace/instrumentation/metamodel/flask/entities/http.py +2 -1
- monocle_apptrace/instrumentation/metamodel/gemini/entities/inference.py +7 -3
- monocle_apptrace/instrumentation/metamodel/gemini/entities/retrieval.py +2 -1
- monocle_apptrace/instrumentation/metamodel/gemini/methods.py +8 -1
- monocle_apptrace/instrumentation/metamodel/haystack/_helper.py +64 -0
- monocle_apptrace/instrumentation/metamodel/haystack/entities/inference.py +12 -1
- monocle_apptrace/instrumentation/metamodel/haystack/entities/retrieval.py +2 -1
- monocle_apptrace/instrumentation/metamodel/lambdafunc/entities/http.py +2 -1
- monocle_apptrace/instrumentation/metamodel/langchain/_helper.py +18 -0
- monocle_apptrace/instrumentation/metamodel/langchain/entities/inference.py +6 -1
- monocle_apptrace/instrumentation/metamodel/langchain/entities/retrieval.py +2 -1
- monocle_apptrace/instrumentation/metamodel/langgraph/_helper.py +6 -0
- monocle_apptrace/instrumentation/metamodel/langgraph/entities/inference.py +10 -5
- monocle_apptrace/instrumentation/metamodel/langgraph/langgraph_processor.py +11 -4
- monocle_apptrace/instrumentation/metamodel/langgraph/methods.py +27 -23
- monocle_apptrace/instrumentation/metamodel/litellm/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/litellm/_helper.py +89 -0
- monocle_apptrace/instrumentation/metamodel/litellm/entities/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/litellm/entities/inference.py +109 -0
- monocle_apptrace/instrumentation/metamodel/litellm/methods.py +19 -0
- monocle_apptrace/instrumentation/metamodel/llamaindex/entities/agent.py +9 -4
- monocle_apptrace/instrumentation/metamodel/llamaindex/entities/inference.py +2 -1
- monocle_apptrace/instrumentation/metamodel/llamaindex/entities/retrieval.py +2 -1
- monocle_apptrace/instrumentation/metamodel/llamaindex/llamaindex_processor.py +14 -3
- monocle_apptrace/instrumentation/metamodel/llamaindex/methods.py +1 -1
- monocle_apptrace/instrumentation/metamodel/mcp/_helper.py +2 -1
- monocle_apptrace/instrumentation/metamodel/mcp/entities/inference.py +3 -1
- monocle_apptrace/instrumentation/metamodel/mcp/mcp_processor.py +0 -5
- monocle_apptrace/instrumentation/metamodel/mcp/methods.py +1 -1
- monocle_apptrace/instrumentation/metamodel/openai/_helper.py +110 -5
- monocle_apptrace/instrumentation/metamodel/openai/entities/inference.py +59 -13
- monocle_apptrace/instrumentation/metamodel/requests/entities/http.py +2 -1
- monocle_apptrace/instrumentation/metamodel/teamsai/_helper.py +12 -1
- monocle_apptrace/instrumentation/metamodel/teamsai/entities/inference/teamsai_output_processor.py +12 -1
- {monocle_apptrace-0.5.0b1.dist-info → monocle_apptrace-0.5.1.dist-info}/METADATA +15 -10
- {monocle_apptrace-0.5.0b1.dist-info → monocle_apptrace-0.5.1.dist-info}/RECORD +70 -53
- {monocle_apptrace-0.5.0b1.dist-info → monocle_apptrace-0.5.1.dist-info}/WHEEL +0 -0
- {monocle_apptrace-0.5.0b1.dist-info → monocle_apptrace-0.5.1.dist-info}/licenses/LICENSE +0 -0
- {monocle_apptrace-0.5.0b1.dist-info → monocle_apptrace-0.5.1.dist-info}/licenses/NOTICE +0 -0

monocle_apptrace/instrumentation/metamodel/agents/entities/inference.py
@@ -0,0 +1,196 @@
+from monocle_apptrace.instrumentation.common.constants import AGENT_REQUEST_SPAN_NAME, SPAN_SUBTYPES, SPAN_TYPES
+from monocle_apptrace.instrumentation.metamodel.agents import _helper
+
+AGENT = {
+    "type": SPAN_TYPES.AGENTIC_INVOCATION,
+    "subtype": SPAN_SUBTYPES.ROUTING,
+    "attributes": [
+        [
+            {
+                "_comment": "agent type",
+                "attribute": "type",
+                "accessor": lambda arguments: _helper.AGENTS_AGENT_NAME_KEY,
+            },
+            {
+                "_comment": "name of the agent",
+                "attribute": "name",
+                "accessor": lambda arguments: _helper.get_agent_name(arguments),
+            },
+            {
+                "_comment": "agent description",
+                "attribute": "description",
+                "accessor": lambda arguments: _helper.get_agent_description(arguments),
+            },
+            {
+                "_comment": "agent instructions",
+                "attribute": "instructions",
+                "accessor": lambda arguments: _helper.get_agent_instructions(arguments),
+            },
+        ]
+    ],
+    "events": [
+        {
+            "name": "data.input",
+            "attributes": [
+                {
+                    "_comment": "this is Agent input",
+                    "attribute": "query",
+                    "accessor": lambda arguments: _helper.extract_agent_input(
+                        arguments
+                    ),
+                }
+            ],
+        },
+        {
+            "name": "data.output",
+            "attributes": [
+                {
+                    "_comment": "this is response from Agent",
+                    "attribute": "response",
+                    "accessor": lambda arguments: _helper.extract_agent_response(
+                        arguments["result"]
+                    ),
+                }
+            ],
+        },
+        {
+            "name": "metadata",
+            "attributes": [
+                {
+                    "_comment": "this is metadata from Agent response",
+                    "accessor": lambda arguments: _helper.update_span_from_agent_response(
+                        arguments["result"]
+                    ),
+                }
+            ],
+        },
+    ],
+}
+
+AGENT_REQUEST = {
+    "type": AGENT_REQUEST_SPAN_NAME,
+    "subtype": SPAN_SUBTYPES.PLANNING,
+    "attributes": [
+        [
+            {
+                "_comment": "agent type",
+                "attribute": "type",
+                "accessor": lambda arguments: _helper.AGENTS_AGENT_NAME_KEY,
+            }
+        ],
+    ],
+    "events": [
+        {
+            "name": "data.input",
+            "attributes": [
+                {
+                    "_comment": "this is Agent input",
+                    "attribute": "input",
+                    "accessor": lambda arguments: _helper.extract_agent_input(
+                        arguments
+                    ),
+                }
+            ],
+        },
+        {
+            "name": "data.output",
+            "attributes": [
+                {
+                    "_comment": "this is response from Agent",
+                    "attribute": "response",
+                    "accessor": lambda arguments: _helper.extract_agent_response(
+                        arguments["result"]
+                    ),
+                }
+            ],
+        },
+    ],
+}
+
+TOOLS = {
+    "type": SPAN_TYPES.AGENTIC_TOOL_INVOCATION,
+    "subtype": SPAN_SUBTYPES.ROUTING,
+    "attributes": [
+        [
+            {
+                "_comment": "tool type",
+                "attribute": "type",
+                "accessor": lambda arguments: _helper.get_tool_type(arguments["span"]),
+            },
+            {
+                "_comment": "name of the tool",
+                "attribute": "name",
+                "accessor": lambda arguments: _helper.get_tool_name(
+                    arguments["instance"]
+                ),
+            },
+            {
+                "_comment": "tool description",
+                "attribute": "description",
+                "accessor": lambda arguments: _helper.get_tool_description(
+                    arguments["instance"]
+                ),
+            },
+        ],
+        [
+            {
+                "_comment": "name of the agent",
+                "attribute": "name",
+                "accessor": lambda arguments: _helper.get_source_agent(),
+            },
+            {
+                "_comment": "agent type",
+                "attribute": "type",
+                "accessor": lambda arguments: _helper.AGENTS_AGENT_NAME_KEY,
+            },
+        ],
+    ],
+    "events": [
+        {
+            "name": "data.input",
+            "attributes": [
+                {
+                    "_comment": "this is Tool input",
+                    "attribute": "Inputs",
+                    "accessor": lambda arguments: _helper.extract_tool_input(arguments),
+                },
+            ],
+        },
+        {
+            "name": "data.output",
+            "attributes": [
+                {
+                    "_comment": "this is response from Tool",
+                    "attribute": "response",
+                    "accessor": lambda arguments: _helper.extract_tool_response(
+                        arguments["result"]
+                    ),
+                }
+            ],
+        },
+    ],
+}
+
+AGENT_DELEGATION = {
+    "type": SPAN_TYPES.AGENTIC_DELEGATION,
+    "subtype": SPAN_SUBTYPES.ROUTING,
+    "attributes": [
+        [
+            {
+                "_comment": "agent type",
+                "attribute": "type",
+                "accessor": lambda arguments: _helper.AGENTS_AGENT_NAME_KEY,
+            },
+            {
+                "_comment": "name of the source agent",
+                "attribute": "from_agent",
+                "accessor": lambda arguments: _helper.get_source_agent(),
+            },
+            {
+                "_comment": "name of the target agent",
+                "attribute": "to_agent",
+                "accessor": lambda arguments: _helper.extract_handoff_target(arguments),
+            },
+        ]
+    ],
+}

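The dictionaries above are declarative output processors: each "accessor" is called with an arguments mapping and its return value becomes a span or event attribute. A minimal sketch of that evaluation loop follows; this is not monocle's actual span handler, the arguments keys ("instance", "result", "span") are assumed from the accessors in this file, and the "entity.<n>." attribute prefix is a hypothetical naming convention.

# Illustrative only: evaluate the accessor entries of an output processor such as AGENT.
# `span_attributes` stands in for an OpenTelemetry span's attribute map.
def apply_output_processor(span_attributes: dict, output_processor: dict, arguments: dict) -> None:
    for index, group in enumerate(output_processor.get("attributes", []), start=1):
        for entry in group:
            try:
                value = entry["accessor"](arguments)
            except Exception:
                value = None  # accessors are best-effort; never break the traced call
            if value is not None:
                span_attributes[f"entity.{index}.{entry['attribute']}"] = value
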
monocle_apptrace/instrumentation/metamodel/agents/methods.py
@@ -0,0 +1,55 @@
+from monocle_apptrace.instrumentation.common.wrapper import task_wrapper, atask_wrapper
+from monocle_apptrace.instrumentation.metamodel.agents.entities.inference import (
+    AGENT,
+    AGENT_DELEGATION,
+    TOOLS,
+    AGENT_REQUEST
+)
+from monocle_apptrace.instrumentation.metamodel.agents.agents_processor import (
+    constructor_wrapper,
+    handoff_constructor_wrapper,
+)
+
+AGENTS_METHODS = [
+    # Main agent runner methods
+    {
+        "package": "agents.run",
+        "object": "Runner",
+        "method": "run",
+        "wrapper_method": atask_wrapper,
+        "span_handler": "agents_agent_handler",
+        "output_processor": AGENT_REQUEST,
+    },
+    {
+        "package": "agents.run",
+        "object": "Runner",
+        "method": "run_sync",
+        "wrapper_method": task_wrapper,
+        "span_handler": "agents_agent_handler",
+        "output_processor": AGENT_REQUEST,
+    },
+    # AgentRunner class methods (internal runner)
+    {
+        "package": "agents.run",
+        "object": "AgentRunner",
+        "method": "_run_single_turn",
+        "wrapper_method": atask_wrapper,
+        "span_handler": "agents_agent_handler",
+        "output_processor": AGENT,
+    },
+    # Function tool decorator - wrap the function_tool function directly
+    {
+        "package": "agents.tool",
+        "object": "FunctionTool",
+        "method": "__init__",  # Empty string means wrap the function itself
+        "wrapper_method": constructor_wrapper,
+        "output_processor": TOOLS,
+    },
+    {
+        "package": "agents.handoffs",
+        "object": "Handoff",
+        "method": "__init__",  # Empty string means wrap the function itself
+        "wrapper_method": handoff_constructor_wrapper,
+        "output_processor": AGENT_DELEGATION,
+    },
+]

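These patch targets belong to the OpenAI Agents SDK (the "agents" package). A hypothetical end-user snippet of the kind these hooks would trace, assuming the SDK's Agent/Runner/function_tool API and an arbitrary example tool:

# Hypothetical application code; Runner.run_sync and the FunctionTool produced by
# @function_tool are the objects patched by AGENTS_METHODS above.
from agents import Agent, Runner, function_tool

@function_tool
def add(a: int, b: int) -> int:
    """Add two integers."""
    return a + b

calculator = Agent(name="calculator", instructions="Use the add tool for arithmetic.", tools=[add])
result = Runner.run_sync(calculator, "What is 2 + 3?")
print(result.final_output)
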
monocle_apptrace/instrumentation/metamodel/anthropic/_helper.py
@@ -5,6 +5,7 @@ and assistant messages from various input formats.
 
 import json
 import logging
+from opentelemetry.context import get_value
 from monocle_apptrace.instrumentation.common.utils import (
     Option,
     get_json_dumps,
@@ -15,6 +16,7 @@ from monocle_apptrace.instrumentation.common.utils import (
     get_exception_message,
 )
 from monocle_apptrace.instrumentation.metamodel.finish_types import map_anthropic_finish_reason_to_finish_type
+from monocle_apptrace.instrumentation.common.constants import AGENT_PREFIX_KEY, INFERENCE_AGENT_DELEGATION, INFERENCE_TURN_END, INFERENCE_TOOL_CALL
 
 
 logger = logging.getLogger(__name__)
@@ -60,10 +62,41 @@ def extract_assistant_message(arguments):
     if status == 'success':
         messages = []
         role = response.role if hasattr(response, 'role') else "assistant"
-
-
-
-
+
+        # Handle tool use content blocks
+        if hasattr(response, "content") and response.content:
+            tools = []
+            text_content = []
+
+            for content_block in response.content:
+                if hasattr(content_block, "type"):
+                    if content_block.type == "tool_use":
+                        # Extract tool use information
+                        tool_info = {
+                            "tool_id": getattr(content_block, "id", ""),
+                            "tool_name": getattr(content_block, "name", ""),
+                            "tool_arguments": getattr(content_block, "input", "")
+                        }
+                        tools.append(tool_info)
+                    elif content_block.type == "text":
+                        # Extract text content
+                        if hasattr(content_block, "text"):
+                            text_content.append(content_block.text)
+
+            # If we have tools, add them to the message
+            if tools:
+                messages.append({"tools": tools})
+
+            # If we have text content, add it to the message
+            if text_content:
+                messages.append({role: " ".join(text_content)})
+
+            # Fallback to original logic if no content blocks were processed
+            if not messages and len(response.content) > 0:
+                if hasattr(response.content[0], "text"):
+                    messages.append({role: response.content[0].text})
+
+        # Return first message if list is not empty
         return get_json_dumps(messages[0]) if messages else ""
     else:
         if arguments["exception"] is not None:
@@ -103,4 +136,48 @@ def extract_finish_reason(arguments):
 
 def map_finish_reason_to_finish_type(finish_reason):
     """Map Anthropic stop_reason to finish_type, similar to OpenAI mapping."""
-    return map_anthropic_finish_reason_to_finish_type(finish_reason)
+    return map_anthropic_finish_reason_to_finish_type(finish_reason)
+
+def agent_inference_type(arguments):
+    """Extract agent inference type from Anthropic response"""
+    try:
+        status = get_status_code(arguments)
+        if status == 'success' or status == 'completed':
+            response = arguments["result"]
+
+            # Check if stop_reason indicates tool use
+            if hasattr(response, "stop_reason") and response.stop_reason == "tool_use":
+                # Check if this is agent delegation by looking at tool names
+                if hasattr(response, "content") and response.content:
+                    agent_prefix = get_value(AGENT_PREFIX_KEY)
+                    for content_block in response.content:
+                        if (hasattr(content_block, "type") and
+                                content_block.type == "tool_use" and
+                                hasattr(content_block, "name")):
+                            tool_name = content_block.name
+                            if agent_prefix and tool_name.startswith(agent_prefix):
+                                return INFERENCE_AGENT_DELEGATION
+                # If we found tool use but no agent delegation, it's a regular tool call
+                return INFERENCE_TOOL_CALL
+
+            # Fallback: check the extracted message for tool content
+            assistant_message = extract_assistant_message(arguments)
+            if assistant_message:
+                try:
+                    message = json.loads(assistant_message)
+                    if message and isinstance(message, dict):
+                        assistant_content = message.get("assistant", "")
+                        if assistant_content:
+                            agent_prefix = get_value(AGENT_PREFIX_KEY)
+                            if agent_prefix and agent_prefix in assistant_content:
+                                return INFERENCE_AGENT_DELEGATION
+                except (json.JSONDecodeError, TypeError):
+                    # If JSON parsing fails, fall back to string analysis
+                    agent_prefix = get_value(AGENT_PREFIX_KEY)
+                    if agent_prefix and agent_prefix in assistant_message:
+                        return INFERENCE_AGENT_DELEGATION
+
+        return INFERENCE_TURN_END
+    except Exception as e:
+        logger.warning("Warning: Error occurred in agent_inference_type: %s", str(e))
+        return INFERENCE_TURN_END

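To make the new content-block handling concrete, here is a stand-in Anthropic message built from SimpleNamespace objects; the field names (type, id, name, input, text) mirror the getattr/hasattr calls above, and the expected outputs follow from that code:

# Stand-in objects only, not Anthropic SDK types.
from types import SimpleNamespace

tool_block = SimpleNamespace(type="tool_use", id="toolu_01", name="get_weather",
                             input={"city": "Paris"})
text_block = SimpleNamespace(type="text", text="Checking the weather now.")
response = SimpleNamespace(role="assistant", stop_reason="tool_use",
                           content=[tool_block, text_block])

# For this response, extract_assistant_message emits as its first message:
# {"tools": [{"tool_id": "toolu_01", "tool_name": "get_weather",
#             "tool_arguments": {"city": "Paris"}}]}
# and agent_inference_type returns INFERENCE_TOOL_CALL, or INFERENCE_AGENT_DELEGATION
# if the tool name starts with the configured agent prefix.
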
monocle_apptrace/instrumentation/metamodel/anthropic/entities/inference.py
@@ -1,10 +1,11 @@
+from monocle_apptrace.instrumentation.common.constants import SPAN_TYPES
 from monocle_apptrace.instrumentation.metamodel.anthropic import (
     _helper,
 )
 from monocle_apptrace.instrumentation.common.utils import (get_error_message, resolve_from_alias)
 
 INFERENCE = {
-    "type":
+    "type": SPAN_TYPES.INFERENCE,
     "attributes": [
         [
             {
@@ -79,6 +80,10 @@ INFERENCE = {
                 "_comment": "finish type mapped from finish reason",
                 "attribute": "finish_type",
                 "accessor": lambda arguments: _helper.map_finish_reason_to_finish_type(_helper.extract_finish_reason(arguments))
+            },
+            {
+                "attribute": "inference_sub_type",
+                "accessor": lambda arguments: _helper.agent_inference_type(arguments)
             }
         ]
     }

monocle_apptrace/instrumentation/metamodel/azureaiinference/entities/inference.py
@@ -1,6 +1,7 @@
 import logging
 import time
 from types import SimpleNamespace
+from monocle_apptrace.instrumentation.common.constants import SPAN_TYPES
 from monocle_apptrace.instrumentation.metamodel.azureaiinference import _helper
 from monocle_apptrace.instrumentation.common.utils import (
     get_error_message,
@@ -133,7 +134,7 @@ def process_stream(to_wrap, response, span_processor):
 
 
 INFERENCE = {
-    "type":
+    "type": SPAN_TYPES.INFERENCE,
     "is_auto_close": lambda kwargs: kwargs.get("stream", False) is False,
     "response_processor": process_stream,
     "attributes": [

monocle_apptrace/instrumentation/metamodel/botocore/entities/inference.py
@@ -1,9 +1,10 @@
+from monocle_apptrace.instrumentation.common.constants import SPAN_TYPES
 from monocle_apptrace.instrumentation.metamodel.botocore import (
     _helper,
 )
 from monocle_apptrace.instrumentation.common.utils import (get_error_message, get_llm_type, get_status,)
 INFERENCE = {
-    "type":
+    "type": SPAN_TYPES.INFERENCE,
     "attributes": [
         [
             {

monocle_apptrace/instrumentation/metamodel/fastapi/methods.py
@@ -2,22 +2,22 @@ from monocle_apptrace.instrumentation.common.wrapper import atask_wrapper
 from monocle_apptrace.instrumentation.metamodel.fastapi.entities.http import FASTAPI_HTTP_PROCESSOR, FASTAPI_RESPONSE_PROCESSOR
 
 FASTAPI_METHODS = [
-    {
-        "package": "fastapi",
-        "object": "FastAPI",
-        "method": "__call__",
-        "wrapper_method": atask_wrapper,
-        "span_name": "fastapi.request",
-        "span_handler": "fastapi_handler",
-        "output_processor": FASTAPI_HTTP_PROCESSOR,
-    },
-    {
-        "package": "starlette.responses",
-        "object": "Response",
-        "method": "__call__",
-        "span_name": "fastapi.response",
-        "wrapper_method": atask_wrapper,
-        "span_handler": "fastapi_response_handler",
-        "output_processor": FASTAPI_RESPONSE_PROCESSOR
-    }
+    # {
+    #     "package": "fastapi",
+    #     "object": "FastAPI",
+    #     "method": "__call__",
+    #     "wrapper_method": atask_wrapper,
+    #     "span_name": "fastapi.request",
+    #     "span_handler": "fastapi_handler",
+    #     "output_processor": FASTAPI_HTTP_PROCESSOR,
+    # },
+    # {
+    #     "package": "starlette.responses",
+    #     "object": "Response",
+    #     "method": "__call__",
+    #     "span_name": "fastapi.response",
+    #     "wrapper_method": atask_wrapper,
+    #     "span_handler": "fastapi_response_handler",
+    #     "output_processor": FASTAPI_RESPONSE_PROCESSOR
+    # }
 ]

monocle_apptrace/instrumentation/metamodel/finish_types.py
@@ -11,6 +11,7 @@ class FinishType(Enum):
     TRUNCATED = "truncated"
     CONTENT_FILTER = "content_filter"
     ERROR = "error"
+    TOOL_CALL_ERROR = "tool_call_error"
     REFUSAL = "refusal"
     RATE_LIMITED = "rate_limited"
 
@@ -39,6 +40,7 @@ GEMINI_FINISH_REASON_MAPPING = {
     "MAX_TOKENS": FinishType.TRUNCATED.value,
     "SAFETY": FinishType.CONTENT_FILTER.value,
     "RECITATION": FinishType.CONTENT_FILTER.value,
+    "MALFORMED_FUNCTION_CALL": FinishType.TOOL_CALL_ERROR.value,
     "OTHER": FinishType.ERROR.value,
     "FINISH_REASON_UNSPECIFIED": None
 }
@@ -231,6 +233,48 @@ TEAMSAI_FINISH_REASON_MAPPING = {
     "rate_limited": FinishType.RATE_LIMITED.value,
     "invalid_response": FinishType.ERROR.value,
 }
+# Haystack finish reason mapping
+HAYSTACK_FINISH_REASON_MAPPING = {
+    # Standard completion reasons
+    "stop": FinishType.SUCCESS.value,
+    "complete": FinishType.SUCCESS.value,
+    "finished": FinishType.SUCCESS.value,
+
+    # Token limits
+    "length": FinishType.TRUNCATED.value,
+    "max_tokens": FinishType.TRUNCATED.value,
+    "token_limit": FinishType.TRUNCATED.value,
+
+    # Tool/function calling
+    "tool_calls": FinishType.SUCCESS.value,
+    "function_call": FinishType.SUCCESS.value,
+
+    # Content filtering and safety
+    "content_filter": FinishType.CONTENT_FILTER.value,
+    "safety": FinishType.CONTENT_FILTER.value,
+    "filtered": FinishType.CONTENT_FILTER.value,
+
+    # Errors
+    "error": FinishType.ERROR.value,
+    "failed": FinishType.ERROR.value,
+    "exception": FinishType.ERROR.value,
+
+    # Provider-specific reasons that might pass through LangChain
+    # OpenAI reasons
+    "stop": FinishType.SUCCESS.value,  # Already defined above
+
+    # Anthropic reasons
+    "end_turn": FinishType.SUCCESS.value,
+    "stop_sequence": FinishType.SUCCESS.value,
+
+    # Gemini reasons
+    "STOP": FinishType.SUCCESS.value,
+    "SAFETY": FinishType.CONTENT_FILTER.value,
+    "RECITATION": FinishType.CONTENT_FILTER.value,
+    "OTHER": FinishType.ERROR.value,
+}
+
+ADK_FINISH_REASON_MAPPING = GEMINI_FINISH_REASON_MAPPING
 
 def map_openai_finish_reason_to_finish_type(finish_reason):
     """Map OpenAI finish_reason to standardized finish_type."""
@@ -368,6 +412,34 @@ def map_bedrock_finish_reason_to_finish_type(finish_reason):
 
     return None
 
+def map_haystack_finish_reason_to_finish_type(finish_reason):
+    """Map Haystack finish_reason to standardized finish_type."""
+    if not finish_reason:
+        return None
+
+    # Convert to lowercase for case-insensitive matching
+    finish_reason_lower = finish_reason.lower() if isinstance(finish_reason, str) else str(finish_reason).lower()
+
+    # Try direct mapping first
+    if finish_reason in HAYSTACK_FINISH_REASON_MAPPING:
+        return HAYSTACK_FINISH_REASON_MAPPING[finish_reason]
+
+    # Try lowercase mapping
+    if finish_reason_lower in HAYSTACK_FINISH_REASON_MAPPING:
+        return HAYSTACK_FINISH_REASON_MAPPING[finish_reason_lower]
+
+    # If no direct mapping, try to infer from common patterns
+    if any(keyword in finish_reason_lower for keyword in ['stop', 'complete', 'success', 'done']):
+        return FinishType.SUCCESS.value
+    elif any(keyword in finish_reason_lower for keyword in ['length', 'token', 'limit', 'truncat']):
+        return FinishType.TRUNCATED.value
+    elif any(keyword in finish_reason_lower for keyword in ['filter', 'safety', 'block']):
+        return FinishType.CONTENT_FILTER.value
+    elif any(keyword in finish_reason_lower for keyword in ['error', 'fail', 'exception']):
+        return FinishType.ERROR.value
+
+    return None
+
 def map_teamsai_finish_reason_to_finish_type(finish_reason):
     """Map TeamsAI finish_reason to standardized finish_type."""
     if not finish_reason:
@@ -384,4 +456,10 @@ def map_teamsai_finish_reason_to_finish_type(finish_reason):
     if finish_reason_lower in TEAMSAI_FINISH_REASON_MAPPING:
         return TEAMSAI_FINISH_REASON_MAPPING[finish_reason_lower]
 
-    return None
+    return None
+
+def map_adk_finish_reason_to_finish_type(finish_reason):
+    """Map ADK finish_reason to standardized finish_type."""
+    if not finish_reason:
+        return None
+    return ADK_FINISH_REASON_MAPPING.get(finish_reason, None)

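A short usage sketch of the new mappers (module path taken from the file list above); the expected values follow directly from HAYSTACK_FINISH_REASON_MAPPING, its keyword fallback, and the ADK alias of the Gemini table:

from monocle_apptrace.instrumentation.metamodel.finish_types import (
    FinishType,
    map_adk_finish_reason_to_finish_type,
    map_haystack_finish_reason_to_finish_type,
)

assert map_haystack_finish_reason_to_finish_type("stop") == FinishType.SUCCESS.value            # direct match
assert map_haystack_finish_reason_to_finish_type("max_tokens") == FinishType.TRUNCATED.value    # direct match
assert map_haystack_finish_reason_to_finish_type("BLOCKED") == FinishType.CONTENT_FILTER.value  # keyword fallback
assert map_haystack_finish_reason_to_finish_type(None) is None
assert map_adk_finish_reason_to_finish_type("MALFORMED_FUNCTION_CALL") == FinishType.TOOL_CALL_ERROR.value
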
monocle_apptrace/instrumentation/metamodel/gemini/entities/inference.py
@@ -1,10 +1,11 @@
+from monocle_apptrace.instrumentation.common.constants import SPAN_TYPES
 from monocle_apptrace.instrumentation.metamodel.gemini import (
     _helper,
 )
 from monocle_apptrace.instrumentation.common.utils import get_error_message
 
 INFERENCE = {
-    "type":
+    "type": SPAN_TYPES.INFERENCE,
     "attributes": [
         [
             {
@@ -15,8 +16,11 @@ INFERENCE = {
             {
                 "attribute": "inference_endpoint",
                 "accessor": lambda arguments: _helper.extract_inference_endpoint(arguments['instance'])
-            }
-        ],
+            },
+            {
+                "attribute": "provider_name",
+                "accessor": lambda arguments: 'gcp'
+            } ],
         [
             {
                 "_comment": "LLM Model",

monocle_apptrace/instrumentation/metamodel/gemini/methods.py
@@ -1,4 +1,4 @@
-from monocle_apptrace.instrumentation.common.wrapper import task_wrapper
+from monocle_apptrace.instrumentation.common.wrapper import atask_wrapper, task_wrapper
 from monocle_apptrace.instrumentation.metamodel.gemini.entities.inference import (
     INFERENCE,
 )
@@ -14,6 +14,13 @@ GEMINI_METHODS = [
         "wrapper_method": task_wrapper,
         "output_processor": INFERENCE,
     },
+    {
+        "package": "google.genai.models",
+        "object": "AsyncModels",
+        "method": "generate_content",
+        "wrapper_method": atask_wrapper,
+        "output_processor": INFERENCE,
+    },
     {
         "package": "google.genai.models",
         "object": "Models",