monocle-apptrace 0.4.2__py3-none-any.whl → 0.5.0b1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- monocle_apptrace/__main__.py +1 -1
- monocle_apptrace/exporters/file_exporter.py +123 -36
- monocle_apptrace/instrumentation/common/__init__.py +16 -1
- monocle_apptrace/instrumentation/common/constants.py +6 -1
- monocle_apptrace/instrumentation/common/instrumentor.py +19 -152
- monocle_apptrace/instrumentation/common/method_wrappers.py +380 -0
- monocle_apptrace/instrumentation/common/span_handler.py +39 -24
- monocle_apptrace/instrumentation/common/utils.py +20 -14
- monocle_apptrace/instrumentation/common/wrapper.py +10 -9
- monocle_apptrace/instrumentation/common/wrapper_method.py +39 -1
- monocle_apptrace/instrumentation/metamodel/a2a/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/a2a/_helper.py +37 -0
- monocle_apptrace/instrumentation/metamodel/a2a/entities/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/a2a/entities/inference.py +112 -0
- monocle_apptrace/instrumentation/metamodel/a2a/methods.py +22 -0
- monocle_apptrace/instrumentation/metamodel/aiohttp/_helper.py +6 -11
- monocle_apptrace/instrumentation/metamodel/anthropic/_helper.py +35 -18
- monocle_apptrace/instrumentation/metamodel/anthropic/entities/inference.py +14 -10
- monocle_apptrace/instrumentation/metamodel/azfunc/_helper.py +13 -11
- monocle_apptrace/instrumentation/metamodel/azfunc/entities/http.py +5 -0
- monocle_apptrace/instrumentation/metamodel/azureaiinference/_helper.py +88 -8
- monocle_apptrace/instrumentation/metamodel/azureaiinference/entities/inference.py +22 -8
- monocle_apptrace/instrumentation/metamodel/botocore/_helper.py +92 -16
- monocle_apptrace/instrumentation/metamodel/botocore/entities/inference.py +13 -8
- monocle_apptrace/instrumentation/metamodel/botocore/handlers/botocore_span_handler.py +1 -1
- monocle_apptrace/instrumentation/metamodel/fastapi/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/fastapi/_helper.py +82 -0
- monocle_apptrace/instrumentation/metamodel/fastapi/entities/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/fastapi/entities/http.py +44 -0
- monocle_apptrace/instrumentation/metamodel/fastapi/methods.py +23 -0
- monocle_apptrace/instrumentation/metamodel/finish_types.py +387 -0
- monocle_apptrace/instrumentation/metamodel/flask/_helper.py +6 -11
- monocle_apptrace/instrumentation/metamodel/gemini/_helper.py +51 -7
- monocle_apptrace/instrumentation/metamodel/gemini/entities/inference.py +17 -9
- monocle_apptrace/instrumentation/metamodel/gemini/entities/retrieval.py +43 -0
- monocle_apptrace/instrumentation/metamodel/gemini/methods.py +10 -0
- monocle_apptrace/instrumentation/metamodel/haystack/_helper.py +15 -8
- monocle_apptrace/instrumentation/metamodel/haystack/entities/inference.py +5 -10
- monocle_apptrace/instrumentation/metamodel/haystack/methods.py +7 -0
- monocle_apptrace/instrumentation/metamodel/lambdafunc/_helper.py +78 -0
- monocle_apptrace/instrumentation/metamodel/lambdafunc/entities/http.py +51 -0
- monocle_apptrace/instrumentation/metamodel/lambdafunc/methods.py +23 -0
- monocle_apptrace/instrumentation/metamodel/lambdafunc/wrapper.py +23 -0
- monocle_apptrace/instrumentation/metamodel/langchain/_helper.py +127 -19
- monocle_apptrace/instrumentation/metamodel/langchain/entities/inference.py +15 -10
- monocle_apptrace/instrumentation/metamodel/langgraph/_helper.py +67 -10
- monocle_apptrace/instrumentation/metamodel/langgraph/entities/inference.py +127 -20
- monocle_apptrace/instrumentation/metamodel/langgraph/langgraph_processor.py +43 -0
- monocle_apptrace/instrumentation/metamodel/langgraph/methods.py +29 -5
- monocle_apptrace/instrumentation/metamodel/llamaindex/_helper.py +227 -16
- monocle_apptrace/instrumentation/metamodel/llamaindex/entities/agent.py +127 -10
- monocle_apptrace/instrumentation/metamodel/llamaindex/entities/inference.py +13 -8
- monocle_apptrace/instrumentation/metamodel/llamaindex/llamaindex_processor.py +51 -0
- monocle_apptrace/instrumentation/metamodel/llamaindex/methods.py +68 -1
- monocle_apptrace/instrumentation/metamodel/mcp/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/mcp/_helper.py +118 -0
- monocle_apptrace/instrumentation/metamodel/mcp/entities/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/mcp/entities/inference.py +48 -0
- monocle_apptrace/instrumentation/metamodel/mcp/mcp_processor.py +13 -0
- monocle_apptrace/instrumentation/metamodel/mcp/methods.py +21 -0
- monocle_apptrace/instrumentation/metamodel/openai/_helper.py +83 -16
- monocle_apptrace/instrumentation/metamodel/openai/entities/inference.py +103 -92
- monocle_apptrace/instrumentation/metamodel/openai/entities/retrieval.py +1 -1
- monocle_apptrace/instrumentation/metamodel/teamsai/_helper.py +41 -22
- monocle_apptrace/instrumentation/metamodel/teamsai/entities/inference/actionplanner_output_processor.py +1 -1
- monocle_apptrace/instrumentation/metamodel/teamsai/entities/inference/teamsai_output_processor.py +5 -9
- monocle_apptrace/instrumentation/metamodel/teamsai/sample.json +0 -4
- {monocle_apptrace-0.4.2.dist-info → monocle_apptrace-0.5.0b1.dist-info}/METADATA +14 -3
- {monocle_apptrace-0.4.2.dist-info → monocle_apptrace-0.5.0b1.dist-info}/RECORD +72 -47
- {monocle_apptrace-0.4.2.dist-info → monocle_apptrace-0.5.0b1.dist-info}/WHEEL +0 -0
- {monocle_apptrace-0.4.2.dist-info → monocle_apptrace-0.5.0b1.dist-info}/licenses/LICENSE +0 -0
- {monocle_apptrace-0.4.2.dist-info → monocle_apptrace-0.5.0b1.dist-info}/licenses/NOTICE +0 -0
monocle_apptrace/instrumentation/metamodel/langgraph/entities/inference.py

@@ -1,24 +1,25 @@
 from monocle_apptrace.instrumentation.metamodel.langgraph import (
     _helper
 )
-
-
+
+AGENT = {
+    "type": "agentic.invocation",
     "attributes": [
         [
             {
                 "_comment": "agent type",
                 "attribute": "type",
-                "accessor": lambda arguments:'agent.
+                "accessor": lambda arguments:'agent.langgraph'
             },
             {
                 "_comment": "name of the agent",
                 "attribute": "name",
-                "accessor": lambda arguments:arguments['instance']
+                "accessor": lambda arguments: _helper.get_agent_name(arguments['instance'])
             },
             {
-                "_comment": "agent
-                "attribute": "
-                "accessor": lambda arguments: _helper.
+                "_comment": "agent description",
+                "attribute": "description",
+                "accessor": lambda arguments: _helper.get_agent_description(arguments['instance'])
             }
         ]
     ],

@@ -27,9 +28,9 @@ INFERENCE = {
             "name":"data.input",
             "attributes": [
                 {
-                    "_comment": "this is
+                    "_comment": "this is Agent input",
                     "attribute": "query",
-                    "accessor": lambda arguments: _helper.
+                    "accessor": lambda arguments: _helper.extract_agent_input(arguments)
                 }
             ]
         },

@@ -39,18 +40,124 @@ INFERENCE = {
             {
                 "_comment": "this is response from LLM",
                 "attribute": "response",
-                "accessor": lambda arguments: _helper.
+                "accessor": lambda arguments: _helper.extract_agent_response(arguments['result'])
             }
         ]
-    },
-    {
-        "name": "metadata",
-        "attributes": [
-            {
-                "_comment": "this is metadata usage from LLM",
-                "accessor": lambda arguments: _helper.update_span_from_llm_response(arguments['result'])
-            }
-        ]
     }
 ]
-}
+}
+
+AGENT_REQUEST = {
+    "type": "agentic.request",
+    "attributes": [
+        [
+            {
+                "_comment": "agent type",
+                "attribute": "type",
+                "accessor": lambda arguments:'agent.langgraph'
+            }
+        ],
+    ],
+    "events": [
+        {
+            "name":"data.input",
+            "attributes": [
+                {
+                    "_comment": "this is Agent input",
+                    "attribute": "input",
+                    "accessor": lambda arguments: _helper.extract_agent_input(arguments)
+                }
+            ]
+        },
+        {
+            "name":"data.output",
+            "attributes": [
+                {
+                    "_comment": "this is response from LLM",
+                    "attribute": "response",
+                    "accessor": lambda arguments: _helper.extract_agent_response(arguments['result'])
+                }
+            ]
+        }
+    ]
+}
+
+TOOLS = {
+    "type": "agentic.tool.invocation",
+    "attributes": [
+        [
+            {
+                "_comment": "tool type",
+                "attribute": "type",
+                "accessor": lambda arguments:'tool.langgraph'
+            },
+            {
+                "_comment": "name of the tool",
+                "attribute": "name",
+                "accessor": lambda arguments: _helper.get_tool_name(arguments['instance'])
+            },
+            {
+                "_comment": "tool description",
+                "attribute": "description",
+                "accessor": lambda arguments: _helper.get_tool_description(arguments['instance'])
+            }
+        ],
+        [
+            {
+                "_comment": "name of the agent",
+                "attribute": "name",
+                "accessor": lambda arguments: _helper.get_source_agent()
+            },
+            {
+                "_comment": "agent type",
+                "attribute": "type",
+                "accessor": lambda arguments:'agent.langgraph'
+            }
+        ]
+    ],
+    "events": [
+        {
+            "name":"data.input",
+            "attributes": [
+                {
+                    "_comment": "this is Tool input",
+                    "attribute": "Inputs",
+                    "accessor": lambda arguments: _helper.extract_tool_input(arguments)
+                },
+            ]
+        },
+        {
+            "name":"data.output",
+            "attributes": [
+                {
+                    "_comment": "this is response from Tool",
+                    "attribute": "response",
+                    "accessor": lambda arguments: _helper.extract_tool_response(arguments['result'])
+                }
+            ]
+        }
+    ]
+}
+
+AGENT_DELEGATION = {
+    "type": "agentic.delegation",
+    "attributes": [
+        [
+            {
+                "_comment": "agent type",
+                "attribute": "type",
+                "accessor": lambda arguments:'agent.langgraph'
+            },
+            {
+                "_comment": "name of the agent",
+                "attribute": "from_agent",
+                "accessor": lambda arguments: _helper.get_source_agent()
+            },
+            {
+                "_comment": "name of the agent called",
+                "attribute": "to_agent",
+                "accessor": lambda arguments: _helper.get_target_agent(arguments['instance'])
+            }
+        ]
+    ]
+}
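For orientation: every "accessor" in these output processors is a plain callable over a single arguments dict, which (judging from the accessors in this file) carries at least instance, args, kwargs, result, and exception. A minimal sketch of how such a processor could be evaluated; the DummyAgent, the inline processor literal, and the evaluation loop below are illustrative, not monocle's actual plumbing:

# Illustrative only: evaluate AGENT-style accessors against a hand-built
# arguments dict. The dict shape is inferred from the accessors in this
# diff, not from a documented monocle API.
class DummyAgent:
    name = "supervisor"
    description = "routes work to worker agents"

arguments = {"instance": DummyAgent(), "args": (), "kwargs": {},
             "result": None, "exception": None}

processor = {
    "type": "agentic.invocation",
    "attributes": [[
        {"attribute": "type", "accessor": lambda a: "agent.langgraph"},
        {"attribute": "name", "accessor": lambda a: getattr(a["instance"], "name", "")},
    ]],
}

attrs = {spec["attribute"]: spec["accessor"](arguments)
         for group in processor["attributes"] for spec in group}
print(attrs)  # {'type': 'agent.langgraph', 'name': 'supervisor'}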
monocle_apptrace/instrumentation/metamodel/langgraph/langgraph_processor.py (new file)

@@ -0,0 +1,43 @@
+from opentelemetry.context import set_value, attach, detach
+from monocle_apptrace.instrumentation.common.span_handler import SpanHandler
+from monocle_apptrace.instrumentation.metamodel.langgraph._helper import (
+    get_name, is_root_agent_name, is_delegation_tool, LANGGRAPTH_AGENT_NAME_KEY
+
+)
+from monocle_apptrace.instrumentation.metamodel.langgraph.entities.inference import (
+    AGENT_DELEGATION, AGENT_REQUEST
+)
+
+class LanggraphAgentHandler(SpanHandler):
+    def pre_tracing(self, to_wrap, wrapped, instance, args, kwargs):
+        return attach(set_value(LANGGRAPTH_AGENT_NAME_KEY, get_name(instance)))
+
+    def post_tracing(self, to_wrap, wrapped, instance, args, kwargs, result, token):
+        if token is not None:
+            detach(token)
+
+    # In multi agent scenarios, the root agent is the one that orchestrates the other agents. LangGraph generates an extra root level invoke()
+    # call on top of the supervisor agent invoke().
+    # This span handler resets the parent invoke call as generic type to avoid duplicate attributes/events in supervisor span and this root span.
+    def hydrate_span(self, to_wrap, wrapped, instance, args, kwargs, result, span, parent_span = None, ex:Exception = None) -> bool:
+        if is_root_agent_name(instance) and "parent.agent.span" in span.attributes:
+            agent_request_wrapper = to_wrap.copy()
+            agent_request_wrapper["output_processor"] = AGENT_REQUEST
+        else:
+            agent_request_wrapper = to_wrap
+        if hasattr(instance, 'name') and parent_span is not None and not SpanHandler.is_root_span(parent_span):
+            parent_span.set_attribute("parent.agent.span", True)
+        return super().hydrate_span(agent_request_wrapper, wrapped, instance, args, kwargs, result, span, parent_span, ex)
+
+class LanggraphToolHandler(SpanHandler):
+    # LangGraph uses an internal tool to initate delegation to other agents. The method is tool invoke() with tool name as `transfer_to_<agent_name>`.
+    # Hence we usea different output processor for tool invoke() to format the span as agentic.delegation.
+    def hydrate_span(self, to_wrap, wrapped, instance, args, kwargs, result, span, parent_span = None, ex:Exception = None) -> bool:
+        if is_delegation_tool(instance):
+            agent_request_wrapper = to_wrap.copy()
+            agent_request_wrapper["output_processor"] = AGENT_DELEGATION
+        else:
+            agent_request_wrapper = to_wrap
+
+        return super().hydrate_span(agent_request_wrapper, wrapped, instance, args, kwargs, result, span, parent_span, ex)
+
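The pre_tracing/post_tracing pair above leans on OpenTelemetry's context API: attach a value under a key for the duration of the wrapped call, let nested spans read it back, then restore the previous context. A standalone sketch of that pattern (the key name here is illustrative, not the one monocle uses):

# Sketch of the attach/detach pattern used by pre_tracing/post_tracing.
from opentelemetry.context import attach, detach, get_value, set_value

KEY = "active.agent.name"  # illustrative key

token = attach(set_value(KEY, "supervisor"))
try:
    # anything called in this window sees the value via get_value()
    assert get_value(KEY) == "supervisor"
finally:
    detach(token)  # restore the previous context
assert get_value(KEY) is None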
monocle_apptrace/instrumentation/metamodel/langgraph/methods.py

@@ -1,14 +1,38 @@
-from monocle_apptrace.instrumentation.common.wrapper import task_wrapper
+from monocle_apptrace.instrumentation.common.wrapper import task_wrapper, atask_wrapper
 from monocle_apptrace.instrumentation.metamodel.langgraph.entities.inference import (
-
+    AGENT, TOOLS
 )
 LANGGRAPH_METHODS = [
     {
         "package": "langgraph.graph.state",
         "object": "CompiledStateGraph",
         "method": "invoke",
-        "span_name": "langgraph.graph.invoke",
         "wrapper_method": task_wrapper,
-
-
+        "span_handler":"langgraph_agent_handler",
+        "output_processor": AGENT
+    },
+    {
+        "package": "langgraph.graph.state",
+        "object": "CompiledStateGraph",
+        "method": "ainvoke",
+        "wrapper_method": atask_wrapper,
+        "span_handler":"langgraph_agent_handler",
+        "output_processor": AGENT
+    },
+    {
+        "package": "langchain_core.tools.base",
+        "object": "BaseTool",
+        "method": "run",
+        "wrapper_method": task_wrapper,
+        "span_handler":"langgraph_tool_handler",
+        "output_processor": TOOLS
+    },
+    {
+        "package": "langchain_core.tools.base",
+        "object": "BaseTool",
+        "method": "arun",
+        "wrapper_method": atask_wrapper,
+        "span_handler":"langgraph_tool_handler",
+        "output_processor": TOOLS
+    },
 ]
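These entries are declarative: package/object/method name the target, wrapper_method picks the sync or async wrapper, and span_handler/output_processor (the handler is looked up by name) shape the resulting span. Assuming monocle patches targets with wrapt, as OpenTelemetry-style instrumentors commonly do, the mechanics of registering one entry look roughly like this sketch (not monocle's actual instrumentor code; it also assumes langgraph is importable):

# Rough sketch: turn one declarative entry into a monkey-patched method.
import wrapt

entry = {
    "package": "langgraph.graph.state",
    "object": "CompiledStateGraph",
    "method": "invoke",
}

def wrapper(wrapped, instance, args, kwargs):
    # a real wrapper would open a span here, run the output_processor
    # accessors, and record the result or exception before closing it
    return wrapped(*args, **kwargs)

wrapt.wrap_function_wrapper(
    entry["package"], f'{entry["object"]}.{entry["method"]}', wrapper
)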
monocle_apptrace/instrumentation/metamodel/llamaindex/_helper.py

@@ -3,20 +3,29 @@ This module provides utility functions for extracting system, user,
 and assistant messages from various input formats.
 """
 
+from ast import arguments
 import logging
 from urllib.parse import urlparse
 from opentelemetry.sdk.trace import Span
+from opentelemetry.context import get_value
 from monocle_apptrace.instrumentation.common.utils import (
     Option,
+    get_json_dumps,
     get_keys_as_tuple,
     get_nested_value,
     try_option,
     get_exception_message,
     get_status_code,
 )
+from monocle_apptrace.instrumentation.metamodel.finish_types import map_llamaindex_finish_reason_to_finish_type
 
+LLAMAINDEX_AGENT_NAME_KEY = "_active_agent_name"
 logger = logging.getLogger(__name__)
 
+def get_status(result):
+    if result is not None and hasattr(result, 'status'):
+        return result.status
+    return None
 
 def extract_tools(instance):
     tools = []
@@ -34,6 +43,70 @@ def extract_tools(instance):
         tools.append(tool_name)
     return tools
 
+def get_tool_name(args, instance):
+    if len(args) > 1:
+        if hasattr(args[1], 'metadata') and hasattr(args[1].metadata, 'name'):
+            return args[1].metadata.name
+        return ""
+    else:
+        if hasattr(instance, 'metadata') and hasattr(instance.metadata, 'name'):
+            return instance.metadata.name
+        return ""
+
+def get_tool_description(arguments):
+    if len(arguments['args']) > 1:
+        if hasattr(arguments['args'][1], 'metadata') and hasattr(arguments['args'][1].metadata, 'description'):
+            return arguments['args'][1].metadata.description
+        return ""
+    else:
+        if hasattr(arguments['instance'], 'metadata') and hasattr(arguments['instance'].metadata, 'description'):
+            return arguments['instance'].metadata.description
+        return ""
+
+def extract_tool_args(arguments):
+    tool_args = []
+    if len(arguments['args']) > 1:
+        for key, value in arguments['args'][2].items():
+            # check if value is builtin type or a string
+            if value is not None and isinstance(value, (str, int, float, bool)):
+                tool_args.append({key, value})
+    else:
+        for key, value in arguments['kwargs'].items():
+            # check if value is builtin type or a string
+            if value is not None and isinstance(value, (str, int, float, bool)):
+                tool_args.append({key, value})
+    return [get_json_dumps(tool_arg) for tool_arg in tool_args]
+
+def extract_tool_response(response):
+    if hasattr(response, 'raw_output'):
+        return response.raw_output
+    return ""
+
+def is_delegation_tool(args, instance) -> bool:
+    return get_tool_name(args, instance) == "handoff"
+
+def get_agent_name(instance) -> str:
+    if hasattr(instance, 'name'):
+        return instance.name
+    else:
+        return instance.__class__.__name__
+
+def get_agent_description(instance) -> str:
+    if hasattr(instance, 'description'):
+        return instance.description
+    return ""
+
+def get_source_agent(parent_span:Span) -> str:
+    source_agent_name = parent_span.attributes.get(LLAMAINDEX_AGENT_NAME_KEY, "")
+    if source_agent_name == "" and parent_span.name.startswith("llama_index.core.agent.ReActAgent."):
+        # Fallback to the agent name from the parent span if not set
+        source_agent_name = "ReactAgent"
+    return source_agent_name
+
+def get_target_agent(results) -> str:
+    if hasattr(results, 'raw_input'):
+        return results.raw_input.get('kwargs', {}).get("to_agent", "")
+    return ""
 
 def extract_messages(args):
     """Extract system and user messages"""
@@ -50,38 +123,69 @@ def extract_messages(args):
         if isinstance(args, (list, tuple)) and args:
             for msg in args[0]:
                 process_message(msg)
+        elif args and isinstance(args, tuple):
+            messages.append(args[0])
         if isinstance(args, dict):
             for msg in args.get("messages", []):
                 process_message(msg)
-
-        messages.append(args[0])
+
 
-        return [
+        return [get_json_dumps(message) for message in messages]
 
     except Exception as e:
         logger.warning("Error in extract_messages: %s", str(e))
         return []
 
+def extract_agent_input(args):
+    if isinstance(args, (list, tuple)):
+        input_args = []
+        for arg in args:
+            if isinstance(arg, (str, dict)):
+                input_args.append(arg)
+            elif hasattr(arg, 'raw') and isinstance(arg.raw, str):
+                input_args.append(arg.raw)
+        return input_args
+    elif isinstance(args, str):
+        return [args]
+    return ""
+
+def extract_agent_response(arguments):
+    status = get_status_code(arguments)
+    if status == 'success':
+        if hasattr(arguments['result'], 'response'):
+            if hasattr(arguments['result'].response, 'content'):
+                return arguments['result'].response.content
+            return arguments['result'].response
+        return ""
+    else:
+        if arguments["exception"] is not None:
+            return get_exception_message(arguments)
+        elif hasattr(arguments['result'], "error"):
+            return arguments['result'].error
+
 def extract_assistant_message(arguments):
     status = get_status_code(arguments)
-
+    messages = []
+    role = "assistant"
     if status == 'success':
         if isinstance(arguments['result'], str):
-
+            messages.append({role: arguments['result']})
         if hasattr(arguments['result'], "content"):
-
+            messages.append({role: arguments['result'].content})
         if hasattr(arguments['result'], "message") and hasattr(arguments['result'].message, "content"):
-
+            role = getattr(arguments['result'].message, 'role', role)
+            if hasattr(role, 'value'):
+                role = role.value
+            messages.append({role: arguments['result'].message.content})
         if hasattr(arguments['result'],"response") and isinstance(arguments['result'].response, str):
-
+            messages.append({role: arguments['result'].response})
     else:
         if arguments["exception"] is not None:
-
-        elif hasattr(
-
-
-        return response
+            return get_exception_message(arguments)
+        elif hasattr(arguments['result'], "error"):
+            return arguments['result'].error
 
+    return get_json_dumps(messages[0]) if messages else ""
 
 def extract_query_from_content(content):
     try:
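Note the shape change in extract_assistant_message: it now accumulates {role: content} pairs and returns the first one as a JSON string instead of returning a bare string. An illustration of the resulting shape (json.dumps stands in for the package's get_json_dumps helper):

# Illustrative: the return value for a plain-string LLM result.
import json

result = "The answer is 42."        # arguments['result'] as a plain string
message = {"assistant": result}     # role defaults to "assistant"
print(json.dumps(message))          # {"assistant": "The answer is 42."}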
@@ -108,6 +212,8 @@ def extract_provider_name(instance):
     provider_url: Option[str]= try_option(getattr, instance, 'api_base').and_then(lambda url: urlparse(url).hostname)
     if hasattr(instance,'_client'):
         provider_url:Option[str] = try_option(getattr, instance._client.base_url,'host')
+    if hasattr(instance, 'model') and isinstance(instance.model, str) and 'gemini' in instance.model.lower():
+        provider_url: Option[str] = try_option(lambda: 'gemini.googleapis.com')
     return provider_url.unwrap_or(None)
 
 
@@ -117,6 +223,8 @@ def extract_inference_endpoint(instance):
     inference_endpoint: Option[str] = try_option(getattr, instance._client.sdk_configuration, 'server_url').map(str)
     if hasattr(instance._client,'base_url'):
         inference_endpoint: Option[str] = try_option(getattr, instance._client, 'base_url').map(str)
+    if hasattr(instance, 'model') and isinstance(instance.model, str) and 'gemini' in instance.model.lower():
+        inference_endpoint = try_option(lambda: f"https://generativelanguage.googleapis.com/v1beta/models/{instance.model}:generateContent")
     return inference_endpoint.unwrap_or(extract_provider_name(instance))
 
 
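Both Gemini branches synthesize the provider host and endpoint from the model id instead of reading them off a client object. For example (the model id below is hypothetical):

# Example of the synthesized Gemini endpoint string.
model = "gemini-1.5-flash"  # hypothetical model id
print(f"https://generativelanguage.googleapis.com/v1beta/models/{model}:generateContent")
# https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent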
@@ -175,10 +283,113 @@ def update_span_from_llm_response(response, instance):
     if response is not None and hasattr(response, "raw"):
         if response.raw is not None:
             token_usage = response.raw.get("usage") if isinstance(response.raw, dict) else getattr(response.raw, "usage", None)
+            if token_usage is None:
+                token_usage = response.raw.get("usage_metadata") if isinstance(response.raw, dict) else getattr(response.raw,
+                                                                                                                "usage_metadata", None)
             if token_usage is not None:
                 temperature = instance.__dict__.get("temperature", None)
                 meta_dict.update({"temperature": temperature})
-                meta_dict.update({"completion_tokens": getattr(token_usage, "completion_tokens",None) or getattr(token_usage,"output_tokens",None)})
-                meta_dict.update({"prompt_tokens": getattr(token_usage, "prompt_tokens",None) or getattr(token_usage,"input_tokens",None)})
-
+                meta_dict.update({"completion_tokens": getattr(token_usage, "completion_tokens",None) or getattr(token_usage,"output_tokens",None) or token_usage.get("candidates_token_count",None)})
+                meta_dict.update({"prompt_tokens": getattr(token_usage, "prompt_tokens",None) or getattr(token_usage,"input_tokens",None) or token_usage.get("prompt_token_count",None)})
+                total_tokens = getattr(token_usage, "total_tokens", None)
+                if total_tokens is not None:
+                    meta_dict.update({"total_tokens": total_tokens})
+                else:
+                    output_tokens = getattr(token_usage, "output_tokens", None)
+                    input_tokens = getattr(token_usage, "input_tokens", None)
+                    if output_tokens is not None and input_tokens is not None:
+                        meta_dict.update({"total_tokens": output_tokens + input_tokens})
+                    else:
+                        meta_dict.update({ "total_tokens": token_usage.get("total_token_count", None)})
+
     return meta_dict
+
+def extract_finish_reason(arguments):
+    """Extract finish_reason from LlamaIndex response."""
+    try:
+        # Handle exception cases first
+        if arguments.get("exception") is not None:
+            return "error"
+
+        response = arguments.get("result")
+        if response is None:
+            return None
+
+        # Check various possible locations for finish_reason in LlamaIndex responses
+
+        # Direct finish_reason attribute
+        if hasattr(response, "finish_reason") and response.finish_reason:
+            return response.finish_reason
+
+        # Check if response has raw attribute (common in LlamaIndex)
+        if hasattr(response, "raw") and response.raw:
+            raw_response = response.raw
+            if isinstance(raw_response, dict):
+                # Check for finish_reason in raw response
+                if "finish_reason" in raw_response:
+                    return raw_response["finish_reason"]
+                if "stop_reason" in raw_response:
+                    return raw_response["stop_reason"]
+                # Check for choices structure (OpenAI-style)
+                if "choices" in raw_response and raw_response["choices"]:
+                    choice = raw_response["choices"][0]
+                    if isinstance(choice, dict) and "finish_reason" in choice:
+                        return choice["finish_reason"]
+            elif hasattr(raw_response, "choices") and raw_response.choices:
+                # Handle object-style raw response
+                choice = raw_response.choices[0]
+                if hasattr(choice, "finish_reason"):
+                    return choice.finish_reason
+
+        # Check for additional metadata
+        if hasattr(response, "additional_kwargs") and response.additional_kwargs:
+            kwargs = response.additional_kwargs
+            if isinstance(kwargs, dict):
+                for key in ["finish_reason", "stop_reason"]:
+                    if key in kwargs:
+                        return kwargs[key]
+
+        # Check for response metadata
+        if hasattr(response, "response_metadata") and response.response_metadata:
+            metadata = response.response_metadata
+            if isinstance(metadata, dict):
+                for key in ["finish_reason", "stop_reason"]:
+                    if key in metadata:
+                        return metadata[key]
+
+        # Check for source nodes or other LlamaIndex-specific attributes
+        if hasattr(response, "source_nodes") and response.source_nodes:
+            # If we have source nodes, it's likely a successful retrieval
+            return "stop"
+
+        # If no specific finish reason found, infer from status
+        status_code = get_status_code(arguments)
+        if status_code == 'success':
+            return "stop" # Default success finish reason
+        elif status_code == 'error':
+            return "error"
+
+    except Exception as e:
+        logger.warning("Warning: Error occurred in extract_finish_reason: %s", str(e))
+        return None
+
+    return None
+
+
+def map_finish_reason_to_finish_type(finish_reason):
+    """Map LlamaIndex finish_reason to finish_type."""
+    return map_llamaindex_finish_reason_to_finish_type(finish_reason)
+
+def extract_agent_request_input(kwargs):
+    if "user_msg" in kwargs:
+        return kwargs["user_msg"]
+    return ""
+
+def extract_agent_request_output(arguments):
+    if hasattr(arguments['result'], 'response'):
+        if hasattr(arguments['result'].response, 'content'):
+            return arguments['result'].response.content
+        return arguments['result'].response
+    elif hasattr(arguments['result'], 'raw_output'):
+        return arguments['result'].raw_output
+    return ""