monocle-apptrace 0.4.1__py3-none-any.whl → 0.5.0__py3-none-any.whl
This diff shows the changes between the two publicly released versions of the package as they appear in their public registries. It is provided for informational purposes only.
Note: this release of monocle-apptrace has been flagged as potentially problematic.
- monocle_apptrace/__main__.py +1 -1
- monocle_apptrace/exporters/file_exporter.py +125 -37
- monocle_apptrace/instrumentation/common/__init__.py +16 -1
- monocle_apptrace/instrumentation/common/constants.py +14 -1
- monocle_apptrace/instrumentation/common/instrumentor.py +19 -152
- monocle_apptrace/instrumentation/common/method_wrappers.py +376 -0
- monocle_apptrace/instrumentation/common/span_handler.py +58 -32
- monocle_apptrace/instrumentation/common/utils.py +52 -15
- monocle_apptrace/instrumentation/common/wrapper.py +124 -18
- monocle_apptrace/instrumentation/common/wrapper_method.py +48 -1
- monocle_apptrace/instrumentation/metamodel/a2a/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/a2a/_helper.py +37 -0
- monocle_apptrace/instrumentation/metamodel/a2a/entities/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/a2a/entities/inference.py +112 -0
- monocle_apptrace/instrumentation/metamodel/a2a/methods.py +22 -0
- monocle_apptrace/instrumentation/metamodel/adk/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/adk/_helper.py +182 -0
- monocle_apptrace/instrumentation/metamodel/adk/entities/agent.py +50 -0
- monocle_apptrace/instrumentation/metamodel/adk/entities/tool.py +57 -0
- monocle_apptrace/instrumentation/metamodel/adk/methods.py +24 -0
- monocle_apptrace/instrumentation/metamodel/agents/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/agents/_helper.py +220 -0
- monocle_apptrace/instrumentation/metamodel/agents/agents_processor.py +152 -0
- monocle_apptrace/instrumentation/metamodel/agents/entities/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/agents/entities/inference.py +191 -0
- monocle_apptrace/instrumentation/metamodel/agents/methods.py +56 -0
- monocle_apptrace/instrumentation/metamodel/aiohttp/_helper.py +6 -11
- monocle_apptrace/instrumentation/metamodel/anthropic/_helper.py +112 -18
- monocle_apptrace/instrumentation/metamodel/anthropic/entities/inference.py +18 -10
- monocle_apptrace/instrumentation/metamodel/azfunc/_helper.py +13 -11
- monocle_apptrace/instrumentation/metamodel/azfunc/entities/http.py +5 -0
- monocle_apptrace/instrumentation/metamodel/azureaiinference/_helper.py +88 -8
- monocle_apptrace/instrumentation/metamodel/azureaiinference/entities/inference.py +22 -8
- monocle_apptrace/instrumentation/metamodel/botocore/_helper.py +92 -16
- monocle_apptrace/instrumentation/metamodel/botocore/entities/inference.py +13 -8
- monocle_apptrace/instrumentation/metamodel/botocore/handlers/botocore_span_handler.py +1 -1
- monocle_apptrace/instrumentation/metamodel/fastapi/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/fastapi/_helper.py +82 -0
- monocle_apptrace/instrumentation/metamodel/fastapi/entities/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/fastapi/entities/http.py +44 -0
- monocle_apptrace/instrumentation/metamodel/fastapi/methods.py +23 -0
- monocle_apptrace/instrumentation/metamodel/finish_types.py +463 -0
- monocle_apptrace/instrumentation/metamodel/flask/_helper.py +6 -11
- monocle_apptrace/instrumentation/metamodel/gemini/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/gemini/_helper.py +120 -0
- monocle_apptrace/instrumentation/metamodel/gemini/entities/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/gemini/entities/inference.py +86 -0
- monocle_apptrace/instrumentation/metamodel/gemini/entities/retrieval.py +43 -0
- monocle_apptrace/instrumentation/metamodel/gemini/methods.py +31 -0
- monocle_apptrace/instrumentation/metamodel/haystack/_helper.py +79 -8
- monocle_apptrace/instrumentation/metamodel/haystack/entities/inference.py +15 -10
- monocle_apptrace/instrumentation/metamodel/haystack/methods.py +7 -0
- monocle_apptrace/instrumentation/metamodel/lambdafunc/_helper.py +78 -0
- monocle_apptrace/instrumentation/metamodel/lambdafunc/entities/http.py +51 -0
- monocle_apptrace/instrumentation/metamodel/lambdafunc/methods.py +23 -0
- monocle_apptrace/instrumentation/metamodel/lambdafunc/wrapper.py +23 -0
- monocle_apptrace/instrumentation/metamodel/langchain/_helper.py +145 -19
- monocle_apptrace/instrumentation/metamodel/langchain/entities/inference.py +19 -10
- monocle_apptrace/instrumentation/metamodel/langgraph/_helper.py +67 -10
- monocle_apptrace/instrumentation/metamodel/langgraph/entities/inference.py +127 -20
- monocle_apptrace/instrumentation/metamodel/langgraph/langgraph_processor.py +46 -0
- monocle_apptrace/instrumentation/metamodel/langgraph/methods.py +35 -9
- monocle_apptrace/instrumentation/metamodel/litellm/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/litellm/_helper.py +89 -0
- monocle_apptrace/instrumentation/metamodel/litellm/entities/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/litellm/entities/inference.py +108 -0
- monocle_apptrace/instrumentation/metamodel/litellm/methods.py +19 -0
- monocle_apptrace/instrumentation/metamodel/llamaindex/_helper.py +227 -16
- monocle_apptrace/instrumentation/metamodel/llamaindex/entities/agent.py +127 -10
- monocle_apptrace/instrumentation/metamodel/llamaindex/entities/inference.py +13 -8
- monocle_apptrace/instrumentation/metamodel/llamaindex/llamaindex_processor.py +62 -0
- monocle_apptrace/instrumentation/metamodel/llamaindex/methods.py +68 -1
- monocle_apptrace/instrumentation/metamodel/mcp/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/mcp/_helper.py +118 -0
- monocle_apptrace/instrumentation/metamodel/mcp/entities/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/mcp/entities/inference.py +48 -0
- monocle_apptrace/instrumentation/metamodel/mcp/mcp_processor.py +8 -0
- monocle_apptrace/instrumentation/metamodel/mcp/methods.py +21 -0
- monocle_apptrace/instrumentation/metamodel/openai/_helper.py +188 -16
- monocle_apptrace/instrumentation/metamodel/openai/entities/inference.py +148 -92
- monocle_apptrace/instrumentation/metamodel/openai/entities/retrieval.py +1 -1
- monocle_apptrace/instrumentation/metamodel/teamsai/_helper.py +53 -23
- monocle_apptrace/instrumentation/metamodel/teamsai/entities/inference/actionplanner_output_processor.py +1 -1
- monocle_apptrace/instrumentation/metamodel/teamsai/entities/inference/teamsai_output_processor.py +15 -9
- monocle_apptrace/instrumentation/metamodel/teamsai/sample.json +0 -4
- {monocle_apptrace-0.4.1.dist-info → monocle_apptrace-0.5.0.dist-info}/METADATA +27 -11
- monocle_apptrace-0.5.0.dist-info/RECORD +142 -0
- monocle_apptrace-0.4.1.dist-info/RECORD +0 -96
- {monocle_apptrace-0.4.1.dist-info → monocle_apptrace-0.5.0.dist-info}/WHEEL +0 -0
- {monocle_apptrace-0.4.1.dist-info → monocle_apptrace-0.5.0.dist-info}/licenses/LICENSE +0 -0
- {monocle_apptrace-0.4.1.dist-info → monocle_apptrace-0.5.0.dist-info}/licenses/NOTICE +0 -0

monocle_apptrace/instrumentation/metamodel/litellm/_helper.py (new file, +89)

@@ -0,0 +1,89 @@
+"""
+This module provides utility functions for extracting system, user,
+and assistant messages from various input formats.
+"""
+import json
+import logging
+from monocle_apptrace.instrumentation.common.utils import (
+    Option,
+    get_json_dumps,
+    try_option,
+    get_exception_message,
+    get_status_code,
+)
+
+logger = logging.getLogger(__name__)
+
+
+def extract_messages(kwargs):
+    """Extract system and user messages"""
+    try:
+        messages = []
+        if 'messages' in kwargs and len(kwargs['messages']) > 0:
+            for msg in kwargs['messages']:
+                if msg.get('content') and msg.get('role'):
+                    messages.append({msg['role']: msg['content']})
+
+        return [get_json_dumps(message) for message in messages]
+    except Exception as e:
+        logger.warning("Warning: Error occurred in extract_messages: %s", str(e))
+        return []
+
+
+def extract_assistant_message(arguments):
+    try:
+        messages = []
+        status = get_status_code(arguments)
+        if status == 'success' or status == 'completed':
+            response = arguments["result"]
+            if (response is not None and hasattr(response, "choices") and len(response.choices) > 0):
+                if hasattr(response.choices[0], "message"):
+                    role = (
+                        response.choices[0].message.role
+                        if hasattr(response.choices[0].message, "role")
+                        else "assistant"
+                    )
+                    messages.append({role: response.choices[0].message.content})
+            return get_json_dumps(messages[0]) if messages else ""
+        else:
+            if arguments["exception"] is not None:
+                return get_exception_message(arguments)
+            elif hasattr(arguments["result"], "error"):
+                return arguments["result"].error
+
+    except (IndexError, AttributeError) as e:
+        logger.warning(
+            "Warning: Error occurred in extract_assistant_message: %s", str(e)
+        )
+        return None
+
+def extract_provider_name(url):
+    """Extract host from a URL string (e.g., https://api.openai.com/v1/ -> api.openai.com)"""
+    if not url:
+        return None
+    return url.split("//")[-1].split("/")[0]
+
+def resolve_from_alias(my_map, alias):
+    """Find a alias that is not none from list of aliases"""
+
+    for i in alias:
+        if i in my_map.keys():
+            return my_map[i]
+    return None
+
+
+def update_span_from_llm_response(response):
+    meta_dict = {}
+    token_usage = None
+    if response is not None:
+        if token_usage is None and hasattr(response, "usage") and response.usage is not None:
+            token_usage = response.usage
+        elif token_usage is None and hasattr(response, "response_metadata"):
+            token_usage = getattr(response.response_metadata, "token_usage", None) \
+                if hasattr(response.response_metadata, "token_usage") \
+                else response.response_metadata.get("token_usage", None)
+        if token_usage is not None:
+            meta_dict.update({"completion_tokens": getattr(token_usage, "completion_tokens", None) or getattr(token_usage, "output_tokens", None)})
+            meta_dict.update({"prompt_tokens": getattr(token_usage, "prompt_tokens", None) or getattr(token_usage, "input_tokens", None)})
+            meta_dict.update({"total_tokens": getattr(token_usage, "total_tokens")})
+    return meta_dict
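
The helpers above are plain functions with no litellm dependency, so the new module can be exercised directly. A minimal sketch, assuming monocle-apptrace 0.5.0 is installed and taking get_json_dumps to be a json.dumps-style serializer (so the printed values are approximate):

from monocle_apptrace.instrumentation.metamodel.litellm import _helper

# Messages are flattened to {role: content} and serialized one per entry.
kwargs = {"messages": [
    {"role": "system", "content": "You are terse."},
    {"role": "user", "content": "What is 2 + 2?"},
]}
print(_helper.extract_messages(kwargs))
# e.g. ['{"system": "You are terse."}', '{"user": "What is 2 + 2?"}']

# resolve_from_alias returns the value of the first alias present in the map.
print(_helper.resolve_from_alias(
    {"api_base": "https://api.openai.com/v1"},
    ["azure_endpoint", "api_base", "endpoint"],
))  # https://api.openai.com/v1

# extract_provider_name is a naive host splitter, not a full URL parser.
print(_helper.extract_provider_name("https://api.openai.com/v1/"))  # api.openai.com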
monocle_apptrace/instrumentation/metamodel/litellm/entities/inference.py (new file, +108)

@@ -0,0 +1,108 @@
+from monocle_apptrace.instrumentation.metamodel.litellm import (
+    _helper,
+)
+from monocle_apptrace.instrumentation.common.utils import (
+    get_error_message,
+    resolve_from_alias,
+    get_llm_type,
+)
+INFERENCE = {
+    "type": "inference",
+    "attributes": [
+        [
+            {
+                "_comment": "provider type ,name , deployment , inference_endpoint",
+                "attribute": "type",
+                "accessor": lambda arguments: "inference."
+                + (get_llm_type(arguments['instance']) or 'generic')
+            },
+            {
+                "attribute": "provider_name",
+                "accessor": lambda arguments: _helper.extract_provider_name(
+                    resolve_from_alias(
+                        arguments["kwargs"],
+                        ["azure_endpoint", "api_base", "endpoint"],
+                    )
+                ),
+            },
+            {
+                "attribute": "deployment",
+                "accessor": lambda arguments: resolve_from_alias(
+                    arguments["kwargs"].__dict__,
+                    [
+                        "engine",
+                        "azure_deployment",
+                        "deployment_name",
+                        "deployment_id",
+                        "deployment",
+                    ],
+                ),
+            },
+            {
+                "attribute": "inference_endpoint",
+                "accessor": lambda arguments: resolve_from_alias(
+                    arguments["kwargs"],
+                    ["azure_endpoint", "api_base", "endpoint"],
+                )
+            },
+        ],
+        [
+            {
+                "_comment": "LLM Model",
+                "attribute": "name",
+                "accessor": lambda arguments: resolve_from_alias(
+                    arguments["kwargs"],
+                    ["model", "model_name", "endpoint_name", "deployment_name"],
+                ),
+            },
+            {
+                "attribute": "type",
+                "accessor": lambda arguments: "model.llm."
+                + resolve_from_alias(
+                    arguments["kwargs"],
+                    ["model", "model_name", "endpoint_name", "deployment_name"],
+                ),
+            },
+        ],
+    ],
+    "events": [
+        {
+            "name": "data.input",
+            "attributes": [
+                {
+                    "_comment": "this is instruction and user query to LLM",
+                    "attribute": "input",
+                    "accessor": lambda arguments: _helper.extract_messages(
+                        arguments["kwargs"]
+                    ),
+                }
+            ],
+        },
+        {
+            "name": "data.output",
+            "attributes": [
+
+                {
+                    "attribute": "error_code",
+                    "accessor": lambda arguments: get_error_message(arguments)
+                },
+                {
+                    "_comment": "this is result from LLM",
+                    "attribute": "response",
+                    "accessor": lambda arguments: _helper.extract_assistant_message(arguments),
+                }
+            ],
+        },
+        {
+            "name": "metadata",
+            "attributes": [
+                {
+                    "_comment": "this is metadata usage from LLM",
+                    "accessor": lambda arguments: _helper.update_span_from_llm_response(
+                        arguments["result"]
+                    ),
+                }
+            ],
+        },
+    ],
+}
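
Every accessor in this output processor takes a single arguments dict; judging from the lambdas it carries at least kwargs, instance, result, and exception from the wrapped call, though that shape is inferred here rather than a documented contract. A sketch of evaluating the attribute accessors by hand:

from monocle_apptrace.instrumentation.metamodel.litellm.entities.inference import INFERENCE

# Hand-built arguments dict; in production the task wrapper supplies it.
arguments = {
    "kwargs": {"model": "gpt-4o-mini", "api_base": "https://api.openai.com/v1"},
    "instance": None,
    "result": None,
    "exception": None,
}
for group in INFERENCE["attributes"]:
    for spec in group:
        try:
            print(spec["attribute"], "=", spec["accessor"](arguments))
        except Exception as exc:
            # e.g. the deployment accessor calls kwargs.__dict__, which a plain dict lacks
            print(spec["attribute"], "-> not resolvable here:", type(exc).__name__)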
monocle_apptrace/instrumentation/metamodel/litellm/methods.py (new file, +19)

@@ -0,0 +1,19 @@
+from monocle_apptrace.instrumentation.common.wrapper import atask_wrapper, task_wrapper
+from monocle_apptrace.instrumentation.metamodel.litellm.entities.inference import INFERENCE
+
+LITELLM_METHODS = [
+    {
+        "package": "litellm.llms.openai.openai",
+        "object": "OpenAIChatCompletion",
+        "method": "completion",
+        "wrapper_method": task_wrapper,
+        "output_processor": INFERENCE
+    },
+    {
+        "package": "litellm.llms.azure.azure",
+        "object": "AzureChatCompletion",
+        "method": "completion",
+        "wrapper_method": task_wrapper,
+        "output_processor": INFERENCE
+    }
+]
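
Each entry names a patch target (package, object, method) plus the wrapper and output processor to apply to it. A hypothetical sketch of how such an entry could be consumed with wrapt-style patching; monocle's actual registration lives in wrapper_method.py and instrumentor.py (both in the file list above), so this illustrates the mechanism, not that code:

import wrapt
from monocle_apptrace.instrumentation.metamodel.litellm.methods import LITELLM_METHODS

def _patch(entry):
    def wrapper(wrapped, instance, args, kwargs):
        # The real task_wrapper opens a span here and runs the
        # entry["output_processor"] accessors over the call arguments.
        return wrapped(*args, **kwargs)
    wrapt.wrap_function_wrapper(
        entry["package"], f'{entry["object"]}.{entry["method"]}', wrapper)

for entry in LITELLM_METHODS:  # requires litellm to be importable
    _patch(entry)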
monocle_apptrace/instrumentation/metamodel/llamaindex/_helper.py (+227 -16)

@@ -3,20 +3,29 @@ This module provides utility functions for extracting system, user,
 and assistant messages from various input formats.
 """
 
+from ast import arguments
 import logging
 from urllib.parse import urlparse
 from opentelemetry.sdk.trace import Span
+from opentelemetry.context import get_value
 from monocle_apptrace.instrumentation.common.utils import (
     Option,
+    get_json_dumps,
     get_keys_as_tuple,
     get_nested_value,
     try_option,
     get_exception_message,
     get_status_code,
 )
+from monocle_apptrace.instrumentation.metamodel.finish_types import map_llamaindex_finish_reason_to_finish_type
 
+LLAMAINDEX_AGENT_NAME_KEY = "_active_agent_name"
 logger = logging.getLogger(__name__)
 
+def get_status(result):
+    if result is not None and hasattr(result, 'status'):
+        return result.status
+    return None
 
 def extract_tools(instance):
     tools = []
@@ -34,6 +43,70 @@ def extract_tools(instance):
             tools.append(tool_name)
     return tools
 
+def get_tool_name(args, instance):
+    if len(args) > 1:
+        if hasattr(args[1], 'metadata') and hasattr(args[1].metadata, 'name'):
+            return args[1].metadata.name
+        return ""
+    else:
+        if hasattr(instance, 'metadata') and hasattr(instance.metadata, 'name'):
+            return instance.metadata.name
+        return ""
+
+def get_tool_description(arguments):
+    if len(arguments['args']) > 1:
+        if hasattr(arguments['args'][1], 'metadata') and hasattr(arguments['args'][1].metadata, 'description'):
+            return arguments['args'][1].metadata.description
+        return ""
+    else:
+        if hasattr(arguments['instance'], 'metadata') and hasattr(arguments['instance'].metadata, 'description'):
+            return arguments['instance'].metadata.description
+        return ""
+
+def extract_tool_args(arguments):
+    tool_args = []
+    if len(arguments['args']) > 1:
+        for key, value in arguments['args'][2].items():
+            # check if value is builtin type or a string
+            if value is not None and isinstance(value, (str, int, float, bool)):
+                tool_args.append({key, value})
+    else:
+        for key, value in arguments['kwargs'].items():
+            # check if value is builtin type or a string
+            if value is not None and isinstance(value, (str, int, float, bool)):
+                tool_args.append({key, value})
+    return [get_json_dumps(tool_arg) for tool_arg in tool_args]
+
+def extract_tool_response(response):
+    if hasattr(response, 'raw_output'):
+        return response.raw_output
+    return ""
+
+def is_delegation_tool(args, instance) -> bool:
+    return get_tool_name(args, instance) == "handoff"
+
+def get_agent_name(instance) -> str:
+    if hasattr(instance, 'name'):
+        return instance.name
+    else:
+        return instance.__class__.__name__
+
+def get_agent_description(instance) -> str:
+    if hasattr(instance, 'description'):
+        return instance.description
+    return ""
+
+def get_source_agent(parent_span:Span) -> str:
+    source_agent_name = parent_span.attributes.get(LLAMAINDEX_AGENT_NAME_KEY, "")
+    if source_agent_name == "" and parent_span.name.startswith("llama_index.core.agent.ReActAgent."):
+        # Fallback to the agent name from the parent span if not set
+        source_agent_name = "ReactAgent"
+    return source_agent_name
+
+def get_target_agent(results) -> str:
+    if hasattr(results, 'raw_input'):
+        return results.raw_input.get('kwargs', {}).get("to_agent", "")
+    return ""
 
 def extract_messages(args):
     """Extract system and user messages"""
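
The agent helpers added here are defensive getattr probes, so stand-in objects are enough to see their behavior; using SimpleNamespace to mimic LlamaIndex agents and tools is an assumption for illustration:

from types import SimpleNamespace
from monocle_apptrace.instrumentation.metamodel.llamaindex import _helper

agent = SimpleNamespace(name="planner", description="Plans multi-step tasks")
print(_helper.get_agent_name(agent))         # planner
print(_helper.get_agent_description(agent))  # Plans multi-step tasks

# Without a 'name' attribute, the class name is the fallback.
class ReActAgent:
    pass
print(_helper.get_agent_name(ReActAgent()))  # ReActAgent

# Delegation detection keys off the tool name alone.
tool = SimpleNamespace(metadata=SimpleNamespace(name="handoff"))
print(_helper.is_delegation_tool((), tool))  # True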
@@ -50,38 +123,69 @@ def extract_messages(args):
         if isinstance(args, (list, tuple)) and args:
             for msg in args[0]:
                 process_message(msg)
+        elif args and isinstance(args, tuple):
+            messages.append(args[0])
         if isinstance(args, dict):
             for msg in args.get("messages", []):
                 process_message(msg)
-
-        messages.append(args[0])
+
 
-        return [
+        return [get_json_dumps(message) for message in messages]
 
     except Exception as e:
         logger.warning("Error in extract_messages: %s", str(e))
         return []
 
+def extract_agent_input(args):
+    if isinstance(args, (list, tuple)):
+        input_args = []
+        for arg in args:
+            if isinstance(arg, (str, dict)):
+                input_args.append(arg)
+            elif hasattr(arg, 'raw') and isinstance(arg.raw, str):
+                input_args.append(arg.raw)
+        return input_args
+    elif isinstance(args, str):
+        return [args]
+    return ""
+
+def extract_agent_response(arguments):
+    status = get_status_code(arguments)
+    if status == 'success':
+        if hasattr(arguments['result'], 'response'):
+            if hasattr(arguments['result'].response, 'content'):
+                return arguments['result'].response.content
+            return arguments['result'].response
+        return ""
+    else:
+        if arguments["exception"] is not None:
+            return get_exception_message(arguments)
+        elif hasattr(arguments['result'], "error"):
+            return arguments['result'].error
+
 def extract_assistant_message(arguments):
     status = get_status_code(arguments)
-
+    messages = []
+    role = "assistant"
     if status == 'success':
         if isinstance(arguments['result'], str):
-
+            messages.append({role: arguments['result']})
         if hasattr(arguments['result'], "content"):
-
+            messages.append({role: arguments['result'].content})
         if hasattr(arguments['result'], "message") and hasattr(arguments['result'].message, "content"):
-
+            role = getattr(arguments['result'].message, 'role', role)
+            if hasattr(role, 'value'):
+                role = role.value
+            messages.append({role: arguments['result'].message.content})
         if hasattr(arguments['result'],"response") and isinstance(arguments['result'].response, str):
-
+            messages.append({role: arguments['result'].response})
     else:
         if arguments["exception"] is not None:
-
-        elif hasattr(
-
-
-            return response
+            return get_exception_message(arguments)
+        elif hasattr(arguments['result'], "error"):
+            return arguments['result'].error
 
+    return get_json_dumps(messages[0]) if messages else ""
 
 def extract_query_from_content(content):
     try:
@@ -108,6 +212,8 @@ def extract_provider_name(instance):
         provider_url: Option[str]= try_option(getattr, instance, 'api_base').and_then(lambda url: urlparse(url).hostname)
     if hasattr(instance,'_client'):
         provider_url:Option[str] = try_option(getattr, instance._client.base_url,'host')
+    if hasattr(instance, 'model') and isinstance(instance.model, str) and 'gemini' in instance.model.lower():
+        provider_url: Option[str] = try_option(lambda: 'gemini.googleapis.com')
     return provider_url.unwrap_or(None)
 
 
@@ -117,6 +223,8 @@ def extract_inference_endpoint(instance):
         inference_endpoint: Option[str] = try_option(getattr, instance._client.sdk_configuration, 'server_url').map(str)
     if hasattr(instance._client,'base_url'):
         inference_endpoint: Option[str] = try_option(getattr, instance._client, 'base_url').map(str)
+    if hasattr(instance, 'model') and isinstance(instance.model, str) and 'gemini' in instance.model.lower():
+        inference_endpoint = try_option(lambda: f"https://generativelanguage.googleapis.com/v1beta/models/{instance.model}:generateContent")
     return inference_endpoint.unwrap_or(extract_provider_name(instance))
 
 
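
Both Gemini branches key off the model name string. The endpoint the second branch produces, shown standalone (the URL template is verbatim from the diff; the model value is a stand-in):

model = "gemini-1.5-flash"  # stand-in for instance.model
endpoint = f"https://generativelanguage.googleapis.com/v1beta/models/{model}:generateContent"
print(endpoint)
# https://generativelanguage.googleapis.com/v1beta/models/gemini-1.5-flash:generateContent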
@@ -175,10 +283,113 @@ def update_span_from_llm_response(response, instance):
     if response is not None and hasattr(response, "raw"):
         if response.raw is not None:
             token_usage = response.raw.get("usage") if isinstance(response.raw, dict) else getattr(response.raw, "usage", None)
+            if token_usage is None:
+                token_usage = response.raw.get("usage_metadata") if isinstance(response.raw, dict) else getattr(response.raw,
+                                                                                                                "usage_metadata", None)
             if token_usage is not None:
                 temperature = instance.__dict__.get("temperature", None)
                 meta_dict.update({"temperature": temperature})
-                meta_dict.update({"completion_tokens": getattr(token_usage, "completion_tokens",None) or getattr(token_usage,"output_tokens",None)})
-                meta_dict.update({"prompt_tokens": getattr(token_usage, "prompt_tokens",None) or getattr(token_usage,"input_tokens",None)})
-
+                meta_dict.update({"completion_tokens": getattr(token_usage, "completion_tokens",None) or getattr(token_usage,"output_tokens",None) or token_usage.get("candidates_token_count",None)})
+                meta_dict.update({"prompt_tokens": getattr(token_usage, "prompt_tokens",None) or getattr(token_usage,"input_tokens",None) or token_usage.get("prompt_token_count",None)})
+                total_tokens = getattr(token_usage, "total_tokens", None)
+                if total_tokens is not None:
+                    meta_dict.update({"total_tokens": total_tokens})
+                else:
+                    output_tokens = getattr(token_usage, "output_tokens", None)
+                    input_tokens = getattr(token_usage, "input_tokens", None)
+                    if output_tokens is not None and input_tokens is not None:
+                        meta_dict.update({"total_tokens": output_tokens + input_tokens})
+                    else:
+                        meta_dict.update({ "total_tokens": token_usage.get("total_token_count", None)})
+
     return meta_dict
+
+def extract_finish_reason(arguments):
+    """Extract finish_reason from LlamaIndex response."""
+    try:
+        # Handle exception cases first
+        if arguments.get("exception") is not None:
+            return "error"
+
+        response = arguments.get("result")
+        if response is None:
+            return None
+
+        # Check various possible locations for finish_reason in LlamaIndex responses
+
+        # Direct finish_reason attribute
+        if hasattr(response, "finish_reason") and response.finish_reason:
+            return response.finish_reason
+
+        # Check if response has raw attribute (common in LlamaIndex)
+        if hasattr(response, "raw") and response.raw:
+            raw_response = response.raw
+            if isinstance(raw_response, dict):
+                # Check for finish_reason in raw response
+                if "finish_reason" in raw_response:
+                    return raw_response["finish_reason"]
+                if "stop_reason" in raw_response:
+                    return raw_response["stop_reason"]
+                # Check for choices structure (OpenAI-style)
+                if "choices" in raw_response and raw_response["choices"]:
+                    choice = raw_response["choices"][0]
+                    if isinstance(choice, dict) and "finish_reason" in choice:
+                        return choice["finish_reason"]
+            elif hasattr(raw_response, "choices") and raw_response.choices:
+                # Handle object-style raw response
+                choice = raw_response.choices[0]
+                if hasattr(choice, "finish_reason"):
+                    return choice.finish_reason
+
+        # Check for additional metadata
+        if hasattr(response, "additional_kwargs") and response.additional_kwargs:
+            kwargs = response.additional_kwargs
+            if isinstance(kwargs, dict):
+                for key in ["finish_reason", "stop_reason"]:
+                    if key in kwargs:
+                        return kwargs[key]
+
+        # Check for response metadata
+        if hasattr(response, "response_metadata") and response.response_metadata:
+            metadata = response.response_metadata
+            if isinstance(metadata, dict):
+                for key in ["finish_reason", "stop_reason"]:
+                    if key in metadata:
+                        return metadata[key]
+
+        # Check for source nodes or other LlamaIndex-specific attributes
+        if hasattr(response, "source_nodes") and response.source_nodes:
+            # If we have source nodes, it's likely a successful retrieval
+            return "stop"
+
+        # If no specific finish reason found, infer from status
+        status_code = get_status_code(arguments)
+        if status_code == 'success':
+            return "stop"  # Default success finish reason
+        elif status_code == 'error':
+            return "error"
+
+    except Exception as e:
+        logger.warning("Warning: Error occurred in extract_finish_reason: %s", str(e))
+        return None
+
+    return None
+
+
+def map_finish_reason_to_finish_type(finish_reason):
+    """Map LlamaIndex finish_reason to finish_type."""
+    return map_llamaindex_finish_reason_to_finish_type(finish_reason)
+
+def extract_agent_request_input(kwargs):
+    if "user_msg" in kwargs:
+        return kwargs["user_msg"]
+    return ""
+
+def extract_agent_request_output(arguments):
+    if hasattr(arguments['result'], 'response'):
+        if hasattr(arguments['result'].response, 'content'):
+            return arguments['result'].response.content
+        return arguments['result'].response
+    elif hasattr(arguments['result'], 'raw_output'):
+        return arguments['result'].raw_output
+    return ""
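
The finish-reason fallback chain can be exercised with stand-in response objects; the shapes below are modeled on the branches it probes and assume monocle-apptrace 0.5.0 is installed:

from types import SimpleNamespace
from monocle_apptrace.instrumentation.metamodel.llamaindex import _helper

# OpenAI-style raw payload: the reason is found under raw["choices"][0].
resp = SimpleNamespace(raw={"choices": [{"finish_reason": "stop"}]})
print(_helper.extract_finish_reason({"exception": None, "result": resp}))  # stop

# A recorded exception short-circuits to "error" before the result is inspected.
print(_helper.extract_finish_reason({"exception": RuntimeError("boom"), "result": None}))  # error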