monocle-apptrace 0.3.0b3__py3-none-any.whl → 0.3.0b5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of monocle-apptrace might be problematic.
- monocle_apptrace/__main__.py +19 -0
- monocle_apptrace/exporters/aws/s3_exporter.py +17 -8
- monocle_apptrace/exporters/monocle_exporters.py +5 -4
- monocle_apptrace/instrumentation/common/constants.py +5 -0
- monocle_apptrace/instrumentation/common/instrumentor.py +82 -11
- monocle_apptrace/instrumentation/common/span_handler.py +27 -12
- monocle_apptrace/instrumentation/common/utils.py +112 -5
- monocle_apptrace/instrumentation/common/wrapper.py +48 -23
- monocle_apptrace/instrumentation/common/wrapper_method.py +30 -7
- monocle_apptrace/instrumentation/metamodel/botocore/_helper.py +0 -31
- monocle_apptrace/instrumentation/metamodel/botocore/handlers/botocore_span_handler.py +25 -0
- monocle_apptrace/instrumentation/metamodel/botocore/methods.py +6 -6
- monocle_apptrace/instrumentation/metamodel/flask/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/flask/_helper.py +29 -0
- monocle_apptrace/instrumentation/metamodel/flask/methods.py +13 -0
- monocle_apptrace/instrumentation/metamodel/langchain/_helper.py +6 -0
- monocle_apptrace/instrumentation/metamodel/langgraph/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/langgraph/_helper.py +48 -0
- monocle_apptrace/instrumentation/metamodel/langgraph/entities/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/langgraph/entities/inference.py +56 -0
- monocle_apptrace/instrumentation/metamodel/langgraph/methods.py +14 -0
- monocle_apptrace/instrumentation/metamodel/llamaindex/_helper.py +37 -19
- monocle_apptrace/instrumentation/metamodel/llamaindex/entities/agent.py +47 -0
- monocle_apptrace/instrumentation/metamodel/llamaindex/methods.py +9 -0
- monocle_apptrace/instrumentation/metamodel/openai/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/openai/_helper.py +88 -0
- monocle_apptrace/instrumentation/metamodel/openai/entities/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/openai/entities/inference.py +71 -0
- monocle_apptrace/instrumentation/metamodel/openai/entities/retrieval.py +24 -0
- monocle_apptrace/instrumentation/metamodel/openai/methods.py +25 -0
- monocle_apptrace/instrumentation/metamodel/requests/__init__.py +2 -0
- monocle_apptrace/instrumentation/metamodel/requests/_helper.py +31 -0
- monocle_apptrace/instrumentation/metamodel/requests/methods.py +12 -0
- {monocle_apptrace-0.3.0b3.dist-info → monocle_apptrace-0.3.0b5.dist-info}/METADATA +2 -1
- {monocle_apptrace-0.3.0b3.dist-info → monocle_apptrace-0.3.0b5.dist-info}/RECORD +38 -18
- {monocle_apptrace-0.3.0b3.dist-info → monocle_apptrace-0.3.0b5.dist-info}/WHEEL +0 -0
- {monocle_apptrace-0.3.0b3.dist-info → monocle_apptrace-0.3.0b5.dist-info}/licenses/LICENSE +0 -0
- {monocle_apptrace-0.3.0b3.dist-info → monocle_apptrace-0.3.0b5.dist-info}/licenses/NOTICE +0 -0
monocle_apptrace/instrumentation/common/wrapper_method.py
@@ -1,12 +1,20 @@
 # pylint: disable=too-few-public-methods
-from monocle_apptrace.instrumentation.common.wrapper import task_wrapper
+from typing import Any, Dict
+from monocle_apptrace.instrumentation.common.wrapper import task_wrapper, scope_wrapper
+from monocle_apptrace.instrumentation.common.span_handler import SpanHandler
 from monocle_apptrace.instrumentation.metamodel.botocore.methods import BOTOCORE_METHODS
+from monocle_apptrace.instrumentation.metamodel.botocore.handlers.botocore_span_handler import BotoCoreSpanHandler
 from monocle_apptrace.instrumentation.metamodel.langchain.methods import (
     LANGCHAIN_METHODS,
 )
 from monocle_apptrace.instrumentation.metamodel.llamaindex.methods import (LLAMAINDEX_METHODS, )
 from monocle_apptrace.instrumentation.metamodel.haystack.methods import (HAYSTACK_METHODS, )
-
+from monocle_apptrace.instrumentation.metamodel.openai.methods import (OPENAI_METHODS,)
+from monocle_apptrace.instrumentation.metamodel.langgraph.methods import LANGGRAPH_METHODS
+from monocle_apptrace.instrumentation.metamodel.flask.methods import (FLASK_METHODS, )
+from monocle_apptrace.instrumentation.metamodel.flask._helper import FlaskSpanHandler
+from monocle_apptrace.instrumentation.metamodel.requests.methods import (REQUESTS_METHODS, )
+from monocle_apptrace.instrumentation.metamodel.requests._helper import RequestSpanHandler
 
 class WrapperMethod:
     def __init__(
@@ -17,16 +25,21 @@ class WrapperMethod:
             span_name: str = None,
             output_processor : str = None,
             wrapper_method = task_wrapper,
-            span_handler = 'default'
+            span_handler = 'default',
+            scope_name: str = None
             ):
         self.package = package
         self.object = object_name
         self.method = method
         self.span_name = span_name
         self.output_processor=output_processor
-        self.span_handler = span_handler
 
-        self.wrapper_method = wrapper_method
+        self.span_handler:SpanHandler.__class__ = span_handler
+        self.scope_name = scope_name
+        if scope_name:
+            self.wrapper_method = scope_wrapper
+        else:
+            self.wrapper_method = wrapper_method
 
     def to_dict(self) -> dict:
         # Create a dictionary representation of the instance
@@ -37,9 +50,19 @@ class WrapperMethod:
             'span_name': self.span_name,
             'output_processor': self.output_processor,
             'wrapper_method': self.wrapper_method,
-            'span_handler': self.span_handler
+            'span_handler': self.span_handler,
+            'scope_name': self.scope_name
         }
         return instance_dict
 
+    def get_span_handler(self) -> SpanHandler:
+        return self.span_handler()
+
+DEFAULT_METHODS_LIST = LANGCHAIN_METHODS + LLAMAINDEX_METHODS + HAYSTACK_METHODS + BOTOCORE_METHODS + FLASK_METHODS + REQUESTS_METHODS + LANGGRAPH_METHODS + OPENAI_METHODS
 
-
+MONOCLE_SPAN_HANDLERS: Dict[str, SpanHandler] = {
+    "default": SpanHandler(),
+    "botocore_handler": BotoCoreSpanHandler(),
+    "flask_handler": FlaskSpanHandler(),
+    "request_handler": RequestSpanHandler()
+}
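Taken together, these wrapper_method.py changes mean a WrapperMethod constructed with a scope_name silently swaps its wrapping function to scope_wrapper, and string span-handler names are resolved through the new MONOCLE_SPAN_HANDLERS registry. A minimal sketch of the constructor behavior, assuming monocle-apptrace 0.3.0b5 is installed; the package/object/method values are placeholders, not a real instrumentation target:

    from monocle_apptrace.instrumentation.common.wrapper_method import WrapperMethod

    wm = WrapperMethod(
        package="my_app.handlers",    # placeholder package
        object_name="ChatService",    # placeholder class
        method="reply",               # placeholder method
        span_name="my_app.chat.reply",
        scope_name="chat_session",    # non-None, so scope_wrapper is selected
    )

    # Because scope_name was set, the instance ignores the wrapper_method
    # argument and uses scope_wrapper instead (see the __init__ diff above).
    print(wm.to_dict()["wrapper_method"])  # <function scope_wrapper ...>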
monocle_apptrace/instrumentation/metamodel/botocore/_helper.py
@@ -81,37 +81,6 @@ def resolve_from_alias(my_map, alias):
             return my_map[i]
     return None
 
-
-def botocore_processor(tracer, to_wrap, wrapped, instance, args, kwargs,return_value):
-    service_name = kwargs.get("service_name")
-    service_method_mapping = {
-        "sagemaker-runtime": "invoke_endpoint",
-        "bedrock-runtime": "converse",
-    }
-    if service_name in service_method_mapping:
-        method_name = service_method_mapping[service_name]
-        original_method = getattr(return_value, method_name, None)
-
-        if original_method:
-            instrumented_method = _instrumented_endpoint_invoke(
-                to_wrap, wrapped,return_value, original_method, tracer, service_name
-            )
-            setattr(return_value, method_name, instrumented_method)
-
-
-def _instrumented_endpoint_invoke(to_wrap,wrapped, instance, fn, tracer,service_name):
-    @wraps(fn)
-    def with_instrumentation(*args, **kwargs):
-        span_name="botocore-"+service_name+"-invoke-endpoint"
-        handler = SpanHandler()
-        with tracer.start_as_current_span(span_name) as span:
-            response = fn(*args, **kwargs)
-            handler.hydrate_span(to_wrap, span=span,wrapped=wrapped, instance=instance,args=args, kwargs=kwargs, result=response)
-            return response
-
-    return with_instrumentation
-
-
 def update_span_from_llm_response(response, instance):
     meta_dict = {}
     if response is not None and isinstance(response, dict) and "usage" in response:
monocle_apptrace/instrumentation/metamodel/botocore/handlers/botocore_span_handler.py
@@ -0,0 +1,25 @@
+from monocle_apptrace.instrumentation.common.span_handler import SpanHandler
+
+class BotoCoreSpanHandler(SpanHandler):
+
+    def _botocore_processor(self, to_wrap, wrapped, instance, args, kwargs, return_value):
+        service_name = kwargs.get("service_name")
+        service_method_mapping = {
+            "sagemaker-runtime": "invoke_endpoint",
+            "bedrock-runtime": "converse",
+        }
+        if service_name in service_method_mapping:
+            method_name = service_method_mapping[service_name]
+            original_method = getattr(return_value, method_name, None)
+            span_name = "botocore-" + service_name + "-invoke-endpoint"
+            # wrap_util(original_method, span_name)
+            if original_method:
+                instrumentor = self.instrumentor
+                if instrumentor:
+                    instrumented_method = instrumentor(to_wrap, wrapped, span_name, return_value, original_method)
+                    setattr(return_value, method_name, instrumented_method)
+
+    def post_tracing(self, to_wrap, wrapped, instance, args, kwargs, return_value):
+        self._botocore_processor(to_wrap=to_wrap, wrapped=wrapped, instance=instance, return_value=return_value, args=args,
+                                 kwargs=kwargs)
+        return super().pre_tracing(to_wrap, wrapped, instance, args, kwargs)
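The handler's job is easiest to see in isolation: after botocore builds a client, post_tracing looks up the service's inference method and replaces it with an instrumented version. A minimal sketch of that swap pattern with stubs (no botocore or tracer involved; StubClient and traced are invented here for illustration):

    service_method_mapping = {
        "sagemaker-runtime": "invoke_endpoint",
        "bedrock-runtime": "converse",
    }

    class StubClient:  # stands in for the client that botocore's create_client returns
        def converse(self, **kwargs):
            return {"output": "model reply"}

    def traced(fn):  # stands in for the instrumentor callback that opens a span
        def wrapper(*args, **kwargs):
            print("span: botocore-bedrock-runtime-invoke-endpoint")
            return fn(*args, **kwargs)
        return wrapper

    client = StubClient()
    method_name = service_method_mapping["bedrock-runtime"]
    original = getattr(client, method_name, None)
    if original:
        # the same attribute swap BotoCoreSpanHandler performs
        setattr(client, method_name, traced(original))

    client.converse(modelId="anthropic.claude-3")  # prints the span line, then returns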
monocle_apptrace/instrumentation/metamodel/botocore/methods.py
@@ -3,14 +3,14 @@ from monocle_apptrace.instrumentation.metamodel.botocore.entities.inference import (
     INFERENCE,
 )
 
-BOTOCORE_METHODS = [
-
+BOTOCORE_METHODS = [
+    {
         "package": "botocore.client",
         "object": "ClientCreator",
         "method": "create_client",
         "wrapper_method": task_wrapper,
-        "
-        "output_processor": INFERENCE
-
-    }
+        "span_handler":"botocore_handler",
+        "output_processor": INFERENCE,
+        "skip_span": True
+    }
 ]
monocle_apptrace/instrumentation/metamodel/flask/__init__.py
File without changes
monocle_apptrace/instrumentation/metamodel/flask/_helper.py
@@ -0,0 +1,29 @@
+from threading import local
+from monocle_apptrace.instrumentation.common.utils import extract_http_headers, clear_http_scopes
+from opentelemetry.propagate import extract
+from opentelemetry.context import Context, attach, detach
+from monocle_apptrace.instrumentation.common.span_handler import SpanHandler
+token_data = local()
+token_data.current_token = None
+
+def flask_pre_tracing(args):
+    headers = dict()
+    for key, value in args[0].items():
+        if key.startswith("HTTP_"):
+            new_key = key[5:].lower().replace("_", "-")
+            headers[new_key] = value
+    token_data.current_token = extract_http_headers(headers)
+
+def flask_post_tracing():
+    clear_http_scopes(token_data.current_token)
+    token_data.current_token = None
+
+class FlaskSpanHandler(SpanHandler):
+
+    def pre_tracing(self, to_wrap, wrapped, instance, args, kwargs):
+        flask_pre_tracing(args)
+        return super().pre_tracing(to_wrap, wrapped, instance, args, kwargs)
+
+    def post_tracing(self, to_wrap, wrapped, instance, args, kwargs, return_value):
+        flask_post_tracing()
+        return super().post_tracing(to_wrap, wrapped, instance, args, kwargs, return_value)
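flask_pre_tracing works on the raw WSGI environ that Flask.wsgi_app receives, so the header recovery is a plain dict transform: strip the HTTP_ prefix, lowercase, and restore dashes. A self-contained check of that transform (the header names and values below are samples, not anything monocle requires):

    environ = {
        "HTTP_TRACEPARENT": "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01",
        "HTTP_CLIENT_SCOPE": "session-42",  # hypothetical scope header
        "REQUEST_METHOD": "GET",            # not an HTTP_ key, so it is ignored
    }

    # The same transformation flask_pre_tracing applies before handing the
    # result to extract_http_headers.
    headers = {
        key[5:].lower().replace("_", "-"): value
        for key, value in environ.items()
        if key.startswith("HTTP_")
    }
    print(headers)  # {'traceparent': '00-...', 'client-scope': 'session-42'}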
monocle_apptrace/instrumentation/metamodel/flask/methods.py
@@ -0,0 +1,13 @@
+from monocle_apptrace.instrumentation.common.wrapper import task_wrapper
+
+FLASK_METHODS = [
+    {
+        "package": "flask.app",
+        "object": "Flask",
+        "method": "wsgi_app",
+        "span_name": "Flask.wsgi_app",
+        "wrapper_method": task_wrapper,
+        "span_handler": "flask_handler",
+        "skip_span": True
+    }
+]
monocle_apptrace/instrumentation/metamodel/langchain/_helper.py
@@ -19,7 +19,13 @@ def extract_messages(args):
     """Extract system and user messages"""
     try:
         messages = []
+        if args and isinstance(args, (list, tuple)) and hasattr(args[0], 'text'):
+            return [args[0].text]
         if args and isinstance(args, (list, tuple)) and len(args) > 0:
+            if isinstance(args[0], list) and len(args[0]) > 0:
+                first_msg = args[0][0]
+                if hasattr(first_msg, 'content') and hasattr(first_msg, 'type') and first_msg.type == "human":
+                    return args[0][0].content
         if hasattr(args[0], "messages") and isinstance(args[0].messages, list):
             for msg in args[0].messages:
                 if hasattr(msg, 'content') and hasattr(msg, 'type'):
monocle_apptrace/instrumentation/metamodel/langgraph/__init__.py
File without changes
monocle_apptrace/instrumentation/metamodel/langgraph/_helper.py
@@ -0,0 +1,48 @@
+from monocle_apptrace.instrumentation.common.utils import resolve_from_alias
+import logging
+logger = logging.getLogger(__name__)
+
+def handle_openai_response(response):
+    try:
+        if 'messages' in response:
+            output = response["messages"][-1]
+            return str(output.content)
+    except Exception as e:
+        logger.warning("Warning: Error occurred in handle_openai_response: %s", str(e))
+        return ""
+
+def agent_instructions(arguments):
+    if callable(arguments['kwargs']['agent'].instructions):
+        return arguments['kwargs']['agent'].instructions(arguments['kwargs']['context_variables'])
+    else:
+        return arguments['kwargs']['agent'].instructions
+
+def extract_input(arguments):
+    history = arguments['result']['messages']
+    for message in history:
+        if hasattr(message, 'content') and hasattr(message, 'type') and message.type == "human": # Check if the message is a HumanMessage
+            return message.content
+
+def get_inference_endpoint(arguments):
+    inference_endpoint = resolve_from_alias(arguments['instance'].client.__dict__, ['azure_endpoint', 'api_base', '_base_url'])
+    return str(inference_endpoint)
+
+def tools(instance):
+    if hasattr(instance,'nodes') and ('tools' in instance.nodes):
+        tools= instance.nodes['tools']
+        if hasattr(tools,'bound') and hasattr(tools.bound,'tools_by_name'):
+            return list(tools.bound.tools_by_name.keys())
+
+
+def update_span_from_llm_response(response):
+    meta_dict = {}
+    token_usage = None
+    if response is not None and "messages" in response:
+        token = response["messages"][-1]
+        if token.response_metadata is not None:
+            token_usage = token.response_metadata["token_usage"]
+    if token_usage is not None:
+        meta_dict.update({"completion_tokens": token_usage.get('completion_tokens')})
+        meta_dict.update({"prompt_tokens": token_usage.get('prompt_tokens')})
+        meta_dict.update({"total_tokens": token_usage.get('total_tokens')})
+    return meta_dict
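update_span_from_llm_response walks the LangGraph result the same way handle_openai_response does: the last entry in result['messages'] carries the model's response_metadata. A stub-driven sketch of that walk (AIMessageStub is invented here to stand in for the LangChain message object LangGraph returns):

    class AIMessageStub:
        """Stands in for the message LangGraph puts at the end of result['messages']."""
        content = "Final answer"
        type = "ai"
        response_metadata = {
            "token_usage": {"completion_tokens": 12, "prompt_tokens": 34, "total_tokens": 46}
        }

    response = {"messages": [AIMessageStub()]}

    # Same extraction as update_span_from_llm_response above.
    token_usage = response["messages"][-1].response_metadata["token_usage"]
    meta_dict = {
        "completion_tokens": token_usage.get("completion_tokens"),
        "prompt_tokens": token_usage.get("prompt_tokens"),
        "total_tokens": token_usage.get("total_tokens"),
    }
    print(meta_dict)  # {'completion_tokens': 12, 'prompt_tokens': 34, 'total_tokens': 46}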
monocle_apptrace/instrumentation/metamodel/langgraph/entities/__init__.py
File without changes
monocle_apptrace/instrumentation/metamodel/langgraph/entities/inference.py
@@ -0,0 +1,56 @@
+from monocle_apptrace.instrumentation.metamodel.langgraph import (
+    _helper
+)
+INFERENCE = {
+    "type": "agent",
+    "attributes": [
+        [
+            {
+                "_comment": "agent type",
+                "attribute": "type",
+                "accessor": lambda arguments:'agent.oai'
+            },
+            {
+                "_comment": "name of the agent",
+                "attribute": "name",
+                "accessor": lambda arguments:arguments['instance'].name
+            },
+            {
+                "_comment": "agent tools",
+                "attribute": "tools",
+                "accessor": lambda arguments: _helper.tools(arguments['instance'])
+            }
+        ]
+    ],
+    "events": [
+        {
+            "name":"data.input",
+            "attributes": [
+                {
+                    "_comment": "this is LLM input",
+                    "attribute": "query",
+                    "accessor": lambda arguments: _helper.extract_input(arguments)
+                }
+            ]
+        },
+        {
+            "name":"data.output",
+            "attributes": [
+                {
+                    "_comment": "this is response from LLM",
+                    "attribute": "response",
+                    "accessor": lambda arguments: _helper.handle_openai_response(arguments['result'])
+                }
+            ]
+        },
+        {
+            "name": "metadata",
+            "attributes": [
+                {
+                    "_comment": "this is metadata usage from LLM",
+                    "accessor": lambda arguments: _helper.update_span_from_llm_response(arguments['result'])
+                }
+            ]
+        }
+    ]
+}
monocle_apptrace/instrumentation/metamodel/langgraph/methods.py
@@ -0,0 +1,14 @@
+from monocle_apptrace.instrumentation.common.wrapper import task_wrapper
+from monocle_apptrace.instrumentation.metamodel.langgraph.entities.inference import (
+    INFERENCE,
+)
+LANGGRAPH_METHODS = [
+    {
+        "package": "langgraph.graph.state",
+        "object": "CompiledStateGraph",
+        "method": "invoke",
+        "span_name": "langgraph.graph.invoke",
+        "wrapper_method": task_wrapper,
+        "output_processor": INFERENCE
+    }
+]
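Each accessor in an output processor receives a single arguments mapping assembled by the wrapper around the intercepted call. A sketch of how the INFERENCE accessors above get evaluated (the arguments contents are stubbed; the real dict is built by task_wrapper, and GraphStub is invented for illustration):

    class GraphStub:
        """Stands in for a CompiledStateGraph instance."""
        name = "LangGraph"
        nodes = {}  # no 'tools' node, so the tools accessor would find nothing

    arguments = {
        "instance": GraphStub(),
        "args": (),
        "kwargs": {},
        "result": {"messages": []},  # what CompiledStateGraph.invoke returned
    }

    # Evaluating the first attribute group of INFERENCE by hand:
    agent_type = (lambda a: 'agent.oai')(arguments)
    agent_name = (lambda a: a['instance'].name)(arguments)
    print(agent_type, agent_name)  # agent.oai LangGraph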
monocle_apptrace/instrumentation/metamodel/llamaindex/_helper.py
@@ -16,34 +16,50 @@ from monocle_apptrace.instrumentation.common.utils import (
 logger = logging.getLogger(__name__)
 
 
+def extract_tools(instance):
+    tools = []
+    if not hasattr(instance, 'state') or not hasattr(instance.state, 'task_dict'):
+        return []
+    try:
+        data = next(iter(instance.state.task_dict.values())).task
+    except (AttributeError, StopIteration):
+        return []
+
+    if hasattr(data,'extra_state') and 'sources' in data.extra_state:
+        for tool_output in data.extra_state['sources']:
+            tool_name = tool_output.tool_name
+            if tool_name:
+                tools.append(tool_name)
+    return tools
+
+
 def extract_messages(args):
     """Extract system and user messages"""
     try:
         messages = []
-
+
+        def process_message(msg):
+            """Processes a single message and extracts relevant information."""
+            if hasattr(msg, 'content') and hasattr(msg, 'role'):
+                role = getattr(msg.role, 'value', msg.role)
+                content = msg.content if role == "system" else extract_query_from_content(msg.content)
+                messages.append({role: content})
+
+        if isinstance(args, (list, tuple)) and args:
             for msg in args[0]:
-
-
-                if role == "system":
-                    messages.append({role: msg.content})
-                elif role in ["user", "human"]:
-                    user_message = extract_query_from_content(msg.content)
-                    messages.append({role: user_message})
-        if args and isinstance(args, dict):
+                process_message(msg)
+        if isinstance(args, dict):
             for msg in args.get("messages", []):
-
-
-
-
-                elif role in ["user", "human"]:
-                    user_message = msg.content
-                    messages.append({role: user_message})
+                process_message(msg)
+        if args and isinstance(args, tuple):
+            messages.append(args[0])
+
         return [str(message) for message in messages]
+
     except Exception as e:
-        logger.warning("Warning: Error occurred in extract_messages: %s", str(e))
+        logger.warning("Error in extract_messages: %s", str(e))
         return []
 
-
 def extract_assistant_message(response):
     try:
         if isinstance(response, str):
@@ -52,6 +68,8 @@ def extract_assistant_message(response):
             return [response.content]
         if hasattr(response, "message") and hasattr(response.message, "content"):
             return [response.message.content]
+        if hasattr(response,"response") and isinstance(response.response, str):
+            return [response.response]
     except Exception as e:
         logger.warning("Warning: Error occurred in extract_assistant_message: %s", str(e))
     return []
@@ -63,7 +81,7 @@ def extract_query_from_content(content):
     answer_prefix = "Answer:"
     query_start = content.find(query_prefix)
     if query_start == -1:
-        return
+        return content
 
     query_start += len(query_prefix)
     answer_start = content.find(answer_prefix, query_start)
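The one-line change above alters the fallback: content without a "Question:" prefix now passes through unchanged instead of becoming None. A simplified restatement of the parsing behavior; the body after the prefix check is paraphrased from context, since the diff only shows the function's first half:

    def extract_query_sketch(content):
        """Sketch of extract_query_from_content's 0.3.0b5 behavior."""
        query_prefix = "Question:"
        answer_prefix = "Answer:"
        query_start = content.find(query_prefix)
        if query_start == -1:
            return content  # new fallback: the raw content instead of None

        query_start += len(query_prefix)
        answer_start = content.find(answer_prefix, query_start)
        # Assumed continuation: slice out the text between the two markers.
        end = answer_start if answer_start != -1 else len(content)
        return content[query_start:end].strip()

    print(extract_query_sketch("plain user prompt"))         # plain user prompt
    print(extract_query_sketch("Question: 2+2? Answer: 4"))  # 2+2?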
monocle_apptrace/instrumentation/metamodel/llamaindex/entities/agent.py
@@ -0,0 +1,47 @@
+from monocle_apptrace.instrumentation.metamodel.llamaindex import (
+    _helper,
+)
+
+AGENT = {
+    "type": "agent",
+    "attributes": [
+        [
+            {
+                "_comment": "Agent name, type and Tools.",
+                "attribute": "name",
+                "accessor": lambda arguments: arguments['instance'].__class__.__name__
+            },
+            {
+                "attribute": "type",
+                "accessor": lambda arguments: 'Agent.oai'
+            },
+            {
+                "attribute": "tools",
+                "accessor": lambda arguments: _helper.extract_tools(arguments['instance'])
+            }
+        ]
+
+    ],
+    "events": [
+        {"name": "data.input",
+         "attributes": [
+
+             {
+                 "_comment": "this is instruction and user query to LLM",
+                 "attribute": "input",
+                 "accessor": lambda arguments: _helper.extract_messages(arguments['args'])
+             }
+         ]
+         },
+        {
+            "name": "data.output",
+            "attributes": [
+                {
+                    "_comment": "this is response from LLM",
+                    "attribute": "response",
+                    "accessor": lambda arguments: _helper.extract_assistant_message(arguments['result'])
+                }
+            ]
+        }
+    ]
+}
monocle_apptrace/instrumentation/metamodel/llamaindex/methods.py
@@ -2,6 +2,7 @@ from monocle_apptrace.instrumentation.common.wrapper import atask_wrapper, task_wrapper
 from monocle_apptrace.instrumentation.metamodel.llamaindex.entities.inference import (
     INFERENCE,
 )
+from monocle_apptrace.instrumentation.metamodel.llamaindex.entities.agent import AGENT
 from monocle_apptrace.instrumentation.metamodel.llamaindex.entities.retrieval import (
     RETRIEVAL,
 )
@@ -85,5 +86,13 @@ LLAMAINDEX_METHODS = [
         "span_name": "llamaindex.mistralai",
         "wrapper_method": atask_wrapper,
         "output_processor": INFERENCE
+    },
+    {
+        "package": "llama_index.core.agent",
+        "object": "ReActAgent",
+        "method": "chat",
+        "span_name": "react.agent",
+        "wrapper_method": task_wrapper,
+        "output_processor": AGENT
     }
 ]
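With that entry registered, an instrumented app that calls ReActAgent.chat should emit a "react.agent" span whose attributes and events come from the AGENT processor above. A usage sketch; the setup_monocle_telemetry import path is an assumption based on the instrumentor module this release touches, and the agent construction is elided:

    # Assumed import path; adjust to your installed monocle-apptrace layout.
    from monocle_apptrace.instrumentation.common.instrumentor import setup_monocle_telemetry

    setup_monocle_telemetry(workflow_name="llamaindex.agent.app")

    # ... build tools, an LLM, and a ReActAgent as usual (elided) ...
    # agent = ReActAgent.from_tools(tools, llm=llm)

    # This call is now intercepted: task_wrapper opens a "react.agent" span and
    # the AGENT output processor fills in name/type/tools plus the
    # data.input / data.output events from the call's args and result.
    # response = agent.chat("What is 2 + 2?")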
monocle_apptrace/instrumentation/metamodel/openai/__init__.py
File without changes
monocle_apptrace/instrumentation/metamodel/openai/_helper.py
@@ -0,0 +1,88 @@
+"""
+This module provides utility functions for extracting system, user,
+and assistant messages from various input formats.
+"""
+
+import logging
+from monocle_apptrace.instrumentation.common.utils import (
+    Option,
+    get_keys_as_tuple,
+    get_nested_value,
+    try_option,
+)
+
+
+logger = logging.getLogger(__name__)
+
+
+def extract_messages(kwargs):
+    """Extract system and user messages"""
+    try:
+        messages = []
+        if 'messages' in kwargs and len(kwargs['messages']) >0:
+            for msg in kwargs['messages']:
+                if msg.get('content') and msg.get('role'):
+                    messages.append({msg['role']: msg['content']})
+
+        return [str(message) for message in messages]
+    except Exception as e:
+        logger.warning("Warning: Error occurred in extract_messages: %s", str(e))
+        return []
+
+
+def extract_assistant_message(response):
+    try:
+        if hasattr(response,"choices") and len(response.choices) >0:
+            if hasattr(response.choices[0],"message"):
+                return response.choices[0].message.content
+    except (IndexError, AttributeError) as e:
+        logger.warning("Warning: Error occurred in extract_assistant_message: %s", str(e))
+        return None
+
+def extract_provider_name(instance):
+    provider_url: Option[str] = try_option(getattr, instance._client.base_url, 'host')
+    return provider_url.unwrap_or(None)
+
+
+def extract_inference_endpoint(instance):
+    inference_endpoint: Option[str] = try_option(getattr, instance._client, 'base_url').map(str)
+    if inference_endpoint.is_none() and "meta" in instance.client.__dict__:
+        inference_endpoint = try_option(getattr, instance.client.meta, 'endpoint_url').map(str)
+
+    return inference_endpoint.unwrap_or(extract_provider_name(instance))
+
+def resolve_from_alias(my_map, alias):
+    """Find a alias that is not none from list of aliases"""
+
+    for i in alias:
+        if i in my_map.keys():
+            return my_map[i]
+    return None
+
+
+def update_input_span_events(args):
+    return args[0] if len(args) > 0 else ""
+
+
+def update_output_span_events(results):
+    output_arg_text = " ".join([doc.page_content for doc in results if hasattr(doc, 'page_content')])
+    if len(output_arg_text) > 100:
+        output_arg_text = output_arg_text[:100] + "..."
+    return output_arg_text
+
+
+def update_span_from_llm_response(response):
+    meta_dict = {}
+    if response is not None and hasattr(response, "usage"):
+        if hasattr(response, "usage") and response.usage is not None:
+            token_usage = response.usage
+        else:
+            response_metadata = response.response_metadata
+            token_usage = response_metadata.get("token_usage")
+        if token_usage is not None:
+            meta_dict.update(
+                {"completion_tokens": getattr(response.usage, "completion_tokens", None)})
+            meta_dict.update({"prompt_tokens": getattr(response.usage, "prompt_tokens", None)})
+            meta_dict.update({"total_tokens": getattr(response.usage, "total_tokens", None)})
+    return meta_dict
+
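These OpenAI helpers operate on plain chat-completion structures, so their behavior is easy to check with stubs (the stub objects below are invented for illustration; real inputs come from the openai client):

    from types import SimpleNamespace

    # extract_messages reads the kwargs passed to chat.completions.create(...)
    kwargs = {"messages": [
        {"role": "system", "content": "You are terse."},
        {"role": "user", "content": "Ping?"},
    ]}
    messages = [str({m["role"]: m["content"]}) for m in kwargs["messages"]
                if m.get("content") and m.get("role")]  # same filter as extract_messages
    print(messages)

    # extract_assistant_message reads response.choices[0].message.content
    response = SimpleNamespace(choices=[
        SimpleNamespace(message=SimpleNamespace(content="Pong."))
    ])
    print(response.choices[0].message.content)  # what the data.output accessor records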
monocle_apptrace/instrumentation/metamodel/openai/entities/__init__.py
File without changes
monocle_apptrace/instrumentation/metamodel/openai/entities/inference.py
@@ -0,0 +1,71 @@
+from monocle_apptrace.instrumentation.metamodel.openai import (
+    _helper,
+)
+from monocle_apptrace.instrumentation.common.utils import resolve_from_alias
+
+INFERENCE = {
+    "type": "inference",
+    "attributes": [
+        [
+            {
+                "_comment": "provider type ,name , deployment , inference_endpoint",
+                "attribute": "type",
+                "accessor": lambda arguments: 'inference.azure_oai'
+            },
+            {
+                "attribute": "provider_name",
+                "accessor": lambda arguments: _helper.extract_provider_name(arguments['instance'])
+            },
+            {
+                "attribute": "deployment",
+                "accessor": lambda arguments: resolve_from_alias(arguments['instance'].__dict__, ['engine', 'azure_deployment', 'deployment_name', 'deployment_id', 'deployment'])
+            },
+            {
+                "attribute": "inference_endpoint",
+                "accessor": lambda arguments: resolve_from_alias(arguments['instance'].__dict__, ['azure_endpoint', 'api_base', 'endpoint']) or _helper.extract_inference_endpoint(arguments['instance'])
+            }
+        ],
+        [
+            {
+                "_comment": "LLM Model",
+                "attribute": "name",
+                "accessor": lambda arguments: resolve_from_alias(arguments['kwargs'], ['model', 'model_name', 'endpoint_name', 'deployment_name'])
+            },
+            {
+                "attribute": "type",
+                "accessor": lambda arguments: 'model.llm.' + resolve_from_alias(arguments['kwargs'], ['model', 'model_name', 'endpoint_name', 'deployment_name'])
+            }
+        ]
+    ],
+    "events": [
+        {"name": "data.input",
+         "attributes": [
+
+             {
+                 "_comment": "this is instruction and user query to LLM",
+                 "attribute": "input",
+                 "accessor": lambda arguments: _helper.extract_messages(arguments['kwargs'])
+             }
+         ]
+         },
+        {
+            "name": "data.output",
+            "attributes": [
+                {
+                    "_comment": "this is result from LLM",
+                    "attribute": "response",
+                    "accessor": lambda arguments: _helper.extract_assistant_message(arguments['result'])
+                }
+            ]
+        },
+        {
+            "name": "metadata",
+            "attributes": [
+                {
+                    "_comment": "this is metadata usage from LLM",
+                    "accessor": lambda arguments: _helper.update_span_from_llm_response(arguments['result'])
+                }
+            ]
+        }
+    ]
+}