monocle-apptrace 0.4.1__py3-none-any.whl → 0.5.0b1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of monocle-apptrace might be problematic.
- monocle_apptrace/__main__.py +1 -1
- monocle_apptrace/exporters/file_exporter.py +123 -36
- monocle_apptrace/instrumentation/common/__init__.py +16 -1
- monocle_apptrace/instrumentation/common/constants.py +6 -1
- monocle_apptrace/instrumentation/common/instrumentor.py +19 -152
- monocle_apptrace/instrumentation/common/method_wrappers.py +380 -0
- monocle_apptrace/instrumentation/common/span_handler.py +39 -24
- monocle_apptrace/instrumentation/common/utils.py +20 -14
- monocle_apptrace/instrumentation/common/wrapper.py +10 -9
- monocle_apptrace/instrumentation/common/wrapper_method.py +40 -1
- monocle_apptrace/instrumentation/metamodel/a2a/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/a2a/_helper.py +37 -0
- monocle_apptrace/instrumentation/metamodel/a2a/entities/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/a2a/entities/inference.py +112 -0
- monocle_apptrace/instrumentation/metamodel/a2a/methods.py +22 -0
- monocle_apptrace/instrumentation/metamodel/aiohttp/_helper.py +6 -11
- monocle_apptrace/instrumentation/metamodel/anthropic/_helper.py +35 -18
- monocle_apptrace/instrumentation/metamodel/anthropic/entities/inference.py +14 -10
- monocle_apptrace/instrumentation/metamodel/azfunc/_helper.py +13 -11
- monocle_apptrace/instrumentation/metamodel/azfunc/entities/http.py +5 -0
- monocle_apptrace/instrumentation/metamodel/azureaiinference/_helper.py +88 -8
- monocle_apptrace/instrumentation/metamodel/azureaiinference/entities/inference.py +22 -8
- monocle_apptrace/instrumentation/metamodel/botocore/_helper.py +92 -16
- monocle_apptrace/instrumentation/metamodel/botocore/entities/inference.py +13 -8
- monocle_apptrace/instrumentation/metamodel/botocore/handlers/botocore_span_handler.py +1 -1
- monocle_apptrace/instrumentation/metamodel/fastapi/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/fastapi/_helper.py +82 -0
- monocle_apptrace/instrumentation/metamodel/fastapi/entities/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/fastapi/entities/http.py +44 -0
- monocle_apptrace/instrumentation/metamodel/fastapi/methods.py +23 -0
- monocle_apptrace/instrumentation/metamodel/finish_types.py +387 -0
- monocle_apptrace/instrumentation/metamodel/flask/_helper.py +6 -11
- monocle_apptrace/instrumentation/metamodel/gemini/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/gemini/_helper.py +120 -0
- monocle_apptrace/instrumentation/metamodel/gemini/entities/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/gemini/entities/inference.py +83 -0
- monocle_apptrace/instrumentation/metamodel/gemini/entities/retrieval.py +43 -0
- monocle_apptrace/instrumentation/metamodel/gemini/methods.py +24 -0
- monocle_apptrace/instrumentation/metamodel/haystack/_helper.py +15 -8
- monocle_apptrace/instrumentation/metamodel/haystack/entities/inference.py +5 -10
- monocle_apptrace/instrumentation/metamodel/haystack/methods.py +7 -0
- monocle_apptrace/instrumentation/metamodel/lambdafunc/_helper.py +78 -0
- monocle_apptrace/instrumentation/metamodel/lambdafunc/entities/http.py +51 -0
- monocle_apptrace/instrumentation/metamodel/lambdafunc/methods.py +23 -0
- monocle_apptrace/instrumentation/metamodel/lambdafunc/wrapper.py +23 -0
- monocle_apptrace/instrumentation/metamodel/langchain/_helper.py +127 -19
- monocle_apptrace/instrumentation/metamodel/langchain/entities/inference.py +15 -10
- monocle_apptrace/instrumentation/metamodel/langgraph/_helper.py +67 -10
- monocle_apptrace/instrumentation/metamodel/langgraph/entities/inference.py +127 -20
- monocle_apptrace/instrumentation/metamodel/langgraph/langgraph_processor.py +43 -0
- monocle_apptrace/instrumentation/metamodel/langgraph/methods.py +29 -5
- monocle_apptrace/instrumentation/metamodel/llamaindex/_helper.py +227 -16
- monocle_apptrace/instrumentation/metamodel/llamaindex/entities/agent.py +127 -10
- monocle_apptrace/instrumentation/metamodel/llamaindex/entities/inference.py +13 -8
- monocle_apptrace/instrumentation/metamodel/llamaindex/llamaindex_processor.py +51 -0
- monocle_apptrace/instrumentation/metamodel/llamaindex/methods.py +68 -1
- monocle_apptrace/instrumentation/metamodel/mcp/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/mcp/_helper.py +118 -0
- monocle_apptrace/instrumentation/metamodel/mcp/entities/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/mcp/entities/inference.py +48 -0
- monocle_apptrace/instrumentation/metamodel/mcp/mcp_processor.py +13 -0
- monocle_apptrace/instrumentation/metamodel/mcp/methods.py +21 -0
- monocle_apptrace/instrumentation/metamodel/openai/_helper.py +83 -16
- monocle_apptrace/instrumentation/metamodel/openai/entities/inference.py +103 -92
- monocle_apptrace/instrumentation/metamodel/openai/entities/retrieval.py +1 -1
- monocle_apptrace/instrumentation/metamodel/teamsai/_helper.py +41 -22
- monocle_apptrace/instrumentation/metamodel/teamsai/entities/inference/actionplanner_output_processor.py +1 -1
- monocle_apptrace/instrumentation/metamodel/teamsai/entities/inference/teamsai_output_processor.py +5 -9
- monocle_apptrace/instrumentation/metamodel/teamsai/sample.json +0 -4
- {monocle_apptrace-0.4.1.dist-info → monocle_apptrace-0.5.0b1.dist-info}/METADATA +14 -3
- {monocle_apptrace-0.4.1.dist-info → monocle_apptrace-0.5.0b1.dist-info}/RECORD +74 -44
- {monocle_apptrace-0.4.1.dist-info → monocle_apptrace-0.5.0b1.dist-info}/WHEEL +0 -0
- {monocle_apptrace-0.4.1.dist-info → monocle_apptrace-0.5.0b1.dist-info}/licenses/LICENSE +0 -0
- {monocle_apptrace-0.4.1.dist-info → monocle_apptrace-0.5.0b1.dist-info}/licenses/NOTICE +0 -0
monocle_apptrace/instrumentation/metamodel/gemini/methods.py (new file)

```diff
@@ -0,0 +1,24 @@
+from monocle_apptrace.instrumentation.common.wrapper import task_wrapper
+from monocle_apptrace.instrumentation.metamodel.gemini.entities.inference import (
+    INFERENCE,
+)
+from monocle_apptrace.instrumentation.metamodel.gemini.entities.retrieval import (
+    RETRIEVAL,
+)
+
+GEMINI_METHODS = [
+    {
+        "package": "google.genai.models",
+        "object": "Models",
+        "method": "generate_content",
+        "wrapper_method": task_wrapper,
+        "output_processor": INFERENCE,
+    },
+    {
+        "package": "google.genai.models",
+        "object": "Models",
+        "method": "embed_content",
+        "wrapper_method": task_wrapper,
+        "output_processor": RETRIEVAL,
+    }
+]
```
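For orientation, here is a minimal sketch of how a declarative table like GEMINI_METHODS can drive instrumentation. The `install` helper is hypothetical, not monocle's loader: monocle's wrapper_method.py binds extra context (tracer, span handler, the output_processor) to task_wrapper before patching, so the real wrapper signature differs.

```python
# Hypothetical sketch, assuming a plain wrapt-style wrapper; not monocle's actual loader.
import wrapt

def install(methods):
    for m in methods:
        # Patches e.g. google.genai.models.Models.generate_content so the
        # configured wrapper runs around every call.
        wrapt.wrap_function_wrapper(
            m["package"],                    # module to patch
            f'{m["object"]}.{m["method"]}',  # dotted attribute inside that module
            m["wrapper_method"],             # assumes (wrapped, instance, args, kwargs)
        )
```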
monocle_apptrace/instrumentation/metamodel/haystack/_helper.py

```diff
@@ -2,6 +2,7 @@ import logging
 
 from monocle_apptrace.instrumentation.common.utils import (
     Option,
+    get_json_dumps,
     get_keys_as_tuple,
     get_nested_value,
     try_option,
@@ -56,24 +57,27 @@ def extract_question_from_prompt(content):
 
 def extract_assistant_message(arguments):
     status = get_status_code(arguments)
-    response = ""
+    messages = []
+    role = "assistant"
     if status == 'success':
+        response = ""
         if "replies" in arguments['result']:
             reply = arguments['result']["replies"][0]
+            if hasattr(reply, "role") and hasattr(reply.role, "value") and isinstance(reply.role.value, str):
+                role = reply.role.value or role
             if hasattr(reply, 'content'):
                 response = reply.content
             elif hasattr(reply, 'text'):
                 response = reply.text
             else:
                 response = reply
+        messages.append({role: response})
     else:
         if arguments["exception"] is not None:
-            response = get_exception_message(arguments)
-        elif hasattr(arguments["result"], "error"):
-            response = arguments['result'].error
-
-    return response
-
+            return get_exception_message(arguments)
+        elif hasattr(arguments["result"], "error"):
+            return arguments['result'].error
+    return get_json_dumps(messages[0]) if messages else ""
 
 def get_vectorstore_deployment(my_map):
     if isinstance(my_map, dict):
@@ -112,7 +116,10 @@ def resolve_from_alias(my_map, alias):
     return None
 
 def extract_inference_endpoint(instance):
-    inference_endpoint: Option[str] = try_option(getattr, instance.client, 'base_url').map(str)
+    if hasattr(instance, '_model_name') and isinstance(instance._model_name, str) and 'gemini' in instance._model_name.lower():
+        inference_endpoint = try_option(lambda: f"https://generativelanguage.googleapis.com/v1beta/models/{instance._model_name}:generateContent")
+    if hasattr(instance, 'client') and hasattr(instance.client, 'base_url'):
+        inference_endpoint: Option[str] = try_option(getattr, instance.client, 'base_url').map(str)
     if inference_endpoint.is_none():
         inference_endpoint = try_option(getattr, instance.client.meta, 'endpoint_url').map(str)
 
```
monocle_apptrace/instrumentation/metamodel/haystack/entities/inference.py

```diff
@@ -1,7 +1,7 @@
 from monocle_apptrace.instrumentation.metamodel.haystack import (
     _helper,
 )
-from monocle_apptrace.instrumentation.common.utils import get_llm_type, get_status, get_status_code
+from monocle_apptrace.instrumentation.common.utils import get_error_message, get_llm_type
 
 INFERENCE = {
     "type": "inference.framework",
@@ -35,12 +35,12 @@ INFERENCE = {
             "_comment": "LLM Model",
             "attribute": "name",
             "accessor": lambda arguments: _helper.resolve_from_alias(arguments['instance'].__dict__,
-                                                                     ['model', 'model_name'])
+                                                                     ['model', 'model_name', '_model_name'])
         },
         {
             "attribute": "type",
             "accessor": lambda arguments: 'model.llm.' + _helper.resolve_from_alias(arguments['instance'].__dict__,
-                                                                                    ['model', 'model_name'])
+                                                                                    ['model', 'model_name', '_model_name'])
         }
     ]
 ],
@@ -59,13 +59,8 @@ INFERENCE = {
         "name": "data.output",
         "attributes": [
             {
-                "_comment": "…",
-                "attribute": "status",
-                "accessor": lambda arguments: get_status(arguments)
-            },
-            {
-                "attribute": "status_code",
-                "accessor": lambda arguments: get_status_code(arguments)
+                "attribute": "error_code",
+                "accessor": lambda arguments: get_error_message(arguments)
             },
             {
                 "attribute": "response",
```
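The INFERENCE dict above is an output processor: each attribute entry pairs a span attribute name with an accessor lambda that receives an `arguments` dict (instance, args, kwargs, result, exception). A minimal sketch of how such a list can be evaluated; `evaluate_attributes` is a hypothetical name, not monocle's API:

```python
# Hypothetical evaluation loop for an output-processor attribute list.
def evaluate_attributes(attr_specs, arguments):
    span_attrs = {}
    for spec in attr_specs:
        try:
            # Each accessor pulls one value out of the call context.
            span_attrs[spec["attribute"]] = spec["accessor"](arguments)
        except Exception:
            # Accessors are best-effort; a failing accessor must not break the wrapped call.
            span_attrs[spec["attribute"]] = None
    return span_attrs
```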
monocle_apptrace/instrumentation/metamodel/haystack/methods.py

```diff
@@ -44,4 +44,11 @@ HAYSTACK_METHODS = [
         "wrapper_method": task_wrapper,
         "output_processor": INFERENCE
     },
+    {
+        "package": "haystack_integrations.components.generators.google_ai",
+        "object": "GoogleAIGeminiChatGenerator",
+        "method": "run",
+        "wrapper_method": task_wrapper,
+        "output_processor": INFERENCE
+    },
 ]
```
monocle_apptrace/instrumentation/metamodel/lambdafunc/_helper.py (new file)

```diff
@@ -0,0 +1,78 @@
+import logging
+from threading import local
+from monocle_apptrace.instrumentation.common.utils import extract_http_headers, clear_http_scopes, try_option, Option, \
+    MonocleSpanException
+from monocle_apptrace.instrumentation.common.span_handler import SpanHandler
+from monocle_apptrace.instrumentation.common.constants import HTTP_SUCCESS_CODES
+from urllib.parse import unquote, urlparse, ParseResult
+
+logger = logging.getLogger(__name__)
+MAX_DATA_LENGTH = 1000
+token_data = local()
+token_data.current_token = None
+
+def get_url(kwargs) -> ParseResult:
+    url_str = try_option(lambda k: k.get('path'), kwargs['event'])
+    url = url_str.unwrap_or(None)
+    if url is not None:
+        return urlparse(url)
+    else:
+        return None
+
+def get_route(args) -> str:
+    event = args[1]
+    route = event.get("path") or event.get("requestContext", {}).get("path")
+    return route
+
+def get_method(args) -> str:
+    event = args[1]
+    http_method = event.get("httpMethod") or event.get("requestContext", {}).get("httpMethod")
+    return http_method
+
+
+def get_params(args) -> dict:
+    event = args[1]
+    question = None
+    query_params = event.get('queryStringParameters', {})
+    if isinstance(query_params, dict):
+        question = query_params.get('question')
+    return question
+
+def get_body(args) -> dict:
+    event = args[1]
+    body = event.get("body")
+    return body
+
+def extract_response(result) -> str:
+    if isinstance(result, dict) and 'body' in result:
+        response = result['body']
+        if isinstance(response, bytes):
+            response = response.decode('utf-8', errors='ignore')
+    else:
+        response = ""
+    return response
+
+
+def extract_status(result) -> str:
+    status = f"{result['statusCode']}" if isinstance(result, dict) and 'statusCode' in result else ""
+    if status not in HTTP_SUCCESS_CODES:
+        error_message = extract_response(result)
+        raise MonocleSpanException(f"error: {status} - {error_message}")
+    return status
+
+
+def lambda_func_pre_tracing(kwargs):
+    headers = kwargs['event'].get('headers', {}) if 'event' in kwargs else {}
+    return extract_http_headers(headers)
+
+
+def lambda_func_post_tracing(token):
+    clear_http_scopes(token)
+
+
+class lambdaSpanHandler(SpanHandler):
+    def pre_tracing(self, to_wrap, wrapped, instance, args, kwargs):
+        return lambda_func_pre_tracing(kwargs)
+
+    def post_tracing(self, to_wrap, wrapped, instance, args, kwargs, return_value, token):
+        lambda_func_post_tracing(token)
```
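These helpers read the standard API Gateway proxy event shape. Note that the wrapped call is `LambdaFunctionRouteWrapper.run_sync(func, event, context)` (see wrapper.py below), so in the helpers `args[0]` is the handler function and `args[1]` is the Lambda event. An illustrative event and response, with invented values:

```python
# Illustrative API Gateway proxy event/response; values are made up.
event = {
    "path": "/chat",                                  # read by get_route / get_url
    "httpMethod": "POST",                             # read by get_method
    "headers": {"traceparent": "00-..."},             # read by lambda_func_pre_tracing
    "queryStringParameters": {"question": "hi"},      # read by get_params
    "body": '{"question": "hi"}',                     # read by get_body
    "requestContext": {"path": "/chat", "httpMethod": "POST"},
}
result = {"statusCode": 200, "body": '{"answer": "hello"}'}  # extract_status / extract_response
```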
monocle_apptrace/instrumentation/metamodel/lambdafunc/entities/http.py (new file)

```diff
@@ -0,0 +1,51 @@
+from monocle_apptrace.instrumentation.metamodel.lambdafunc import _helper
+LAMBDA_HTTP_PROCESSOR = {
+    "type": "http.process",
+    "attributes": [
+        [
+            {
+                "_comment": "request method, request URI",
+                "attribute": "method",
+                "accessor": lambda arguments: _helper.get_method(arguments['args'])
+            },
+            {
+                "_comment": "request method, request URI",
+                "attribute": "route",
+                "accessor": lambda arguments: _helper.get_route(arguments['args'])
+            },
+            {
+                "_comment": "request method, request URI",
+                "attribute": "body",
+                "accessor": lambda arguments: _helper.get_body(arguments['args'])
+            },
+        ]
+    ],
+    "events": [
+        {
+            "name": "data.input",
+            "attributes": [
+                {
+                    "_comment": "route params",
+                    "attribute": "params",
+                    "accessor": lambda arguments: _helper.get_params(arguments['args'])
+                }
+            ]
+        },
+        {
+            "name": "data.output",
+            "attributes": [
+                {
+                    "_comment": "status from HTTP response",
+                    "attribute": "status",
+                    "accessor": lambda arguments: _helper.extract_status(arguments['result'])
+                },
+                {
+                    "_comment": "this is result from LLM",
+                    "attribute": "response",
+                    "accessor": lambda arguments: _helper.extract_response(arguments['result'])
+                }
+            ]
+        }
+
+    ]
+}
```
monocle_apptrace/instrumentation/metamodel/lambdafunc/methods.py (new file)

```diff
@@ -0,0 +1,23 @@
+from monocle_apptrace.instrumentation.common.wrapper import atask_wrapper, task_wrapper
+from monocle_apptrace.instrumentation.metamodel.lambdafunc.entities.http import LAMBDA_HTTP_PROCESSOR
+
+LAMBDA_HTTP_METHODS = [
+    {
+        "package": "monocle_apptrace.instrumentation.metamodel.lambdafunc.wrapper",
+        "object": "LambdaFunctionRouteWrapper",
+        "method": "run_async",
+        "span_name": "lambda_function_route",
+        "wrapper_method": atask_wrapper,
+        "span_handler": "lambda_func_handler",
+        "output_processor": LAMBDA_HTTP_PROCESSOR
+    },
+    {
+        "package": "monocle_apptrace.instrumentation.metamodel.lambdafunc.wrapper",
+        "object": "LambdaFunctionRouteWrapper",
+        "method": "run_sync",
+        "span_name": "lambda_function_route",
+        "wrapper_method": task_wrapper,
+        "span_handler": "lambda_func_handler",
+        "output_processor": LAMBDA_HTTP_PROCESSOR
+    }
+]
```
monocle_apptrace/instrumentation/metamodel/lambdafunc/wrapper.py (new file)

```diff
@@ -0,0 +1,23 @@
+from functools import wraps
+import inspect
+
+def monocle_trace_lambda_function_route(func):
+    if inspect.iscoroutinefunction(func):
+        @wraps(func)
+        async def wrapper(*args, **kwargs):
+            return await LambdaFunctionRouteWrapper.run_async(func, *args, **kwargs)
+        return wrapper
+    else:
+        @wraps(func)
+        def wrapper(*args, **kwargs):
+            return LambdaFunctionRouteWrapper.run_sync(func, *args, **kwargs)
+        return wrapper
+
+class LambdaFunctionRouteWrapper:
+    @staticmethod
+    async def run_async(func, *args, **kwargs):
+        return await func(*args, **kwargs)
+
+    @staticmethod
+    def run_sync(func, *args, **kwargs):
+        return func(*args, **kwargs)
```
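The decorator funnels every handler invocation through `run_sync`/`run_async`, which are the methods LAMBDA_HTTP_METHODS patches, so decorated handlers are traced once instrumentation is active. A hedged usage sketch; the handler itself is invented:

```python
from monocle_apptrace.instrumentation.metamodel.lambdafunc.wrapper import (
    monocle_trace_lambda_function_route,
)

@monocle_trace_lambda_function_route
def handler(event, context):
    # Hypothetical Lambda handler; the decorator routes the call through
    # LambdaFunctionRouteWrapper.run_sync, which is the instrumented method.
    return {"statusCode": 200, "body": "ok"}
```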
monocle_apptrace/instrumentation/metamodel/langchain/_helper.py

```diff
@@ -6,12 +6,14 @@ and assistant messages from various input formats.
 import logging
 from monocle_apptrace.instrumentation.common.utils import (
     Option,
+    get_json_dumps,
     get_keys_as_tuple,
     get_nested_value,
     try_option,
     get_exception_message,
     get_status_code,
 )
+from monocle_apptrace.instrumentation.metamodel.finish_types import map_langchain_finish_reason_to_finish_type
 
 
 logger = logging.getLogger(__name__)
@@ -32,45 +34,64 @@ def extract_messages(args):
             for msg in args[0].messages:
                 if hasattr(msg, 'content') and hasattr(msg, 'type'):
                     messages.append({msg.type: msg.content})
-        return [str(d) for d in messages]
+        else:
+            for msg in args[0]:
+                if hasattr(msg, 'content') and hasattr(msg, 'type') and msg.content:
+                    messages.append({msg.type: msg.content})
+                elif hasattr(msg, 'tool_calls') and msg.tool_calls:
+                    messages.append({msg.type: get_json_dumps(msg.tool_calls)})
+        return [get_json_dumps(d) for d in messages]
     except Exception as e:
         logger.warning("Warning: Error occurred in extract_messages: %s", str(e))
         return []
 
 def extract_assistant_message(arguments):
     status = get_status_code(arguments)
-    response = ""
+    messages = []
+    role = "assistant"
     if status == 'success':
         if isinstance(arguments['result'], str):
-            response = arguments['result']
-        elif hasattr(arguments['result'], "content"):
-            response = arguments['result'].content
-        elif hasattr(arguments['result'], "message") and hasattr(arguments['result'].message, "content"):
-            response = arguments['result'].message.content
+            messages.append({role: arguments['result']})
+        elif hasattr(arguments['result'], "content") and arguments['result'].content != "":
+            role = arguments['result'].type if hasattr(arguments['result'], 'type') else role
+            messages.append({role: arguments['result'].content})
+        elif hasattr(arguments['result'], "message") and hasattr(arguments['result'].message, "content") and arguments['result'].message.content != "":
+            role = arguments['result'].type if hasattr(arguments['result'], 'type') else role
+            messages.append({role: arguments['result'].message.content})
+        elif hasattr(arguments['result'], "tool_calls"):
+            role = arguments['result'].type if hasattr(arguments['result'], 'type') else role
+            messages.append({role: arguments['result'].tool_calls[0]})
     else:
         if arguments["exception"] is not None:
-            response = get_exception_message(arguments)
+            messages.append({role: get_exception_message(arguments)})
        elif hasattr(arguments["result"], "error"):
-            response = arguments["result"].error
-
-    return response
-
+            return arguments["result"].error
+    return get_json_dumps(messages[0]) if messages else ""
 
 def extract_provider_name(instance):
-    provider_url: Option[str] = None
-    if hasattr(instance,'client'):
+    provider_url: Option[str] = Option(None)
+    if hasattr(instance, 'client'):
+        provider_url: Option[str] = try_option(getattr, instance.client, 'universe_domain')
+    if hasattr(instance,'client') and hasattr(instance.client, '_client') and hasattr(instance.client._client, 'base_url'):
+        # If the client has a base_url, extract the host from it
         provider_url: Option[str] = try_option(getattr, instance.client._client.base_url, 'host')
-    if hasattr(instance, '_client'):
+    if hasattr(instance, '_client') and hasattr(instance._client, 'base_url'):
         provider_url = try_option(getattr, instance._client.base_url, 'host')
     return provider_url.unwrap_or(None)
 
 
 def extract_inference_endpoint(instance):
     inference_endpoint: Option[str] = None
-    if hasattr(instance,'client') and hasattr(instance.client, '_client'):
+    # instance.client.meta.endpoint_url
+    if hasattr(instance, 'client') and hasattr(instance.client, 'transport'):
+        inference_endpoint: Option[str] = try_option(getattr, instance.client.transport, 'host')
+
+    if hasattr(instance, 'client') and hasattr(instance.client, 'meta') and hasattr(instance.client.meta, 'endpoint_url'):
+        inference_endpoint: Option[str] = try_option(getattr, instance.client.meta, 'endpoint_url').map(str)
+
+    if hasattr(instance,'client') and hasattr(instance.client, '_client'):
         inference_endpoint: Option[str] = try_option(getattr, instance.client._client, 'base_url').map(str)
-    if inference_endpoint.is_none():
-        inference_endpoint = try_option(getattr, instance.client.meta, 'endpoint_url').map(str)
+
     if hasattr(instance,'_client'):
         inference_endpoint = try_option(getattr, instance._client, 'base_url').map(str)
 
@@ -138,4 +159,91 @@ def update_span_from_llm_response(response, instance):
             {"completion_tokens": token_usage.get("completion_tokens") or token_usage.get("output_tokens")})
         meta_dict.update({"prompt_tokens": token_usage.get("prompt_tokens") or token_usage.get("input_tokens")})
         meta_dict.update({"total_tokens": token_usage.get("total_tokens")})
-    return meta_dict
+    return meta_dict
+
+def extract_finish_reason(arguments):
+    """Extract finish_reason from LangChain response."""
+    try:
+        # Handle exception cases first
+        if arguments.get("exception") is not None:
+            # If there's an exception, it's typically an error finish type
+            return "error"
+
+        response = arguments.get("result")
+        if response is None:
+            return None
+
+        # Check various possible locations for finish_reason in LangChain responses
+
+        # Direct finish_reason attribute
+        if hasattr(response, "finish_reason") and response.finish_reason:
+            return response.finish_reason
+
+        # Response metadata (common in LangChain)
+        if hasattr(response, "response_metadata") and response.response_metadata:
+            metadata = response.response_metadata
+            if isinstance(metadata, dict):
+                # Check for finish_reason in metadata
+                if "finish_reason" in metadata:
+                    return metadata["finish_reason"]
+                # Check for stop_reason (Anthropic style through LangChain)
+                if "stop_reason" in metadata:
+                    return metadata["stop_reason"]
+                # Check for other common finish reason keys
+                for key in ["completion_reason", "end_reason", "status"]:
+                    if key in metadata:
+                        return metadata[key]
+
+        # Check if response has generation_info (some LangChain models)
+        if hasattr(response, "generation_info") and response.generation_info:
+            gen_info = response.generation_info
+            if isinstance(gen_info, dict):
+                for key in ["finish_reason", "stop_reason", "completion_reason"]:
+                    if key in gen_info:
+                        return gen_info[key]
+
+        # Check if response has llm_output (batch responses)
+        if hasattr(response, "llm_output") and response.llm_output:
+            llm_output = response.llm_output
+            if isinstance(llm_output, dict):
+                for key in ["finish_reason", "stop_reason"]:
+                    if key in llm_output:
+                        return llm_output[key]
+
+        # For AIMessage responses, check additional_kwargs
+        if hasattr(response, "additional_kwargs") and response.additional_kwargs:
+            kwargs = response.additional_kwargs
+            if isinstance(kwargs, dict):
+                for key in ["finish_reason", "stop_reason"]:
+                    if key in kwargs:
+                        return kwargs[key]
+
+        # For generation responses with choices (similar to OpenAI structure)
+        if hasattr(response, "generations") and response.generations:
+            generations = response.generations
+            if isinstance(generations, list) and len(generations) > 0:
+                for generation in generations:
+                    if hasattr(generation, "generation_info") and generation.generation_info:
+                        gen_info = generation.generation_info
+                        if isinstance(gen_info, dict):
+                            for key in ["finish_reason", "stop_reason"]:
+                                if key in gen_info:
+                                    return gen_info[key]
+
+        # If no specific finish reason found, infer from status
+        status_code = get_status_code(arguments)
+        if status_code == 'success':
+            return "stop"  # Default success finish reason
+        elif status_code == 'error':
+            return "error"
+
+    except Exception as e:
+        logger.warning("Warning: Error occurred in extract_finish_reason: %s", str(e))
+        return None
+
+    return None
+
+
+def map_finish_reason_to_finish_type(finish_reason):
+    """Map LangChain finish_reason to finish_type."""
+    return map_langchain_finish_reason_to_finish_type(finish_reason)
```
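A worked example of the fallback chain in extract_finish_reason, using a stand-in object rather than a real LangChain AIMessage (names here are invented for illustration):

```python
# Stand-in for a LangChain chat result (illustrative only).
class FakeResult:
    finish_reason = None                             # falsy, so this branch is skipped
    response_metadata = {"finish_reason": "length"}  # first non-empty lookup wins

args = {"exception": None, "result": FakeResult()}
# extract_finish_reason(args) -> "length": the direct attribute is empty,
# so the response_metadata lookup supplies the value, which
# map_finish_reason_to_finish_type then classifies via finish_types.py.
```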
monocle_apptrace/instrumentation/metamodel/langchain/entities/inference.py

```diff
@@ -1,7 +1,7 @@
 from monocle_apptrace.instrumentation.metamodel.langchain import (
     _helper,
 )
-from monocle_apptrace.instrumentation.common.utils import resolve_from_alias, get_llm_type, get_status, get_status_code
+from monocle_apptrace.instrumentation.common.utils import get_error_message, resolve_from_alias, get_llm_type, get_status, get_status_code
 
 INFERENCE = {
     "type": "inference.framework",
@@ -30,11 +30,11 @@ INFERENCE = {
         {
             "_comment": "LLM Model",
             "attribute": "name",
-            "accessor": lambda arguments: resolve_from_alias(arguments['instance'].__dict__, ['model', 'model_name', 'endpoint_name', 'deployment_name'])
+            "accessor": lambda arguments: resolve_from_alias(arguments['instance'].__dict__, ['model', 'model_name', 'endpoint_name', 'deployment_name', 'model_id'])
         },
         {
             "attribute": "type",
-            "accessor": lambda arguments: 'model.llm.' + resolve_from_alias(arguments['instance'].__dict__, ['model', 'model_name', 'endpoint_name', 'deployment_name'])
+            "accessor": lambda arguments: 'model.llm.' + resolve_from_alias(arguments['instance'].__dict__, ['model', 'model_name', 'endpoint_name', 'deployment_name', 'model_id'])
         }
     ]
 ],
@@ -53,13 +53,8 @@ INFERENCE = {
         "name": "data.output",
         "attributes": [
             {
-                "_comment": "…",
-                "attribute": "status",
-                "accessor": lambda arguments: get_status(arguments)
-            },
-            {
-                "attribute": "status_code",
-                "accessor": lambda arguments: get_status_code(arguments)
+                "attribute": "error_code",
+                "accessor": lambda arguments: get_error_message(arguments)
             },
             {
                 "attribute": "response",
@@ -73,6 +68,16 @@ INFERENCE = {
         {
             "_comment": "this is metadata usage from LLM",
             "accessor": lambda arguments: _helper.update_span_from_llm_response(arguments['result'], arguments['instance'])
+        },
+        {
+            "attribute": "finish_reason",
+            "accessor": lambda arguments: _helper.extract_finish_reason(arguments)
+        },
+        {
+            "attribute": "finish_type",
+            "accessor": lambda arguments: _helper.map_finish_reason_to_finish_type(
+                _helper.extract_finish_reason(arguments)
+            )
         }
     ]
 }
```
monocle_apptrace/instrumentation/metamodel/langgraph/_helper.py

```diff
@@ -1,15 +1,20 @@
+from opentelemetry.context import get_value
 from monocle_apptrace.instrumentation.common.utils import resolve_from_alias
 import logging
 logger = logging.getLogger(__name__)
 
-def handle_response(response):
+DELEGATION_NAME_PREFIX = 'transfer_to_'
+ROOT_AGENT_NAME = 'LangGraph'
+LANGGRAPTH_AGENT_NAME_KEY = "agent.langgraph"
+
+def extract_agent_response(response):
     try:
-        if 'messages' in response:
+        if response is not None and 'messages' in response:
             output = response["messages"][-1]
             return str(output.content)
     except Exception as e:
-        logger.warning("Warning: Error occurred in handle_response: %s", str(e))
-        return ""
+        logger.warning("Warning: Error occurred in handle_response: %s", str(e))
+        return ""
 
@@ -17,11 +22,13 @@ def agent_instructions(arguments):
     else:
         return arguments['kwargs']['agent'].instructions
 
-def extract_agent_input(arguments):
-
-
-
-
+def extract_agent_input(arguments):
+    if arguments['result'] is not None and 'messages' in arguments['result']:
+        history = arguments['result']['messages']
+        for message in history:
+            if hasattr(message, 'content') and hasattr(message, 'type') and message.type == "human":  # Check if the message is a HumanMessage
+                return message.content
+    return None
 
 def get_inference_endpoint(arguments):
     inference_endpoint = resolve_from_alias(arguments['instance'].client.__dict__, ['azure_endpoint', 'api_base', '_base_url'])
@@ -33,7 +40,6 @@ def tools(instance):
     if hasattr(tools,'bound') and hasattr(tools.bound,'tools_by_name'):
         return list(tools.bound.tools_by_name.keys())
 
-
 def update_span_from_llm_response(response):
     meta_dict = {}
     token_usage = None
@@ -46,3 +52,54 @@ def update_span_from_llm_response(response):
         meta_dict.update({"prompt_tokens": token_usage.get('prompt_tokens')})
         meta_dict.update({"total_tokens": token_usage.get('total_tokens')})
     return meta_dict
+
+def extract_tool_response(result):
+    if result is not None and hasattr(result, 'content'):
+        return result.content
+    return None
+
+def get_status(result):
+    if result is not None and hasattr(result, 'status'):
+        return result.status
+    return None
+
+def extract_tool_input(arguments):
+    tool_input = arguments['args'][0]
+    if isinstance(tool_input, str):
+        return [tool_input]
+    else:
+        return list(tool_input.values())
+
+def get_name(instance):
+    return instance.name if hasattr(instance, 'name') else ""
+
+def get_agent_name(instance) -> str:
+    return get_name(instance)
+
+def get_tool_name(instance) -> str:
+    return get_name(instance)
+
+def is_delegation_tool(instance) -> bool:
+    return get_name(instance).startswith(DELEGATION_NAME_PREFIX)
+
+def get_target_agent(instance) -> str:
+    return get_name(instance).replace(DELEGATION_NAME_PREFIX, '', 1)
+
+def is_root_agent_name(instance) -> bool:
+    return get_name(instance) == ROOT_AGENT_NAME
+
+def get_source_agent() -> str:
+    """Get the name of the agent that initiated the request."""
+    from_agent = get_value(LANGGRAPTH_AGENT_NAME_KEY)
+    return from_agent if from_agent is not None else ""
+
+def get_description(instance) -> str:
+    return instance.description if hasattr(instance, 'description') else ""
+
+def get_agent_description(instance) -> str:
+    """Get the description of the agent."""
+    return get_description(instance)
+
+def get_tool_description(instance) -> str:
+    """Get the description of the tool."""
+    return get_description(instance)
```