monocle-apptrace 0.4.1__py3-none-any.whl → 0.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of monocle-apptrace might be problematic. See the advisory linked from the registry page for more details.
- monocle_apptrace/__main__.py +1 -1
- monocle_apptrace/exporters/file_exporter.py +125 -37
- monocle_apptrace/instrumentation/common/__init__.py +16 -1
- monocle_apptrace/instrumentation/common/constants.py +14 -1
- monocle_apptrace/instrumentation/common/instrumentor.py +19 -152
- monocle_apptrace/instrumentation/common/method_wrappers.py +376 -0
- monocle_apptrace/instrumentation/common/span_handler.py +58 -32
- monocle_apptrace/instrumentation/common/utils.py +52 -15
- monocle_apptrace/instrumentation/common/wrapper.py +124 -18
- monocle_apptrace/instrumentation/common/wrapper_method.py +48 -1
- monocle_apptrace/instrumentation/metamodel/a2a/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/a2a/_helper.py +37 -0
- monocle_apptrace/instrumentation/metamodel/a2a/entities/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/a2a/entities/inference.py +112 -0
- monocle_apptrace/instrumentation/metamodel/a2a/methods.py +22 -0
- monocle_apptrace/instrumentation/metamodel/adk/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/adk/_helper.py +182 -0
- monocle_apptrace/instrumentation/metamodel/adk/entities/agent.py +50 -0
- monocle_apptrace/instrumentation/metamodel/adk/entities/tool.py +57 -0
- monocle_apptrace/instrumentation/metamodel/adk/methods.py +24 -0
- monocle_apptrace/instrumentation/metamodel/agents/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/agents/_helper.py +220 -0
- monocle_apptrace/instrumentation/metamodel/agents/agents_processor.py +152 -0
- monocle_apptrace/instrumentation/metamodel/agents/entities/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/agents/entities/inference.py +191 -0
- monocle_apptrace/instrumentation/metamodel/agents/methods.py +56 -0
- monocle_apptrace/instrumentation/metamodel/aiohttp/_helper.py +6 -11
- monocle_apptrace/instrumentation/metamodel/anthropic/_helper.py +112 -18
- monocle_apptrace/instrumentation/metamodel/anthropic/entities/inference.py +18 -10
- monocle_apptrace/instrumentation/metamodel/azfunc/_helper.py +13 -11
- monocle_apptrace/instrumentation/metamodel/azfunc/entities/http.py +5 -0
- monocle_apptrace/instrumentation/metamodel/azureaiinference/_helper.py +88 -8
- monocle_apptrace/instrumentation/metamodel/azureaiinference/entities/inference.py +22 -8
- monocle_apptrace/instrumentation/metamodel/botocore/_helper.py +92 -16
- monocle_apptrace/instrumentation/metamodel/botocore/entities/inference.py +13 -8
- monocle_apptrace/instrumentation/metamodel/botocore/handlers/botocore_span_handler.py +1 -1
- monocle_apptrace/instrumentation/metamodel/fastapi/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/fastapi/_helper.py +82 -0
- monocle_apptrace/instrumentation/metamodel/fastapi/entities/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/fastapi/entities/http.py +44 -0
- monocle_apptrace/instrumentation/metamodel/fastapi/methods.py +23 -0
- monocle_apptrace/instrumentation/metamodel/finish_types.py +463 -0
- monocle_apptrace/instrumentation/metamodel/flask/_helper.py +6 -11
- monocle_apptrace/instrumentation/metamodel/gemini/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/gemini/_helper.py +120 -0
- monocle_apptrace/instrumentation/metamodel/gemini/entities/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/gemini/entities/inference.py +86 -0
- monocle_apptrace/instrumentation/metamodel/gemini/entities/retrieval.py +43 -0
- monocle_apptrace/instrumentation/metamodel/gemini/methods.py +31 -0
- monocle_apptrace/instrumentation/metamodel/haystack/_helper.py +79 -8
- monocle_apptrace/instrumentation/metamodel/haystack/entities/inference.py +15 -10
- monocle_apptrace/instrumentation/metamodel/haystack/methods.py +7 -0
- monocle_apptrace/instrumentation/metamodel/lambdafunc/_helper.py +78 -0
- monocle_apptrace/instrumentation/metamodel/lambdafunc/entities/http.py +51 -0
- monocle_apptrace/instrumentation/metamodel/lambdafunc/methods.py +23 -0
- monocle_apptrace/instrumentation/metamodel/lambdafunc/wrapper.py +23 -0
- monocle_apptrace/instrumentation/metamodel/langchain/_helper.py +145 -19
- monocle_apptrace/instrumentation/metamodel/langchain/entities/inference.py +19 -10
- monocle_apptrace/instrumentation/metamodel/langgraph/_helper.py +67 -10
- monocle_apptrace/instrumentation/metamodel/langgraph/entities/inference.py +127 -20
- monocle_apptrace/instrumentation/metamodel/langgraph/langgraph_processor.py +46 -0
- monocle_apptrace/instrumentation/metamodel/langgraph/methods.py +35 -9
- monocle_apptrace/instrumentation/metamodel/litellm/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/litellm/_helper.py +89 -0
- monocle_apptrace/instrumentation/metamodel/litellm/entities/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/litellm/entities/inference.py +108 -0
- monocle_apptrace/instrumentation/metamodel/litellm/methods.py +19 -0
- monocle_apptrace/instrumentation/metamodel/llamaindex/_helper.py +227 -16
- monocle_apptrace/instrumentation/metamodel/llamaindex/entities/agent.py +127 -10
- monocle_apptrace/instrumentation/metamodel/llamaindex/entities/inference.py +13 -8
- monocle_apptrace/instrumentation/metamodel/llamaindex/llamaindex_processor.py +62 -0
- monocle_apptrace/instrumentation/metamodel/llamaindex/methods.py +68 -1
- monocle_apptrace/instrumentation/metamodel/mcp/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/mcp/_helper.py +118 -0
- monocle_apptrace/instrumentation/metamodel/mcp/entities/__init__.py +0 -0
- monocle_apptrace/instrumentation/metamodel/mcp/entities/inference.py +48 -0
- monocle_apptrace/instrumentation/metamodel/mcp/mcp_processor.py +8 -0
- monocle_apptrace/instrumentation/metamodel/mcp/methods.py +21 -0
- monocle_apptrace/instrumentation/metamodel/openai/_helper.py +188 -16
- monocle_apptrace/instrumentation/metamodel/openai/entities/inference.py +148 -92
- monocle_apptrace/instrumentation/metamodel/openai/entities/retrieval.py +1 -1
- monocle_apptrace/instrumentation/metamodel/teamsai/_helper.py +53 -23
- monocle_apptrace/instrumentation/metamodel/teamsai/entities/inference/actionplanner_output_processor.py +1 -1
- monocle_apptrace/instrumentation/metamodel/teamsai/entities/inference/teamsai_output_processor.py +15 -9
- monocle_apptrace/instrumentation/metamodel/teamsai/sample.json +0 -4
- {monocle_apptrace-0.4.1.dist-info → monocle_apptrace-0.5.0.dist-info}/METADATA +27 -11
- monocle_apptrace-0.5.0.dist-info/RECORD +142 -0
- monocle_apptrace-0.4.1.dist-info/RECORD +0 -96
- {monocle_apptrace-0.4.1.dist-info → monocle_apptrace-0.5.0.dist-info}/WHEEL +0 -0
- {monocle_apptrace-0.4.1.dist-info → monocle_apptrace-0.5.0.dist-info}/licenses/LICENSE +0 -0
- {monocle_apptrace-0.4.1.dist-info → monocle_apptrace-0.5.0.dist-info}/licenses/NOTICE +0 -0
|
@@ -0,0 +1,86 @@
|
|
|
1
|
+
from monocle_apptrace.instrumentation.metamodel.gemini import (
    _helper,
)
from monocle_apptrace.instrumentation.common.utils import get_error_message

# Output processor describing how an inference span is populated for
# Google Gemini / Vertex AI generate_content calls.
INFERENCE = {
    "type": "inference",
    "attributes": [
        [
            {
                "_comment": "provider type , inference_endpoint",
                "attribute": "type",
                # Vertex AI clients carry a truthy `vertexai` flag; plain Gemini clients do not.
                "accessor": lambda arguments: (
                    'inference.vertexai'
                    if hasattr(arguments['instance'], "vertexai") and arguments['instance'].vertexai
                    else 'inference.gemini'
                ),
            },
            {
                "attribute": "inference_endpoint",
                "accessor": lambda arguments: _helper.extract_inference_endpoint(arguments['instance']),
            },
            {
                "attribute": "provider_name",
                "accessor": lambda arguments: 'gcp',
            },
        ],
        [
            {
                "_comment": "LLM Model",
                "attribute": "name",
                "accessor": lambda arguments: _helper.resolve_from_alias(arguments['kwargs'], ['model']),
            },
            {
                "attribute": "type",
                "accessor": lambda arguments: 'model.llm.' + _helper.resolve_from_alias(arguments['kwargs'], ['model']),
            },
        ],
    ],
    "events": [
        {
            "name": "data.input",
            "attributes": [
                {
                    "_comment": "this is instruction and user query to LLM",
                    "attribute": "input",
                    "accessor": lambda arguments: _helper.extract_messages(arguments['kwargs']),
                },
            ],
        },
        {
            "name": "data.output",
            "attributes": [
                {
                    "attribute": "error_code",
                    "accessor": lambda arguments: get_error_message(arguments),
                },
                {
                    "attribute": "response",
                    "accessor": lambda arguments: _helper.extract_assistant_message(arguments),
                },
            ],
        },
        {
            "name": "metadata",
            "attributes": [
                {
                    "_comment": "this is metadata usage from LLM",
                    "accessor": lambda arguments: _helper.update_span_from_llm_response(arguments['result'], arguments['instance']),
                },
                {
                    "_comment": "finish reason from Gemini response",
                    "attribute": "finish_reason",
                    "accessor": lambda arguments: _helper.extract_finish_reason(arguments),
                },
                {
                    "_comment": "finish type mapped from finish reason",
                    "attribute": "finish_type",
                    "accessor": lambda arguments: _helper.map_finish_reason_to_finish_type(
                        _helper.extract_finish_reason(arguments)
                    ),
                },
            ],
        },
    ],
}
|
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
from monocle_apptrace.instrumentation.metamodel.gemini import (
    _helper,
)

# Output processor describing how a retrieval (embedding) span is populated
# for Google Gemini embed_content calls.
RETRIEVAL = {
    "type": "retrieval",
    "attributes": [
        [
            {
                "_comment": "Embedding Model",
                "attribute": "name",
                "accessor": lambda arguments: _helper.resolve_from_alias(arguments['kwargs'], ['model']),
            },
            {
                "attribute": "type",
                "accessor": lambda arguments: 'model.embedding.' + _helper.resolve_from_alias(arguments['kwargs'], ['model']),
            },
        ],
    ],
    "events": [
        {
            "name": "data.input",
            "attributes": [
                {
                    "attribute": "input",
                    "accessor": lambda arguments: _helper.update_input_span_events(arguments['kwargs']),
                },
            ],
        },
        {
            "name": "data.output",
            "attributes": [
                {
                    "attribute": "response",
                    "accessor": lambda arguments: _helper.update_output_span_events(arguments['result']),
                },
            ],
        },
    ],
}
|
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
from monocle_apptrace.instrumentation.common.wrapper import atask_wrapper, task_wrapper
from monocle_apptrace.instrumentation.metamodel.gemini.entities.inference import (
    INFERENCE,
)
from monocle_apptrace.instrumentation.metamodel.gemini.entities.retrieval import (
    RETRIEVAL,
)

# Instrumentation targets for the google-genai SDK: sync and async
# content generation are traced as inference; embedding as retrieval.
GEMINI_METHODS = [
    {
        "package": "google.genai.models",
        "object": "Models",
        "method": "generate_content",
        "wrapper_method": task_wrapper,
        "output_processor": INFERENCE,
    },
    {
        "package": "google.genai.models",
        "object": "AsyncModels",
        "method": "generate_content",
        "wrapper_method": atask_wrapper,
        "output_processor": INFERENCE,
    },
    {
        "package": "google.genai.models",
        "object": "Models",
        "method": "embed_content",
        "wrapper_method": task_wrapper,
        "output_processor": RETRIEVAL,
    },
]
|
|
@@ -2,12 +2,15 @@ import logging
|
|
|
2
2
|
|
|
3
3
|
from monocle_apptrace.instrumentation.common.utils import (
|
|
4
4
|
Option,
|
|
5
|
+
get_json_dumps,
|
|
5
6
|
get_keys_as_tuple,
|
|
6
7
|
get_nested_value,
|
|
7
8
|
try_option,
|
|
8
9
|
get_exception_message,
|
|
9
10
|
get_status_code,
|
|
10
11
|
)
|
|
12
|
+
from monocle_apptrace.instrumentation.metamodel.finish_types import map_haystack_finish_reason_to_finish_type
|
|
13
|
+
|
|
11
14
|
logger = logging.getLogger(__name__)
|
|
12
15
|
|
|
13
16
|
|
|
@@ -56,24 +59,27 @@ def extract_question_from_prompt(content):
|
|
|
56
59
|
|
|
57
60
|
def extract_assistant_message(arguments):
    """Return the assistant reply from a Haystack result as a JSON-encoded
    {role: response} message, or an error/exception message on failure.

    Returns "" when no reply could be extracted.
    """
    status = get_status_code(arguments)
    messages = []
    role = "assistant"
    if status == 'success':
        response = ""
        if "replies" in arguments['result']:
            reply = arguments['result']["replies"][0]
            # Bug fix: the original wrote `hasattr(reply, role)` (checking for an
            # attribute literally named "assistant") and `hasattr(reply, role, "value")`,
            # which raises TypeError because hasattr() takes exactly two arguments.
            # The intent is to read reply.role.value when it exists and is a string.
            if hasattr(reply, "role") and hasattr(reply.role, "value") and isinstance(reply.role.value, str):
                role = reply.role.value or role
            if hasattr(reply, 'content'):
                response = reply.content
            elif hasattr(reply, 'text'):
                response = reply.text
            else:
                response = reply
            messages.append({role: response})
    else:
        if arguments["exception"] is not None:
            return get_exception_message(arguments)
        elif hasattr(arguments["result"], "error"):
            return arguments['result'].error
    return get_json_dumps(messages[0]) if messages else ""
|
|
77
83
|
|
|
78
84
|
def get_vectorstore_deployment(my_map):
|
|
79
85
|
if isinstance(my_map, dict):
|
|
@@ -112,7 +118,10 @@ def resolve_from_alias(my_map, alias):
|
|
|
112
118
|
return None
|
|
113
119
|
|
|
114
120
|
def extract_inference_endpoint(instance):
|
|
115
|
-
|
|
121
|
+
if hasattr(instance, '_model_name') and isinstance(instance._model_name, str) and 'gemini' in instance._model_name.lower():
|
|
122
|
+
inference_endpoint = try_option(lambda: f"https://generativelanguage.googleapis.com/v1beta/models/{instance._model_name}:generateContent")
|
|
123
|
+
if hasattr(instance, 'client') and hasattr(instance.client, 'base_url'):
|
|
124
|
+
inference_endpoint: Option[str] = try_option(getattr, instance.client, 'base_url').map(str)
|
|
116
125
|
if inference_endpoint.is_none():
|
|
117
126
|
inference_endpoint = try_option(getattr, instance.client.meta, 'endpoint_url').map(str)
|
|
118
127
|
|
|
@@ -146,3 +155,65 @@ def update_output_span_events(results):
|
|
|
146
155
|
if len(output_arg_text) > 100:
|
|
147
156
|
output_arg_text = output_arg_text[:100] + "..."
|
|
148
157
|
return output_arg_text
|
|
158
|
+
|
|
159
|
+
def extract_finish_reason(arguments):
    """Best-effort extraction of a finish_reason from a Haystack response.

    Probes, in order: an exception (-> "error"); a direct finish_reason
    attribute; result['meta'][0]; result['replies'][0].meta; generation_info;
    llm_output; additional_kwargs; OpenAI-style choices. Falls back to "stop"
    on success with no explicit reason, and None on internal errors.
    """
    try:
        if arguments.get("exception") is not None:
            return "error"

        response = arguments.get("result")
        if response is None:
            return None

        # Direct finish_reason attribute wins.
        direct = getattr(response, "finish_reason", None)
        if direct:
            return direct

        if isinstance(response, dict):
            # Haystack generator results carry per-reply metadata under 'meta'.
            meta_list = response.get('meta')
            if meta_list:
                first_meta = meta_list[0]
                if isinstance(first_meta, dict) and "finish_reason" in first_meta:
                    return first_meta["finish_reason"]

            # Chat generators attach metadata on the reply objects themselves.
            replies = response.get('replies')
            if replies:
                first_reply = replies[0]
                reply_meta = getattr(first_reply, 'meta', None)
                if reply_meta and "finish_reason" in reply_meta:
                    return reply_meta["finish_reason"]

        # Other response shapes keep the reason in one of these containers.
        for holder_name in ("generation_info", "llm_output", "additional_kwargs"):
            holder = getattr(response, holder_name, None)
            if holder:
                reason = holder.get("finish_reason")
                if reason:
                    return reason

        # OpenAI-style responses expose choices[0].finish_reason.
        choices = getattr(response, "choices", None)
        if choices:
            first_choice = choices[0]
            if hasattr(first_choice, "finish_reason"):
                return first_choice.finish_reason

        # No explicit reason found on a successful call: assume normal stop.
        return "stop"
    except Exception as e:
        logger.warning("Warning: Error occurred in extract_finish_reason: %s", str(e))
        return None

def map_finish_reason_to_finish_type(finish_reason):
    """Map Haystack finish_reason to finish_type using the shared Haystack mapping."""
    return map_haystack_finish_reason_to_finish_type(finish_reason)
|
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
from monocle_apptrace.instrumentation.metamodel.haystack import (
|
|
2
2
|
_helper,
|
|
3
3
|
)
|
|
4
|
-
from monocle_apptrace.instrumentation.common.utils import
|
|
4
|
+
from monocle_apptrace.instrumentation.common.utils import get_error_message, get_llm_type
|
|
5
5
|
|
|
6
6
|
INFERENCE = {
|
|
7
7
|
"type": "inference.framework",
|
|
@@ -35,12 +35,12 @@ INFERENCE = {
|
|
|
35
35
|
"_comment": "LLM Model",
|
|
36
36
|
"attribute": "name",
|
|
37
37
|
"accessor": lambda arguments: _helper.resolve_from_alias(arguments['instance'].__dict__,
|
|
38
|
-
['model', 'model_name'])
|
|
38
|
+
['model', 'model_name','_model_name'])
|
|
39
39
|
},
|
|
40
40
|
{
|
|
41
41
|
"attribute": "type",
|
|
42
42
|
"accessor": lambda arguments: 'model.llm.' + _helper.resolve_from_alias(arguments['instance'].__dict__,
|
|
43
|
-
['model', 'model_name'])
|
|
43
|
+
['model', 'model_name','_model_name'])
|
|
44
44
|
}
|
|
45
45
|
]
|
|
46
46
|
],
|
|
@@ -59,13 +59,8 @@ INFERENCE = {
|
|
|
59
59
|
"name": "data.output",
|
|
60
60
|
"attributes": [
|
|
61
61
|
{
|
|
62
|
-
"
|
|
63
|
-
"
|
|
64
|
-
"accessor": lambda arguments: get_status(arguments)
|
|
65
|
-
},
|
|
66
|
-
{
|
|
67
|
-
"attribute": "status_code",
|
|
68
|
-
"accessor": lambda arguments: get_status_code(arguments)
|
|
62
|
+
"attribute": "error_code",
|
|
63
|
+
"accessor": lambda arguments: get_error_message(arguments)
|
|
69
64
|
},
|
|
70
65
|
{
|
|
71
66
|
"attribute": "response",
|
|
@@ -80,6 +75,16 @@ INFERENCE = {
|
|
|
80
75
|
"_comment": "this is metadata usage from LLM",
|
|
81
76
|
"accessor": lambda arguments: _helper.update_span_from_llm_response(arguments['result'],
|
|
82
77
|
arguments['instance'])
|
|
78
|
+
},
|
|
79
|
+
{
|
|
80
|
+
"attribute": "finish_reason",
|
|
81
|
+
"accessor": lambda arguments: _helper.extract_finish_reason(arguments)
|
|
82
|
+
},
|
|
83
|
+
{
|
|
84
|
+
"attribute": "finish_type",
|
|
85
|
+
"accessor": lambda arguments: _helper.map_finish_reason_to_finish_type(
|
|
86
|
+
_helper.extract_finish_reason(arguments)
|
|
87
|
+
)
|
|
83
88
|
}
|
|
84
89
|
]
|
|
85
90
|
}
|
|
@@ -44,4 +44,11 @@ HAYSTACK_METHODS = [
|
|
|
44
44
|
"wrapper_method": task_wrapper,
|
|
45
45
|
"output_processor": INFERENCE
|
|
46
46
|
},
|
|
47
|
+
{
|
|
48
|
+
"package": "haystack_integrations.components.generators.google_ai",
|
|
49
|
+
"object": "GoogleAIGeminiChatGenerator",
|
|
50
|
+
"method": "run",
|
|
51
|
+
"wrapper_method": task_wrapper,
|
|
52
|
+
"output_processor": INFERENCE
|
|
53
|
+
},
|
|
47
54
|
]
|
|
@@ -0,0 +1,78 @@
|
|
|
1
|
+
import logging
|
|
2
|
+
from threading import local
|
|
3
|
+
from monocle_apptrace.instrumentation.common.utils import extract_http_headers, clear_http_scopes, try_option, Option, \
|
|
4
|
+
MonocleSpanException
|
|
5
|
+
from monocle_apptrace.instrumentation.common.span_handler import SpanHandler
|
|
6
|
+
from monocle_apptrace.instrumentation.common.constants import HTTP_SUCCESS_CODES
|
|
7
|
+
from urllib.parse import unquote, urlparse, ParseResult
|
|
8
|
+
|
|
9
|
+
logger = logging.getLogger(__name__)
|
|
10
|
+
MAX_DATA_LENGTH = 1000
|
|
11
|
+
token_data = local()
|
|
12
|
+
token_data.current_token = None
|
|
13
|
+
|
|
14
|
+
def get_url(kwargs) -> ParseResult:
    """Parse the request path from kwargs['event'] into a ParseResult.

    Returns None when the event carries no 'path' key.
    """
    raw_path = try_option(lambda ev: ev.get('path'), kwargs['event']).unwrap_or(None)
    return urlparse(raw_path) if raw_path is not None else None
|
|
21
|
+
|
|
22
|
+
def get_route(args) -> str:
    """Return the request path from a Lambda proxy event (args[1]).

    Falls back to requestContext.path when the top-level 'path' is absent.
    """
    lambda_event = args[1]
    direct_path = lambda_event.get("path")
    if direct_path:
        return direct_path
    return lambda_event.get("requestContext", {}).get("path")
|
|
26
|
+
|
|
27
|
+
def get_method(args) -> str:
    """Return the HTTP method from a Lambda proxy event (args[1]).

    Falls back to requestContext.httpMethod when the top-level key is absent.
    """
    lambda_event = args[1]
    return (lambda_event.get("httpMethod")
            or lambda_event.get("requestContext", {}).get("httpMethod"))
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
def get_params(args) -> dict:
    """Return the 'question' query-string parameter from a Lambda event.

    Lambda delivers queryStringParameters as None when there are no
    parameters, so the dict check guards that case; returns None then.
    """
    query_params = args[1].get('queryStringParameters', {})
    if not isinstance(query_params, dict):
        return None
    return query_params.get('question')
|
|
40
|
+
|
|
41
|
+
def get_body(args) -> dict:
    """Return the raw request body from a Lambda proxy event (args[1])."""
    return args[1].get("body")
|
|
45
|
+
|
|
46
|
+
def extract_response(result) -> str:
    """Return the response body from a Lambda result dict.

    Bytes bodies are decoded as UTF-8 (undecodable bytes dropped);
    anything without a 'body' key yields "".
    """
    if not (isinstance(result, dict) and 'body' in result):
        return ""
    body = result['body']
    if isinstance(body, bytes):
        return body.decode('utf-8', errors='ignore')
    return body
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
def extract_status(result) -> str:
    """Return the HTTP status code of a Lambda result as a string.

    Raises MonocleSpanException (carrying the response body) for any
    status not listed in HTTP_SUCCESS_CODES, so the span is marked failed.
    """
    if isinstance(result, dict) and 'statusCode' in result:
        status = str(result['statusCode'])
    else:
        status = ""
    if status not in HTTP_SUCCESS_CODES:
        error_message = extract_response(result)
        raise MonocleSpanException(f"error: {status} - {error_message}")
    return status
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
def lambda_func_pre_tracing(kwargs):
    """Attach incoming trace-context HTTP headers from the Lambda event.

    Returns the scope token that post-tracing must later clear.
    """
    if 'event' in kwargs:
        headers = kwargs['event'].get('headers', {})
    else:
        headers = {}
    return extract_http_headers(headers)
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
def lambda_func_post_tracing(token):
    """Detach the HTTP scopes attached by lambda_func_pre_tracing."""
    clear_http_scopes(token)
|
|
71
|
+
|
|
72
|
+
|
|
73
|
+
class lambdaSpanHandler(SpanHandler):
    """SpanHandler that propagates HTTP trace context around a Lambda invocation."""

    def pre_tracing(self, to_wrap, wrapped, instance, args, kwargs):
        # Pull trace headers off the incoming event before the handler runs.
        return lambda_func_pre_tracing(kwargs)

    def post_tracing(self, to_wrap, wrapped, instance, args, kwargs, return_value, token):
        # Release the scopes attached in pre_tracing.
        lambda_func_post_tracing(token)
|
|
@@ -0,0 +1,51 @@
|
|
|
1
|
+
from monocle_apptrace.instrumentation.metamodel.lambdafunc import _helper

# Output processor describing how an http.process span is populated for an
# AWS Lambda proxy invocation (method/route/body attributes plus I/O events).
LAMBDA_HTTP_PROCESSOR = {
    "type": "http.process",
    "attributes": [
        [
            {
                "_comment": "request method, request URI",
                "attribute": "method",
                "accessor": lambda arguments: _helper.get_method(arguments['args']),
            },
            {
                "_comment": "request method, request URI",
                "attribute": "route",
                "accessor": lambda arguments: _helper.get_route(arguments['args']),
            },
            {
                "_comment": "request method, request URI",
                "attribute": "body",
                "accessor": lambda arguments: _helper.get_body(arguments['args']),
            },
        ],
    ],
    "events": [
        {
            "name": "data.input",
            "attributes": [
                {
                    "_comment": "route params",
                    "attribute": "params",
                    "accessor": lambda arguments: _helper.get_params(arguments['args']),
                },
            ],
        },
        {
            "name": "data.output",
            "attributes": [
                {
                    "_comment": "status from HTTP response",
                    "attribute": "status",
                    "accessor": lambda arguments: _helper.extract_status(arguments['result']),
                },
                {
                    "_comment": "this is result from LLM",
                    "attribute": "response",
                    "accessor": lambda arguments: _helper.extract_response(arguments['result']),
                },
            ],
        },
    ],
}
|
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
from monocle_apptrace.instrumentation.common.wrapper import atask_wrapper, task_wrapper
from monocle_apptrace.instrumentation.metamodel.lambdafunc.entities.http import LAMBDA_HTTP_PROCESSOR

# Instrumentation targets: both the async and sync route entry points of
# LambdaFunctionRouteWrapper are traced with the same HTTP processor.
LAMBDA_HTTP_METHODS = [
    {
        "package": "monocle_apptrace.instrumentation.metamodel.lambdafunc.wrapper",
        "object": "LambdaFunctionRouteWrapper",
        "method": "run_async",
        "span_name": "lambda_function_route",
        "wrapper_method": atask_wrapper,
        "span_handler": "lambda_func_handler",
        "output_processor": LAMBDA_HTTP_PROCESSOR,
    },
    {
        "package": "monocle_apptrace.instrumentation.metamodel.lambdafunc.wrapper",
        "object": "LambdaFunctionRouteWrapper",
        "method": "run_sync",
        "span_name": "lambda_function_route",
        "wrapper_method": task_wrapper,
        "span_handler": "lambda_func_handler",
        "output_processor": LAMBDA_HTTP_PROCESSOR,
    },
]
|
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
from functools import wraps
import inspect

def monocle_trace_lambda_function_route(func):
    """Decorator that routes a Lambda handler through LambdaFunctionRouteWrapper.

    Monocle instruments run_sync/run_async (see lambdafunc/methods.py), so
    passing the handler through them makes every invocation traceable.
    Both sync and async handlers are supported; the wrapped function keeps
    its original metadata via functools.wraps.
    """
    if inspect.iscoroutinefunction(func):
        @wraps(func)
        async def async_route(*args, **kwargs):
            return await LambdaFunctionRouteWrapper.run_async(func, *args, **kwargs)
        return async_route

    @wraps(func)
    def sync_route(*args, **kwargs):
        return LambdaFunctionRouteWrapper.run_sync(func, *args, **kwargs)
    return sync_route

class LambdaFunctionRouteWrapper:
    """Instrumentation anchor: Monocle patches these static pass-throughs."""

    @staticmethod
    async def run_async(func, *args, **kwargs):
        return await func(*args, **kwargs)

    @staticmethod
    def run_sync(func, *args, **kwargs):
        return func(*args, **kwargs)
|