monocle-apptrace 0.4.2__py3-none-any.whl → 0.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of monocle-apptrace might be problematic.

Files changed (88)
  1. monocle_apptrace/__main__.py +1 -1
  2. monocle_apptrace/exporters/file_exporter.py +125 -37
  3. monocle_apptrace/instrumentation/common/__init__.py +16 -1
  4. monocle_apptrace/instrumentation/common/constants.py +14 -1
  5. monocle_apptrace/instrumentation/common/instrumentor.py +19 -152
  6. monocle_apptrace/instrumentation/common/method_wrappers.py +376 -0
  7. monocle_apptrace/instrumentation/common/span_handler.py +58 -32
  8. monocle_apptrace/instrumentation/common/utils.py +52 -15
  9. monocle_apptrace/instrumentation/common/wrapper.py +124 -18
  10. monocle_apptrace/instrumentation/common/wrapper_method.py +47 -1
  11. monocle_apptrace/instrumentation/metamodel/a2a/__init__.py +0 -0
  12. monocle_apptrace/instrumentation/metamodel/a2a/_helper.py +37 -0
  13. monocle_apptrace/instrumentation/metamodel/a2a/entities/__init__.py +0 -0
  14. monocle_apptrace/instrumentation/metamodel/a2a/entities/inference.py +112 -0
  15. monocle_apptrace/instrumentation/metamodel/a2a/methods.py +22 -0
  16. monocle_apptrace/instrumentation/metamodel/adk/__init__.py +0 -0
  17. monocle_apptrace/instrumentation/metamodel/adk/_helper.py +182 -0
  18. monocle_apptrace/instrumentation/metamodel/adk/entities/agent.py +50 -0
  19. monocle_apptrace/instrumentation/metamodel/adk/entities/tool.py +57 -0
  20. monocle_apptrace/instrumentation/metamodel/adk/methods.py +24 -0
  21. monocle_apptrace/instrumentation/metamodel/agents/__init__.py +0 -0
  22. monocle_apptrace/instrumentation/metamodel/agents/_helper.py +220 -0
  23. monocle_apptrace/instrumentation/metamodel/agents/agents_processor.py +152 -0
  24. monocle_apptrace/instrumentation/metamodel/agents/entities/__init__.py +0 -0
  25. monocle_apptrace/instrumentation/metamodel/agents/entities/inference.py +191 -0
  26. monocle_apptrace/instrumentation/metamodel/agents/methods.py +56 -0
  27. monocle_apptrace/instrumentation/metamodel/aiohttp/_helper.py +6 -11
  28. monocle_apptrace/instrumentation/metamodel/anthropic/_helper.py +112 -18
  29. monocle_apptrace/instrumentation/metamodel/anthropic/entities/inference.py +18 -10
  30. monocle_apptrace/instrumentation/metamodel/azfunc/_helper.py +13 -11
  31. monocle_apptrace/instrumentation/metamodel/azfunc/entities/http.py +5 -0
  32. monocle_apptrace/instrumentation/metamodel/azureaiinference/_helper.py +88 -8
  33. monocle_apptrace/instrumentation/metamodel/azureaiinference/entities/inference.py +22 -8
  34. monocle_apptrace/instrumentation/metamodel/botocore/_helper.py +92 -16
  35. monocle_apptrace/instrumentation/metamodel/botocore/entities/inference.py +13 -8
  36. monocle_apptrace/instrumentation/metamodel/botocore/handlers/botocore_span_handler.py +1 -1
  37. monocle_apptrace/instrumentation/metamodel/fastapi/__init__.py +0 -0
  38. monocle_apptrace/instrumentation/metamodel/fastapi/_helper.py +82 -0
  39. monocle_apptrace/instrumentation/metamodel/fastapi/entities/__init__.py +0 -0
  40. monocle_apptrace/instrumentation/metamodel/fastapi/entities/http.py +44 -0
  41. monocle_apptrace/instrumentation/metamodel/fastapi/methods.py +23 -0
  42. monocle_apptrace/instrumentation/metamodel/finish_types.py +463 -0
  43. monocle_apptrace/instrumentation/metamodel/flask/_helper.py +6 -11
  44. monocle_apptrace/instrumentation/metamodel/gemini/_helper.py +51 -7
  45. monocle_apptrace/instrumentation/metamodel/gemini/entities/inference.py +22 -11
  46. monocle_apptrace/instrumentation/metamodel/gemini/entities/retrieval.py +43 -0
  47. monocle_apptrace/instrumentation/metamodel/gemini/methods.py +18 -1
  48. monocle_apptrace/instrumentation/metamodel/haystack/_helper.py +79 -8
  49. monocle_apptrace/instrumentation/metamodel/haystack/entities/inference.py +15 -10
  50. monocle_apptrace/instrumentation/metamodel/haystack/methods.py +7 -0
  51. monocle_apptrace/instrumentation/metamodel/lambdafunc/_helper.py +78 -0
  52. monocle_apptrace/instrumentation/metamodel/lambdafunc/entities/http.py +51 -0
  53. monocle_apptrace/instrumentation/metamodel/lambdafunc/methods.py +23 -0
  54. monocle_apptrace/instrumentation/metamodel/lambdafunc/wrapper.py +23 -0
  55. monocle_apptrace/instrumentation/metamodel/langchain/_helper.py +145 -19
  56. monocle_apptrace/instrumentation/metamodel/langchain/entities/inference.py +19 -10
  57. monocle_apptrace/instrumentation/metamodel/langgraph/_helper.py +67 -10
  58. monocle_apptrace/instrumentation/metamodel/langgraph/entities/inference.py +127 -20
  59. monocle_apptrace/instrumentation/metamodel/langgraph/langgraph_processor.py +46 -0
  60. monocle_apptrace/instrumentation/metamodel/langgraph/methods.py +35 -9
  61. monocle_apptrace/instrumentation/metamodel/litellm/__init__.py +0 -0
  62. monocle_apptrace/instrumentation/metamodel/litellm/_helper.py +89 -0
  63. monocle_apptrace/instrumentation/metamodel/litellm/entities/__init__.py +0 -0
  64. monocle_apptrace/instrumentation/metamodel/litellm/entities/inference.py +108 -0
  65. monocle_apptrace/instrumentation/metamodel/litellm/methods.py +19 -0
  66. monocle_apptrace/instrumentation/metamodel/llamaindex/_helper.py +227 -16
  67. monocle_apptrace/instrumentation/metamodel/llamaindex/entities/agent.py +127 -10
  68. monocle_apptrace/instrumentation/metamodel/llamaindex/entities/inference.py +13 -8
  69. monocle_apptrace/instrumentation/metamodel/llamaindex/llamaindex_processor.py +62 -0
  70. monocle_apptrace/instrumentation/metamodel/llamaindex/methods.py +68 -1
  71. monocle_apptrace/instrumentation/metamodel/mcp/__init__.py +0 -0
  72. monocle_apptrace/instrumentation/metamodel/mcp/_helper.py +118 -0
  73. monocle_apptrace/instrumentation/metamodel/mcp/entities/__init__.py +0 -0
  74. monocle_apptrace/instrumentation/metamodel/mcp/entities/inference.py +48 -0
  75. monocle_apptrace/instrumentation/metamodel/mcp/mcp_processor.py +8 -0
  76. monocle_apptrace/instrumentation/metamodel/mcp/methods.py +21 -0
  77. monocle_apptrace/instrumentation/metamodel/openai/_helper.py +188 -16
  78. monocle_apptrace/instrumentation/metamodel/openai/entities/inference.py +148 -92
  79. monocle_apptrace/instrumentation/metamodel/openai/entities/retrieval.py +1 -1
  80. monocle_apptrace/instrumentation/metamodel/teamsai/_helper.py +53 -23
  81. monocle_apptrace/instrumentation/metamodel/teamsai/entities/inference/actionplanner_output_processor.py +1 -1
  82. monocle_apptrace/instrumentation/metamodel/teamsai/entities/inference/teamsai_output_processor.py +15 -9
  83. monocle_apptrace/instrumentation/metamodel/teamsai/sample.json +0 -4
  84. {monocle_apptrace-0.4.2.dist-info → monocle_apptrace-0.5.0.dist-info}/METADATA +27 -11
  85. {monocle_apptrace-0.4.2.dist-info → monocle_apptrace-0.5.0.dist-info}/RECORD +88 -47
  86. {monocle_apptrace-0.4.2.dist-info → monocle_apptrace-0.5.0.dist-info}/WHEEL +0 -0
  87. {monocle_apptrace-0.4.2.dist-info → monocle_apptrace-0.5.0.dist-info}/licenses/LICENSE +0 -0
  88. {monocle_apptrace-0.4.2.dist-info → monocle_apptrace-0.5.0.dist-info}/licenses/NOTICE +0 -0
@@ -0,0 +1,43 @@
+ from monocle_apptrace.instrumentation.metamodel.gemini import (
+     _helper,
+ )
+
+ RETRIEVAL = {
+     "type": "retrieval",
+     "attributes": [
+         [
+             {
+                 "_comment": "Embedding Model",
+                 "attribute": "name",
+                 "accessor": lambda arguments: _helper.resolve_from_alias(arguments['kwargs'], ['model'])
+             },
+             {
+                 "attribute": "type",
+                 "accessor": lambda arguments: 'model.embedding.' + _helper.resolve_from_alias(arguments['kwargs'], ['model'])
+             }
+         ]
+     ],
+     "events": [
+         {
+             "name": "data.input",
+             "attributes": [
+                 {
+                     "attribute": "input",
+                     "accessor": lambda arguments: _helper.update_input_span_events(arguments['kwargs'])
+                 }
+             ]
+         },
+         {
+             "name": "data.output",
+             "attributes": [
+                 {
+                     "attribute": "response",
+                     "accessor": lambda arguments: _helper.update_output_span_events(arguments['result'])
+                 }
+             ]
+         }
+     ]
+ }
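
The RETRIEVAL processor above is purely declarative: each accessor is a lambda that the instrumentor applies to an arguments dictionary assembled around the wrapped call. A minimal sketch of that evaluation, assuming arguments carries the call's kwargs and result as the accessors above imply (the values are illustrative, not package API):

    # Stand-in arguments dict, shaped the way the accessors above expect.
    arguments = {
        "kwargs": {"model": "text-embedding-004", "contents": "hello"},
        "result": None,
    }

    # Equivalent of the "name" and "type" accessors, inlined for clarity.
    name = arguments["kwargs"].get("model")       # 'text-embedding-004'
    span_type = "model.embedding." + name         # 'model.embedding.text-embedding-004'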
@@ -1,7 +1,10 @@
- from monocle_apptrace.instrumentation.common.wrapper import task_wrapper
+ from monocle_apptrace.instrumentation.common.wrapper import atask_wrapper, task_wrapper
  from monocle_apptrace.instrumentation.metamodel.gemini.entities.inference import (
      INFERENCE,
  )
+ from monocle_apptrace.instrumentation.metamodel.gemini.entities.retrieval import (
+     RETRIEVAL,
+ )

  GEMINI_METHODS = [
      {
@@ -10,5 +13,19 @@ GEMINI_METHODS = [
          "method": "generate_content",
          "wrapper_method": task_wrapper,
          "output_processor": INFERENCE,
+     },
+     {
+         "package": "google.genai.models",
+         "object": "AsyncModels",
+         "method": "generate_content",
+         "wrapper_method": atask_wrapper,
+         "output_processor": INFERENCE,
+     },
+     {
+         "package": "google.genai.models",
+         "object": "Models",
+         "method": "embed_content",
+         "wrapper_method": task_wrapper,
+         "output_processor": RETRIEVAL,
      }
  ]
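
Each entry names a package/object/method for the instrumentor to wrap, plus the wrapper and output processor to apply. A hedged usage sketch — the setup_monocle_telemetry import path is taken from monocle's docs and should be verified against the installed version:

    from monocle_apptrace.instrumentation.common.instrumentor import setup_monocle_telemetry
    from google import genai

    setup_monocle_telemetry(workflow_name="gemini_demo")

    client = genai.Client()  # reads GEMINI_API_KEY from the environment
    # generate_content is wrapped with the INFERENCE processor:
    resp = client.models.generate_content(model="gemini-2.0-flash", contents="Say hi")
    # embed_content is wrapped with the new RETRIEVAL processor:
    emb = client.models.embed_content(model="text-embedding-004", contents="hello world")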
@@ -2,12 +2,15 @@ import logging

  from monocle_apptrace.instrumentation.common.utils import (
      Option,
+     get_json_dumps,
      get_keys_as_tuple,
      get_nested_value,
      try_option,
      get_exception_message,
      get_status_code,
  )
+ from monocle_apptrace.instrumentation.metamodel.finish_types import map_haystack_finish_reason_to_finish_type
+
  logger = logging.getLogger(__name__)

@@ -56,24 +59,27 @@ def extract_question_from_prompt(content):

  def extract_assistant_message(arguments):
      status = get_status_code(arguments)
-     response: str = ""
+     messages = []
+     role = "assistant"
      if status == 'success':
+         response = ""
          if "replies" in arguments['result']:
              reply = arguments['result']["replies"][0]
+             if hasattr(reply, 'role') and hasattr(reply.role, 'value') and isinstance(reply.role.value, str):
+                 role = reply.role.value or role
              if hasattr(reply, 'content'):
                  response = reply.content
              elif hasattr(reply, 'text'):
                  response = reply.text
              else:
                  response = reply
+             messages.append({role: response})
      else:
          if arguments["exception"] is not None:
-             response = get_exception_message(arguments)
-         elif hasattr(response, "error"):
-             response = arguments['result'].error
-
-     return response
-
+             return get_exception_message(arguments)
+         elif hasattr(arguments["result"], "error"):
+             return arguments['result'].error
+     return get_json_dumps(messages[0]) if messages else ""

  def get_vectorstore_deployment(my_map):
      if isinstance(my_map, dict):
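
extract_assistant_message now returns a JSON-encoded {role: content} object (via get_json_dumps) instead of a bare string. An illustrative call with a stand-in reply object, assuming get_status_code reports success when no exception is present:

    from types import SimpleNamespace

    reply = SimpleNamespace(role=SimpleNamespace(value="assistant"),
                            content="Paris is the capital of France.")
    arguments = {"result": {"replies": [reply]}, "exception": None}

    extract_assistant_message(arguments)
    # -> '{"assistant": "Paris is the capital of France."}'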
@@ -112,7 +118,10 @@ def resolve_from_alias(my_map, alias):
      return None

  def extract_inference_endpoint(instance):
-     inference_endpoint: Option[str] = try_option(getattr, instance.client, 'base_url').map(str)
+     if hasattr(instance, '_model_name') and isinstance(instance._model_name, str) and 'gemini' in instance._model_name.lower():
+         inference_endpoint = try_option(lambda: f"https://generativelanguage.googleapis.com/v1beta/models/{instance._model_name}:generateContent")
+     if hasattr(instance, 'client') and hasattr(instance.client, 'base_url'):
+         inference_endpoint: Option[str] = try_option(getattr, instance.client, 'base_url').map(str)
      if inference_endpoint.is_none():
          inference_endpoint = try_option(getattr, instance.client.meta, 'endpoint_url').map(str)

@@ -146,3 +155,65 @@ def update_output_span_events(results):
      if len(output_arg_text) > 100:
          output_arg_text = output_arg_text[:100] + "..."
      return output_arg_text
+
+ def extract_finish_reason(arguments):
+     """Extract finish_reason from Haystack response."""
+     try:
+         # Handle exception cases first
+         if arguments.get("exception") is not None:
+             return "error"
+
+         response = arguments.get("result")
+         if response is None:
+             return None
+
+         # Direct finish_reason attribute
+         if hasattr(response, "finish_reason") and response.finish_reason:
+             return response.finish_reason
+
+         if isinstance(response, dict) and 'meta' in response and response['meta'] and len(response['meta']) > 0:
+             metadata = response['meta'][0]
+             if isinstance(metadata, dict):
+                 # Check for finish_reason in metadata
+                 if "finish_reason" in metadata:
+                     return metadata["finish_reason"]
+
+         if isinstance(response, dict) and 'replies' in response and response['replies'] and len(response['replies']) > 0:
+             metadata = response['replies'][0]
+             if hasattr(metadata, 'meta') and metadata.meta:
+                 if "finish_reason" in metadata.meta:
+                     return metadata.meta["finish_reason"]
+
+         # Check if response has generation_info
+         if hasattr(response, "generation_info") and response.generation_info:
+             finish_reason = response.generation_info.get("finish_reason")
+             if finish_reason:
+                 return finish_reason
+
+         # Check if response has llm_output (batch responses)
+         if hasattr(response, "llm_output") and response.llm_output:
+             finish_reason = response.llm_output.get("finish_reason")
+             if finish_reason:
+                 return finish_reason
+
+         # For AIMessage responses, check additional_kwargs
+         if hasattr(response, "additional_kwargs") and response.additional_kwargs:
+             finish_reason = response.additional_kwargs.get("finish_reason")
+             if finish_reason:
+                 return finish_reason
+
+         # For generation responses with choices (similar to OpenAI structure)
+         if hasattr(response, "choices") and response.choices:
+             choice = response.choices[0]
+             if hasattr(choice, "finish_reason"):
+                 return choice.finish_reason
+
+         # Fallback: if no finish_reason found, default to "stop" (success)
+         return "stop"
+     except Exception as e:
+         logger.warning("Warning: Error occurred in extract_finish_reason: %s", str(e))
+         return None
+
+ def map_finish_reason_to_finish_type(finish_reason):
+     """Map Haystack finish_reason to finish_type using Haystack mapping."""
+     return map_haystack_finish_reason_to_finish_type(finish_reason)
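
The lookup order can be read off directly: an exception short-circuits to "error", an explicit finish_reason anywhere in the response wins, and anything else falls back to "stop". A few illustrative calls against assumed Haystack response shapes:

    extract_finish_reason({"exception": RuntimeError("boom"), "result": None})
    # -> "error"

    extract_finish_reason({"exception": None,
                           "result": {"meta": [{"finish_reason": "length"}]}})
    # -> "length"  (ChatGenerator-style dict response)

    extract_finish_reason({"exception": None, "result": {"replies": []}})
    # -> "stop"    (nothing matched; success fallback)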
@@ -1,7 +1,7 @@
  from monocle_apptrace.instrumentation.metamodel.haystack import (
      _helper,
  )
- from monocle_apptrace.instrumentation.common.utils import get_llm_type, get_status, get_status_code
+ from monocle_apptrace.instrumentation.common.utils import get_error_message, get_llm_type

  INFERENCE = {
      "type": "inference.framework",
@@ -35,12 +35,12 @@ INFERENCE = {
              "_comment": "LLM Model",
              "attribute": "name",
              "accessor": lambda arguments: _helper.resolve_from_alias(arguments['instance'].__dict__,
-                                                                      ['model', 'model_name'])
+                                                                      ['model', 'model_name', '_model_name'])
          },
          {
              "attribute": "type",
              "accessor": lambda arguments: 'model.llm.' + _helper.resolve_from_alias(arguments['instance'].__dict__,
-                                                                                     ['model', 'model_name'])
+                                                                                     ['model', 'model_name', '_model_name'])
          }
      ]
  ],
@@ -59,13 +59,8 @@ INFERENCE = {
      "name": "data.output",
      "attributes": [
          {
-             "_comment": "this is response from LLM",
-             "attribute": "status",
-             "accessor": lambda arguments: get_status(arguments)
-         },
-         {
-             "attribute": "status_code",
-             "accessor": lambda arguments: get_status_code(arguments)
+             "attribute": "error_code",
+             "accessor": lambda arguments: get_error_message(arguments)
          },
          {
              "attribute": "response",
@@ -80,6 +75,16 @@ INFERENCE = {
              "_comment": "this is metadata usage from LLM",
              "accessor": lambda arguments: _helper.update_span_from_llm_response(arguments['result'],
                                                                                  arguments['instance'])
+         },
+         {
+             "attribute": "finish_reason",
+             "accessor": lambda arguments: _helper.extract_finish_reason(arguments)
+         },
+         {
+             "attribute": "finish_type",
+             "accessor": lambda arguments: _helper.map_finish_reason_to_finish_type(
+                 _helper.extract_finish_reason(arguments)
+             )
          }
      ]
  }
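
The net effect on the emitted data.output event, sketched below with illustrative values — the finish_type string depends on the mapping in finish_types.py, which is not shown in this diff:

    # Hypothetical data.output event after this change; values are illustrative.
    data_output_event = {
        "name": "data.output",
        "attributes": {
            "error_code": None,             # populated via get_error_message on failure
            "response": '{"assistant": "..."}',
            "finish_reason": "stop",        # from _helper.extract_finish_reason
            "finish_type": "success",       # assumed output of the Haystack mapping
        },
    }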
@@ -44,4 +44,11 @@ HAYSTACK_METHODS = [
          "wrapper_method": task_wrapper,
          "output_processor": INFERENCE
      },
+     {
+         "package": "haystack_integrations.components.generators.google_ai",
+         "object": "GoogleAIGeminiChatGenerator",
+         "method": "run",
+         "wrapper_method": task_wrapper,
+         "output_processor": INFERENCE
+     },
  ]
@@ -0,0 +1,78 @@
+ import logging
+ from threading import local
+ from monocle_apptrace.instrumentation.common.utils import extract_http_headers, clear_http_scopes, try_option, Option, \
+     MonocleSpanException
+ from monocle_apptrace.instrumentation.common.span_handler import SpanHandler
+ from monocle_apptrace.instrumentation.common.constants import HTTP_SUCCESS_CODES
+ from urllib.parse import unquote, urlparse, ParseResult
+
+ logger = logging.getLogger(__name__)
+ MAX_DATA_LENGTH = 1000
+ token_data = local()
+ token_data.current_token = None
+
+ def get_url(kwargs) -> ParseResult:
+     url_str = try_option(lambda k: k.get('path'), kwargs['event'])
+     url = url_str.unwrap_or(None)
+     if url is not None:
+         return urlparse(url)
+     else:
+         return None
+
+ def get_route(args) -> str:
+     event = args[1]
+     route = event.get("path") or event.get("requestContext", {}).get("path")
+     return route
+
+ def get_method(args) -> str:
+     event = args[1]
+     http_method = event.get("httpMethod") or event.get("requestContext", {}).get("httpMethod")
+     return http_method
+
+
+ def get_params(args) -> dict:
+     event = args[1]
+     question = None
+     query_params = event.get('queryStringParameters', {})
+     if isinstance(query_params, dict):
+         question = query_params.get('question')
+     return question
+
+ def get_body(args) -> dict:
+     event = args[1]
+     body = event.get("body")
+     return body
+
+ def extract_response(result) -> str:
+     if isinstance(result, dict) and 'body' in result:
+         response = result['body']
+         if isinstance(response, bytes):
+             response = response.decode('utf-8', errors='ignore')
+     else:
+         response = ""
+     return response
+
+
+ def extract_status(result) -> str:
+     status = f"{result['statusCode']}" if isinstance(result, dict) and 'statusCode' in result else ""
+     if status not in HTTP_SUCCESS_CODES:
+         error_message = extract_response(result)
+         raise MonocleSpanException(f"error: {status} - {error_message}")
+     return status
+
+
+ def lambda_func_pre_tracing(kwargs):
+     headers = kwargs['event'].get('headers', {}) if 'event' in kwargs else {}
+     return extract_http_headers(headers)
+
+
+ def lambda_func_post_tracing(token):
+     clear_http_scopes(token)
+
+
+ class lambdaSpanHandler(SpanHandler):
+     def pre_tracing(self, to_wrap, wrapped, instance, args, kwargs):
+         return lambda_func_pre_tracing(kwargs)
+
+     def post_tracing(self, to_wrap, wrapped, instance, args, kwargs, return_value, token):
+         lambda_func_post_tracing(token)
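
These helpers assume an API-Gateway-proxy-style event passed positionally, with the wrapped callable at args[0] and the event at args[1]. A quick sketch with a made-up event:

    event = {
        "httpMethod": "GET",
        "path": "/ask",
        "queryStringParameters": {"question": "what is monocle?"},
        "headers": {},
    }
    args = (None, event, None)        # (func, event, context) stand-in

    get_method(args)                  # -> "GET"
    get_route(args)                   # -> "/ask"
    get_params(args)                  # -> "what is monocle?"

    extract_status({"statusCode": 200, "body": b"ok"})
    # -> "200", assuming "200" is listed in HTTP_SUCCESS_CODES;
    # a non-2xx result instead raises MonocleSpanException with the body text.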
@@ -0,0 +1,51 @@
+ from monocle_apptrace.instrumentation.metamodel.lambdafunc import _helper
+ LAMBDA_HTTP_PROCESSOR = {
+     "type": "http.process",
+     "attributes": [
+         [
+             {
+                 "_comment": "request method, request URI",
+                 "attribute": "method",
+                 "accessor": lambda arguments: _helper.get_method(arguments['args'])
+             },
+             {
+                 "_comment": "request method, request URI",
+                 "attribute": "route",
+                 "accessor": lambda arguments: _helper.get_route(arguments['args'])
+             },
+             {
+                 "_comment": "request method, request URI",
+                 "attribute": "body",
+                 "accessor": lambda arguments: _helper.get_body(arguments['args'])
+             },
+         ]
+     ],
+     "events": [
+         {
+             "name": "data.input",
+             "attributes": [
+                 {
+                     "_comment": "route params",
+                     "attribute": "params",
+                     "accessor": lambda arguments: _helper.get_params(arguments['args'])
+                 }
+             ]
+         },
+         {
+             "name": "data.output",
+             "attributes": [
+                 {
+                     "_comment": "status from HTTP response",
+                     "attribute": "status",
+                     "accessor": lambda arguments: _helper.extract_status(arguments['result'])
+                 },
+                 {
+                     "_comment": "this is result from LLM",
+                     "attribute": "response",
+                     "accessor": lambda arguments: _helper.extract_response(arguments['result'])
+                 }
+             ]
+         }
+     ]
+ }
@@ -0,0 +1,23 @@
+ from monocle_apptrace.instrumentation.common.wrapper import atask_wrapper, task_wrapper
+ from monocle_apptrace.instrumentation.metamodel.lambdafunc.entities.http import LAMBDA_HTTP_PROCESSOR
+
+ LAMBDA_HTTP_METHODS = [
+     {
+         "package": "monocle_apptrace.instrumentation.metamodel.lambdafunc.wrapper",
+         "object": "LambdaFunctionRouteWrapper",
+         "method": "run_async",
+         "span_name": "lambda_function_route",
+         "wrapper_method": atask_wrapper,
+         "span_handler": "lambda_func_handler",
+         "output_processor": LAMBDA_HTTP_PROCESSOR
+     },
+     {
+         "package": "monocle_apptrace.instrumentation.metamodel.lambdafunc.wrapper",
+         "object": "LambdaFunctionRouteWrapper",
+         "method": "run_sync",
+         "span_name": "lambda_function_route",
+         "wrapper_method": task_wrapper,
+         "span_handler": "lambda_func_handler",
+         "output_processor": LAMBDA_HTTP_PROCESSOR
+     }
+ ]
@@ -0,0 +1,23 @@
+ from functools import wraps
+ import inspect
+
+ def monocle_trace_lambda_function_route(func):
+     if inspect.iscoroutinefunction(func):
+         @wraps(func)
+         async def wrapper(*args, **kwargs):
+             return await LambdaFunctionRouteWrapper.run_async(func, *args, **kwargs)
+         return wrapper
+     else:
+         @wraps(func)
+         def wrapper(*args, **kwargs):
+             return LambdaFunctionRouteWrapper.run_sync(func, *args, **kwargs)
+         return wrapper
+
+ class LambdaFunctionRouteWrapper:
+     @staticmethod
+     async def run_async(func, *args, **kwargs):
+         return await func(*args, **kwargs)
+
+     @staticmethod
+     def run_sync(func, *args, **kwargs):
+         return func(*args, **kwargs)
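
Usage sketch: decorating a handler routes every invocation through LambdaFunctionRouteWrapper.run_sync or run_async — exactly the methods LAMBDA_HTTP_METHODS registers for wrapping — so each call produces a lambda_function_route span once instrumentation is set up.

    from monocle_apptrace.instrumentation.metamodel.lambdafunc.wrapper import (
        monocle_trace_lambda_function_route,
    )

    @monocle_trace_lambda_function_route
    def handler(event, context):
        return {"statusCode": 200, "body": "ok"}

    # Coroutine handlers are detected via inspect.iscoroutinefunction
    # and routed through run_async instead:
    @monocle_trace_lambda_function_route
    async def async_handler(event, context):
        return {"statusCode": 200, "body": "ok"}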
@@ -4,14 +4,18 @@ and assistant messages from various input formats.
  """

  import logging
+ from opentelemetry.context import get_value
+ from monocle_apptrace.instrumentation.common.constants import AGENT_PREFIX_KEY, INFERENCE_AGENT_DELEGATION, INFERENCE_COMMUNICATION, INFERENCE_TOOL_CALL
  from monocle_apptrace.instrumentation.common.utils import (
      Option,
+     get_json_dumps,
      get_keys_as_tuple,
      get_nested_value,
      try_option,
      get_exception_message,
      get_status_code,
  )
+ from monocle_apptrace.instrumentation.metamodel.finish_types import map_langchain_finish_reason_to_finish_type


  logger = logging.getLogger(__name__)
@@ -32,45 +36,80 @@ def extract_messages(args):
              for msg in args[0].messages:
                  if hasattr(msg, 'content') and hasattr(msg, 'type'):
                      messages.append({msg.type: msg.content})
-         return [str(d) for d in messages]
+         else:
+             for msg in args[0]:
+                 if hasattr(msg, 'content') and hasattr(msg, 'type') and msg.content:
+                     messages.append({msg.type: msg.content})
+                 elif hasattr(msg, 'tool_calls') and msg.tool_calls:
+                     messages.append({msg.type: get_json_dumps(msg.tool_calls)})
+         return [get_json_dumps(d) for d in messages]
      except Exception as e:
          logger.warning("Warning: Error occurred in extract_messages: %s", str(e))
          return []
+ def agent_inference_type(arguments):
+     """Extract agent inference type from arguments."""
+     try:
+         if get_value(AGENT_PREFIX_KEY):
+             agent_prefix = get_value(AGENT_PREFIX_KEY)
+             if hasattr(arguments['result'], "tool_calls") and arguments['result'].tool_calls:
+                 tool_call = arguments['result'].tool_calls[0] if arguments['result'].tool_calls else None
+                 if tool_call and 'name' in tool_call and tool_call["name"].startswith(agent_prefix):
+                     return INFERENCE_AGENT_DELEGATION
+                 else:
+                     return INFERENCE_TOOL_CALL
+         return INFERENCE_COMMUNICATION
+
+     except Exception as e:
+         logger.warning("Warning: Error occurred in agent_inference_type: %s", str(e))
+         return None

  def extract_assistant_message(arguments):
      status = get_status_code(arguments)
-     response: str = ""
+     messages = []
+     role = "assistant"
      if status == 'success':
          if isinstance(arguments['result'], str):
-             response = arguments['result']
-         if hasattr(arguments['result'], "content"):
-             response = arguments['result'].content
-         if hasattr(arguments['result'], "message") and hasattr(arguments['result'].message, "content"):
-             response = arguments['result'].message.content
+             messages.append({role: arguments['result']})
+         elif hasattr(arguments['result'], "content") and arguments['result'].content != "":
+             role = arguments['result'].type if hasattr(arguments['result'], 'type') else role
+             messages.append({role: arguments['result'].content})
+         elif hasattr(arguments['result'], "message") and hasattr(arguments['result'].message, "content") and arguments['result'].message.content != "":
+             role = arguments['result'].type if hasattr(arguments['result'], 'type') else role
+             messages.append({role: arguments['result'].message.content})
+         elif hasattr(arguments['result'], "tool_calls"):
+             role = arguments['result'].type if hasattr(arguments['result'], 'type') else role
+             messages.append({role: arguments['result'].tool_calls[0]})
      else:
          if arguments["exception"] is not None:
-             response = get_exception_message(arguments)
+             messages.append({role: get_exception_message(arguments)})
          elif hasattr(arguments["result"], "error"):
-             response = arguments["result"].error
-
-     return response
-
+             return arguments["result"].error
+     return get_json_dumps(messages[0]) if messages else ""

  def extract_provider_name(instance):
-     provider_url: Option[str] = None
-     if hasattr(instance,'client'):
+     provider_url: Option[str] = Option(None)
+     if hasattr(instance, 'client'):
+         provider_url: Option[str] = try_option(getattr, instance.client, 'universe_domain')
+     if hasattr(instance, 'client') and hasattr(instance.client, '_client') and hasattr(instance.client._client, 'base_url'):
+         # If the client has a base_url, extract the host from it
          provider_url: Option[str] = try_option(getattr, instance.client._client.base_url, 'host')
-     if hasattr(instance, '_client'):
+     if hasattr(instance, '_client') and hasattr(instance._client, 'base_url'):
          provider_url = try_option(getattr, instance._client.base_url, 'host')
      return provider_url.unwrap_or(None)


  def extract_inference_endpoint(instance):
      inference_endpoint: Option[str] = None
-     if hasattr(instance,'client'):
+     # instance.client.meta.endpoint_url
+     if hasattr(instance, 'client') and hasattr(instance.client, 'transport'):
+         inference_endpoint: Option[str] = try_option(getattr, instance.client.transport, 'host')
+
+     if hasattr(instance, 'client') and hasattr(instance.client, 'meta') and hasattr(instance.client.meta, 'endpoint_url'):
+         inference_endpoint: Option[str] = try_option(getattr, instance.client.meta, 'endpoint_url').map(str)
+
+     if hasattr(instance, 'client') and hasattr(instance.client, '_client'):
          inference_endpoint: Option[str] = try_option(getattr, instance.client._client, 'base_url').map(str)
-     if inference_endpoint.is_none() and "meta" in instance.client.__dict__:
-         inference_endpoint = try_option(getattr, instance.client.meta, 'endpoint_url').map(str)
+
      if hasattr(instance, '_client'):
          inference_endpoint = try_option(getattr, instance._client, 'base_url').map(str)
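
agent_inference_type above classifies an inference by its first tool call: if the call's name carries the agent prefix stored under AGENT_PREFIX_KEY in the OpenTelemetry context, the LLM is delegating to another agent; any other tool call is an ordinary tool invocation, and no tool calls at all means plain communication. A hedged sketch of that branching — the prefix and the returned strings are illustrative stand-ins, not the package's actual constants:

    from types import SimpleNamespace

    agent_prefix = "transfer_to_"   # assumed value stored under AGENT_PREFIX_KEY

    delegation = SimpleNamespace(tool_calls=[{"name": "transfer_to_billing", "args": {}}])
    tool_use = SimpleNamespace(tool_calls=[{"name": "get_weather", "args": {"city": "Paris"}}])

    def classify(result):
        call = result.tool_calls[0]
        return "delegation" if call["name"].startswith(agent_prefix) else "tool_call"

    classify(delegation)            # -> "delegation"  (INFERENCE_AGENT_DELEGATION)
    classify(tool_use)              # -> "tool_call"   (INFERENCE_TOOL_CALL)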
 
@@ -138,4 +177,91 @@ def update_span_from_llm_response(response, instance):
              {"completion_tokens": token_usage.get("completion_tokens") or token_usage.get("output_tokens")})
          meta_dict.update({"prompt_tokens": token_usage.get("prompt_tokens") or token_usage.get("input_tokens")})
          meta_dict.update({"total_tokens": token_usage.get("total_tokens")})
-     return meta_dict
+     return meta_dict
+
+ def extract_finish_reason(arguments):
+     """Extract finish_reason from LangChain response."""
+     try:
+         # Handle exception cases first
+         if arguments.get("exception") is not None:
+             # If there's an exception, it's typically an error finish type
+             return "error"
+
+         response = arguments.get("result")
+         if response is None:
+             return None
+
+         # Check various possible locations for finish_reason in LangChain responses
+
+         # Direct finish_reason attribute
+         if hasattr(response, "finish_reason") and response.finish_reason:
+             return response.finish_reason
+
+         # Response metadata (common in LangChain)
+         if hasattr(response, "response_metadata") and response.response_metadata:
+             metadata = response.response_metadata
+             if isinstance(metadata, dict):
+                 # Check for finish_reason in metadata
+                 if "finish_reason" in metadata:
+                     return metadata["finish_reason"]
+                 # Check for stop_reason (Anthropic style through LangChain)
+                 if "stop_reason" in metadata:
+                     return metadata["stop_reason"]
+                 # Check for other common finish reason keys
+                 for key in ["completion_reason", "end_reason", "status"]:
+                     if key in metadata:
+                         return metadata[key]
+
+         # Check if response has generation_info (some LangChain models)
+         if hasattr(response, "generation_info") and response.generation_info:
+             gen_info = response.generation_info
+             if isinstance(gen_info, dict):
+                 for key in ["finish_reason", "stop_reason", "completion_reason"]:
+                     if key in gen_info:
+                         return gen_info[key]
+
+         # Check if response has llm_output (batch responses)
+         if hasattr(response, "llm_output") and response.llm_output:
+             llm_output = response.llm_output
+             if isinstance(llm_output, dict):
+                 for key in ["finish_reason", "stop_reason"]:
+                     if key in llm_output:
+                         return llm_output[key]
+
+         # For AIMessage responses, check additional_kwargs
+         if hasattr(response, "additional_kwargs") and response.additional_kwargs:
+             kwargs = response.additional_kwargs
+             if isinstance(kwargs, dict):
+                 for key in ["finish_reason", "stop_reason"]:
+                     if key in kwargs:
+                         return kwargs[key]
+
+         # For generation responses with choices (similar to OpenAI structure)
+         if hasattr(response, "generations") and response.generations:
+             generations = response.generations
+             if isinstance(generations, list) and len(generations) > 0:
+                 for generation in generations:
+                     if hasattr(generation, "generation_info") and generation.generation_info:
+                         gen_info = generation.generation_info
+                         if isinstance(gen_info, dict):
+                             for key in ["finish_reason", "stop_reason"]:
+                                 if key in gen_info:
+                                     return gen_info[key]
+
+         # If no specific finish reason found, infer from status
+         status_code = get_status_code(arguments)
+         if status_code == 'success':
+             return "stop"  # Default success finish reason
+         elif status_code == 'error':
+             return "error"
+
+     except Exception as e:
+         logger.warning("Warning: Error occurred in extract_finish_reason: %s", str(e))
+         return None
+
+     return None
+
+
+ def map_finish_reason_to_finish_type(finish_reason):
+     """Map LangChain finish_reason to finish_type."""
+     return map_langchain_finish_reason_to_finish_type(finish_reason)
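
A few illustrative probes of the lookup order, built on LangChain-style stand-in objects: response_metadata is consulted before additional_kwargs, stop_reason is accepted as an Anthropic-style alias, and a successful call with no explicit reason falls back to "stop".

    from types import SimpleNamespace

    msg = SimpleNamespace(response_metadata={"finish_reason": "length"},
                          additional_kwargs={"finish_reason": "stop"})
    extract_finish_reason({"exception": None, "result": msg})
    # -> "length"  (response_metadata wins over additional_kwargs)

    claude_msg = SimpleNamespace(response_metadata={"stop_reason": "end_turn"})
    extract_finish_reason({"exception": None, "result": claude_msg})
    # -> "end_turn"

    # map_finish_reason_to_finish_type then folds either value into a
    # finish_type via map_langchain_finish_reason_to_finish_type.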