monocle-apptrace 0.5.3__py3-none-any.whl → 0.6.6__py3-none-any.whl

This diff compares the published contents of two package versions as they appear in their public registry, and is provided for informational purposes only.

This version of monocle-apptrace has been flagged as potentially problematic.

Files changed (56)
  1. monocle_apptrace/exporters/file_exporter.py +7 -1
  2. monocle_apptrace/instrumentation/common/constants.py +8 -0
  3. monocle_apptrace/instrumentation/common/instrumentor.py +1 -1
  4. monocle_apptrace/instrumentation/common/span_handler.py +75 -24
  5. monocle_apptrace/instrumentation/common/utils.py +63 -6
  6. monocle_apptrace/instrumentation/common/wrapper.py +111 -42
  7. monocle_apptrace/instrumentation/common/wrapper_method.py +6 -2
  8. monocle_apptrace/instrumentation/metamodel/a2a/methods.py +1 -1
  9. monocle_apptrace/instrumentation/metamodel/adk/_helper.py +7 -4
  10. monocle_apptrace/instrumentation/metamodel/adk/entities/agent.py +6 -1
  11. monocle_apptrace/instrumentation/metamodel/agents/_helper.py +8 -8
  12. monocle_apptrace/instrumentation/metamodel/agents/entities/inference.py +9 -2
  13. monocle_apptrace/instrumentation/metamodel/aiohttp/_helper.py +1 -1
  14. monocle_apptrace/instrumentation/metamodel/anthropic/entities/inference.py +1 -4
  15. monocle_apptrace/instrumentation/metamodel/azfunc/_helper.py +1 -1
  16. monocle_apptrace/instrumentation/metamodel/botocore/_helper.py +5 -0
  17. monocle_apptrace/instrumentation/metamodel/botocore/entities/inference.py +4 -0
  18. monocle_apptrace/instrumentation/metamodel/fastapi/_helper.py +4 -4
  19. monocle_apptrace/instrumentation/metamodel/fastapi/methods.py +4 -4
  20. monocle_apptrace/instrumentation/metamodel/finish_types.py +32 -1
  21. monocle_apptrace/instrumentation/metamodel/flask/_helper.py +3 -3
  22. monocle_apptrace/instrumentation/metamodel/hugging_face/__init__.py +0 -0
  23. monocle_apptrace/instrumentation/metamodel/hugging_face/_helper.py +138 -0
  24. monocle_apptrace/instrumentation/metamodel/hugging_face/entities/__init__.py +0 -0
  25. monocle_apptrace/instrumentation/metamodel/hugging_face/entities/inference.py +94 -0
  26. monocle_apptrace/instrumentation/metamodel/hugging_face/methods.py +23 -0
  27. monocle_apptrace/instrumentation/metamodel/lambdafunc/_helper.py +1 -1
  28. monocle_apptrace/instrumentation/metamodel/langchain/entities/inference.py +1 -4
  29. monocle_apptrace/instrumentation/metamodel/langgraph/_helper.py +34 -8
  30. monocle_apptrace/instrumentation/metamodel/langgraph/entities/inference.py +8 -3
  31. monocle_apptrace/instrumentation/metamodel/langgraph/langgraph_processor.py +88 -19
  32. monocle_apptrace/instrumentation/metamodel/langgraph/methods.py +22 -6
  33. monocle_apptrace/instrumentation/metamodel/llamaindex/_helper.py +30 -10
  34. monocle_apptrace/instrumentation/metamodel/llamaindex/entities/agent.py +4 -3
  35. monocle_apptrace/instrumentation/metamodel/llamaindex/llamaindex_processor.py +15 -7
  36. monocle_apptrace/instrumentation/metamodel/llamaindex/methods.py +1 -8
  37. monocle_apptrace/instrumentation/metamodel/mcp/_helper.py +7 -6
  38. monocle_apptrace/instrumentation/metamodel/mistral/_helper.py +98 -49
  39. monocle_apptrace/instrumentation/metamodel/mistral/entities/inference.py +15 -9
  40. monocle_apptrace/instrumentation/metamodel/mistral/entities/retrieval.py +41 -0
  41. monocle_apptrace/instrumentation/metamodel/mistral/methods.py +10 -1
  42. monocle_apptrace/instrumentation/metamodel/openai/_helper.py +47 -7
  43. monocle_apptrace/instrumentation/metamodel/openai/entities/inference.py +20 -4
  44. monocle_apptrace/instrumentation/metamodel/openai/methods.py +1 -1
  45. monocle_apptrace/instrumentation/metamodel/strands/_helper.py +44 -0
  46. monocle_apptrace/instrumentation/metamodel/strands/entities/agent.py +179 -0
  47. monocle_apptrace/instrumentation/metamodel/strands/entities/tool.py +62 -0
  48. monocle_apptrace/instrumentation/metamodel/strands/methods.py +20 -0
  49. {monocle_apptrace-0.5.3.dist-info → monocle_apptrace-0.6.6.dist-info}/METADATA +23 -79
  50. {monocle_apptrace-0.5.3.dist-info → monocle_apptrace-0.6.6.dist-info}/RECORD +53 -46
  51. monocle_apptrace/README.md +0 -101
  52. monocle_apptrace/mcp_server.py +0 -94
  53. monocle_apptrace-0.5.3.dist-info/licenses/NOTICE +0 -4
  54. {monocle_apptrace-0.5.3.dist-info → monocle_apptrace-0.6.6.dist-info}/WHEEL +0 -0
  55. {monocle_apptrace-0.5.3.dist-info → monocle_apptrace-0.6.6.dist-info}/entry_points.txt +0 -0
  56. {monocle_apptrace-0.5.3.dist-info → monocle_apptrace-0.6.6.dist-info}/licenses/LICENSE +0 -0
@@ -14,6 +14,7 @@ class FinishType(Enum):
     TOOL_CALL_ERROR = "tool_call_error"
     REFUSAL = "refusal"
     RATE_LIMITED = "rate_limited"
+    TOOL_CALL = "tool_call"

 # OpenAI finish reason mapping
 OPENAI_FINISH_REASON_MAPPING = {
@@ -274,6 +275,24 @@ HAYSTACK_FINISH_REASON_MAPPING = {
     "OTHER": FinishType.ERROR.value,
 }

+MISTRAL_FINISH_REASON_MAPPING = {
+    "stop": FinishType.SUCCESS.value,
+    "tool_calls": FinishType.TOOL_CALL.value,  # New category for tool calls
+    "length": FinishType.TRUNCATED.value,
+    # Mistral's API documentation does not explicitly mention other finish reasons like "content_filter" or "refusal".
+    # However, in case of an API-level error, the response itself would likely be an HTTP error rather than a
+    # successful response with a specific finish reason.
+}
+
+HUGGING_FACE_FINISH_REASON_MAPPING = {
+    "stop": FinishType.SUCCESS.value,
+    "tool_calls": FinishType.TOOL_CALL.value,  # New category for tool calls
+    "length": FinishType.TRUNCATED.value,
+    # Hugging Face's API documentation does not explicitly mention other finish reasons like "content_filter" or "refusal".
+    # However, in case of an API-level error, the response itself would likely be an HTTP error rather than a
+    # successful response with a specific finish reason.
+}
+
 ADK_FINISH_REASON_MAPPING = GEMINI_FINISH_REASON_MAPPING

 def map_openai_finish_reason_to_finish_type(finish_reason):
@@ -462,4 +481,16 @@ def map_adk_finish_reason_to_finish_type(finish_reason):
     """Map ADK finish_reason to standardized finish_type."""
     if not finish_reason:
         return None
-    return ADK_FINISH_REASON_MAPPING.get(finish_reason, None)
+    return ADK_FINISH_REASON_MAPPING.get(finish_reason, None)
+
+def map_mistral_finish_reason_to_finish_type(finish_reason):
+    """Map Mistral finish_reason to standardized finish_type."""
+    if not finish_reason:
+        return None
+    return MISTRAL_FINISH_REASON_MAPPING.get(finish_reason, None)
+
+def map_hf_finish_reason_to_finish_type(finish_reason):
+    """Map Hugging Face finish_reason to standardized finish_type."""
+    if not finish_reason:
+        return None
+    return HUGGING_FACE_FINISH_REASON_MAPPING.get(finish_reason, None)
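In application code, the two new mappers behave like the existing ones: known reasons resolve to a standardized finish type and anything else falls through to None. A minimal sketch, assuming the module path shown in the file list:

    from monocle_apptrace.instrumentation.metamodel.finish_types import (
        FinishType,
        map_hf_finish_reason_to_finish_type,
        map_mistral_finish_reason_to_finish_type,
    )

    # Known reasons resolve to the standardized finish types.
    assert map_mistral_finish_reason_to_finish_type("tool_calls") == FinishType.TOOL_CALL.value
    assert map_hf_finish_reason_to_finish_type("length") == FinishType.TRUNCATED.value

    # Unmapped or empty reasons fall through to None.
    assert map_mistral_finish_reason_to_finish_type("content_filter") is None
    assert map_hf_finish_reason_to_finish_type(None) is None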
@@ -67,13 +67,13 @@ def flask_post_tracing(token):
 class FlaskSpanHandler(SpanHandler):

     def pre_tracing(self, to_wrap, wrapped, instance, args, kwargs):
-        return flask_pre_tracing(args)
+        return flask_pre_tracing(args), None

     def post_tracing(self, to_wrap, wrapped, instance, args, kwargs, return_value, token):
         flask_post_tracing(token)

 class FlaskResponseSpanHandler(SpanHandler):
-    def post_tracing(self, to_wrap, wrapped, instance, args, kwargs, return_value):
+    def post_tracing(self, to_wrap, wrapped, instance, args, kwargs, return_value, token):
         try:
             _parent_span_context = get_current()
             if _parent_span_context is not None:
@@ -82,4 +82,4 @@ class FlaskResponseSpanHandler(SpanHandler):
                 self.hydrate_events(to_wrap, wrapped, instance, args, kwargs, return_value, parent_span=parent_span)
         except Exception as e:
             logger.info(f"Failed to propogate flask response: {e}")
-        super().post_tracing(to_wrap, wrapped, instance, args, kwargs, return_value)
+        super().post_tracing(to_wrap, wrapped, instance, args, kwargs, return_value, token)
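This signature change recurs across the handlers in this release: pre_tracing now appears to return a (token, wrapper_override) pair rather than a bare token, and post_tracing receives the token back. A minimal custom handler under that assumed contract:

    from monocle_apptrace.instrumentation.common.span_handler import SpanHandler

    class MySpanHandler(SpanHandler):
        def pre_tracing(self, to_wrap, wrapped, instance, args, kwargs):
            # First element is a context token (or None); the second can
            # override the wrapper config (to_wrap), with None meaning "keep".
            return None, None

        def post_tracing(self, to_wrap, wrapped, instance, args, kwargs, return_value, token):
            # token is whatever pre_tracing returned as its first element.
            pass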
@@ -0,0 +1,138 @@
+import os
+import json
+import logging
+from opentelemetry.context import get_value
+from monocle_apptrace.instrumentation.common.constants import (
+    AGENT_PREFIX_KEY,
+    INFERENCE_AGENT_DELEGATION,
+    INFERENCE_TURN_END,
+    INFERENCE_TOOL_CALL,
+)
+from monocle_apptrace.instrumentation.common.utils import (
+    Option,
+    get_json_dumps,
+    try_option,
+)
+
+from monocle_apptrace.instrumentation.metamodel.finish_types import map_hf_finish_reason_to_finish_type
+
+logger = logging.getLogger(__name__)
+
+def update_input_span_events(kwargs):
+    input_text = ""
+    print("DEBUG kwargs:", kwargs)
+    if "inputs" in kwargs:
+        if isinstance(kwargs["inputs"], list):
+            input_text = " | ".join(str(i) for i in kwargs["inputs"])
+        else:
+            input_text = str(kwargs["inputs"])
+    elif "messages" in kwargs:
+        input_text = json.dumps(kwargs["messages"])
+    return {"input": input_text}  # always a dict with 'input'
+
+
+
+def update_output_span_events(result):
+    try:
+        if hasattr(result, "choices") and result.choices:
+            output = [c.message for c in result.choices]
+            output_str = json.dumps(output)
+            return output_str[:200] + "..." if len(output_str) > 200 else output_str
+    except Exception as e:
+        logger.warning("Error in update_output_span_events: %s", str(e))
+    return ""
+
+def extract_messages(kwargs):
+    """Extract system and user messages"""
+    try:
+        messages = []
+        if "system" in kwargs and isinstance(kwargs["system"], str):
+            messages.append({"system": kwargs["system"]})
+        if 'messages' in kwargs and kwargs['messages']:
+            for msg in kwargs['messages']:
+                if msg.get('content') and msg.get('role'):
+                    messages.append({msg['role']: msg['content']})
+        return [get_json_dumps(message) for message in messages]
+    except Exception as e:
+        logger.warning("Warning: Error occurred in extract_messages: %s", str(e))
+        return []
+
+def extract_assistant_message(arguments):
+    """
+    Extract the assistant message from a Mistral response or stream chunks.
+    Returns a JSON string like {"assistant": "<text>"}.
+    """
+    try:
+        result = arguments.get("result") if isinstance(arguments, dict) else arguments
+        if result is None:
+            return ""
+
+        # Handle full response
+        if hasattr(result, "choices") and result.choices:
+            msg_obj = result.choices[0].message
+            return get_json_dumps({msg_obj.role: msg_obj.content})
+
+        # Handle streaming: result might be a list of CompletionEvent chunks
+        if isinstance(result, list):
+            content = []
+            for chunk in result:
+                if hasattr(chunk, "data") and hasattr(chunk.data, "choices") and chunk.data.choices:
+                    choice = chunk.data.choices[0]
+                    if hasattr(choice, "delta") and hasattr(choice.delta, "content"):
+                        content.append(choice.delta.content or "")
+            return get_json_dumps({"assistant": "".join(content)})
+
+        return ""
+
+    except Exception as e:
+        logger.warning("Warning in extract_assistant_message: %s", str(e))
+        return ""
+
+def update_span_from_llm_response(result, include_token_counts=False):
+    tokens = {
+        "completion_tokens": getattr(result.usage, "completion_tokens", 0),
+        "prompt_tokens": getattr(result.usage, "prompt_tokens", 0),
+        "total_tokens": getattr(result.usage, "total_tokens", 0),
+    } if include_token_counts else {}
+    # Add other metadata fields like finish_reason, etc.
+    return {**tokens}
+
+
+def get_exception_status_code(exc):
+    if exc is None:
+        return "success"
+    code = getattr(exc, "status_code", None)
+    if code == 401:
+        return "unauthorized"
+    elif code == 403:
+        return "forbidden"
+    elif code == 404:
+        return "not_found"
+    return "error"
+
+def map_finish_reason_to_finish_type(finish_reason):
+    """Map Hugging Face finish_reason to finish_type, similar to OpenAI mapping."""
+    return map_hf_finish_reason_to_finish_type(finish_reason)
+
+
+def agent_inference_type(result):
+    """
+    Simple agent inference type logic: if message contains AGENT_PREFIX_KEY,
+    mark as delegation; otherwise it's a normal turn_end.
+    """
+    try:
+        assistant_msg = extract_assistant_message(result)
+        if assistant_msg and AGENT_PREFIX_KEY in assistant_msg:
+            return INFERENCE_AGENT_DELEGATION
+    except Exception as e:
+        logger.warning("Error in agent_inference_type: %s", str(e))
+    return INFERENCE_TURN_END
+
+
+def extract_finish_reason(result):
+    try:
+        return getattr(result, "finish_reason", None)
+    except Exception:
+        return None
+
+
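For orientation, a sketch of the two response shapes extract_assistant_message distinguishes, using SimpleNamespace stand-ins rather than real huggingface_hub or Mistral objects:

    from types import SimpleNamespace
    from monocle_apptrace.instrumentation.metamodel.hugging_face._helper import extract_assistant_message

    # Full response shape: result.choices[0].message with role/content.
    message = SimpleNamespace(role="assistant", content="Hello!")
    result = SimpleNamespace(choices=[SimpleNamespace(message=message)])
    print(extract_assistant_message({"result": result}))
    # -> '{"assistant": "Hello!"}' (exact formatting depends on get_json_dumps)

    # Streaming shape: a list of chunks exposing chunk.data.choices[0].delta.content.
    chunk = SimpleNamespace(data=SimpleNamespace(
        choices=[SimpleNamespace(delta=SimpleNamespace(content="Hel"))]))
    print(extract_assistant_message([chunk]))
    # -> '{"assistant": "Hel"}'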
@@ -0,0 +1,94 @@
+from monocle_apptrace.instrumentation.common.constants import SPAN_TYPES
+from monocle_apptrace.instrumentation.common.utils import get_error_message, resolve_from_alias
+from monocle_apptrace.instrumentation.metamodel.hugging_face import _helper
+
+
+INFERENCE = {
+    "type": SPAN_TYPES.INFERENCE,
+    "subtype": lambda arguments: _helper.agent_inference_type(arguments),
+    "attributes": [
+        [
+            {
+                "_comment": "provider type, name, deployment, inference_endpoint",
+                "attribute": "type",
+                "accessor": lambda arguments: "inference.huggingface"
+            },
+            {
+                "attribute": "provider_name",
+                "accessor": lambda arguments: "huggingface"
+            },
+            {
+                "attribute": "inference_endpoint",
+                "accessor": lambda arguments: "https://api-inference.huggingface.co/v1/"
+            }
+        ],
+        [
+            {
+                "_comment": "LLM Model (repo ID on Hugging Face hub)",
+                "attribute": "name",
+                "accessor": lambda arguments: resolve_from_alias(
+                    arguments["kwargs"],
+                    ["model", "model_name", "endpoint_name", "deployment_name"]
+                )
+            },
+            {
+                "attribute": "type",
+                "accessor": lambda arguments: (
+                    "model.llm." +
+                    resolve_from_alias(
+                        arguments["kwargs"],
+                        ["model", "model_name", "endpoint_name", "deployment_name"]
+                    )
+                )
+            }
+        ]
+    ],
+    "events": [
+        {
+            "name": "data.input",
+            "attributes": [
+                {
+                    "_comment": "this is instruction and user query to LLM",
+                    "attribute": "input",
+                    "accessor": lambda arguments: _helper.extract_messages(arguments["kwargs"])
+                }
+            ]
+        },
+        {
+            "name": "data.output",
+            "attributes": [
+                {
+                    "attribute": "error_code",
+                    "accessor": lambda arguments: get_error_message(arguments)
+                },
+                {
+                    "_comment": "result from Hugging Face inference",
+                    "attribute": "response",
+                    "accessor": lambda arguments: _helper.extract_assistant_message(arguments)
+                }
+            ]
+        },
+        {
+            "name": "metadata",
+            "attributes": [
+                {
+                    "_comment": "this is metadata usage from LLM, includes token counts",
+                    "accessor": lambda arguments: _helper.update_span_from_llm_response(
+                        arguments.get("result"),
+                        include_token_counts=True  # new flag for streaming handling
+                    )
+                },
+                {
+                    "_comment": "finish reason from Hugging Face response",
+                    "attribute": "finish_reason",
+                    "accessor": lambda arguments: _helper.extract_finish_reason(arguments)
+                },
+                {
+                    "_comment": "finish type mapped from finish reason",
+                    "attribute": "finish_type",
+                    "accessor": lambda arguments: _helper.map_finish_reason_to_finish_type(_helper.extract_finish_reason(arguments))
+                }
+            ]
+        }
+    ]
+}
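Reading this entity: each accessor receives an arguments dict that, judging by the accessors above, carries at least "kwargs" and "result". A toy invocation of the model-name accessor under that assumption:

    args = {"kwargs": {"model": "HuggingFaceH4/zephyr-7b-beta"}, "result": None}
    name_accessor = INFERENCE["attributes"][1][0]["accessor"]
    print(name_accessor(args))  # HuggingFaceH4/zephyr-7b-beta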
@@ -0,0 +1,23 @@
+from monocle_apptrace.instrumentation.common.wrapper import atask_wrapper, task_wrapper
+from monocle_apptrace.instrumentation.metamodel.hugging_face.entities.inference import (
+    INFERENCE,
+)
+
+HUGGING_FACE_METHODS = [
+    {
+        "package": "huggingface_hub",
+        "object": "InferenceClient",
+        "method": "chat_completion",  # sync
+        "wrapper_method": task_wrapper,
+        "span_handler": "non_framework_handler",
+        "output_processor": INFERENCE,
+    },
+    {
+        "package": "huggingface_hub",
+        "object": "AsyncInferenceClient",
+        "method": "chat_completion",  # async
+        "wrapper_method": atask_wrapper,
+        "span_handler": "non_framework_handler",
+        "output_processor": INFERENCE,
+    },
+]
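Taken together with the entity above, these registrations are meant to trace huggingface_hub chat calls transparently. A hedged usage sketch (the instrumentor import path matches the file list; the model ID is illustrative):

    from monocle_apptrace.instrumentation.common.instrumentor import setup_monocle_telemetry
    from huggingface_hub import InferenceClient

    setup_monocle_telemetry(workflow_name="hf_demo")

    client = InferenceClient()  # reads the HF token from the environment
    response = client.chat_completion(
        messages=[{"role": "user", "content": "Say hello"}],
        model="HuggingFaceH4/zephyr-7b-beta",  # illustrative repo ID
    )
    # The wrapped chat_completion now emits an inference.huggingface span
    # with data.input/data.output events and token-count metadata.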
@@ -83,7 +83,7 @@ def lambda_func_post_tracing(token):

 class lambdaSpanHandler(SpanHandler):
     def pre_tracing(self, to_wrap, wrapped, instance, args, kwargs):
-        return lambda_func_pre_tracing(kwargs)
+        return lambda_func_pre_tracing(kwargs), None

     def post_tracing(self, to_wrap, wrapped, instance, args, kwargs, return_value, token):
         lambda_func_post_tracing(token)
@@ -6,6 +6,7 @@ from monocle_apptrace.instrumentation.common.utils import get_error_message, res

 INFERENCE = {
     "type": SPAN_TYPES.INFERENCE_FRAMEWORK,
+    "subtype": lambda arguments: _helper.agent_inference_type(arguments),
     "attributes": [
         [
             {
@@ -79,10 +80,6 @@ INFERENCE = {
                 "accessor": lambda arguments: _helper.map_finish_reason_to_finish_type(
                     _helper.extract_finish_reason(arguments)
                 )
-            },
-            {
-                "attribute": "inference_sub_type",
-                "accessor": lambda arguments: _helper.agent_inference_type(arguments)
             }
         ]
     }
@@ -22,13 +22,25 @@ def agent_instructions(arguments):
     else:
         return arguments['kwargs']['agent'].instructions

+def extract_request_agent_input(arguments):
+    if arguments['kwargs'] is not None and 'input' in arguments['kwargs']:
+        history = arguments['kwargs']['input']['messages']
+        messages = []
+        for message in history:
+            if 'content' in message and 'role' in message and message['role'] == "user":  # Check if the message is a UserMessage
+                messages.append(message['content'])
+        return messages
+    return []
+
 def extract_agent_input(arguments):
-    if arguments['result'] is not None and 'messages' in arguments['result']:
-        history = arguments['result']['messages']
+    if arguments['args'] is not None and len(arguments['args']) > 0 and 'messages' in arguments['args'][0]:
+        history = arguments['args'][0]['messages']
+        messages = []
         for message in history:
             if hasattr(message, 'content') and hasattr(message, 'type') and message.type == "human":  # Check if the message is a HumanMessage
-                return message.content
-    return None
+                messages.append(message.content)
+        return messages
+    return []

 def get_inference_endpoint(arguments):
     inference_endpoint = resolve_from_alias(arguments['instance'].client.__dict__, ['azure_endpoint', 'api_base', '_base_url'])
@@ -56,6 +68,10 @@ def update_span_from_llm_response(response):
 def extract_tool_response(result):
     if result is not None and hasattr(result, 'content'):
         return result.content
+    if isinstance(result, str):
+        return result
+    if isinstance(result[0], str):
+        return result[0]
     return None

 def get_status(result):
@@ -64,11 +80,21 @@ def get_status(result):
         return None

 def extract_tool_input(arguments):
-    tool_input = arguments['args'][0]
-    if isinstance(tool_input, str):
-        return [tool_input]
+    if arguments['args'] and len(arguments['args']) > 0:
+        tool_input = arguments['args'][0]
     else:
-        return list(tool_input.values())
+        tool_input:dict = arguments['kwargs'].copy()
+        tool_input.pop('run_manager', None)  # remove run_manager if exists
+        tool_input.pop('config', None)  # remove config if exists
+    return str(tool_input)
+
+    # if isinstance(tool_input, str):
+    #     return [tool_input]
+    # elif isinstance(tool_input, dict):
+    #     # return array of key value pairs
+    #     return [f"'{k}': '{str(v)}'" for k, v in tool_input.items()]
+    # else:
+    #     return [str(tool_input)]

 def get_name(instance):
     return instance.name if hasattr(instance, 'name') else ""
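The rewritten extract_tool_input now stringifies whichever of args/kwargs is populated, stripping LangChain's run_manager/config plumbing keys first. A small illustration against that helper:

    from monocle_apptrace.instrumentation.metamodel.langgraph._helper import extract_tool_input

    # Positional tool input wins when present.
    print(extract_tool_input({"args": ("Paris",), "kwargs": {}}))   # Paris

    # Otherwise kwargs are used, minus the plumbing keys.
    print(extract_tool_input({"args": (), "kwargs": {"city": "Paris", "config": object()}}))
    # -> {'city': 'Paris'}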
@@ -2,6 +2,7 @@ from monocle_apptrace.instrumentation.common.constants import AGENT_REQUEST_SPAN
 from monocle_apptrace.instrumentation.metamodel.langgraph import (
     _helper
 )
+from monocle_apptrace.instrumentation.common.utils import get_error_message

 AGENT = {
     "type": SPAN_TYPES.AGENTIC_INVOCATION,
@@ -31,7 +32,7 @@ AGENT = {
             "attributes": [
                 {
                     "_comment": "this is Agent input",
-                    "attribute": "query",
+                    "attribute": "input",
                     "accessor": lambda arguments: _helper.extract_agent_input(arguments)
                 }
             ]
@@ -39,6 +40,10 @@ AGENT = {
         {
             "name": "data.output",
             "attributes": [
+                {
+                    "attribute": "error_code",
+                    "accessor": lambda arguments: get_error_message(arguments)
+                },
                 {
                     "_comment": "this is response from LLM",
                     "attribute": "response",
@@ -50,7 +55,7 @@ AGENT = {
 }

 AGENT_REQUEST = {
-    "type": AGENT_REQUEST_SPAN_NAME,
+    "type": SPAN_TYPES.AGENTIC_REQUEST,
     "subtype": SPAN_SUBTYPES.PLANNING,
     "attributes": [
         [
@@ -68,7 +73,7 @@ AGENT_REQUEST = {
                 {
                     "_comment": "this is Agent input",
                     "attribute": "input",
-                    "accessor": lambda arguments: _helper.extract_agent_input(arguments)
+                    "accessor": lambda arguments: _helper.extract_request_agent_input(arguments)
                 }
             ]
         },
@@ -1,3 +1,4 @@
+import logging
 from opentelemetry.context import set_value, attach, detach, get_value
 from monocle_apptrace.instrumentation.common.constants import AGENT_PREFIX_KEY, SCOPE_NAME
 from monocle_apptrace.instrumentation.common.span_handler import SpanHandler
@@ -5,46 +6,114 @@ from monocle_apptrace.instrumentation.metamodel.langgraph._helper import (
     DELEGATION_NAME_PREFIX, get_name, is_root_agent_name, is_delegation_tool, LANGGRAPTH_AGENT_NAME_KEY
 )
 from monocle_apptrace.instrumentation.metamodel.langgraph.entities.inference import (
-    AGENT_DELEGATION, AGENT_REQUEST
+    AGENT_DELEGATION, AGENT_REQUEST, AGENT
 )
 from monocle_apptrace.instrumentation.common.scope_wrapper import start_scope, stop_scope
+from monocle_apptrace.instrumentation.common.utils import is_scope_set
+try:
+    from langgraph.errors import ParentCommand
+except ImportError:
+    ParentCommand = None
+
+logger = logging.getLogger(__name__)
+
+class ParentCommandFilterSpan:
+    """A wrapper for spans that filters out ParentCommand exceptions from being recorded."""
+
+    def __init__(self, span):
+        self.span = span
+        self.original_record_exception = span.record_exception
+
+    def record_exception(self, exception, attributes=None, timestamp=None, escaped=False):
+        """Filter out ParentCommand exceptions before recording them."""
+        try:
+            # Check if this is a ParentCommand exception
+            if ParentCommand is not None and isinstance(exception, ParentCommand):
+                logger.debug("Filtering out ParentCommand exception from span recording")
+                return  # Don't record ParentCommand exceptions
+
+            # For all other exceptions, use the original record_exception method
+            return self.original_record_exception(exception, attributes, timestamp, escaped)
+        except Exception as e:
+            logger.debug(f"Error in ParentCommand filtering: {e}")
+            # If filtering fails, fall back to original behavior
+            return self.original_record_exception(exception, attributes, timestamp, escaped)

 class LanggraphAgentHandler(SpanHandler):
     def pre_tracing(self, to_wrap, wrapped, instance, args, kwargs):
         context = set_value(LANGGRAPTH_AGENT_NAME_KEY, get_name(instance))
         context = set_value(AGENT_PREFIX_KEY, DELEGATION_NAME_PREFIX, context)
         scope_name = AGENT_REQUEST.get("type")
-        if scope_name is not None and is_root_agent_name(instance) and get_value(scope_name, context) is None:
-            return start_scope(scope_name, scope_value=None, context=context)
+        if not is_scope_set(scope_name):
+            agent_request_wrapper = to_wrap.copy()
+            agent_request_wrapper["output_processor"] = AGENT_REQUEST
+            # return start_scope(scope_name, scope_value=None, context=context)
+            return attach(context), agent_request_wrapper
         else:
-            return attach(context)
+            return attach(context), None

     def post_tracing(self, to_wrap, wrapped, instance, args, kwargs, result, token):
         if token is not None:
             detach(token)

+    def post_task_processing(self, to_wrap, wrapped, instance, args, kwargs, result, ex, span, parent_span):
+        """Apply ParentCommand filtering to the span before task execution."""
+        # Apply ParentCommand filtering to this span
+        self._apply_parent_command_filtering(span)
+        super().post_task_processing(to_wrap, wrapped, instance, args, kwargs, result, ex, span, parent_span)
+
+    def _apply_parent_command_filtering(self, span):
+        """Apply ParentCommand exception filtering to a span."""
+        try:
+            if hasattr(span, 'record_exception'):
+                # Create a filtered wrapper and replace the record_exception method
+                filter_wrapper = ParentCommandFilterSpan(span)
+                span.record_exception = filter_wrapper.record_exception
+                logger.debug("Applied ParentCommand filtering to LangGraph agent span")
+        except Exception as e:
+            logger.debug(f"Failed to apply ParentCommand filtering: {e}")
+
     # In multi agent scenarios, the root agent is the one that orchestrates the other agents. LangGraph generates an extra root level invoke()
     # call on top of the supervisor agent invoke().
     # This span handler resets the parent invoke call as generic type to avoid duplicate attributes/events in supervisor span and this root span.
-    def hydrate_span(self, to_wrap, wrapped, instance, args, kwargs, result, span, parent_span = None, ex:Exception = None) -> bool:
-        if is_root_agent_name(instance) and "parent.agent.span" in span.attributes:
-            agent_request_wrapper = to_wrap.copy()
-            agent_request_wrapper["output_processor"] = AGENT_REQUEST
-        else:
-            agent_request_wrapper = to_wrap
-        if hasattr(instance, 'name') and parent_span is not None and not SpanHandler.is_root_span(parent_span):
-            parent_span.set_attribute("parent.agent.span", True)
-        return super().hydrate_span(agent_request_wrapper, wrapped, instance, args, kwargs, result, span, parent_span, ex)
+
+    def hydrate_span(self, to_wrap, wrapped, instance, args, kwargs, result, span, parent_span = None, ex:Exception = None, is_post_exec:bool = False) -> bool:
+        # Filter out ParentCommand exceptions as they are LangGraph control flow mechanisms, not actual errors
+        if ParentCommand is not None and isinstance(ex, ParentCommand):
+            ex = None  # Suppress the ParentCommand exception from being recorded
+
+        return super().hydrate_span(to_wrap, wrapped, instance, args, kwargs, result, span, parent_span, ex, is_post_exec)

 class LanggraphToolHandler(SpanHandler):
-    # LangGraph uses an internal tool to initate delegation to other agents. The method is tool invoke() with tool name as `transfer_to_<agent_name>`.
-    # Hence we usea different output processor for tool invoke() to format the span as agentic.delegation.
-    def hydrate_span(self, to_wrap, wrapped, instance, args, kwargs, result, span, parent_span = None, ex:Exception = None) -> bool:
+    def pre_tracing(self, to_wrap, wrapped, instance, args, kwargs):
         if is_delegation_tool(instance):
             agent_request_wrapper = to_wrap.copy()
             agent_request_wrapper["output_processor"] = AGENT_DELEGATION
         else:
-            agent_request_wrapper = to_wrap
+            agent_request_wrapper = None
+        return None, agent_request_wrapper
+
+    # LangGraph uses an internal tool to initate delegation to other agents. The method is tool invoke() with tool name as `transfer_to_<agent_name>`.
+    # Hence we usea different output processor for tool invoke() to format the span as agentic.delegation.
+    def post_task_processing(self, to_wrap, wrapped, instance, args, kwargs, result, ex, span, parent_span):
+        """Apply ParentCommand filtering to the span before task execution."""
+        # Apply ParentCommand filtering to this span
+        self._apply_parent_command_filtering(span)
+        super().post_task_processing(to_wrap, wrapped, instance, args, kwargs, result, ex, span, parent_span)
+
+    def _apply_parent_command_filtering(self, span):
+        """Apply ParentCommand exception filtering to a span."""
+        try:
+            if hasattr(span, 'record_exception'):
+                # Create a filtered wrapper and replace the record_exception method
+                filter_wrapper = ParentCommandFilterSpan(span)
+                span.record_exception = filter_wrapper.record_exception
+                logger.debug("Applied ParentCommand filtering to LangGraph tool span")
+        except Exception as e:
+            logger.debug(f"Failed to apply ParentCommand filtering: {e}")

-        return super().hydrate_span(agent_request_wrapper, wrapped, instance, args, kwargs, result, span, parent_span, ex)
-
+    def hydrate_span(self, to_wrap, wrapped, instance, args, kwargs, result, span, parent_span = None, ex:Exception = None, is_post_exec:bool = False) -> bool:
+        # Filter out ParentCommand exceptions as they are LangGraph control flow mechanisms, not actual errors
+        if ParentCommand is not None and isinstance(ex, ParentCommand):
+            ex = None  # Suppress the ParentCommand exception from being recorded
+        return super().hydrate_span(to_wrap, wrapped, instance, args, kwargs, result, span, parent_span, ex, is_post_exec)
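The filtering above works by rebinding record_exception on the live span instance so control-flow exceptions never reach the exporter. A self-contained sketch of the same pattern, with toy stand-ins for the span and for langgraph.errors.ParentCommand:

    class ControlFlowSignal(Exception):
        """Stand-in for langgraph.errors.ParentCommand."""

    class ToySpan:
        def __init__(self):
            self.recorded = []

        def record_exception(self, exception, attributes=None, timestamp=None, escaped=False):
            self.recorded.append(exception)

    def filter_control_flow(span):
        original = span.record_exception

        def filtered(exception, attributes=None, timestamp=None, escaped=False):
            if isinstance(exception, ControlFlowSignal):
                return  # swallow control-flow signals instead of recording them
            return original(exception, attributes, timestamp, escaped)

        span.record_exception = filtered  # instance attribute shadows the method

    span = ToySpan()
    filter_control_flow(span)
    span.record_exception(ControlFlowSignal())       # filtered out
    span.record_exception(ValueError("real error"))  # recorded
    assert len(span.recorded) == 1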
@@ -24,17 +24,33 @@ LANGGRAPH_METHODS = [
         "output_processor": AGENT,
     },
     {
-        "package": "langchain_core.tools.base",
-        "object": "BaseTool",
-        "method": "run",
+        "package": "langchain_core.tools.simple",
+        "object": "Tool",
+        "method": "_run",
         "wrapper_method": task_wrapper,
         "span_handler": "langgraph_tool_handler",
         "output_processor": TOOLS,
     },
     {
-        "package": "langchain_core.tools.base",
-        "object": "BaseTool",
-        "method": "arun",
+        "package": "langchain_core.tools.structured",
+        "object": "StructuredTool",
+        "method": "_run",
+        "wrapper_method": task_wrapper,
+        "span_handler": "langgraph_tool_handler",
+        "output_processor": TOOLS,
+    },
+    {
+        "package": "langchain_core.tools.simple",
+        "object": "Tool",
+        "method": "_arun",
+        "wrapper_method": atask_wrapper,
+        "span_handler": "langgraph_tool_handler",
+        "output_processor": TOOLS,
+    },
+    {
+        "package": "langchain_core.tools.structured",
+        "object": "StructuredTool",
+        "method": "_arun",
         "wrapper_method": atask_wrapper,
         "span_handler": "langgraph_tool_handler",
         "output_processor": TOOLS,