monocle-apptrace 0.4.1__py3-none-any.whl → 0.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of monocle-apptrace might be problematic.

Files changed (91)
  1. monocle_apptrace/__main__.py +1 -1
  2. monocle_apptrace/exporters/file_exporter.py +125 -37
  3. monocle_apptrace/instrumentation/common/__init__.py +16 -1
  4. monocle_apptrace/instrumentation/common/constants.py +14 -1
  5. monocle_apptrace/instrumentation/common/instrumentor.py +19 -152
  6. monocle_apptrace/instrumentation/common/method_wrappers.py +376 -0
  7. monocle_apptrace/instrumentation/common/span_handler.py +58 -32
  8. monocle_apptrace/instrumentation/common/utils.py +52 -15
  9. monocle_apptrace/instrumentation/common/wrapper.py +124 -18
  10. monocle_apptrace/instrumentation/common/wrapper_method.py +48 -1
  11. monocle_apptrace/instrumentation/metamodel/a2a/__init__.py +0 -0
  12. monocle_apptrace/instrumentation/metamodel/a2a/_helper.py +37 -0
  13. monocle_apptrace/instrumentation/metamodel/a2a/entities/__init__.py +0 -0
  14. monocle_apptrace/instrumentation/metamodel/a2a/entities/inference.py +112 -0
  15. monocle_apptrace/instrumentation/metamodel/a2a/methods.py +22 -0
  16. monocle_apptrace/instrumentation/metamodel/adk/__init__.py +0 -0
  17. monocle_apptrace/instrumentation/metamodel/adk/_helper.py +182 -0
  18. monocle_apptrace/instrumentation/metamodel/adk/entities/agent.py +50 -0
  19. monocle_apptrace/instrumentation/metamodel/adk/entities/tool.py +57 -0
  20. monocle_apptrace/instrumentation/metamodel/adk/methods.py +24 -0
  21. monocle_apptrace/instrumentation/metamodel/agents/__init__.py +0 -0
  22. monocle_apptrace/instrumentation/metamodel/agents/_helper.py +220 -0
  23. monocle_apptrace/instrumentation/metamodel/agents/agents_processor.py +152 -0
  24. monocle_apptrace/instrumentation/metamodel/agents/entities/__init__.py +0 -0
  25. monocle_apptrace/instrumentation/metamodel/agents/entities/inference.py +191 -0
  26. monocle_apptrace/instrumentation/metamodel/agents/methods.py +56 -0
  27. monocle_apptrace/instrumentation/metamodel/aiohttp/_helper.py +6 -11
  28. monocle_apptrace/instrumentation/metamodel/anthropic/_helper.py +112 -18
  29. monocle_apptrace/instrumentation/metamodel/anthropic/entities/inference.py +18 -10
  30. monocle_apptrace/instrumentation/metamodel/azfunc/_helper.py +13 -11
  31. monocle_apptrace/instrumentation/metamodel/azfunc/entities/http.py +5 -0
  32. monocle_apptrace/instrumentation/metamodel/azureaiinference/_helper.py +88 -8
  33. monocle_apptrace/instrumentation/metamodel/azureaiinference/entities/inference.py +22 -8
  34. monocle_apptrace/instrumentation/metamodel/botocore/_helper.py +92 -16
  35. monocle_apptrace/instrumentation/metamodel/botocore/entities/inference.py +13 -8
  36. monocle_apptrace/instrumentation/metamodel/botocore/handlers/botocore_span_handler.py +1 -1
  37. monocle_apptrace/instrumentation/metamodel/fastapi/__init__.py +0 -0
  38. monocle_apptrace/instrumentation/metamodel/fastapi/_helper.py +82 -0
  39. monocle_apptrace/instrumentation/metamodel/fastapi/entities/__init__.py +0 -0
  40. monocle_apptrace/instrumentation/metamodel/fastapi/entities/http.py +44 -0
  41. monocle_apptrace/instrumentation/metamodel/fastapi/methods.py +23 -0
  42. monocle_apptrace/instrumentation/metamodel/finish_types.py +463 -0
  43. monocle_apptrace/instrumentation/metamodel/flask/_helper.py +6 -11
  44. monocle_apptrace/instrumentation/metamodel/gemini/__init__.py +0 -0
  45. monocle_apptrace/instrumentation/metamodel/gemini/_helper.py +120 -0
  46. monocle_apptrace/instrumentation/metamodel/gemini/entities/__init__.py +0 -0
  47. monocle_apptrace/instrumentation/metamodel/gemini/entities/inference.py +86 -0
  48. monocle_apptrace/instrumentation/metamodel/gemini/entities/retrieval.py +43 -0
  49. monocle_apptrace/instrumentation/metamodel/gemini/methods.py +31 -0
  50. monocle_apptrace/instrumentation/metamodel/haystack/_helper.py +79 -8
  51. monocle_apptrace/instrumentation/metamodel/haystack/entities/inference.py +15 -10
  52. monocle_apptrace/instrumentation/metamodel/haystack/methods.py +7 -0
  53. monocle_apptrace/instrumentation/metamodel/lambdafunc/_helper.py +78 -0
  54. monocle_apptrace/instrumentation/metamodel/lambdafunc/entities/http.py +51 -0
  55. monocle_apptrace/instrumentation/metamodel/lambdafunc/methods.py +23 -0
  56. monocle_apptrace/instrumentation/metamodel/lambdafunc/wrapper.py +23 -0
  57. monocle_apptrace/instrumentation/metamodel/langchain/_helper.py +145 -19
  58. monocle_apptrace/instrumentation/metamodel/langchain/entities/inference.py +19 -10
  59. monocle_apptrace/instrumentation/metamodel/langgraph/_helper.py +67 -10
  60. monocle_apptrace/instrumentation/metamodel/langgraph/entities/inference.py +127 -20
  61. monocle_apptrace/instrumentation/metamodel/langgraph/langgraph_processor.py +46 -0
  62. monocle_apptrace/instrumentation/metamodel/langgraph/methods.py +35 -9
  63. monocle_apptrace/instrumentation/metamodel/litellm/__init__.py +0 -0
  64. monocle_apptrace/instrumentation/metamodel/litellm/_helper.py +89 -0
  65. monocle_apptrace/instrumentation/metamodel/litellm/entities/__init__.py +0 -0
  66. monocle_apptrace/instrumentation/metamodel/litellm/entities/inference.py +108 -0
  67. monocle_apptrace/instrumentation/metamodel/litellm/methods.py +19 -0
  68. monocle_apptrace/instrumentation/metamodel/llamaindex/_helper.py +227 -16
  69. monocle_apptrace/instrumentation/metamodel/llamaindex/entities/agent.py +127 -10
  70. monocle_apptrace/instrumentation/metamodel/llamaindex/entities/inference.py +13 -8
  71. monocle_apptrace/instrumentation/metamodel/llamaindex/llamaindex_processor.py +62 -0
  72. monocle_apptrace/instrumentation/metamodel/llamaindex/methods.py +68 -1
  73. monocle_apptrace/instrumentation/metamodel/mcp/__init__.py +0 -0
  74. monocle_apptrace/instrumentation/metamodel/mcp/_helper.py +118 -0
  75. monocle_apptrace/instrumentation/metamodel/mcp/entities/__init__.py +0 -0
  76. monocle_apptrace/instrumentation/metamodel/mcp/entities/inference.py +48 -0
  77. monocle_apptrace/instrumentation/metamodel/mcp/mcp_processor.py +8 -0
  78. monocle_apptrace/instrumentation/metamodel/mcp/methods.py +21 -0
  79. monocle_apptrace/instrumentation/metamodel/openai/_helper.py +188 -16
  80. monocle_apptrace/instrumentation/metamodel/openai/entities/inference.py +148 -92
  81. monocle_apptrace/instrumentation/metamodel/openai/entities/retrieval.py +1 -1
  82. monocle_apptrace/instrumentation/metamodel/teamsai/_helper.py +53 -23
  83. monocle_apptrace/instrumentation/metamodel/teamsai/entities/inference/actionplanner_output_processor.py +1 -1
  84. monocle_apptrace/instrumentation/metamodel/teamsai/entities/inference/teamsai_output_processor.py +15 -9
  85. monocle_apptrace/instrumentation/metamodel/teamsai/sample.json +0 -4
  86. {monocle_apptrace-0.4.1.dist-info → monocle_apptrace-0.5.0.dist-info}/METADATA +27 -11
  87. monocle_apptrace-0.5.0.dist-info/RECORD +142 -0
  88. monocle_apptrace-0.4.1.dist-info/RECORD +0 -96
  89. {monocle_apptrace-0.4.1.dist-info → monocle_apptrace-0.5.0.dist-info}/WHEEL +0 -0
  90. {monocle_apptrace-0.4.1.dist-info → monocle_apptrace-0.5.0.dist-info}/licenses/LICENSE +0 -0
  91. {monocle_apptrace-0.4.1.dist-info → monocle_apptrace-0.5.0.dist-info}/licenses/NOTICE +0 -0
monocle_apptrace/instrumentation/metamodel/langchain/_helper.py:

@@ -4,14 +4,18 @@ and assistant messages from various input formats.
 """
 
 import logging
+from opentelemetry.context import get_value
+from monocle_apptrace.instrumentation.common.constants import AGENT_PREFIX_KEY, INFERENCE_AGENT_DELEGATION, INFERENCE_COMMUNICATION, INFERENCE_TOOL_CALL
 from monocle_apptrace.instrumentation.common.utils import (
     Option,
+    get_json_dumps,
     get_keys_as_tuple,
     get_nested_value,
     try_option,
     get_exception_message,
     get_status_code,
 )
+from monocle_apptrace.instrumentation.metamodel.finish_types import map_langchain_finish_reason_to_finish_type
 
 
 logger = logging.getLogger(__name__)
@@ -32,45 +36,80 @@ def extract_messages(args):
             for msg in args[0].messages:
                 if hasattr(msg, 'content') and hasattr(msg, 'type'):
                     messages.append({msg.type: msg.content})
-        return [str(d) for d in messages]
+        else:
+            for msg in args[0]:
+                if hasattr(msg, 'content') and hasattr(msg, 'type') and msg.content:
+                    messages.append({msg.type: msg.content})
+                elif hasattr(msg, 'tool_calls') and msg.tool_calls:
+                    messages.append({msg.type: get_json_dumps(msg.tool_calls)})
+        return [get_json_dumps(d) for d in messages]
     except Exception as e:
         logger.warning("Warning: Error occurred in extract_messages: %s", str(e))
         return []
+
+def agent_inference_type(arguments):
+    """Extract agent inference type from arguments."""
+    try:
+        if get_value(AGENT_PREFIX_KEY):
+            agent_prefix = get_value(AGENT_PREFIX_KEY)
+            if hasattr(arguments['result'], "tool_calls") and arguments['result'].tool_calls:
+                tool_call = arguments['result'].tool_calls[0] if arguments['result'].tool_calls else None
+                if tool_call and 'name' in tool_call and tool_call["name"].startswith(agent_prefix):
+                    return INFERENCE_AGENT_DELEGATION
+                else:
+                    return INFERENCE_TOOL_CALL
+        return INFERENCE_COMMUNICATION
+
+    except Exception as e:
+        logger.warning("Warning: Error occurred in agent_inference_type: %s", str(e))
+        return None
 
 def extract_assistant_message(arguments):
     status = get_status_code(arguments)
-    response: str = ""
+    messages = []
+    role = "assistant"
     if status == 'success':
         if isinstance(arguments['result'], str):
-            response = arguments['result']
-        if hasattr(arguments['result'], "content"):
-            response = arguments['result'].content
-        if hasattr(arguments['result'], "message") and hasattr(arguments['result'].message, "content"):
-            response = arguments['result'].message.content
+            messages.append({role: arguments['result']})
+        elif hasattr(arguments['result'], "content") and arguments['result'].content != "":
+            role = arguments['result'].type if hasattr(arguments['result'], 'type') else role
+            messages.append({role: arguments['result'].content})
+        elif hasattr(arguments['result'], "message") and hasattr(arguments['result'].message, "content") and arguments['result'].message.content != "":
+            role = arguments['result'].type if hasattr(arguments['result'], 'type') else role
+            messages.append({role: arguments['result'].message.content})
+        elif hasattr(arguments['result'], "tool_calls"):
+            role = arguments['result'].type if hasattr(arguments['result'], 'type') else role
+            messages.append({role: arguments['result'].tool_calls[0]})
     else:
         if arguments["exception"] is not None:
-            response = get_exception_message(arguments)
+            messages.append({role: get_exception_message(arguments)})
         elif hasattr(arguments["result"], "error"):
-            response = arguments["result"].error
-
-    return response
-
+            return arguments["result"].error
+    return get_json_dumps(messages[0]) if messages else ""
 
 def extract_provider_name(instance):
-    provider_url: Option[str] = None
-    if hasattr(instance,'client'):
+    provider_url: Option[str] = Option(None)
+    if hasattr(instance, 'client'):
+        provider_url: Option[str] = try_option(getattr, instance.client, 'universe_domain')
+    if hasattr(instance,'client') and hasattr(instance.client, '_client') and hasattr(instance.client._client, 'base_url'):
+        # If the client has a base_url, extract the host from it
         provider_url: Option[str] = try_option(getattr, instance.client._client.base_url, 'host')
-    if hasattr(instance, '_client'):
+    if hasattr(instance, '_client') and hasattr(instance._client, 'base_url'):
         provider_url = try_option(getattr, instance._client.base_url, 'host')
     return provider_url.unwrap_or(None)
 
 
 def extract_inference_endpoint(instance):
     inference_endpoint: Option[str] = None
-    if hasattr(instance,'client'):
+    # instance.client.meta.endpoint_url
+    if hasattr(instance, 'client') and hasattr(instance.client, 'transport'):
+        inference_endpoint: Option[str] = try_option(getattr, instance.client.transport, 'host')
+
+    if hasattr(instance, 'client') and hasattr(instance.client, 'meta') and hasattr(instance.client.meta, 'endpoint_url'):
+        inference_endpoint: Option[str] = try_option(getattr, instance.client.meta, 'endpoint_url').map(str)
+
+    if hasattr(instance,'client') and hasattr(instance.client, '_client'):
         inference_endpoint: Option[str] = try_option(getattr, instance.client._client, 'base_url').map(str)
-    if inference_endpoint.is_none() and "meta" in instance.client.__dict__:
-        inference_endpoint = try_option(getattr, instance.client.meta, 'endpoint_url').map(str)
+
     if hasattr(instance,'_client'):
         inference_endpoint = try_option(getattr, instance._client, 'base_url').map(str)
 
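The practical effect of the extract_messages change is that each captured message is now serialized as JSON rather than via str(dict), and tool calls are captured when a message carries no text content. A minimal standalone sketch of the new output shape (assuming get_json_dumps behaves like json.dumps; the message objects below are stand-ins for LangChain messages, not the package's own test code):

import json
from types import SimpleNamespace

# Stand-ins for LangChain message objects exposing `type` and `content`.
msgs = [
    SimpleNamespace(type="system", content="You are a helpful assistant."),
    SimpleNamespace(type="human", content="What is the capital of France?"),
]

# 0.4.1 behavior: str({...}) produced Python repr strings with single quotes.
print([str({m.type: m.content}) for m in msgs])
# 0.5.0 behavior: each message dict is emitted as valid JSON.
print([json.dumps({m.type: m.content}) for m in msgs])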
@@ -138,4 +177,91 @@ def update_span_from_llm_response(response, instance):
             {"completion_tokens": token_usage.get("completion_tokens") or token_usage.get("output_tokens")})
         meta_dict.update({"prompt_tokens": token_usage.get("prompt_tokens") or token_usage.get("input_tokens")})
         meta_dict.update({"total_tokens": token_usage.get("total_tokens")})
-    return meta_dict
+    return meta_dict
+
+def extract_finish_reason(arguments):
+    """Extract finish_reason from LangChain response."""
+    try:
+        # Handle exception cases first
+        if arguments.get("exception") is not None:
+            # If there's an exception, it's typically an error finish type
+            return "error"
+
+        response = arguments.get("result")
+        if response is None:
+            return None
+
+        # Check various possible locations for finish_reason in LangChain responses
+
+        # Direct finish_reason attribute
+        if hasattr(response, "finish_reason") and response.finish_reason:
+            return response.finish_reason
+
+        # Response metadata (common in LangChain)
+        if hasattr(response, "response_metadata") and response.response_metadata:
+            metadata = response.response_metadata
+            if isinstance(metadata, dict):
+                # Check for finish_reason in metadata
+                if "finish_reason" in metadata:
+                    return metadata["finish_reason"]
+                # Check for stop_reason (Anthropic style through LangChain)
+                if "stop_reason" in metadata:
+                    return metadata["stop_reason"]
+                # Check for other common finish reason keys
+                for key in ["completion_reason", "end_reason", "status"]:
+                    if key in metadata:
+                        return metadata[key]
+
+        # Check if response has generation_info (some LangChain models)
+        if hasattr(response, "generation_info") and response.generation_info:
+            gen_info = response.generation_info
+            if isinstance(gen_info, dict):
+                for key in ["finish_reason", "stop_reason", "completion_reason"]:
+                    if key in gen_info:
+                        return gen_info[key]
+
+        # Check if response has llm_output (batch responses)
+        if hasattr(response, "llm_output") and response.llm_output:
+            llm_output = response.llm_output
+            if isinstance(llm_output, dict):
+                for key in ["finish_reason", "stop_reason"]:
+                    if key in llm_output:
+                        return llm_output[key]
+
+        # For AIMessage responses, check additional_kwargs
+        if hasattr(response, "additional_kwargs") and response.additional_kwargs:
+            kwargs = response.additional_kwargs
+            if isinstance(kwargs, dict):
+                for key in ["finish_reason", "stop_reason"]:
+                    if key in kwargs:
+                        return kwargs[key]
+
+        # For generation responses with choices (similar to OpenAI structure)
+        if hasattr(response, "generations") and response.generations:
+            generations = response.generations
+            if isinstance(generations, list) and len(generations) > 0:
+                for generation in generations:
+                    if hasattr(generation, "generation_info") and generation.generation_info:
+                        gen_info = generation.generation_info
+                        if isinstance(gen_info, dict):
+                            for key in ["finish_reason", "stop_reason"]:
+                                if key in gen_info:
+                                    return gen_info[key]
+
+        # If no specific finish reason found, infer from status
+        status_code = get_status_code(arguments)
+        if status_code == 'success':
+            return "stop"  # Default success finish reason
+        elif status_code == 'error':
+            return "error"
+
+    except Exception as e:
+        logger.warning("Warning: Error occurred in extract_finish_reason: %s", str(e))
+        return None
+
+    return None
+
+
+def map_finish_reason_to_finish_type(finish_reason):
+    """Map LangChain finish_reason to finish_type."""
+    return map_langchain_finish_reason_to_finish_type(finish_reason)
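The fallback chain above can be exercised directly. A hypothetical usage sketch, assuming monocle-apptrace 0.5.0 is installed and that these hunks belong to monocle_apptrace/instrumentation/metamodel/langchain/_helper.py (consistent with the file list above):

from types import SimpleNamespace
from monocle_apptrace.instrumentation.metamodel.langchain._helper import (
    extract_finish_reason,
    map_finish_reason_to_finish_type,
)

# An AIMessage-like stand-in carrying finish_reason in response_metadata,
# the way LangChain chat models typically report it.
response = SimpleNamespace(response_metadata={"finish_reason": "length"})
arguments = {"exception": None, "result": response}

reason = extract_finish_reason(arguments)          # "length"
print(reason, map_finish_reason_to_finish_type(reason))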
monocle_apptrace/instrumentation/metamodel/langchain/entities/inference.py:

@@ -1,7 +1,7 @@
 from monocle_apptrace.instrumentation.metamodel.langchain import (
     _helper,
 )
-from monocle_apptrace.instrumentation.common.utils import resolve_from_alias, get_llm_type, get_status, get_status_code
+from monocle_apptrace.instrumentation.common.utils import get_error_message, resolve_from_alias, get_llm_type, get_status, get_status_code
 
 INFERENCE = {
     "type": "inference.framework",
@@ -30,11 +30,11 @@ INFERENCE = {
         {
             "_comment": "LLM Model",
             "attribute": "name",
-            "accessor": lambda arguments: resolve_from_alias(arguments['instance'].__dict__, ['model', 'model_name', 'endpoint_name', 'deployment_name'])
+            "accessor": lambda arguments: resolve_from_alias(arguments['instance'].__dict__, ['model', 'model_name', 'endpoint_name', 'deployment_name', 'model_id'])
         },
         {
             "attribute": "type",
-            "accessor": lambda arguments: 'model.llm.' + resolve_from_alias(arguments['instance'].__dict__, ['model', 'model_name', 'endpoint_name', 'deployment_name'])
+            "accessor": lambda arguments: 'model.llm.' + resolve_from_alias(arguments['instance'].__dict__, ['model', 'model_name', 'endpoint_name', 'deployment_name', 'model_id'])
         }
     ]
 ],
@@ -53,13 +53,8 @@ INFERENCE = {
         "name": "data.output",
         "attributes": [
             {
-                "_comment": "this is result from LLM",
-                "attribute": "status",
-                "accessor": lambda arguments: get_status(arguments)
-            },
-            {
-                "attribute": "status_code",
-                "accessor": lambda arguments: get_status_code(arguments)
+                "attribute": "error_code",
+                "accessor": lambda arguments: get_error_message(arguments)
             },
             {
                 "attribute": "response",
@@ -73,6 +68,20 @@ INFERENCE = {
             {
                 "_comment": "this is metadata usage from LLM",
                 "accessor": lambda arguments: _helper.update_span_from_llm_response(arguments['result'], arguments['instance'])
+            },
+            {
+                "attribute": "finish_reason",
+                "accessor": lambda arguments: _helper.extract_finish_reason(arguments)
+            },
+            {
+                "attribute": "finish_type",
+                "accessor": lambda arguments: _helper.map_finish_reason_to_finish_type(
+                    _helper.extract_finish_reason(arguments)
+                )
+            },
+            {
+                "attribute": "inference_sub_type",
+                "accessor": lambda arguments: _helper.agent_inference_type(arguments)
             }
         ]
     }
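For readers unfamiliar with these declarative output processors: span hydration walks each attributes entry and calls its accessor with an arguments mapping (the keys 'instance', 'result', and 'exception' are visible in the lambdas above; the exact set passed by the wrapper may be larger). A self-contained sketch of that evaluation loop, with illustrative specs rather than the package's real ones:

from types import SimpleNamespace

attributes = [
    {"attribute": "name",
     "accessor": lambda a: a["instance"].model_name},
    {"attribute": "type",
     "accessor": lambda a: "model.llm." + a["instance"].model_name},
]

arguments = {"instance": SimpleNamespace(model_name="gpt-4o"),
             "result": None, "exception": None}

# Each accessor is evaluated against the same arguments mapping
# to produce one span attribute.
span_attributes = {spec["attribute"]: spec["accessor"](arguments)
                   for spec in attributes}
print(span_attributes)  # {'name': 'gpt-4o', 'type': 'model.llm.gpt-4o'}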
monocle_apptrace/instrumentation/metamodel/langgraph/_helper.py:

@@ -1,15 +1,20 @@
+from opentelemetry.context import get_value
 from monocle_apptrace.instrumentation.common.utils import resolve_from_alias
 import logging
 logger = logging.getLogger(__name__)
 
-def handle_openai_response(response):
+DELEGATION_NAME_PREFIX = 'transfer_to_'
+ROOT_AGENT_NAME = 'LangGraph'
+LANGGRAPTH_AGENT_NAME_KEY = "agent.langgraph"
+
+def extract_agent_response(response):
     try:
-        if 'messages' in response:
+        if response is not None and 'messages' in response:
             output = response["messages"][-1]
             return str(output.content)
     except Exception as e:
-        logger.warning("Warning: Error occurred in handle_openai_response: %s", str(e))
-        return ""
+        logger.warning("Warning: Error occurred in handle_response: %s", str(e))
+        return ""
 
 def agent_instructions(arguments):
     if callable(arguments['kwargs']['agent'].instructions):
@@ -17,11 +22,13 @@ def agent_instructions(arguments):
     else:
         return arguments['kwargs']['agent'].instructions
 
-def extract_input(arguments):
-    history = arguments['result']['messages']
-    for message in history:
-        if hasattr(message, 'content') and hasattr(message, 'type') and message.type == "human": # Check if the message is a HumanMessage
-            return message.content
+def extract_agent_input(arguments):
+    if arguments['result'] is not None and 'messages' in arguments['result']:
+        history = arguments['result']['messages']
+        for message in history:
+            if hasattr(message, 'content') and hasattr(message, 'type') and message.type == "human": # Check if the message is a HumanMessage
+                return message.content
+    return None
 
 def get_inference_endpoint(arguments):
     inference_endpoint = resolve_from_alias(arguments['instance'].client.__dict__, ['azure_endpoint', 'api_base', '_base_url'])
@@ -33,7 +40,6 @@ def tools(instance):
     if hasattr(tools,'bound') and hasattr(tools.bound,'tools_by_name'):
         return list(tools.bound.tools_by_name.keys())
 
-
 def update_span_from_llm_response(response):
     meta_dict = {}
     token_usage = None
@@ -46,3 +52,54 @@ def update_span_from_llm_response(response):
         meta_dict.update({"prompt_tokens": token_usage.get('prompt_tokens')})
         meta_dict.update({"total_tokens": token_usage.get('total_tokens')})
     return meta_dict
+
+def extract_tool_response(result):
+    if result is not None and hasattr(result, 'content'):
+        return result.content
+    return None
+
+def get_status(result):
+    if result is not None and hasattr(result, 'status'):
+        return result.status
+    return None
+
+def extract_tool_input(arguments):
+    tool_input = arguments['args'][0]
+    if isinstance(tool_input, str):
+        return [tool_input]
+    else:
+        return list(tool_input.values())
+
+def get_name(instance):
+    return instance.name if hasattr(instance, 'name') else ""
+
+def get_agent_name(instance) -> str:
+    return get_name(instance)
+
+def get_tool_name(instance) -> str:
+    return get_name(instance)
+
+def is_delegation_tool(instance) -> bool:
+    return get_name(instance).startswith(DELEGATION_NAME_PREFIX)
+
+def get_target_agent(instance) -> str:
+    return get_name(instance).replace(DELEGATION_NAME_PREFIX, '', 1)
+
+def is_root_agent_name(instance) -> bool:
+    return get_name(instance) == ROOT_AGENT_NAME
+
+def get_source_agent() -> str:
+    """Get the name of the agent that initiated the request."""
+    from_agent = get_value(LANGGRAPTH_AGENT_NAME_KEY)
+    return from_agent if from_agent is not None else ""
+
+def get_description(instance) -> str:
+    return instance.description if hasattr(instance, 'description') else ""
+
+def get_agent_description(instance) -> str:
+    """Get the description of the agent."""
+    return get_description(instance)
+
+def get_tool_description(instance) -> str:
+    """Get the description of the tool."""
+    return get_description(instance)
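The delegation helpers boil down to string checks on the tool name, keyed off the transfer_to_ naming convention that LangGraph's supervisor pattern uses for hand-off tools. A standalone sketch of the same logic (prefix constant copied from the hunk above; the tool class is a stand-in):

DELEGATION_NAME_PREFIX = 'transfer_to_'

class FakeTool:
    def __init__(self, name):
        self.name = name

def is_delegation_tool(tool):
    # A hand-off tool is recognized purely by its name prefix.
    return tool.name.startswith(DELEGATION_NAME_PREFIX)

def get_target_agent(tool):
    # Stripping the prefix yields the destination agent's name.
    return tool.name.replace(DELEGATION_NAME_PREFIX, '', 1)

tool = FakeTool("transfer_to_booking_agent")
print(is_delegation_tool(tool))   # True
print(get_target_agent(tool))     # booking_agent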
monocle_apptrace/instrumentation/metamodel/langgraph/entities/inference.py:

@@ -1,24 +1,25 @@
 from monocle_apptrace.instrumentation.metamodel.langgraph import (
     _helper
 )
-INFERENCE = {
-    "type": "agent",
+
+AGENT = {
+    "type": "agentic.invocation",
     "attributes": [
         [
             {
                 "_comment": "agent type",
                 "attribute": "type",
-                "accessor": lambda arguments:'agent.oai'
+                "accessor": lambda arguments:'agent.langgraph'
             },
             {
                 "_comment": "name of the agent",
                 "attribute": "name",
-                "accessor": lambda arguments:arguments['instance'].name
+                "accessor": lambda arguments: _helper.get_agent_name(arguments['instance'])
             },
             {
-                "_comment": "agent tools",
-                "attribute": "tools",
-                "accessor": lambda arguments: _helper.tools(arguments['instance'])
+                "_comment": "agent description",
+                "attribute": "description",
+                "accessor": lambda arguments: _helper.get_agent_description(arguments['instance'])
             }
         ]
     ],
@@ -27,9 +28,9 @@ INFERENCE = {
         "name":"data.input",
         "attributes": [
             {
-                "_comment": "this is LLM input",
+                "_comment": "this is Agent input",
                 "attribute": "query",
-                "accessor": lambda arguments: _helper.extract_input(arguments)
+                "accessor": lambda arguments: _helper.extract_agent_input(arguments)
             }
         ]
     },
@@ -39,18 +40,124 @@ INFERENCE = {
             {
                 "_comment": "this is response from LLM",
                 "attribute": "response",
-                "accessor": lambda arguments: _helper.handle_openai_response(arguments['result'])
+                "accessor": lambda arguments: _helper.extract_agent_response(arguments['result'])
             }
         ]
-    },
-    {
-        "name": "metadata",
-        "attributes": [
-            {
-                "_comment": "this is metadata usage from LLM",
-                "accessor": lambda arguments: _helper.update_span_from_llm_response(arguments['result'])
-            }
-        ]
     }
 ]
-}
+}
+
+AGENT_REQUEST = {
+    "type": "agentic.request",
+    "attributes": [
+        [
+            {
+                "_comment": "agent type",
+                "attribute": "type",
+                "accessor": lambda arguments:'agent.langgraph'
+            }
+        ],
+    ],
+    "events": [
+        {
+            "name":"data.input",
+            "attributes": [
+                {
+                    "_comment": "this is Agent input",
+                    "attribute": "input",
+                    "accessor": lambda arguments: _helper.extract_agent_input(arguments)
+                }
+            ]
+        },
+        {
+            "name":"data.output",
+            "attributes": [
+                {
+                    "_comment": "this is response from LLM",
+                    "attribute": "response",
+                    "accessor": lambda arguments: _helper.extract_agent_response(arguments['result'])
+                }
+            ]
+        }
+    ]
+}
+
+TOOLS = {
+    "type": "agentic.tool.invocation",
+    "attributes": [
+        [
+            {
+                "_comment": "tool type",
+                "attribute": "type",
+                "accessor": lambda arguments:'tool.langgraph'
+            },
+            {
+                "_comment": "name of the tool",
+                "attribute": "name",
+                "accessor": lambda arguments: _helper.get_tool_name(arguments['instance'])
+            },
+            {
+                "_comment": "tool description",
+                "attribute": "description",
+                "accessor": lambda arguments: _helper.get_tool_description(arguments['instance'])
+            }
+        ],
+        [
+            {
+                "_comment": "name of the agent",
+                "attribute": "name",
+                "accessor": lambda arguments: _helper.get_source_agent()
+            },
+            {
+                "_comment": "agent type",
+                "attribute": "type",
+                "accessor": lambda arguments:'agent.langgraph'
+            }
+        ]
+    ],
+    "events": [
+        {
+            "name":"data.input",
+            "attributes": [
+                {
+                    "_comment": "this is Tool input",
+                    "attribute": "Inputs",
+                    "accessor": lambda arguments: _helper.extract_tool_input(arguments)
+                },
+            ]
+        },
+        {
+            "name":"data.output",
+            "attributes": [
+                {
+                    "_comment": "this is response from Tool",
+                    "attribute": "response",
+                    "accessor": lambda arguments: _helper.extract_tool_response(arguments['result'])
+                }
+            ]
+        }
+    ]
+}
+
+AGENT_DELEGATION = {
+    "type": "agentic.delegation",
+    "attributes": [
+        [
+            {
+                "_comment": "agent type",
+                "attribute": "type",
+                "accessor": lambda arguments:'agent.langgraph'
+            },
+            {
+                "_comment": "name of the agent",
+                "attribute": "from_agent",
+                "accessor": lambda arguments: _helper.get_source_agent()
+            },
+            {
+                "_comment": "name of the agent called",
+                "attribute": "to_agent",
+                "accessor": lambda arguments: _helper.get_target_agent(arguments['instance'])
+            }
+        ]
+    ]
+}
monocle_apptrace/instrumentation/metamodel/langgraph/langgraph_processor.py (new file):

@@ -0,0 +1,46 @@
+from opentelemetry.context import set_value, attach, detach
+from monocle_apptrace.instrumentation.common.constants import AGENT_PREFIX_KEY
+from monocle_apptrace.instrumentation.common.span_handler import SpanHandler
+from monocle_apptrace.instrumentation.metamodel.langgraph._helper import (
+    DELEGATION_NAME_PREFIX, get_name, is_root_agent_name, is_delegation_tool, LANGGRAPTH_AGENT_NAME_KEY
+)
+from monocle_apptrace.instrumentation.metamodel.langgraph.entities.inference import (
+    AGENT_DELEGATION, AGENT_REQUEST
+)
+
+class LanggraphAgentHandler(SpanHandler):
+    def pre_tracing(self, to_wrap, wrapped, instance, args, kwargs):
+        context = set_value(LANGGRAPTH_AGENT_NAME_KEY, get_name(instance))
+        context = set_value(AGENT_PREFIX_KEY, DELEGATION_NAME_PREFIX, context)
+        return attach(context)
+
+    def post_tracing(self, to_wrap, wrapped, instance, args, kwargs, result, token):
+        if token is not None:
+            detach(token)
+
+    # In multi-agent scenarios, the root agent is the one that orchestrates the other agents. LangGraph generates an extra root-level invoke()
+    # call on top of the supervisor agent invoke().
+    # This span handler resets the parent invoke call as generic type to avoid duplicate attributes/events in the supervisor span and this root span.
+    def hydrate_span(self, to_wrap, wrapped, instance, args, kwargs, result, span, parent_span=None, ex: Exception = None) -> bool:
+        if is_root_agent_name(instance) and "parent.agent.span" in span.attributes:
+            agent_request_wrapper = to_wrap.copy()
+            agent_request_wrapper["output_processor"] = AGENT_REQUEST
+        else:
+            agent_request_wrapper = to_wrap
+        if hasattr(instance, 'name') and parent_span is not None and not SpanHandler.is_root_span(parent_span):
+            parent_span.set_attribute("parent.agent.span", True)
+        return super().hydrate_span(agent_request_wrapper, wrapped, instance, args, kwargs, result, span, parent_span, ex)
+
+class LanggraphToolHandler(SpanHandler):
+    # LangGraph uses an internal tool to initiate delegation to other agents. The method is tool invoke() with tool name `transfer_to_<agent_name>`.
+    # Hence we use a different output processor for tool invoke() to format the span as agentic.delegation.
+    def hydrate_span(self, to_wrap, wrapped, instance, args, kwargs, result, span, parent_span=None, ex: Exception = None) -> bool:
+        if is_delegation_tool(instance):
+            agent_request_wrapper = to_wrap.copy()
+            agent_request_wrapper["output_processor"] = AGENT_DELEGATION
+        else:
+            agent_request_wrapper = to_wrap
+
+        return super().hydrate_span(agent_request_wrapper, wrapped, instance, args, kwargs, result, span, parent_span, ex)
+
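pre_tracing/post_tracing lean on OpenTelemetry's context API: attach() returns a token and detach(token) restores the previous context, which is how the active agent name stays visible to nested tool and inference calls. A minimal sketch of that pattern (the key string here is illustrative, not the package's constant):

from opentelemetry.context import attach, detach, get_value, set_value

token = attach(set_value("agent.langgraph", "supervisor"))
try:
    # Anything called here (tools, LLM calls) can read the active agent name.
    print(get_value("agent.langgraph"))  # supervisor
finally:
    detach(token)

print(get_value("agent.langgraph"))  # None -- previous context restored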
monocle_apptrace/instrumentation/metamodel/langgraph/methods.py:

@@ -1,14 +1,40 @@
-from monocle_apptrace.instrumentation.common.wrapper import task_wrapper
+from monocle_apptrace.instrumentation.common.wrapper import task_wrapper, atask_wrapper
 from monocle_apptrace.instrumentation.metamodel.langgraph.entities.inference import (
-    INFERENCE,
+    AGENT,
+    TOOLS,
 )
+
 LANGGRAPH_METHODS = [
     {
         "package": "langgraph.graph.state",
-        "object": "CompiledStateGraph",
-        "method": "invoke",
-        "span_name": "langgraph.graph.invoke",
-        "wrapper_method": task_wrapper,
-        "output_processor": INFERENCE
-    }
-]
+        "object": "CompiledStateGraph",
+        "method": "invoke",
+        "wrapper_method": task_wrapper,
+        "span_handler": "langgraph_agent_handler",
+        "output_processor": AGENT,
+    },
+    {
+        "package": "langgraph.graph.state",
+        "object": "CompiledStateGraph",
+        "method": "ainvoke",
+        "wrapper_method": atask_wrapper,
+        "span_handler": "langgraph_agent_handler",
+        "output_processor": AGENT,
+    },
+    {
+        "package": "langchain_core.tools.base",
+        "object": "BaseTool",
+        "method": "run",
+        "wrapper_method": task_wrapper,
+        "span_handler": "langgraph_tool_handler",
+        "output_processor": TOOLS,
+    },
+    {
+        "package": "langchain_core.tools.base",
+        "object": "BaseTool",
+        "method": "arun",
+        "wrapper_method": atask_wrapper,
+        "span_handler": "langgraph_tool_handler",
+        "output_processor": TOOLS,
+    },
+]
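Each entry describes one patch target for the instrumentor. Conceptually it resolves to a wrapt-style function wrapper; a rough sketch under the assumption that the instrumentor uses wrapt.wrap_function_wrapper (the real wiring, including span_handler lookup and span creation, lives in instrumentor.py/method_wrappers.py and is not shown in this diff):

from wrapt import wrap_function_wrapper

method = {
    "package": "langgraph.graph.state",
    "object": "CompiledStateGraph",
    "method": "invoke",
}

def task_wrapper(wrapped, instance, args, kwargs):
    # The real task_wrapper opens a span, runs the configured span handler
    # and output processor, then delegates to the wrapped method.
    return wrapped(*args, **kwargs)

# Requires langgraph to be importable; patches CompiledStateGraph.invoke in place.
wrap_function_wrapper(method["package"],
                      f"{method['object']}.{method['method']}",
                      task_wrapper)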