monocle-apptrace 0.5.0b1__py3-none-any.whl → 0.5.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (70)
  1. monocle_apptrace/exporters/file_exporter.py +2 -1
  2. monocle_apptrace/instrumentation/common/__init__.py +7 -5
  3. monocle_apptrace/instrumentation/common/constants.py +103 -12
  4. monocle_apptrace/instrumentation/common/instrumentor.py +1 -6
  5. monocle_apptrace/instrumentation/common/method_wrappers.py +10 -125
  6. monocle_apptrace/instrumentation/common/scope_wrapper.py +126 -0
  7. monocle_apptrace/instrumentation/common/span_handler.py +32 -8
  8. monocle_apptrace/instrumentation/common/utils.py +34 -3
  9. monocle_apptrace/instrumentation/common/wrapper.py +208 -41
  10. monocle_apptrace/instrumentation/common/wrapper_method.py +9 -1
  11. monocle_apptrace/instrumentation/metamodel/a2a/entities/inference.py +3 -1
  12. monocle_apptrace/instrumentation/metamodel/adk/__init__.py +0 -0
  13. monocle_apptrace/instrumentation/metamodel/adk/_helper.py +206 -0
  14. monocle_apptrace/instrumentation/metamodel/adk/entities/agent.py +111 -0
  15. monocle_apptrace/instrumentation/metamodel/adk/entities/tool.py +59 -0
  16. monocle_apptrace/instrumentation/metamodel/adk/methods.py +31 -0
  17. monocle_apptrace/instrumentation/metamodel/agents/__init__.py +0 -0
  18. monocle_apptrace/instrumentation/metamodel/agents/_helper.py +225 -0
  19. monocle_apptrace/instrumentation/metamodel/agents/agents_processor.py +174 -0
  20. monocle_apptrace/instrumentation/metamodel/agents/entities/__init__.py +0 -0
  21. monocle_apptrace/instrumentation/metamodel/agents/entities/inference.py +196 -0
  22. monocle_apptrace/instrumentation/metamodel/agents/methods.py +55 -0
  23. monocle_apptrace/instrumentation/metamodel/aiohttp/entities/http.py +2 -1
  24. monocle_apptrace/instrumentation/metamodel/anthropic/_helper.py +82 -5
  25. monocle_apptrace/instrumentation/metamodel/anthropic/entities/inference.py +6 -1
  26. monocle_apptrace/instrumentation/metamodel/azfunc/entities/http.py +2 -1
  27. monocle_apptrace/instrumentation/metamodel/azureaiinference/entities/inference.py +2 -1
  28. monocle_apptrace/instrumentation/metamodel/botocore/entities/inference.py +2 -1
  29. monocle_apptrace/instrumentation/metamodel/fastapi/entities/http.py +2 -1
  30. monocle_apptrace/instrumentation/metamodel/fastapi/methods.py +18 -18
  31. monocle_apptrace/instrumentation/metamodel/finish_types.py +79 -1
  32. monocle_apptrace/instrumentation/metamodel/flask/entities/http.py +2 -1
  33. monocle_apptrace/instrumentation/metamodel/gemini/entities/inference.py +7 -3
  34. monocle_apptrace/instrumentation/metamodel/gemini/entities/retrieval.py +2 -1
  35. monocle_apptrace/instrumentation/metamodel/gemini/methods.py +8 -1
  36. monocle_apptrace/instrumentation/metamodel/haystack/_helper.py +64 -0
  37. monocle_apptrace/instrumentation/metamodel/haystack/entities/inference.py +12 -1
  38. monocle_apptrace/instrumentation/metamodel/haystack/entities/retrieval.py +2 -1
  39. monocle_apptrace/instrumentation/metamodel/lambdafunc/entities/http.py +2 -1
  40. monocle_apptrace/instrumentation/metamodel/langchain/_helper.py +18 -0
  41. monocle_apptrace/instrumentation/metamodel/langchain/entities/inference.py +6 -1
  42. monocle_apptrace/instrumentation/metamodel/langchain/entities/retrieval.py +2 -1
  43. monocle_apptrace/instrumentation/metamodel/langgraph/_helper.py +6 -0
  44. monocle_apptrace/instrumentation/metamodel/langgraph/entities/inference.py +10 -5
  45. monocle_apptrace/instrumentation/metamodel/langgraph/langgraph_processor.py +11 -4
  46. monocle_apptrace/instrumentation/metamodel/langgraph/methods.py +27 -23
  47. monocle_apptrace/instrumentation/metamodel/litellm/__init__.py +0 -0
  48. monocle_apptrace/instrumentation/metamodel/litellm/_helper.py +89 -0
  49. monocle_apptrace/instrumentation/metamodel/litellm/entities/__init__.py +0 -0
  50. monocle_apptrace/instrumentation/metamodel/litellm/entities/inference.py +109 -0
  51. monocle_apptrace/instrumentation/metamodel/litellm/methods.py +19 -0
  52. monocle_apptrace/instrumentation/metamodel/llamaindex/entities/agent.py +9 -4
  53. monocle_apptrace/instrumentation/metamodel/llamaindex/entities/inference.py +2 -1
  54. monocle_apptrace/instrumentation/metamodel/llamaindex/entities/retrieval.py +2 -1
  55. monocle_apptrace/instrumentation/metamodel/llamaindex/llamaindex_processor.py +14 -3
  56. monocle_apptrace/instrumentation/metamodel/llamaindex/methods.py +1 -1
  57. monocle_apptrace/instrumentation/metamodel/mcp/_helper.py +2 -1
  58. monocle_apptrace/instrumentation/metamodel/mcp/entities/inference.py +3 -1
  59. monocle_apptrace/instrumentation/metamodel/mcp/mcp_processor.py +0 -5
  60. monocle_apptrace/instrumentation/metamodel/mcp/methods.py +1 -1
  61. monocle_apptrace/instrumentation/metamodel/openai/_helper.py +110 -5
  62. monocle_apptrace/instrumentation/metamodel/openai/entities/inference.py +59 -13
  63. monocle_apptrace/instrumentation/metamodel/requests/entities/http.py +2 -1
  64. monocle_apptrace/instrumentation/metamodel/teamsai/_helper.py +12 -1
  65. monocle_apptrace/instrumentation/metamodel/teamsai/entities/inference/teamsai_output_processor.py +12 -1
  66. {monocle_apptrace-0.5.0b1.dist-info → monocle_apptrace-0.5.1.dist-info}/METADATA +15 -10
  67. {monocle_apptrace-0.5.0b1.dist-info → monocle_apptrace-0.5.1.dist-info}/RECORD +70 -53
  68. {monocle_apptrace-0.5.0b1.dist-info → monocle_apptrace-0.5.1.dist-info}/WHEEL +0 -0
  69. {monocle_apptrace-0.5.0b1.dist-info → monocle_apptrace-0.5.1.dist-info}/licenses/LICENSE +0 -0
  70. {monocle_apptrace-0.5.0b1.dist-info → monocle_apptrace-0.5.1.dist-info}/licenses/NOTICE +0 -0

monocle_apptrace/instrumentation/metamodel/llamaindex/llamaindex_processor.py
@@ -1,4 +1,5 @@
 from opentelemetry.context import attach, detach, get_current, get_value, set_value, Context
+from monocle_apptrace.instrumentation.common.constants import AGENT_PREFIX_KEY
 from monocle_apptrace.instrumentation.common.span_handler import SpanHandler
 from monocle_apptrace.instrumentation.metamodel.llamaindex._helper import (
     is_delegation_tool, LLAMAINDEX_AGENT_NAME_KEY, get_agent_name
@@ -21,14 +22,15 @@ class DelegationHandler(SpanHandler):
 
         return super().hydrate_span(agent_request_wrapper, wrapped, instance, args, kwargs, result, span, parent_span, ex)
 
-
 # There are two different APIs for tool calling: FunctionTool.call() and AgentWorkflow.tool_call(). When a single agent calls a tool, only FunctionTool.call() is used. In the multi-agent case,
 # AgentWorkflow.tool_call() is used, which in turn calls FunctionTool.call(). We can't rely entirely on FunctionTool.call() to extract tool span details; in particular, the agent delegation details are not available there.
 # Hence we want to distinguish between single-agent and multi-agent tool calls. For a multi-agent tool call, we suppress the FunctionTool.call() span and use the AgentWorkflow.tool_call() span to capture the tool call details.
 class LlamaIndexToolHandler(DelegationHandler):
     def pre_tracing(self, to_wrap, wrapped, instance, args, kwargs):
-        return attach(set_value(TOOL_INVOCATION_STARTED, True))
-
+        cur_context = get_current()
+        cur_context = set_value(TOOL_INVOCATION_STARTED, True, cur_context)
+        return attach(cur_context)
+
     def post_tracing(self, to_wrap, wrapped, instance, args, kwargs, return_value, token=None):
         if token:
             detach(token)
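
The rewritten pre_tracing above switches from letting set_value read the current context implicitly to deriving it explicitly via get_current(), then attaching the new context and detaching it later with the returned token. A minimal standalone sketch of that OpenTelemetry context-token pattern (the key string here is illustrative, not the package's constant):

    from opentelemetry.context import attach, detach, get_current, get_value, set_value

    TOOL_INVOCATION_STARTED = "tool_invocation_started"  # illustrative key name

    def traced_tool_call(fn, *args, **kwargs):
        # Derive the new context from the current one so values set by
        # enclosing spans and handlers are preserved.
        token = attach(set_value(TOOL_INVOCATION_STARTED, True, get_current()))
        try:
            assert get_value(TOOL_INVOCATION_STARTED) is True  # visible to nested code
            return fn(*args, **kwargs)
        finally:
            detach(token)  # always restore the previous context
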
@@ -40,6 +42,15 @@ class LlamaIndexSingleAgenttToolHandlerWrapper(DelegationHandler):
         return super().skip_span(to_wrap, wrapped, instance, args, kwargs)
 
 class LlamaIndexAgentHandler(SpanHandler):
+    def pre_tracing(self, to_wrap, wrapped, instance, args, kwargs):
+        cur_context = get_current()
+        cur_context = set_value(AGENT_PREFIX_KEY, "handoff", cur_context)
+        return attach(cur_context)
+
+    def post_tracing(self, to_wrap, wrapped, instance, args, kwargs, return_value, token=None):
+        if token:
+            detach(token)
+
     # LlamaIndex uses a direct OpenAI call for agent inferences. Because the workflow type is set to llamaindex, the OpenAI inference does not record the input/output events.
     # To avoid this, we set the workflow type to generic for agent inference spans so we can capture the prompts and responses.
     def hydrate_span(self, to_wrap, wrapped, instance, args, kwargs, result, span, parent_span = None, ex:Exception = None) -> bool:

monocle_apptrace/instrumentation/metamodel/llamaindex/methods.py
@@ -83,7 +83,7 @@ LLAMAINDEX_METHODS = [
         "object": "AgentWorkflow",
         "method": "run",
         "span_handler": "llamaindex_agent_handler",
-        "wrapper_method": atask_wrapper,
+        "wrapper_method": task_wrapper,
         "output_processor": AGENT_REQUEST
     },
     {

monocle_apptrace/instrumentation/metamodel/mcp/_helper.py
@@ -37,7 +37,8 @@ def get_output_text(arguments):
 
 def get_name(arguments):
     """Get the name of the tool from the instance."""
-
+    if 'parent_span' in arguments:
+        arguments['parent_span'].set_attribute("is_mcp", True)
     args = arguments["args"]
     if (
         args

monocle_apptrace/instrumentation/metamodel/mcp/entities/inference.py
@@ -1,7 +1,9 @@
+from monocle_apptrace.instrumentation.common.constants import SPAN_SUBTYPES, SPAN_TYPES
 from monocle_apptrace.instrumentation.metamodel.mcp import _helper
 
 TOOLS = {
-    "type": "agentic.mcp.invocation",
+    "type": SPAN_TYPES.AGENTIC_MCP_INVOCATION,
+    "subtype": SPAN_SUBTYPES.ROUTING,
     "attributes": [
         [
             {
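
This release replaces bare span-type strings ("agentic.mcp.invocation", "http.send", "inference", "inference.framework") with SPAN_TYPES/SPAN_SUBTYPES constants across the processors below. A str-backed Enum keeps the serialized attribute values identical to the old literals; the sketch below is an assumption about how constants.py might define them, with the member names taken from the diff:

    from enum import Enum

    class SPAN_TYPES(str, Enum):
        # str mixin: members compare and serialize exactly like the old literals
        INFERENCE = "inference"
        INFERENCE_FRAMEWORK = "inference.framework"
        HTTP_SEND = "http.send"
        AGENTIC_MCP_INVOCATION = "agentic.mcp.invocation"

    class SPAN_SUBTYPES(str, Enum):
        ROUTING = "routing"

    assert SPAN_TYPES.HTTP_SEND == "http.send"  # drop-in for the old string value
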

monocle_apptrace/instrumentation/metamodel/mcp/mcp_processor.py
@@ -1,11 +1,6 @@
-from opentelemetry.context import set_value, attach, detach
 from monocle_apptrace.instrumentation.common.span_handler import SpanHandler
 from monocle_apptrace.instrumentation.metamodel.mcp._helper import (
     get_name
-
-)
-from monocle_apptrace.instrumentation.metamodel.langgraph.entities.inference import (
-    AGENT_DELEGATION, AGENT_REQUEST
 )
 
 class MCPAgentHandler(SpanHandler):

monocle_apptrace/instrumentation/metamodel/mcp/methods.py
@@ -17,5 +17,5 @@ MCP_METHODS = [
         "object": "",
         "method": "convert_mcp_tool_to_langchain_tool",
         "wrapper_method": _helper.langchain_mcp_wrapper,
-    },
+    }
 ]

monocle_apptrace/instrumentation/metamodel/openai/_helper.py
@@ -3,7 +3,9 @@ This module provides utility functions for extracting system, user,
 and assistant messages from various input formats.
 """
 
+import json
 import logging
+from opentelemetry.context import get_value
 from monocle_apptrace.instrumentation.common.utils import (
     Option,
     get_json_dumps,
@@ -17,7 +19,7 @@ from monocle_apptrace.instrumentation.metamodel.finish_types import (
     map_openai_finish_reason_to_finish_type,
     OPENAI_FINISH_REASON_MAPPING
 )
-from monocle_apptrace.instrumentation.common.constants import CHILD_ERROR_CODE
+from monocle_apptrace.instrumentation.common.constants import AGENT_PREFIX_KEY, CHILD_ERROR_CODE, INFERENCE_AGENT_DELEGATION, INFERENCE_TURN_END, INFERENCE_TOOL_CALL
 
 logger = logging.getLogger(__name__)
 
@@ -28,7 +30,7 @@ def extract_messages(kwargs):
         if 'instructions' in kwargs:
             messages.append({'system': kwargs.get('instructions', {})})
         if 'input' in kwargs:
-            if isinstance(kwargs['input'], str):
+            if isinstance(kwargs['input'], str):
                 messages.append({'user': kwargs.get('input', "")})
             # [
             #     {
@@ -41,13 +43,81 @@ def extract_messages(kwargs):
             #     }
             # ]
             if isinstance(kwargs['input'], list):
+                # kwargs['input']
+                # [
+                #     {
+                #         "content": "I need to book a flight from NYC to LAX and also book the Hilton hotel in Los Angeles. Also check the weather in Los Angeles.",
+                #         "role": "user"
+                #     },
+                #     {
+                #         "arguments": "{}",
+                #         "call_id": "call_dSljcToR2LWwqWibPt0qjeHD",
+                #         "name": "transfer_to_flight_agent",
+                #         "type": "function_call",
+                #         "id": "fc_689c30f96f708191aabb0ffd8098cdbd016ef325124ac05f",
+                #         "status": "completed"
+                #     },
+                #     {
+                #         "arguments": "{}",
+                #         "call_id": "call_z0MTZroziWDUd0fxVemGM5Pg",
+                #         "name": "transfer_to_hotel_agent",
+                #         "type": "function_call",
+                #         "id": "fc_689c30f99b808191a8743ff407fa8ee2016ef325124ac05f",
+                #         "status": "completed"
+                #     },
+                #     {
+                #         "arguments": "{\"city\":\"Los Angeles\"}",
+                #         "call_id": "call_rrdRSPv5vcB4pgl6P4W8U2bX",
+                #         "name": "get_weather_tool",
+                #         "type": "function_call",
+                #         "id": "fc_689c30f9b824819196d4ad9379d570f7016ef325124ac05f",
+                #         "status": "completed"
+                #     },
+                #     {
+                #         "call_id": "call_rrdRSPv5vcB4pgl6P4W8U2bX",
+                #         "output": "The weather in Los Angeles is sunny and 75.",
+                #         "type": "function_call_output"
+                #     },
+                #     {
+                #         "call_id": "call_z0MTZroziWDUd0fxVemGM5Pg",
+                #         "output": "Multiple handoffs detected, ignoring this one.",
+                #         "type": "function_call_output"
+                #     },
+                #     {
+                #         "call_id": "call_dSljcToR2LWwqWibPt0qjeHD",
+                #         "output": "{\"assistant\": \"Flight Agent\"}",
+                #         "type": "function_call_output"
+                #     }
+                # ]
                 for item in kwargs['input']:
                     if isinstance(item, dict) and 'role' in item and 'content' in item:
                         messages.append({item['role']: item['content']})
+                    elif isinstance(item, dict) and 'type' in item and item['type'] == 'function_call':
+                        messages.append({
+                            "tool_function": item.get("name", ""),
+                            "tool_arguments": item.get("arguments", ""),
+                            "call_id": item.get("call_id", "")
+                        })
+                    elif isinstance(item, dict) and 'type' in item and item['type'] == 'function_call_output':
+                        messages.append({
+                            "call_id": item.get("call_id", ""),
+                            "output": item.get("output", "")
+                        })
         if 'messages' in kwargs and len(kwargs['messages']) >0:
             for msg in kwargs['messages']:
                 if msg.get('content') and msg.get('role'):
                     messages.append({msg['role']: msg['content']})
+                elif msg.get('tool_calls') and msg.get('role'):
+                    try:
+                        tool_call_messages = []
+                        for tool_call in msg['tool_calls']:
+                            tool_call_messages.append(get_json_dumps({
+                                "tool_function": tool_call.function.name,
+                                "tool_arguments": tool_call.function.arguments,
+                            }))
+                        messages.append({msg['role']: tool_call_messages})
+                    except Exception as e:
+                        logger.warning("Warning: Error occurred while processing tool calls: %s", str(e))
 
         return [get_json_dumps(message) for message in messages]
     except Exception as e:
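
The new branches above flatten OpenAI Responses-API input items into single-key dicts before JSON-encoding them. A standalone illustration of that transformation (get_json_dumps is approximated with json.dumps; the call id is made up):

    import json

    input_items = [
        {"role": "user", "content": "Book a flight from NYC to LAX."},
        {"type": "function_call", "name": "transfer_to_flight_agent",
         "arguments": "{}", "call_id": "call_123"},
        {"type": "function_call_output", "call_id": "call_123",
         "output": '{"assistant": "Flight Agent"}'},
    ]

    messages = []
    for item in input_items:
        if "role" in item and "content" in item:
            messages.append({item["role"]: item["content"]})          # plain chat turn
        elif item.get("type") == "function_call":
            messages.append({"tool_function": item.get("name", ""),  # tool request
                             "tool_arguments": item.get("arguments", ""),
                             "call_id": item.get("call_id", "")})
        elif item.get("type") == "function_call_output":
            messages.append({"call_id": item.get("call_id", ""),     # tool result
                             "output": item.get("output", "")})

    print([json.dumps(m) for m in messages])
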
@@ -61,6 +131,29 @@ def extract_assistant_message(arguments):
         status = get_status_code(arguments)
         if status == 'success' or status == 'completed':
             response = arguments["result"]
+            if hasattr(response, "tools") and isinstance(response.tools, list) and len(response.tools) > 0 and isinstance(response.tools[0], dict):
+                tools = []
+                for tool in response.tools:
+                    tools.append({
+                        "tool_id": tool.get("id", ""),
+                        "tool_name": tool.get("name", ""),
+                        "tool_arguments": tool.get("arguments", "")
+                    })
+                messages.append({"tools": tools})
+            if hasattr(response, "output") and isinstance(response.output, list) and len(response.output) > 0:
+                response_messages = []
+                role = "assistant"
+                for response_message in response.output:
+                    if(response_message.type == "function_call"):
+                        role = "tools"
+                        response_messages.append({
+                            "tool_id": response_message.call_id,
+                            "tool_name": response_message.name,
+                            "tool_arguments": response_message.arguments
+                        })
+                if len(response_messages) > 0:
+                    messages.append({role: response_messages})
+
             if hasattr(response, "output_text") and len(response.output_text):
                 role = response.role if hasattr(response, "role") else "assistant"
                 messages.append({role: response.output_text})
@@ -82,7 +175,7 @@ def extract_assistant_message(arguments):
             return get_exception_message(arguments)
         elif hasattr(arguments["result"], "error"):
             return arguments["result"].error
-
+
     except (IndexError, AttributeError) as e:
         logger.warning(
             "Warning: Error occurred in extract_assistant_message: %s", str(e)
@@ -194,11 +287,11 @@ def extract_finish_reason(arguments):
         if hasattr(arguments["exception"], "code") and arguments["exception"].code in OPENAI_FINISH_REASON_MAPPING.keys():
             return arguments["exception"].code
         response = arguments["result"]
-
+
         # Handle streaming responses
         if hasattr(response, "finish_reason") and response.finish_reason:
             return response.finish_reason
-
+
         # Handle non-streaming responses
         if response is not None and hasattr(response, "choices") and len(response.choices) > 0:
             if hasattr(response.choices[0], "finish_reason"):
@@ -211,3 +304,15 @@ def extract_finish_reason(arguments):
 def map_finish_reason_to_finish_type(finish_reason):
     """Map OpenAI finish_reason to finish_type based on the possible errors mapping"""
     return map_openai_finish_reason_to_finish_type(finish_reason)
+
+def agent_inference_type(arguments):
+    """Extract agent inference type from OpenAI response"""
+    message = json.loads(extract_assistant_message(arguments))
+    # message["tools"][0]["tool_name"]
+    if message and message.get("tools") and isinstance(message["tools"], list) and len(message["tools"]) > 0:
+        agent_prefix = get_value(AGENT_PREFIX_KEY)
+        tool_name = message["tools"][0].get("tool_name", "")
+        if tool_name and agent_prefix and tool_name.startswith(agent_prefix):
+            return INFERENCE_AGENT_DELEGATION
+        return INFERENCE_TOOL_CALL
+    return INFERENCE_TURN_END
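
agent_inference_type classifies an inference turn by the first tool call in the extracted assistant message: a tool name starting with the agent prefix stored in the OTel context (e.g. "handoff", as set by LlamaIndexAgentHandler) marks an agent delegation, any other tool call marks a plain tool invocation, and no tool call means the turn ended. A standalone sketch of the decision, with illustrative constant values:

    # Illustrative values; the real INFERENCE_* constants live in common/constants.py.
    INFERENCE_AGENT_DELEGATION = "agent_delegation"
    INFERENCE_TOOL_CALL = "tool_call"
    INFERENCE_TURN_END = "turn_end"

    def classify(message, agent_prefix):
        tools = message.get("tools") or []
        if tools:
            tool_name = tools[0].get("tool_name", "")
            # Delegation tools are named <prefix>_<agent>, e.g. handoff_to_flight_agent
            if tool_name and agent_prefix and tool_name.startswith(agent_prefix):
                return INFERENCE_AGENT_DELEGATION
            return INFERENCE_TOOL_CALL
        return INFERENCE_TURN_END

    assert classify({"tools": [{"tool_name": "handoff_to_flight_agent"}]}, "handoff") == INFERENCE_AGENT_DELEGATION
    assert classify({"tools": [{"tool_name": "get_weather_tool"}]}, "handoff") == INFERENCE_TOOL_CALL
    assert classify({}, "handoff") == INFERENCE_TURN_END
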

monocle_apptrace/instrumentation/metamodel/openai/entities/inference.py
@@ -2,13 +2,14 @@ import logging
 import random
 import time
 from types import SimpleNamespace
+from monocle_apptrace.instrumentation.common.constants import SPAN_TYPES
 from monocle_apptrace.instrumentation.metamodel.openai import (
     _helper,
 )
 from monocle_apptrace.instrumentation.common.utils import (
     get_error_message,
     patch_instance_method,
-    resolve_from_alias
+    resolve_from_alias,
 )
 
 logger = logging.getLogger(__name__)
@@ -17,7 +18,11 @@ logger = logging.getLogger(__name__)
 def _process_stream_item(item, state):
     """Process a single stream item and update state."""
     try:
-        if hasattr(item, "type") and isinstance(item.type, str) and item.type.startswith("response."):
+        if (
+            hasattr(item, "type")
+            and isinstance(item.type, str)
+            and item.type.startswith("response.")
+        ):
             if state["waiting_for_first_token"]:
                 state["waiting_for_first_token"] = False
                 state["first_token_time"] = time.time_ns()
@@ -40,7 +45,11 @@ def _process_stream_item(item, state):
                 state["first_token_time"] = time.time_ns()
 
             state["accumulated_response"] += item.choices[0].delta.content
-        elif hasattr(item, "object") and item.object == "chat.completion.chunk" and item.usage:
+        elif (
+            hasattr(item, "object")
+            and item.object == "chat.completion.chunk"
+            and item.usage
+        ):
             # Handle the case where the response is a chunk
             state["token_usage"] = item.usage
             state["stream_closed_time"] = time.time_ns()
@@ -49,7 +58,7 @@ def _process_stream_item(item, state):
             hasattr(item, "choices")
             and item.choices
             and len(item.choices) > 0
-            and hasattr(item.choices[0], 'finish_reason')
+            and hasattr(item.choices[0], "finish_reason")
             and item.choices[0].finish_reason
         ):
             finish_reason = item.choices[0].finish_reason
@@ -65,6 +74,37 @@ def _process_stream_item(item, state):
 
 
 def _create_span_result(state, stream_start_time):
+    # extract tool calls from the accumulated_temp_list
+    # this can only be done when all the streaming is complete.
+    for item in state["accumulated_temp_list"]:
+        try:
+            if (
+                item.choices
+                and isinstance(item.choices, list)
+                and hasattr(item.choices[0], "delta")
+                and hasattr(item.choices[0].delta, "tool_calls")
+                and item.choices[0].delta.tool_calls
+                and item.choices[0].delta.tool_calls[0].id
+                and item.choices[0].delta.tool_calls[0].function
+            ):
+                state["tools"] = state.get("tools", [])
+                state["tools"].append(
+                    {
+                        "id": item.choices[0].delta.tool_calls[0].id,
+                        "name": item.choices[0].delta.tool_calls[0].function.name,
+                        "arguments": item.choices[0]
+                        .delta.tool_calls[0]
+                        .function.arguments,
+                    }
+                )
+            if (item.choices and item.choices[0].finish_reason):
+                state["finish_reason"] = item.choices[0].finish_reason
+        except Exception as e:
+            logger.warning(
+                "Warning: Error occurred while processing tool calls: %s",
+                str(e),
+            )
+
     """Create the span result object."""
     return SimpleNamespace(
         type="stream",
@@ -75,14 +115,15 @@ def _create_span_result(state, stream_start_time):
             "metadata": int(state["stream_closed_time"] or time.time_ns()),
         },
         output_text=state["accumulated_response"],
+        tools=state["tools"] if "tools" in state else None,
         usage=state["token_usage"],
-        finish_reason=state["finish_reason"]
+        finish_reason=state["finish_reason"],
     )
 
 
 def process_stream(to_wrap, response, span_processor):
     stream_start_time = time.time_ns()
-
+
     # Shared state for both sync and async processing
     state = {
         "waiting_for_first_token": True,
@@ -108,7 +149,7 @@ def process_stream(to_wrap, response, span_processor):
             span_processor(ret_val)
 
     patch_instance_method(response, "__iter__", new_iter)
-
+
     if to_wrap and hasattr(response, "__aiter__"):
         original_iter = response.__aiter__
@@ -125,7 +166,7 @@ def process_stream(to_wrap, response, span_processor):
 
 
 INFERENCE = {
-    "type": "inference",
+    "type": SPAN_TYPES.INFERENCE,
     "is_auto_close": lambda kwargs: kwargs.get("stream", False) is False,
     "response_processor": process_stream,
     "attributes": [
@@ -200,10 +241,9 @@ INFERENCE = {
         {
             "name": "data.output",
             "attributes": [
-
                 {
                     "attribute": "error_code",
-                    "accessor": lambda arguments: get_error_message(arguments)
+                    "accessor": lambda arguments: get_error_message(arguments),
                 },
                 {
                     "_comment": "this is result from LLM",
@@ -211,7 +251,7 @@ INFERENCE = {
                     "accessor": lambda arguments: _helper.extract_assistant_message(
                         arguments,
                     ),
-                }
+                },
             ],
         },
         {
@@ -226,14 +266,20 @@ INFERENCE = {
                 {
                     "_comment": "finish reason from OpenAI response",
                     "attribute": "finish_reason",
-                    "accessor": lambda arguments: _helper.extract_finish_reason(arguments)
+                    "accessor": lambda arguments: _helper.extract_finish_reason(
+                        arguments
+                    ),
                 },
                 {
                     "_comment": "finish type mapped from finish reason",
                     "attribute": "finish_type",
                     "accessor": lambda arguments: _helper.map_finish_reason_to_finish_type(
                         _helper.extract_finish_reason(arguments)
-                    )
+                    ),
+                },
+                {
+                    "attribute": "inference_sub_type",
+                    "accessor": lambda arguments: _helper.agent_inference_type(arguments)
                 }
             ],
         },

monocle_apptrace/instrumentation/metamodel/requests/entities/http.py
@@ -1,6 +1,7 @@
+from monocle_apptrace.instrumentation.common.constants import SPAN_TYPES
 from monocle_apptrace.instrumentation.metamodel.requests import _helper
 REQUEST_HTTP_PROCESSOR = {
-    "type": "http.send",
+    "type": SPAN_TYPES.HTTP_SEND,
     "attributes": [
         [
             {

monocle_apptrace/instrumentation/metamodel/teamsai/_helper.py
@@ -1,3 +1,4 @@
+import json
 import logging
 from monocle_apptrace.instrumentation.common.utils import (
     Option,
@@ -170,4 +171,14 @@ def extract_inference_endpoint(instance):
     if inference_endpoint.is_none() and "meta" in instance.client.__dict__:
         inference_endpoint = try_option(getattr, instance.client.meta, 'endpoint_url').map(str)
 
-    return inference_endpoint.unwrap_or(extract_provider_name(instance))
+    return inference_endpoint.unwrap_or(extract_provider_name(instance))
+
+def agent_inference_type(arguments):
+    """
+    Extracts the agent inference type from the arguments.
+    """
+    output = extract_assistant_message(arguments)
+    command = json.loads(json.loads(output).get("assistant", "")).get("action", "").get("name")
+    if command == "SAY":
+        return "turn"
+    return "tool_call"

monocle_apptrace/instrumentation/metamodel/teamsai/entities/inference/teamsai_output_processor.py
@@ -1,9 +1,10 @@
+from monocle_apptrace.instrumentation.common.constants import SPAN_TYPES
 from monocle_apptrace.instrumentation.metamodel.teamsai import (
     _helper,
 )
 from monocle_apptrace.instrumentation.common.utils import get_error_message, get_llm_type
 TEAMAI_OUTPUT_PROCESSOR = {
-    "type": "inference.framework",
+    "type": SPAN_TYPES.INFERENCE_FRAMEWORK,
     "attributes": [
         [
             {
@@ -66,5 +67,15 @@ TEAMAI_OUTPUT_PROCESSOR = {
                 }
             ]
         },
+        {
+            "name": "metadata",
+            "_comment": "metadata for Teams AI",
+            "attributes": [
+                {
+                    "attribute": "inference_sub_type",
+                    "accessor": lambda arguments: _helper.agent_inference_type(arguments)
+                }
+            ]
+        }
     ]
 }

{monocle_apptrace-0.5.0b1.dist-info → monocle_apptrace-0.5.1.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: monocle_apptrace
-Version: 0.5.0b1
+Version: 0.5.1
 Summary: package with monocle genAI tracing
 Project-URL: Homepage, https://github.com/monocle2ai/monocle
 Project-URL: Issues, https://github.com/monocle2ai/monocle/issues
@@ -25,12 +25,14 @@ Requires-Dist: anthropic-haystack; extra == 'dev'
 Requires-Dist: anthropic==0.57.1; extra == 'dev'
 Requires-Dist: azure-storage-blob==12.22.0; extra == 'dev'
 Requires-Dist: boto3==1.37.24; extra == 'dev'
-Requires-Dist: chromadb==1.0.10; extra == 'dev'
+Requires-Dist: chromadb==1.0.15; extra == 'dev'
 Requires-Dist: click==8.2.1; extra == 'dev'
 Requires-Dist: datasets==2.20.0; extra == 'dev'
 Requires-Dist: faiss-cpu==1.8.0; extra == 'dev'
-Requires-Dist: fastapi>=0.115.1; extra == 'dev'
+Requires-Dist: fastapi>=0.115.0; extra == 'dev'
 Requires-Dist: flask; extra == 'dev'
+Requires-Dist: google-adk==1.10.0; extra == 'dev'
+Requires-Dist: google-generativeai==0.8.5; extra == 'dev'
 Requires-Dist: haystack-ai==2.3.0; extra == 'dev'
 Requires-Dist: httpx==0.28.1; extra == 'dev'
 Requires-Dist: instructorembedding==1.0.1; extra == 'dev'
@@ -46,16 +48,19 @@ Requires-Dist: langchain==0.3.25; extra == 'dev'
 Requires-Dist: langchainhub==0.1.21; extra == 'dev'
 Requires-Dist: langgraph-supervisor==0.0.28; extra == 'dev'
 Requires-Dist: langgraph==0.5.4; extra == 'dev'
-Requires-Dist: llama-index-embeddings-huggingface==0.5.4; extra == 'dev'
-Requires-Dist: llama-index-llms-anthropic==0.6.19; extra == 'dev'
-Requires-Dist: llama-index-llms-azure-openai==0.3.2; extra == 'dev'
-Requires-Dist: llama-index-llms-mistralai==0.4.0; extra == 'dev'
-Requires-Dist: llama-index-vector-stores-chroma==0.4.1; extra == 'dev'
-Requires-Dist: llama-index-vector-stores-opensearch==0.5.4; extra == 'dev'
-Requires-Dist: llama-index==0.12.37; extra == 'dev'
+Requires-Dist: llama-index-embeddings-huggingface==0.6.0; extra == 'dev'
+Requires-Dist: llama-index-llms-anthropic==0.8.1; extra == 'dev'
+Requires-Dist: llama-index-llms-azure-openai==0.4.0; extra == 'dev'
+Requires-Dist: llama-index-llms-mistralai==0.7.0; extra == 'dev'
+Requires-Dist: llama-index-llms-openai==0.5.0; extra == 'dev'
+Requires-Dist: llama-index-tools-mcp==0.3.0; extra == 'dev'
+Requires-Dist: llama-index-vector-stores-chroma==0.5.0; extra == 'dev'
+Requires-Dist: llama-index-vector-stores-opensearch==0.6.0; extra == 'dev'
+Requires-Dist: llama-index==0.13.0; extra == 'dev'
 Requires-Dist: mcp==1.12.1; extra == 'dev'
 Requires-Dist: mistral-haystack==0.0.2; extra == 'dev'
 Requires-Dist: numpy==1.26.4; extra == 'dev'
+Requires-Dist: openai-agents==0.2.6; extra == 'dev'
 Requires-Dist: opendal==0.45.14; extra == 'dev'
 Requires-Dist: opensearch-haystack==1.2.0; extra == 'dev'
 Requires-Dist: opentelemetry-instrumentation-flask; extra == 'dev'