monocle-apptrace 0.4.1__py3-none-any.whl → 0.5.0b1__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.

Files changed (74)
  1. monocle_apptrace/__main__.py +1 -1
  2. monocle_apptrace/exporters/file_exporter.py +123 -36
  3. monocle_apptrace/instrumentation/common/__init__.py +16 -1
  4. monocle_apptrace/instrumentation/common/constants.py +6 -1
  5. monocle_apptrace/instrumentation/common/instrumentor.py +19 -152
  6. monocle_apptrace/instrumentation/common/method_wrappers.py +380 -0
  7. monocle_apptrace/instrumentation/common/span_handler.py +39 -24
  8. monocle_apptrace/instrumentation/common/utils.py +20 -14
  9. monocle_apptrace/instrumentation/common/wrapper.py +10 -9
  10. monocle_apptrace/instrumentation/common/wrapper_method.py +40 -1
  11. monocle_apptrace/instrumentation/metamodel/a2a/__init__.py +0 -0
  12. monocle_apptrace/instrumentation/metamodel/a2a/_helper.py +37 -0
  13. monocle_apptrace/instrumentation/metamodel/a2a/entities/__init__.py +0 -0
  14. monocle_apptrace/instrumentation/metamodel/a2a/entities/inference.py +112 -0
  15. monocle_apptrace/instrumentation/metamodel/a2a/methods.py +22 -0
  16. monocle_apptrace/instrumentation/metamodel/aiohttp/_helper.py +6 -11
  17. monocle_apptrace/instrumentation/metamodel/anthropic/_helper.py +35 -18
  18. monocle_apptrace/instrumentation/metamodel/anthropic/entities/inference.py +14 -10
  19. monocle_apptrace/instrumentation/metamodel/azfunc/_helper.py +13 -11
  20. monocle_apptrace/instrumentation/metamodel/azfunc/entities/http.py +5 -0
  21. monocle_apptrace/instrumentation/metamodel/azureaiinference/_helper.py +88 -8
  22. monocle_apptrace/instrumentation/metamodel/azureaiinference/entities/inference.py +22 -8
  23. monocle_apptrace/instrumentation/metamodel/botocore/_helper.py +92 -16
  24. monocle_apptrace/instrumentation/metamodel/botocore/entities/inference.py +13 -8
  25. monocle_apptrace/instrumentation/metamodel/botocore/handlers/botocore_span_handler.py +1 -1
  26. monocle_apptrace/instrumentation/metamodel/fastapi/__init__.py +0 -0
  27. monocle_apptrace/instrumentation/metamodel/fastapi/_helper.py +82 -0
  28. monocle_apptrace/instrumentation/metamodel/fastapi/entities/__init__.py +0 -0
  29. monocle_apptrace/instrumentation/metamodel/fastapi/entities/http.py +44 -0
  30. monocle_apptrace/instrumentation/metamodel/fastapi/methods.py +23 -0
  31. monocle_apptrace/instrumentation/metamodel/finish_types.py +387 -0
  32. monocle_apptrace/instrumentation/metamodel/flask/_helper.py +6 -11
  33. monocle_apptrace/instrumentation/metamodel/gemini/__init__.py +0 -0
  34. monocle_apptrace/instrumentation/metamodel/gemini/_helper.py +120 -0
  35. monocle_apptrace/instrumentation/metamodel/gemini/entities/__init__.py +0 -0
  36. monocle_apptrace/instrumentation/metamodel/gemini/entities/inference.py +83 -0
  37. monocle_apptrace/instrumentation/metamodel/gemini/entities/retrieval.py +43 -0
  38. monocle_apptrace/instrumentation/metamodel/gemini/methods.py +24 -0
  39. monocle_apptrace/instrumentation/metamodel/haystack/_helper.py +15 -8
  40. monocle_apptrace/instrumentation/metamodel/haystack/entities/inference.py +5 -10
  41. monocle_apptrace/instrumentation/metamodel/haystack/methods.py +7 -0
  42. monocle_apptrace/instrumentation/metamodel/lambdafunc/_helper.py +78 -0
  43. monocle_apptrace/instrumentation/metamodel/lambdafunc/entities/http.py +51 -0
  44. monocle_apptrace/instrumentation/metamodel/lambdafunc/methods.py +23 -0
  45. monocle_apptrace/instrumentation/metamodel/lambdafunc/wrapper.py +23 -0
  46. monocle_apptrace/instrumentation/metamodel/langchain/_helper.py +127 -19
  47. monocle_apptrace/instrumentation/metamodel/langchain/entities/inference.py +15 -10
  48. monocle_apptrace/instrumentation/metamodel/langgraph/_helper.py +67 -10
  49. monocle_apptrace/instrumentation/metamodel/langgraph/entities/inference.py +127 -20
  50. monocle_apptrace/instrumentation/metamodel/langgraph/langgraph_processor.py +43 -0
  51. monocle_apptrace/instrumentation/metamodel/langgraph/methods.py +29 -5
  52. monocle_apptrace/instrumentation/metamodel/llamaindex/_helper.py +227 -16
  53. monocle_apptrace/instrumentation/metamodel/llamaindex/entities/agent.py +127 -10
  54. monocle_apptrace/instrumentation/metamodel/llamaindex/entities/inference.py +13 -8
  55. monocle_apptrace/instrumentation/metamodel/llamaindex/llamaindex_processor.py +51 -0
  56. monocle_apptrace/instrumentation/metamodel/llamaindex/methods.py +68 -1
  57. monocle_apptrace/instrumentation/metamodel/mcp/__init__.py +0 -0
  58. monocle_apptrace/instrumentation/metamodel/mcp/_helper.py +118 -0
  59. monocle_apptrace/instrumentation/metamodel/mcp/entities/__init__.py +0 -0
  60. monocle_apptrace/instrumentation/metamodel/mcp/entities/inference.py +48 -0
  61. monocle_apptrace/instrumentation/metamodel/mcp/mcp_processor.py +13 -0
  62. monocle_apptrace/instrumentation/metamodel/mcp/methods.py +21 -0
  63. monocle_apptrace/instrumentation/metamodel/openai/_helper.py +83 -16
  64. monocle_apptrace/instrumentation/metamodel/openai/entities/inference.py +103 -92
  65. monocle_apptrace/instrumentation/metamodel/openai/entities/retrieval.py +1 -1
  66. monocle_apptrace/instrumentation/metamodel/teamsai/_helper.py +41 -22
  67. monocle_apptrace/instrumentation/metamodel/teamsai/entities/inference/actionplanner_output_processor.py +1 -1
  68. monocle_apptrace/instrumentation/metamodel/teamsai/entities/inference/teamsai_output_processor.py +5 -9
  69. monocle_apptrace/instrumentation/metamodel/teamsai/sample.json +0 -4
  70. {monocle_apptrace-0.4.1.dist-info → monocle_apptrace-0.5.0b1.dist-info}/METADATA +14 -3
  71. {monocle_apptrace-0.4.1.dist-info → monocle_apptrace-0.5.0b1.dist-info}/RECORD +74 -44
  72. {monocle_apptrace-0.4.1.dist-info → monocle_apptrace-0.5.0b1.dist-info}/WHEEL +0 -0
  73. {monocle_apptrace-0.4.1.dist-info → monocle_apptrace-0.5.0b1.dist-info}/licenses/LICENSE +0 -0
  74. {monocle_apptrace-0.4.1.dist-info → monocle_apptrace-0.5.0b1.dist-info}/licenses/NOTICE +0 -0
monocle_apptrace/instrumentation/metamodel/llamaindex/entities/agent.py
@@ -3,21 +3,23 @@ from monocle_apptrace.instrumentation.metamodel.llamaindex import (
  )

  AGENT = {
- "type": "agent",
+ "type": "agentic.invocation",
  "attributes": [
  [
  {
- "_comment": "Agent name, type and Tools.",
- "attribute": "name",
- "accessor": lambda arguments: arguments['instance'].__class__.__name__
+ "_comment": "agent type",
+ "attribute": "type",
+ "accessor": lambda arguments:'agent.llamaindex'
  },
  {
- "attribute": "type",
- "accessor": lambda arguments: 'Agent.oai'
+ "_comment": "Agent name, type and Tools.",
+ "attribute": "name",
+ "accessor": lambda arguments: _helper.get_agent_name(arguments['instance'])
  },
  {
- "attribute": "tools",
- "accessor": lambda arguments: _helper.extract_tools(arguments['instance'])
+ "_comment": "agent description",
+ "attribute": "description",
+ "accessor": lambda arguments: _helper.get_agent_description(arguments['instance'])
  }
  ]

@@ -29,7 +31,7 @@ AGENT = {
  {
  "_comment": "this is instruction and user query to LLM",
  "attribute": "input",
- "accessor": lambda arguments: _helper.extract_messages(arguments['args'])
+ "accessor": lambda arguments: _helper.extract_agent_input(arguments['args'])
  }
  ]
  },
@@ -39,9 +41,124 @@ AGENT = {
  {
  "_comment": "this is response from LLM",
  "attribute": "response",
- "accessor": lambda arguments: _helper.extract_assistant_message(arguments['result'])
+ "accessor": lambda arguments: _helper.extract_agent_response(arguments)
+ }
+ ]
+ }
+ ]
+ }
+
+ AGENT_REQUEST = {
+ "type": "agentic.request",
+ "attributes": [
+ [
+ {
+ "_comment": "agent request type",
+ "attribute": "type",
+ "accessor": lambda arguments:'agent.llamaindex'
+ }
+ ]
+ ],
+ "events": [
+ {
+ "name": "data.input",
+ "attributes": [
+ {
+ "_comment": "this is request to LLM",
+ "attribute": "input",
+ "accessor": lambda arguments: _helper.extract_agent_request_input(arguments['kwargs'])
+ }
+ ]
+ },
+ {
+ "name": "data.output",
+ "attributes": [
+ {
+ "_comment": "this is response from LLM",
+ "attribute": "response",
+ "accessor": lambda arguments: _helper.extract_agent_request_output(arguments)
  }
  ]
  }
  ]
  }
+
+ TOOLS = {
+ "type": "agentic.tool.invocation",
+ "attributes": [
+ [
+ {
+ "_comment": "tool type",
+ "attribute": "type",
+ "accessor": lambda arguments:'tool.llamaindex'
+ },
+ {
+ "_comment": "name of the tool",
+ "attribute": "name",
+ "accessor": lambda arguments: _helper.get_tool_name(arguments['args'], arguments['instance'])
+ },
+ {
+ "_comment": "tool description",
+ "attribute": "description",
+ "accessor": lambda arguments: _helper.get_tool_description(arguments)
+ }
+ ],
+ [
+ {
+ "_comment": "name of the agent",
+ "attribute": "name",
+ "accessor": lambda arguments: _helper.get_source_agent(arguments['parent_span'])
+ },
+ {
+ "_comment": "agent type",
+ "attribute": "type",
+ "accessor": lambda arguments:'agent.llamaindex'
+ }
+ ]
+ ],
+ "events": [
+ {
+ "name":"data.input",
+ "attributes": [
+ {
+ "_comment": "this is Tool input",
+ "attribute": "Inputs",
+ "accessor": lambda arguments: _helper.extract_tool_args(arguments)
+ }
+ ]
+ },
+ {
+ "name":"data.output",
+ "attributes": [
+ {
+ "_comment": "this is response from Tool",
+ "attribute": "response",
+ "accessor": lambda arguments: _helper.extract_tool_response(arguments['result'])
+ }
+ ]
+ }
+ ]
+ }
+
+ AGENT_DELEGATION = {
+ "type": "agentic.delegation",
+ "attributes": [
+ [
+ {
+ "_comment": "agent type",
+ "attribute": "type",
+ "accessor": lambda arguments:'agent.llamaindex'
+ },
+ {
+ "_comment": "name of the agent",
+ "attribute": "from_agent",
+ "accessor": lambda arguments: _helper.get_source_agent(arguments['parent_span'])
+ },
+ {
+ "_comment": "name of the agent called",
+ "attribute": "to_agent",
+ "accessor": lambda arguments: _helper.get_target_agent(arguments['result'])
+ }
+ ]
+ ]
+ }
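The `AGENT`, `AGENT_REQUEST`, `TOOLS`, and `AGENT_DELEGATION` dictionaries above are output processors: each span attribute is produced by an `accessor` lambda that receives a single `arguments` dict (`instance`, `args`, `kwargs`, `result`, `parent_span`, ...). A minimal sketch of how such a processor could be evaluated; the `entity.N.*` naming and the stand-in agent class are assumptions for illustration, not monocle's actual evaluator:

```python
# Illustrative only: apply an output-processor-style dict to an `arguments` payload.
class FakeAgent:
    name = "weather_agent"
    description = "Answers weather questions"

processor = {
    "type": "agentic.invocation",
    "attributes": [[
        {"attribute": "type", "accessor": lambda arguments: "agent.llamaindex"},
        {"attribute": "name", "accessor": lambda arguments: arguments["instance"].name},
        {"attribute": "description", "accessor": lambda arguments: arguments["instance"].description},
    ]],
}

arguments = {"instance": FakeAgent(), "args": (), "kwargs": {}, "result": None, "parent_span": None}

span_attributes = {"span.type": processor["type"]}
for index, group in enumerate(processor["attributes"], start=1):
    for entry in group:
        # Every accessor gets the same arguments dict; groups become indexed entities.
        span_attributes[f"entity.{index}.{entry['attribute']}"] = entry["accessor"](arguments)

print(span_attributes)
```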
monocle_apptrace/instrumentation/metamodel/llamaindex/entities/inference.py
@@ -1,7 +1,7 @@
  from monocle_apptrace.instrumentation.metamodel.llamaindex import (
  _helper,
  )
- from monocle_apptrace.instrumentation.common.utils import resolve_from_alias, get_llm_type, get_status, get_status_code
+ from monocle_apptrace.instrumentation.common.utils import get_error_message, resolve_from_alias, get_llm_type, get_status, get_status_code

  INFERENCE = {
  "type": "inference.framework",
@@ -54,13 +54,8 @@ INFERENCE = {
  "name": "data.output",
  "attributes": [
  {
- "_comment": "this is response from LLM",
- "attribute": "status",
- "accessor": lambda arguments: get_status(arguments)
- },
- {
- "attribute": "status_code",
- "accessor": lambda arguments: get_status_code(arguments)
+ "attribute": "error_code",
+ "accessor": lambda arguments: get_error_message(arguments)
  },
  {
  "attribute": "response",
@@ -74,6 +69,16 @@ INFERENCE = {
  {
  "_comment": "this is metadata usage from LLM",
  "accessor": lambda arguments: _helper.update_span_from_llm_response(arguments['result'],arguments['instance'])
+ },
+ {
+ "attribute": "finish_reason",
+ "accessor": lambda arguments: _helper.extract_finish_reason(arguments)
+ },
+ {
+ "attribute": "finish_type",
+ "accessor": lambda arguments: _helper.map_finish_reason_to_finish_type(
+ _helper.extract_finish_reason(arguments)
+ )
  }
  ]
  }
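The new `finish_reason`/`finish_type` metadata attributes are backed by the new `finish_types.py` module (+387 lines) via the `_helper` functions. A hypothetical sketch of the kind of normalization `map_finish_reason_to_finish_type` performs; the shipped mapping in `monocle_apptrace/instrumentation/metamodel/finish_types.py` may differ:

```python
# Hypothetical mapping from provider-specific finish_reason values to a normalized
# finish_type; illustrative only, not the table that ships in finish_types.py.
_FINISH_TYPE_BY_REASON = {
    "stop": "success",
    "end_turn": "success",
    "tool_calls": "success",
    "length": "truncated",
    "max_tokens": "truncated",
    "content_filter": "content_filter",
    "error": "error",
}

def map_finish_reason_to_finish_type(finish_reason):
    """Normalize a raw finish_reason; unknown or missing reasons yield None."""
    if finish_reason is None:
        return None
    return _FINISH_TYPE_BY_REASON.get(str(finish_reason).lower())

print(map_finish_reason_to_finish_type("STOP"))        # success
print(map_finish_reason_to_finish_type("max_tokens"))  # truncated
```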
monocle_apptrace/instrumentation/metamodel/llamaindex/llamaindex_processor.py
@@ -0,0 +1,51 @@
+ from opentelemetry.context import attach, detach, get_current, get_value, set_value, Context
+ from monocle_apptrace.instrumentation.common.span_handler import SpanHandler
+ from monocle_apptrace.instrumentation.metamodel.llamaindex._helper import (
+ is_delegation_tool, LLAMAINDEX_AGENT_NAME_KEY, get_agent_name
+ )
+ from monocle_apptrace.instrumentation.metamodel.llamaindex.entities.agent import (
+ AGENT_DELEGATION
+ )
+
+ TOOL_INVOCATION_STARTED:str = "llamaindex.tool_invocation_started"
+
+ class DelegationHandler(SpanHandler):
+ # LlamaIndex uses an internal tool to initate delegation to other agents. The method is tool invoke() with tool name as `transfer_to_<agent_name>`.
+ # Hence we usea different output processor for tool invoke() to format the span as agentic.delegation.
+ def hydrate_span(self, to_wrap, wrapped, instance, args, kwargs, result, span, parent_span = None, ex:Exception = None) -> bool:
+ if is_delegation_tool(args, instance):
+ agent_request_wrapper = to_wrap.copy()
+ agent_request_wrapper["output_processor"] = AGENT_DELEGATION
+ else:
+ agent_request_wrapper = to_wrap
+
+ return super().hydrate_span(agent_request_wrapper, wrapped, instance, args, kwargs, result, span, parent_span, ex)
+
+
+ # There are two different APIs for tool calling FunctionTool.call() and AgentWorkflow.tool_call(). In case of single agent calling tool, only the FunctionTool.call() is used. In case of multi agent case,
+ # the AgentWorkflow.tool_call() is used which inturn calls FunctionTool.call(). We can't entirely rely on the FunctionTool.call() to extract tool span details, especially the agent delegation details are not available there.
+ # Hence we want to distinguish between single agent tool call and multi agent tool call. In case of multi agent tool call, we suppress the FunctionTool.call() span and use AgentWorkflow.tool_call() span to capture the tool call details.
+ class LlamaIndexToolHandler(DelegationHandler):
+ def pre_tracing(self, to_wrap, wrapped, instance, args, kwargs):
+ return attach(set_value(TOOL_INVOCATION_STARTED, True))
+
+ def post_tracing(self, to_wrap, wrapped, instance, args, kwargs, return_value, token=None):
+ if token:
+ detach(token)
+
+ class LlamaIndexSingleAgenttToolHandlerWrapper(DelegationHandler):
+ def skip_span(self, to_wrap, wrapped, instance, args, kwargs):
+ if get_value(TOOL_INVOCATION_STARTED) == True:
+ return True
+ return super().skip_span(to_wrap, wrapped, instance, args, kwargs)
+
+ class LlamaIndexAgentHandler(SpanHandler):
+ # LlamaIndex uses direct OpenAI call for agent inferences. Given that the workflow type is set to llamaindex, the openAI inference does not record the input/output events.
+ # To avoid this, we set the workflow type to generic for agent inference spans so we can capture the prompts and responses.
+ def hydrate_span(self, to_wrap, wrapped, instance, args, kwargs, result, span, parent_span = None, ex:Exception = None) -> bool:
+ retval = super().hydrate_span(to_wrap, wrapped, instance, args, kwargs, result, span, parent_span, ex)
+ if SpanHandler.is_root_span(parent_span):
+ span.set_attribute(LLAMAINDEX_AGENT_NAME_KEY, "")
+ else:
+ parent_span.set_attribute(LLAMAINDEX_AGENT_NAME_KEY, get_agent_name(instance))
+ return retval
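These handlers coordinate through an OpenTelemetry context flag: `LlamaIndexToolHandler` sets `TOOL_INVOCATION_STARTED` around the multi-agent tool call, and `LlamaIndexSingleAgenttToolHandlerWrapper.skip_span` suppresses the nested `FunctionTool.call()` span while that flag is present. A self-contained sketch of the pattern; the two functions are stand-ins, only the `opentelemetry.context` calls are real API:

```python
from opentelemetry.context import attach, detach, get_value, set_value

TOOL_INVOCATION_STARTED = "llamaindex.tool_invocation_started"

def outer_tool_call(run_nested):
    token = attach(set_value(TOOL_INVOCATION_STARTED, True))  # what pre_tracing does
    try:
        return run_nested()
    finally:
        detach(token)  # post_tracing restores the previous context

def nested_tool_call():
    if get_value(TOOL_INVOCATION_STARTED):
        return "skip: multi-agent path, the outer tool_call span owns this invocation"
    return "record: single-agent path, FunctionTool.call gets its own span"

print(outer_tool_call(nested_tool_call))  # skip: multi-agent path ...
print(nested_tool_call())                 # record: single-agent path ...
```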
monocle_apptrace/instrumentation/metamodel/llamaindex/methods.py
@@ -2,7 +2,7 @@ from monocle_apptrace.instrumentation.common.wrapper import atask_wrapper, task_
  from monocle_apptrace.instrumentation.metamodel.llamaindex.entities.inference import (
  INFERENCE,
  )
- from monocle_apptrace.instrumentation.metamodel.llamaindex.entities.agent import AGENT
+ from monocle_apptrace.instrumentation.metamodel.llamaindex.entities.agent import AGENT, TOOLS, AGENT_REQUEST
  from monocle_apptrace.instrumentation.metamodel.llamaindex.entities.retrieval import (
  RETRIEVAL,
  )
@@ -78,6 +78,14 @@ LLAMAINDEX_METHODS = [
  "wrapper_method": atask_wrapper,
  "output_processor": INFERENCE
  },
+ {
+ "package": "llama_index.core.agent.workflow.multi_agent_workflow",
+ "object": "AgentWorkflow",
+ "method": "run",
+ "span_handler": "llamaindex_agent_handler",
+ "wrapper_method": atask_wrapper,
+ "output_processor": AGENT_REQUEST
+ },
  {
  "package": "llama_index.core.agent",
  "object": "ReActAgent",
@@ -85,6 +93,51 @@ LLAMAINDEX_METHODS = [
  "wrapper_method": task_wrapper,
  "output_processor": AGENT
  },
+ {
+ "package": "llama_index.core.agent",
+ "object": "ReActAgent",
+ "method": "achat",
+ "wrapper_method": atask_wrapper,
+ "output_processor": AGENT
+ },
+ {
+ "package": "llama_index.core.agent.workflow.function_agent",
+ "object": "FunctionAgent",
+ "method": "finalize",
+ "wrapper_method": atask_wrapper,
+ "output_processor": AGENT
+ },
+ {
+ "package": "llama_index.core.agent.workflow.function_agent",
+ "object": "FunctionAgent",
+ "method": "take_step",
+ "span_handler": "llamaindex_agent_handler",
+ "wrapper_method": atask_wrapper
+ },
+ {
+ "package": "llama_index.core.tools.function_tool",
+ "object": "FunctionTool",
+ "method": "call",
+ "span_handler": "llamaindex_single_agent_tool_handler",
+ "wrapper_method": task_wrapper,
+ "output_processor": TOOLS
+ },
+ {
+ "package": "llama_index.core.tools.function_tool",
+ "object": "FunctionTool",
+ "method": "acall",
+ "span_handler": "llamaindex_single_agent_tool_handler",
+ "wrapper_method": atask_wrapper,
+ "output_processor": TOOLS
+ },
+ {
+ "package": "llama_index.core.agent.workflow.multi_agent_workflow",
+ "object": "AgentWorkflow",
+ "method": "_call_tool",
+ "span_handler": "llamaindex_tool_handler",
+ "wrapper_method": atask_wrapper,
+ "output_processor": TOOLS
+ },
  {
  "package": "llama_index.llms.anthropic",
  "object": "Anthropic",
@@ -98,5 +151,19 @@ LLAMAINDEX_METHODS = [
  "method": "achat",
  "wrapper_method": atask_wrapper,
  "output_processor": INFERENCE
+ },
+ {
+ "package": "llama_index.llms.gemini",
+ "object": "Gemini",
+ "method": "chat",
+ "wrapper_method": task_wrapper,
+ "output_processor": INFERENCE
+ },
+ {
+ "package": "llama_index.llms.gemini",
+ "object": "Gemini",
+ "method": "achat",
+ "wrapper_method": atask_wrapper,
+ "output_processor": INFERENCE
  }
  ]
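Each entry in `LLAMAINDEX_METHODS` declares a target (`package`, `object`, `method`), a sync or async `wrapper_method`, an optional named `span_handler`, and usually an `output_processor` that hydrates the span. A rough, hypothetical sketch of what applying one such entry amounts to; this is not monocle's instrumentor, which uses its own wrapper machinery (`task_wrapper`/`atask_wrapper`) and handler registry:

```python
import functools
import importlib

def apply_entry(entry):
    """Patch the sync method described by one config entry with a logging wrapper (sketch)."""
    module = importlib.import_module(entry["package"])
    target = getattr(module, entry["object"]) if entry.get("object") else module
    original = getattr(target, entry["method"])

    @functools.wraps(original)
    def wrapper(*args, **kwargs):
        # A real wrapper_method would start a span here, then feed
        # {"instance", "args", "kwargs", "result", ...} to the entry's
        # output_processor accessors once the call returns.
        print(f"entering {entry['package']}.{entry.get('object', '')}.{entry['method']}")
        try:
            return original(*args, **kwargs)
        finally:
            print("exiting: the span would be ended and hydrated here")

    setattr(target, entry["method"], wrapper)

# e.g. apply_entry({"package": "llama_index.core.tools.function_tool",
#                   "object": "FunctionTool", "method": "call"})
```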
monocle_apptrace/instrumentation/metamodel/mcp/_helper.py
@@ -0,0 +1,118 @@
+ from monocle_apptrace.instrumentation.common.utils import with_tracer_wrapper
+ from opentelemetry.context import attach, set_value, get_value, detach
+ from monocle_apptrace.instrumentation.common.utils import resolve_from_alias
+ import logging
+ import json
+
+ logger = logging.getLogger(__name__)
+
+
+ def log(arguments):
+ print(f"Arguments: {arguments}")
+
+
+ def get_output_text(arguments):
+ # arguments["result"].content[0].text
+ if (
+ "result" in arguments
+ and hasattr(arguments["result"], "tools")
+ and isinstance(arguments["result"].tools, list)
+ ):
+ tools = []
+ for tool in arguments["result"].tools:
+ if hasattr(tool, "name"):
+ tools.append(tool.name)
+ return tools
+ if (
+ "result" in arguments
+ and hasattr(arguments["result"], "content")
+ and isinstance(arguments["result"].content, list)
+ ):
+ ret_val = []
+ for content in arguments["result"].content:
+ if hasattr(content, "text"):
+ ret_val.append(content.text)
+ return ret_val
+
+
+ def get_name(arguments):
+ """Get the name of the tool from the instance."""
+
+ args = arguments["args"]
+ if (
+ args
+ and hasattr(args[0], "root")
+ and hasattr(args[0].root, "params")
+ and hasattr(args[0].root.params, "name")
+ ):
+ # If the first argument has a root with params and name, return that name
+ return args[0].root.params.name
+
+
+ def get_type(arguments):
+ """Get the type of the tool from the instance."""
+ args = arguments["args"]
+ if args and hasattr(args[0], "root") and hasattr(args[0].root, "method"):
+ # If the first argument has a root with a method, return that method's name
+ return args[0].root.method
+
+
+ def get_params_arguments(arguments):
+ """Get the params of the tool from the instance."""
+
+ args = arguments["args"]
+ if (
+ args
+ and hasattr(args[0], "root")
+ and hasattr(args[0].root, "params")
+ and hasattr(args[0].root.params, "arguments")
+ ):
+ # If the first argument has a root with params and arguments, return those arguments
+ try:
+ return json.dumps(args[0].root.params.arguments)
+ except (TypeError, ValueError) as e:
+ logger.error(f"Error serializing arguments: {e}")
+ return str(args[0].root.params.arguments)
+
+
+ def get_url(arguments):
+ """Get the URL of the tool from the instance."""
+ url = get_value("mcp.url", None)
+
+ return url
+
+ # this extracts the url from the langchain mcp adapter tools and attaches it to the context.
+ @with_tracer_wrapper
+ def langchain_mcp_wrapper(
+ tracer: any, handler: any, to_wrap, wrapped, instance, source_path, args, kwargs
+ ):
+ return_value = None
+ try:
+ return_value = wrapped(*args, **kwargs)
+ return return_value
+ finally:
+ if (
+ return_value
+ and hasattr(return_value, "coroutine")
+ and kwargs.get("connection", None)
+ ):
+ try:
+ # extract the URL from the connection and attach it to the context
+ url = kwargs.get("connection").get("url", None)
+ if url:
+ # wrap coroutine methods and attach the URL to the context
+
+ original_coroutine = return_value.coroutine
+
+ async def wrapped_coroutine(*args1, **kwargs1):
+ token = None
+ try:
+ token = attach(set_value("mcp.url", url))
+ return await original_coroutine(*args1, **kwargs1)
+ finally:
+ detach(token)
+
+ return_value.coroutine = wrapped_coroutine
+
+ except Exception as e:
+ pass
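`langchain_mcp_wrapper` does not create a span itself; it rebinds the converted LangChain tool's `coroutine` so that every invocation carries the MCP server URL in the OpenTelemetry context, where `get_url()` later reads it when the `agentic.mcp.invocation` span is hydrated. The same rebinding pattern in isolation, with a stand-in tool object; only the `opentelemetry.context` calls and `asyncio` are real API:

```python
import asyncio
from opentelemetry.context import attach, detach, get_value, set_value

class ToolStub:
    async def coroutine(self, query):
        # get_url() in the helper above does essentially this lookup.
        return f"mcp.url seen inside the call: {get_value('mcp.url')}"

def bind_url(tool, url):
    original = tool.coroutine  # bound coroutine of the stand-in tool

    async def wrapped(*args, **kwargs):
        token = attach(set_value("mcp.url", url))  # value rides in the context
        try:
            return await original(*args, **kwargs)
        finally:
            detach(token)

    tool.coroutine = wrapped  # same rebinding the wrapper performs on the adapter tool
    return tool

tool = bind_url(ToolStub(), "http://localhost:8000/mcp")
print(asyncio.run(tool.coroutine("weather in Paris")))
```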
monocle_apptrace/instrumentation/metamodel/mcp/entities/inference.py
@@ -0,0 +1,48 @@
+ from monocle_apptrace.instrumentation.metamodel.mcp import _helper
+
+ TOOLS = {
+ "type": "agentic.mcp.invocation",
+ "attributes": [
+ [
+ {
+ "_comment": "name of the tool",
+ "attribute": "name",
+ "accessor": lambda arguments: _helper.get_name(arguments),
+ },
+ {
+ "_comment": "tool type",
+ "attribute": "type",
+ "accessor": lambda arguments: "mcp.server",
+ },
+ {
+ "_comment": "tool url",
+ "attribute": "url",
+ "accessor": lambda arguments: _helper.get_url(arguments),
+ },
+ ]
+ ],
+ "events": [
+ {
+ "name": "data.input",
+ "attributes": [
+ {
+ "_comment": "this is Tool input",
+ "attribute": "input",
+ "accessor": lambda arguments: _helper.get_params_arguments(
+ arguments
+ ),
+ },
+ ],
+ },
+ {
+ "name": "data.output",
+ "attributes": [
+ {
+ "_comment": "this is Tool output",
+ "attribute": "output",
+ "accessor": lambda arguments: _helper.get_output_text(arguments)
+ },
+ ],
+ },
+ ],
+ }
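These MCP accessors assume the first positional argument of the wrapped `BaseSession.send_request` is a JSON-RPC client request whose `.root` carries the `method` and `params`. A small illustration of that shape using stand-in objects; the attribute paths mirror `get_name`, `get_type`, and `get_params_arguments` above, and the concrete values are made up:

```python
import json
from types import SimpleNamespace

# Stand-in for a tools/call client request as the accessors expect to see it.
call_tool_request = SimpleNamespace(
    root=SimpleNamespace(
        method="tools/call",
        params=SimpleNamespace(name="get_weather", arguments={"city": "Paris"}),
    )
)
arguments = {"args": (call_tool_request,), "kwargs": {}}

args = arguments["args"]
print(args[0].root.params.name)                    # "get_weather" -> span attribute "name"
print(args[0].root.method)                         # "tools/call"  -> what get_type returns
print(json.dumps(args[0].root.params.arguments))   # data.input event payload
```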
monocle_apptrace/instrumentation/metamodel/mcp/mcp_processor.py
@@ -0,0 +1,13 @@
+ from opentelemetry.context import set_value, attach, detach
+ from monocle_apptrace.instrumentation.common.span_handler import SpanHandler
+ from monocle_apptrace.instrumentation.metamodel.mcp._helper import (
+ get_name
+
+ )
+ from monocle_apptrace.instrumentation.metamodel.langgraph.entities.inference import (
+ AGENT_DELEGATION, AGENT_REQUEST
+ )
+
+ class MCPAgentHandler(SpanHandler):
+ def skip_span(self, to_wrap, wrapped, instance, args, kwargs) -> bool:
+ return get_name({"args": args, "kwargs": kwargs}) is None or args[0].root.method == "tools/list"
monocle_apptrace/instrumentation/metamodel/mcp/methods.py
@@ -0,0 +1,21 @@
+ from monocle_apptrace.instrumentation.common.wrapper import atask_wrapper
+ from monocle_apptrace.instrumentation.metamodel.mcp import _helper
+ from monocle_apptrace.instrumentation.metamodel.mcp.entities.inference import TOOLS
+
+
+ MCP_METHODS = [
+ {
+ "package": "mcp.shared.session",
+ "object": "BaseSession",
+ "method": "send_request",
+ "wrapper_method": atask_wrapper,
+ "span_handler": "mcp_agent_handler",
+ "output_processor": TOOLS,
+ },
+ {
+ "package": "langchain_mcp_adapters.tools",
+ "object": "",
+ "method": "convert_mcp_tool_to_langchain_tool",
+ "wrapper_method": _helper.langchain_mcp_wrapper,
+ },
+ ]