cortexhub 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,297 @@
+ """CrewAI adapter for tool and LLM interception.
+
+ Patches CrewAI at multiple levels:
+ - CrewStructuredTool.invoke for all LLM-driven tool calls
+ - BaseTool._run for direct tool usage
+ - LiteLLM completion for LLM call governance (guardrails, PII detection)
+
+ IMPORTANT: CrewAI has its own OpenTelemetry setup that may conflict with
+ CortexHub's telemetry. To ensure proper telemetry capture, either:
+ 1. Call cortexhub.init() BEFORE importing crewai, or
+ 2. Set the environment variable CREWAI_TRACING_ENABLED=false
+
+ Architectural rules:
+ - Adapter is DUMB plumbing
+ - SDK orchestrates everything via govern_execution()
+ - Store originals on the class, not in globals
+ """
+
+ from typing import Any
+
+ import structlog
+
+ from cortexhub.adapters.base import ToolAdapter
+ from cortexhub.pipeline import govern_execution
+
+ logger = structlog.get_logger(__name__)
+
+ # Attribute names for storing originals on the class
+ _ORIGINAL_INVOKE_ATTR = "__cortexhub_original_invoke__"
+ _ORIGINAL_RUN_ATTR = "__cortexhub_original_run__"
+ _PATCHED_ATTR = "__cortexhub_patched__"
+ _PATCHED_TOOL_ATTR = "__cortexhub_tool_patched__"
+ _PATCHED_LLM_ATTR = "__cortexhub_llm_patched__"
+ _ORIGINAL_COMPLETION_ATTR = "__cortexhub_original_completion__"
+ _ORIGINAL_ACOMPLETION_ATTR = "__cortexhub_original_acompletion__"
+
+
+ class CrewAIAdapter(ToolAdapter):
+     """Adapter for CrewAI framework.
+
+     Patches CrewStructuredTool.invoke - the method called by CrewAI's
+     agent executor when tools are invoked by the LLM.
+
+     Key properties:
+     - Adapter is dumb plumbing
+     - Patches at class level so all tools are governed
+     - Works regardless of when tools are created
+     """
+
+     @property
+     def framework_name(self) -> str:
+         return "crewai"
+
+     def _get_framework_modules(self) -> list[str]:
+         return ["crewai", "crewai.tools"]
+
+     def patch(self) -> None:
+         """Patch CrewAI tool execution methods."""
+         try:
+             from crewai.tools.structured_tool import CrewStructuredTool
+
+             cortex_hub = self.cortex_hub
+             tools = self._discover_tools()
+             if tools:
+                 cortex_hub.backend.register_tool_inventory(
+                     agent_id=cortex_hub.agent_id,
+                     framework=self.framework_name,
+                     tools=tools,
+                 )
+
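+             # Inventory payload sketch (illustrative): entries follow the shape
+             # produced by the adapters' tool discovery elsewhere in this package,
+             # e.g. {"name": "search", "description": "...",
+             #       "parameters_schema": {...}, "source": "framework"}
+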
+             # Patch CrewStructuredTool.invoke (primary execution path)
+             if not getattr(CrewStructuredTool, _PATCHED_ATTR, False):
+                 if not hasattr(CrewStructuredTool, _ORIGINAL_INVOKE_ATTR):
+                     setattr(CrewStructuredTool, _ORIGINAL_INVOKE_ATTR, CrewStructuredTool.invoke)
+
+                 original_invoke = getattr(CrewStructuredTool, _ORIGINAL_INVOKE_ATTR)
+
+                 def patched_invoke(self, input, config=None, **kwargs):
+                     """Governed CrewStructuredTool execution."""
+                     tool_name = getattr(self, "name", "unknown_tool")
+                     tool_description = getattr(self, "description", None)
+
+                     tool_metadata = {
+                         "name": tool_name,
+                         "description": tool_description,
+                         "framework": "crewai",
+                     }
+
+                     # Args are forwarded to the pipeline for policy evaluation
+                     # only; the original invoke receives the untouched input.
+                     governed_fn = govern_execution(
+                         tool_fn=lambda **_kw: original_invoke(self, input, config, **kwargs),
+                         tool_metadata=tool_metadata,
+                         cortex_hub=cortex_hub,
+                     )
+
+                     # Extract args from input
+                     if isinstance(input, dict):
+                         return governed_fn(**input)
+                     elif isinstance(input, str):
+                         return governed_fn(_raw=input)
+                     return governed_fn()
+
+                 CrewStructuredTool.invoke = patched_invoke
+                 setattr(CrewStructuredTool, _PATCHED_ATTR, True)
+                 logger.info("CrewAI CrewStructuredTool.invoke patched")
+
+             # Also patch BaseTool._run for direct tool.run() calls
+             try:
+                 from crewai.tools.base_tool import BaseTool
+
+                 if not getattr(BaseTool, _PATCHED_TOOL_ATTR, False):
+                     if not hasattr(BaseTool, _ORIGINAL_RUN_ATTR):
+                         setattr(BaseTool, _ORIGINAL_RUN_ATTR, BaseTool._run)
+
+                     original_run = getattr(BaseTool, _ORIGINAL_RUN_ATTR)
+
+                     def patched_run(self, *args, **kwargs):
+                         """Governed BaseTool execution."""
+                         tool_name = getattr(self, "name", "unknown_tool")
+                         tool_description = getattr(self, "description", None)
+
+                         tool_metadata = {
+                             "name": tool_name,
+                             "description": tool_description,
+                             "framework": "crewai",
+                         }
+
+                         governed_fn = govern_execution(
+                             tool_fn=lambda **_kw: original_run(self, *args, **kwargs),
+                             tool_metadata=tool_metadata,
+                             cortex_hub=cortex_hub,
+                         )
+
+                         # Extract args for policy evaluation
+                         if kwargs:
+                             return governed_fn(**kwargs)
+                         if len(args) == 1 and isinstance(args[0], dict):
+                             return governed_fn(**args[0])
+                         if args:
+                             return governed_fn(_raw=args[0])
+                         return governed_fn()
+
+                     BaseTool._run = patched_run
+                     setattr(BaseTool, _PATCHED_TOOL_ATTR, True)
+                     logger.info("CrewAI BaseTool._run patched")
+
+             except ImportError:
+                 logger.debug("CrewAI BaseTool not available")
+
+             logger.info("CrewAI adapter patched successfully")
+
+             # Patch LiteLLM for LLM call governance (guardrails, PII)
+             self._patch_litellm(cortex_hub)
+
+         except ImportError:
+             logger.debug("CrewAI not available")
+             raise
+         except Exception as e:
+             logger.error("Failed to patch CrewAI", error=str(e))
+             raise
+
+     def _patch_litellm(self, cortex_hub) -> None:
+         """Patch LiteLLM completion for LLM call governance.
+
+         CrewAI uses LiteLLM internally for all LLM calls.
+         We patch litellm.completion to intercept and run guardrails.
+         """
+         try:
+             import litellm
+
+             if getattr(litellm, _PATCHED_LLM_ATTR, False):
+                 logger.debug("LiteLLM already patched for CrewAI")
+                 return
+
+             # Store originals
+             if not hasattr(litellm, _ORIGINAL_COMPLETION_ATTR):
+                 setattr(litellm, _ORIGINAL_COMPLETION_ATTR, litellm.completion)
+             original_completion = getattr(litellm, _ORIGINAL_COMPLETION_ATTR)
+
+             def patched_completion(*args, **kwargs):
+                 """Governed LiteLLM completion."""
+                 model = kwargs.get("model") or (args[0] if args else "unknown")
+                 messages = kwargs.get("messages") or (args[1] if len(args) > 1 else [])
+
+                 # The raw message list serves as the prompt for guardrails
+                 prompt = messages
+
+                 def call_original(prompt_override):
+                     # Replace messages if overridden (e.g. after redaction);
+                     # otherwise forward the originally captured messages, even
+                     # when they were passed positionally.
+                     call_kwargs = kwargs.copy()
+                     call_kwargs["messages"] = (
+                         prompt_override if prompt_override is not None else messages
+                     )
+                     return original_completion(*args[:1], **call_kwargs)
+
+                 llm_metadata = {
+                     "kind": "llm",
+                     "framework": "crewai",
+                     "model": model,
+                     "prompt": prompt,
+                     "call_original": call_original,
+                 }
+
+                 governed = govern_execution(
+                     tool_fn=lambda *a, **kw: original_completion(*args, **kwargs),
+                     tool_metadata=llm_metadata,
+                     cortex_hub=cortex_hub,
+                 )
+                 return governed()
+
+             litellm.completion = patched_completion
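+             # Redaction sketch (illustrative): a pipeline that detects PII can
+             # re-issue the call with scrubbed messages via the metadata hook:
+             #     redacted = [{"role": "user", "content": "card is [REDACTED]"}]
+             #     llm_metadata["call_original"](redacted)
+             # Passing None forwards the original messages unchanged.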
+
+             # Patch async version too
+             if hasattr(litellm, "acompletion"):
+                 if not hasattr(litellm, _ORIGINAL_ACOMPLETION_ATTR):
+                     setattr(litellm, _ORIGINAL_ACOMPLETION_ATTR, litellm.acompletion)
+                 original_acompletion = getattr(litellm, _ORIGINAL_ACOMPLETION_ATTR)
+
+                 async def patched_acompletion(*args, **kwargs):
+                     """Governed async LiteLLM completion."""
+                     model = kwargs.get("model") or (args[0] if args else "unknown")
+                     messages = kwargs.get("messages") or (args[1] if len(args) > 1 else [])
+                     prompt = messages
+
+                     async def call_original(prompt_override):
+                         call_kwargs = kwargs.copy()
+                         call_kwargs["messages"] = (
+                             prompt_override if prompt_override is not None else messages
+                         )
+                         return await original_acompletion(*args[:1], **call_kwargs)
+
+                     llm_metadata = {
+                         "kind": "llm",
+                         "framework": "crewai",
+                         "model": model,
+                         "prompt": prompt,
+                         "call_original": call_original,
+                     }
+
+                     governed = govern_execution(
+                         tool_fn=lambda *a, **kw: original_acompletion(*args, **kwargs),
+                         tool_metadata=llm_metadata,
+                         cortex_hub=cortex_hub,
+                     )
+                     return await governed()
+
+                 litellm.acompletion = patched_acompletion
+
+             setattr(litellm, _PATCHED_LLM_ATTR, True)
+             logger.info("CrewAI LiteLLM interception patched successfully")
+
+         except ImportError:
+             logger.debug("LiteLLM not available, skipping LLM interception for CrewAI")
+         except Exception as e:
+             logger.debug("CrewAI LiteLLM interception skipped", reason=str(e))
+
+     def unpatch(self) -> None:
+         """Restore original CrewAI methods."""
+         try:
+             from crewai.tools.structured_tool import CrewStructuredTool
+
+             if hasattr(CrewStructuredTool, _ORIGINAL_INVOKE_ATTR):
+                 original = getattr(CrewStructuredTool, _ORIGINAL_INVOKE_ATTR)
+                 CrewStructuredTool.invoke = original
+                 setattr(CrewStructuredTool, _PATCHED_ATTR, False)
+                 logger.info("CrewAI CrewStructuredTool unpatched")
+
+             try:
+                 from crewai.tools.base_tool import BaseTool
+
+                 if hasattr(BaseTool, _ORIGINAL_RUN_ATTR):
+                     original = getattr(BaseTool, _ORIGINAL_RUN_ATTR)
+                     BaseTool._run = original
+                     setattr(BaseTool, _PATCHED_TOOL_ATTR, False)
+                     logger.info("CrewAI BaseTool unpatched")
+             except ImportError:
+                 pass
+
+             # Restore LiteLLM
+             try:
+                 import litellm
+
+                 if hasattr(litellm, _ORIGINAL_COMPLETION_ATTR):
+                     litellm.completion = getattr(litellm, _ORIGINAL_COMPLETION_ATTR)
+                 if hasattr(litellm, _ORIGINAL_ACOMPLETION_ATTR):
+                     litellm.acompletion = getattr(litellm, _ORIGINAL_ACOMPLETION_ATTR)
+                 setattr(litellm, _PATCHED_LLM_ATTR, False)
+                 logger.info("CrewAI LiteLLM unpatched")
+             except ImportError:
+                 pass
+
+         except ImportError:
+             pass
+
+     def intercept(self, tool_fn, tool_name, args, **kwargs):
+         """Not used - governance happens via SDK entrypoint."""
+         raise NotImplementedError("Use govern_execution via pipeline")
+
+     def _discover_tools(self) -> list[dict[str, Any]]:
+         """Discover tools from CrewAI (best-effort)."""
+         return []
@@ -0,0 +1,386 @@
+ """LangGraph adapter for tool interception.
+
+ Intercepts LangGraph tool execution via langchain_core.tools.BaseTool.
+
+ LangGraph uses LangChain's tool infrastructure, so we patch BaseTool.invoke().
+ For approval workflows, LangGraph provides native support via:
+ - interrupt() for human-in-the-loop
+ - Checkpointing for state persistence
+
+ Architectural rules:
+ - Adapter is DUMB plumbing
+ - Adapter calls ONE SDK entrypoint: govern_execution()
+ - SDK orchestrates everything
+ - No governance logic in adapter
+ - For approval: use LangGraph's native interrupt() mechanism
+ """
+
+ import json
+ from typing import Any
+
+ import structlog
+
+ from cortexhub.adapters.base import ToolAdapter
+ from cortexhub.pipeline import govern_execution
+
+ logger = structlog.get_logger(__name__)
+
+ # Attribute names for storing originals on the class
+ _ORIGINAL_INVOKE_ATTR = "__cortexhub_original_invoke__"
+ _PATCHED_ATTR = "__cortexhub_patched__"
+ _ORIGINAL_CHAT_INVOKE_ATTR = "__cortexhub_original_chat_invoke__"
+ _ORIGINAL_CHAT_AINVOKE_ATTR = "__cortexhub_original_chat_ainvoke__"
+ _PATCHED_LLM_ATTR = "__cortexhub_llm_patched__"
+ _ORIGINAL_TOOLNODE_INIT_ATTR = "__cortexhub_original_toolnode_init__"
+ _PATCHED_TOOLNODE_ATTR = "__cortexhub_toolnode_patched__"
+
+
+ class LangGraphAdapter(ToolAdapter):
+     """Adapter for LangGraph framework.
+
+     LangGraph uses LangChain's tool infrastructure, so we patch BaseTool.invoke().
+
+     For approval workflows (require_approval policy effect):
+     - We detect whether a checkpointer is configured
+     - If yes: use LangGraph's native interrupt() mechanism
+     - If no: raise a clear error asking the developer to add a checkpointer
+
+     Key properties:
+     - Adapter is dumb plumbing
+     - Calls the SDK entrypoint, doesn't implement governance
+     - Originals stored on the class, not in globals
+     - Leverages LangGraph's native human-in-the-loop support
+     """
+
+     @property
+     def framework_name(self) -> str:
+         return "langgraph"
+
+     def __init__(self, cortex_hub: Any):
+         super().__init__(cortex_hub)
+         self._discovered_tools: dict[str, dict[str, Any]] = {}
+
+     def _get_framework_modules(self) -> list[str]:
+         return ["langgraph", "langchain_core", "langchain_core.tools"]
+
+     def patch(self) -> None:
+         """Patch LangGraph/LangChain BaseTool.invoke method."""
+         try:
+             from langchain_core.tools import BaseTool
+
+             # Check if already patched
+             if getattr(BaseTool, _PATCHED_ATTR, False):
+                 logger.info("LangGraph already patched")
+                 return
+
+             # Store original on class
+             if not hasattr(BaseTool, _ORIGINAL_INVOKE_ATTR):
+                 setattr(BaseTool, _ORIGINAL_INVOKE_ATTR, BaseTool.invoke)
+
+             tool_original_invoke = getattr(BaseTool, _ORIGINAL_INVOKE_ATTR)
+             cortex_hub = self.cortex_hub
+             adapter = self
+             tools = self._discover_tools()
+             if tools:
+                 self._register_tools(tools)
+
+             def patched_invoke(self, input, config=None, **kwargs):
+                 """Governed tool invocation."""
+                 tool_name = getattr(self, "name", "unknown_tool")
+                 tool_description = getattr(self, "description", None)
+
+                 # Extract args - preserve structure without rewriting
+                 if isinstance(input, dict):
+                     args = input
+                 elif hasattr(input, "model_dump"):
+                     args = input.model_dump()
+                 elif hasattr(input, "dict"):
+                     args = input.dict()
+                 else:
+                     args = {"_raw": input}
+                 policy_args = adapter._normalize_policy_args(self, args)
+
+                 tool_metadata = {
+                     "name": tool_name,
+                     "description": tool_description,
+                     "framework": "langgraph",
+                 }
+                 governed_fn = govern_execution(
+                     tool_fn=lambda *a, **kw: tool_original_invoke(
+                         self, input, config=config, **kwargs
+                     ),
+                     tool_metadata=tool_metadata,
+                     cortex_hub=cortex_hub,
+                 )
+                 return governed_fn(**policy_args)
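+             # Example dispatch (hypothetical tool): invoke({"query": "acme"})
+             # yields policy_args {"query": "acme"}; a pydantic input model is
+             # dumped to a dict; anything else is wrapped as {"_raw": input}.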
+
+             # Apply patch
+             BaseTool.invoke = patched_invoke
+             setattr(BaseTool, _PATCHED_ATTR, True)
+
+             logger.info("LangGraph adapter patched successfully")
+
+             # Patch ToolNode to discover tools (best-effort)
+             self._patch_tool_node()
+
+             # Patch LLM invoke for LLM call governance
+             self._patch_llm_invoke(cortex_hub)
+
+         except ImportError:
+             logger.debug("LangGraph/LangChain not available, skipping adapter")
+         except Exception as e:
+             logger.error("Failed to patch LangGraph", error=str(e))
+
+     def _patch_llm_invoke(self, cortex_hub) -> None:
+         """Patch LangChain chat model invoke for LLM call governance."""
+         try:
+             from langchain_core.language_models.chat_models import BaseChatModel
+
+             if getattr(BaseChatModel, _PATCHED_LLM_ATTR, False):
+                 return
+
+             if not hasattr(BaseChatModel, _ORIGINAL_CHAT_INVOKE_ATTR):
+                 setattr(BaseChatModel, _ORIGINAL_CHAT_INVOKE_ATTR, BaseChatModel.invoke)
+             chat_original_invoke = getattr(BaseChatModel, _ORIGINAL_CHAT_INVOKE_ATTR)
+
+             def patched_chat_invoke(self, input, config=None, **kwargs):
+                 model_name = (
+                     getattr(self, "model_name", None) or getattr(self, "model", None) or "unknown"
+                 )
+                 prompt = input
+
+                 def call_original(prompt_override):
+                     # None means "no override": forward the original input,
+                     # mirroring the LiteLLM adapter's semantics.
+                     effective = prompt_override if prompt_override is not None else input
+                     return chat_original_invoke(self, effective, config=config, **kwargs)
+
+                 llm_metadata = {
+                     "kind": "llm",
+                     "framework": "langgraph",
+                     "model": model_name,
+                     "prompt": prompt,
+                     "call_original": call_original,
+                 }
+
+                 governed = govern_execution(
+                     tool_fn=lambda *a, **kw: chat_original_invoke(
+                         self, input, config=config, **kwargs
+                     ),
+                     tool_metadata=llm_metadata,
+                     cortex_hub=cortex_hub,
+                 )
+                 return governed()
+
+             BaseChatModel.invoke = patched_chat_invoke
+
+             # Patch async version too
+             if hasattr(BaseChatModel, "ainvoke"):
+                 if not hasattr(BaseChatModel, _ORIGINAL_CHAT_AINVOKE_ATTR):
+                     setattr(BaseChatModel, _ORIGINAL_CHAT_AINVOKE_ATTR, BaseChatModel.ainvoke)
+                 chat_original_ainvoke = getattr(BaseChatModel, _ORIGINAL_CHAT_AINVOKE_ATTR)
+
+                 async def patched_chat_ainvoke(self, input, config=None, **kwargs):
+                     model_name = (
+                         getattr(self, "model_name", None)
+                         or getattr(self, "model", None)
+                         or "unknown"
+                     )
+                     prompt = input
+
+                     async def call_original(prompt_override):
+                         effective = prompt_override if prompt_override is not None else input
+                         return await chat_original_ainvoke(
+                             self, effective, config=config, **kwargs
+                         )
+
+                     llm_metadata = {
+                         "kind": "llm",
+                         "framework": "langgraph",
+                         "model": model_name,
+                         "prompt": prompt,
+                         "call_original": call_original,
+                     }
+
+                     governed = govern_execution(
+                         tool_fn=lambda *a, **kw: chat_original_ainvoke(
+                             self, input, config=config, **kwargs
+                         ),
+                         tool_metadata=llm_metadata,
+                         cortex_hub=cortex_hub,
+                     )
+                     return await governed()
+
+                 BaseChatModel.ainvoke = patched_chat_ainvoke
+
+             setattr(BaseChatModel, _PATCHED_LLM_ATTR, True)
+             logger.info("LangGraph LLM interception patched successfully")
+
+         except Exception as e:
+             logger.debug("LangGraph LLM interception skipped", reason=str(e))
+
+     def _patch_tool_node(self) -> None:
+         """Patch LangGraph ToolNode to capture tool inventory."""
+         try:
+             from langgraph.prebuilt import ToolNode
+
+             if getattr(ToolNode, _PATCHED_TOOLNODE_ATTR, False):
+                 return
+
+             if not hasattr(ToolNode, _ORIGINAL_TOOLNODE_INIT_ATTR):
+                 setattr(ToolNode, _ORIGINAL_TOOLNODE_INIT_ATTR, ToolNode.__init__)
+             original_init = getattr(ToolNode, _ORIGINAL_TOOLNODE_INIT_ATTR)
+             adapter = self
+
+             def patched_init(self, tools, *args, **kwargs):
+                 original_init(self, tools, *args, **kwargs)
+                 try:
+                     adapter._register_tools(adapter._normalize_tools(tools))
+                 except Exception as e:
+                     logger.debug("ToolNode tool discovery failed", reason=str(e))
+
+             ToolNode.__init__ = patched_init
+             setattr(ToolNode, _PATCHED_TOOLNODE_ATTR, True)
+             logger.info("LangGraph ToolNode patched for tool discovery")
+         except Exception as e:
+             logger.debug("LangGraph ToolNode patch skipped", reason=str(e))
+
+     def unpatch(self) -> None:
+         """Restore original methods."""
+         try:
+             from langchain_core.tools import BaseTool
+
+             if hasattr(BaseTool, _ORIGINAL_INVOKE_ATTR):
+                 BaseTool.invoke = getattr(BaseTool, _ORIGINAL_INVOKE_ATTR)
+                 setattr(BaseTool, _PATCHED_ATTR, False)
+                 logger.info("LangGraph adapter unpatched")
+
+             # Restore LLM interception
+             try:
+                 from langchain_core.language_models.chat_models import BaseChatModel
+
+                 if hasattr(BaseChatModel, _ORIGINAL_CHAT_INVOKE_ATTR):
+                     BaseChatModel.invoke = getattr(BaseChatModel, _ORIGINAL_CHAT_INVOKE_ATTR)
+                 if hasattr(BaseChatModel, _ORIGINAL_CHAT_AINVOKE_ATTR):
+                     BaseChatModel.ainvoke = getattr(BaseChatModel, _ORIGINAL_CHAT_AINVOKE_ATTR)
+                 setattr(BaseChatModel, _PATCHED_LLM_ATTR, False)
+             except ImportError:
+                 pass
+
+             # Restore ToolNode init
+             try:
+                 from langgraph.prebuilt import ToolNode
+
+                 if hasattr(ToolNode, _ORIGINAL_TOOLNODE_INIT_ATTR):
+                     ToolNode.__init__ = getattr(ToolNode, _ORIGINAL_TOOLNODE_INIT_ATTR)
+                 setattr(ToolNode, _PATCHED_TOOLNODE_ATTR, False)
+             except ImportError:
+                 pass
+         except ImportError:
+             pass
+
+     def intercept(self, tool_fn, tool_name, args, **kwargs):
+         """Not used - governance happens via SDK entrypoint."""
+         raise NotImplementedError("Use govern_execution via patched invoke")
+
+     def _discover_tools(self) -> list[dict[str, Any]]:
+         """Discover tools from LangGraph (best-effort)."""
+         return list(self._discovered_tools.values())
+
+     def _register_tools(self, tools: list[dict[str, Any]]) -> None:
+         """Register tools with backend, merging by name."""
+         if not tools:
+             return
+
+         for tool in tools:
+             name = tool.get("name") or "unknown_tool"
+             self._discovered_tools[name] = tool
+
+         self.cortex_hub.backend.register_tool_inventory(
+             agent_id=self.cortex_hub.agent_id,
+             framework=self.framework_name,
+             tools=list(self._discovered_tools.values()),
+         )
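+         # Merge example (illustrative): registering [search] and later
+         # [calculator] re-sends the full inventory [search, calculator],
+         # since entries are keyed by name and accumulated across calls.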
+
+     def _normalize_tools(self, tools: Any) -> list[dict[str, Any]]:
+         """Convert tool objects to inventory payloads."""
+         normalized: list[dict[str, Any]] = []
+
+         if not tools:
+             return normalized
+
+         tool_list = tools if isinstance(tools, list) else [tools]
+
+         for tool in tool_list:
+             name = getattr(tool, "name", None) or getattr(tool, "__name__", None) or "unknown_tool"
+             description = getattr(tool, "description", None) or getattr(tool, "__doc__", None)
+             parameters_schema = self._extract_parameters_schema(tool)
+
+             normalized.append(
+                 {
+                     "name": name,
+                     "description": description.strip() if isinstance(description, str) else None,
+                     "parameters_schema": parameters_schema,
+                     "source": "framework",
+                 }
+             )
+
+         return normalized
+
+     def _normalize_policy_args(self, tool: Any, raw_args: dict[str, Any]) -> dict[str, Any]:
+         """Best-effort normalize tool args for policy evaluation."""
+         args = raw_args
+         if isinstance(args, dict) and isinstance(args.get("args"), dict):
+             args = args["args"]
+
+         if isinstance(args, str):
+             try:
+                 parsed = json.loads(args)
+                 if isinstance(parsed, dict):
+                     args = parsed
+             except json.JSONDecodeError:
+                 pass
+
+         args_schema = getattr(tool, "args_schema", None)
+         if args_schema and isinstance(args, dict):
+             try:
+                 if hasattr(args_schema, "model_validate"):  # pydantic v2
+                     parsed = args_schema.model_validate(args)
+                 elif hasattr(args_schema, "parse_obj"):  # pydantic v1
+                     parsed = args_schema.parse_obj(args)
+                 else:
+                     parsed = None
+                 if parsed is not None:
+                     if hasattr(parsed, "model_dump"):
+                         args = parsed.model_dump()
+                     elif hasattr(parsed, "dict"):
+                         args = parsed.dict()
+             except Exception:
+                 pass
+
+         return args if isinstance(args, dict) else {"_raw": args}
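+         # Normalization examples (illustrative):
+         #     {"args": {"query": "acme"}}  -> {"query": "acme"}   (ToolCall wrapper)
+         #     '{"query": "acme"}'          -> {"query": "acme"}   (JSON string)
+         #     anything non-dict            -> {"_raw": value}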
+
+     def _extract_parameters_schema(self, tool: Any) -> dict[str, Any] | None:
+         """Best-effort JSON schema extraction for tool parameters."""
+         schema = None
+         args_schema = getattr(tool, "args_schema", None)
+
+         if args_schema is not None:
+             if hasattr(args_schema, "model_json_schema"):  # pydantic v2
+                 schema = args_schema.model_json_schema()
+             elif hasattr(args_schema, "schema"):  # pydantic v1
+                 schema = args_schema.schema()
+
+         if schema is None:
+             args = getattr(tool, "args", None)
+             if isinstance(args, dict):
+                 schema = {
+                     "type": "object",
+                     "properties": args,
+                     "required": list(args.keys()),
+                 }
+
+         if not isinstance(schema, dict):
+             return None
+
+         return {
+             "type": schema.get("type", "object"),
+             "properties": schema.get("properties", {}) or {},
+             "required": schema.get("required", []) or [],
+         }
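+
+     # Extraction example (illustrative): a pydantic model with fields
+     # `query: str` and `limit: int = 10` yields
+     #     {"type": "object",
+     #      "properties": {"query": {...}, "limit": {...}},
+     #      "required": ["query"]}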