cortexhub 0.1.3__py3-none-any.whl → 0.1.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cortexhub/adapters/claude_agents.py +234 -7
- cortexhub/adapters/crewai.py +275 -1
- cortexhub/adapters/langgraph.py +119 -0
- cortexhub/adapters/openai_agents.py +335 -11
- cortexhub/client.py +84 -1
- {cortexhub-0.1.3.dist-info → cortexhub-0.1.4.dist-info}/METADATA +13 -1
- {cortexhub-0.1.3.dist-info → cortexhub-0.1.4.dist-info}/RECORD +9 -9
- {cortexhub-0.1.3.dist-info → cortexhub-0.1.4.dist-info}/WHEEL +1 -1
- {cortexhub-0.1.3.dist-info → cortexhub-0.1.4.dist-info}/licenses/LICENSE +0 -0
cortexhub/adapters/langgraph.py
CHANGED
```diff
@@ -33,6 +33,11 @@ _ORIGINAL_CHAT_AINVOKE_ATTR = "__cortexhub_original_chat_ainvoke__"
 _PATCHED_LLM_ATTR = "__cortexhub_llm_patched__"
 _ORIGINAL_TOOLNODE_INIT_ATTR = "__cortexhub_original_toolnode_init__"
 _PATCHED_TOOLNODE_ATTR = "__cortexhub_toolnode_patched__"
+_ORIGINAL_GRAPH_INVOKE_ATTR = "__cortexhub_original_graph_invoke__"
+_ORIGINAL_GRAPH_AINVOKE_ATTR = "__cortexhub_original_graph_ainvoke__"
+_ORIGINAL_GRAPH_STREAM_ATTR = "__cortexhub_original_graph_stream__"
+_ORIGINAL_GRAPH_ASTREAM_ATTR = "__cortexhub_original_graph_astream__"
+_PATCHED_GRAPH_RUN_ATTR = "__cortexhub_graph_run_patched__"
 
 
 class LangGraphAdapter(ToolAdapter):
```
```diff
@@ -126,6 +131,9 @@ class LangGraphAdapter(ToolAdapter):
             # Patch LLM invoke for LLM call governance
             self._patch_llm_invoke(cortex_hub)
 
+            # Patch graph execution for run completion events
+            self._patch_run_completion(cortex_hub)
+
         except ImportError:
             logger.debug("LangGraph/LangChain not available, skipping adapter")
         except Exception as e:
```
```diff
@@ -241,6 +249,95 @@ class LangGraphAdapter(ToolAdapter):
         except Exception as e:
             logger.debug("LangGraph ToolNode patch skipped", reason=str(e))
 
+    def _patch_run_completion(self, cortex_hub) -> None:
+        """Patch LangGraph compiled graph execution to emit run completion."""
+        try:
+            from langgraph.graph.state import CompiledStateGraph
+
+            if getattr(CompiledStateGraph, _PATCHED_GRAPH_RUN_ATTR, False):
+                return
+
+            if not hasattr(CompiledStateGraph, _ORIGINAL_GRAPH_INVOKE_ATTR):
+                setattr(CompiledStateGraph, _ORIGINAL_GRAPH_INVOKE_ATTR, CompiledStateGraph.invoke)
+            if not hasattr(CompiledStateGraph, _ORIGINAL_GRAPH_AINVOKE_ATTR):
+                setattr(CompiledStateGraph, _ORIGINAL_GRAPH_AINVOKE_ATTR, CompiledStateGraph.ainvoke)
+            if not hasattr(CompiledStateGraph, _ORIGINAL_GRAPH_STREAM_ATTR):
+                setattr(CompiledStateGraph, _ORIGINAL_GRAPH_STREAM_ATTR, CompiledStateGraph.stream)
+            if not hasattr(CompiledStateGraph, _ORIGINAL_GRAPH_ASTREAM_ATTR):
+                setattr(CompiledStateGraph, _ORIGINAL_GRAPH_ASTREAM_ATTR, CompiledStateGraph.astream)
+
+            original_invoke = getattr(CompiledStateGraph, _ORIGINAL_GRAPH_INVOKE_ATTR)
+            original_ainvoke = getattr(CompiledStateGraph, _ORIGINAL_GRAPH_AINVOKE_ATTR)
+            original_stream = getattr(CompiledStateGraph, _ORIGINAL_GRAPH_STREAM_ATTR)
+            original_astream = getattr(CompiledStateGraph, _ORIGINAL_GRAPH_ASTREAM_ATTR)
+
+            def patched_invoke(self, *args, **kwargs):
+                status = "completed"
+                cortex_hub.start_run(framework="langgraph")
+                try:
+                    return original_invoke(self, *args, **kwargs)
+                except Exception:
+                    status = "failed"
+                    raise
+                finally:
+                    cortex_hub.finish_run(framework="langgraph", status=status)
+
+            async def patched_ainvoke(self, *args, **kwargs):
+                status = "completed"
+                cortex_hub.start_run(framework="langgraph")
+                try:
+                    return await original_ainvoke(self, *args, **kwargs)
+                except Exception:
+                    status = "failed"
+                    raise
+                finally:
+                    cortex_hub.finish_run(framework="langgraph", status=status)
+
+            def patched_stream(self, *args, **kwargs):
+                completed = False
+                failed = False
+                cortex_hub.start_run(framework="langgraph")
+                stream_iter = original_stream(self, *args, **kwargs)
+                try:
+                    for item in stream_iter:
+                        yield item
+                    completed = True
+                except Exception:
+                    failed = True
+                    completed = True
+                    raise
+                finally:
+                    if completed:
+                        status = "failed" if failed else "completed"
+                        cortex_hub.finish_run(framework="langgraph", status=status)
+
+            async def patched_astream(self, *args, **kwargs):
+                completed = False
+                failed = False
+                cortex_hub.start_run(framework="langgraph")
+                stream_iter = original_astream(self, *args, **kwargs)
+                try:
+                    async for item in stream_iter:
+                        yield item
+                    completed = True
+                except Exception:
+                    failed = True
+                    completed = True
+                    raise
+                finally:
+                    if completed:
+                        status = "failed" if failed else "completed"
+                        cortex_hub.finish_run(framework="langgraph", status=status)
+
+            CompiledStateGraph.invoke = patched_invoke
+            CompiledStateGraph.ainvoke = patched_ainvoke
+            CompiledStateGraph.stream = patched_stream
+            CompiledStateGraph.astream = patched_astream
+            setattr(CompiledStateGraph, _PATCHED_GRAPH_RUN_ATTR, True)
+            logger.info("LangGraph run completion patched successfully")
+        except Exception as e:
+            logger.debug("LangGraph run completion patch skipped", reason=str(e))
+
     def unpatch(self) -> None:
         """Restore original methods."""
         try:
```
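The `patched_stream`/`patched_astream` wrappers above only report completion once the consumer has actually drained the iterator (or it raised); if the caller abandons the generator early, `completed` stays `False` and no completion event fires. A minimal standalone sketch of that pattern, with a hypothetical `emit` standing in for `cortex_hub.finish_run`:

```python
# Minimal sketch of the stream-wrapper pattern above; `emit` is a
# hypothetical stand-in for cortex_hub.finish_run, not part of the SDK.
def emit(status: str) -> None:
    print(f"run finished: {status}")

def wrap_stream(inner):
    completed = False
    failed = False
    try:
        for item in inner:
            yield item
        completed = True  # only reached when the consumer drains the stream
    except Exception:
        failed = True
        completed = True  # an exception still counts as a finished run
        raise
    finally:
        # On early abandonment (GeneratorExit), `completed` stays False,
        # so no completion event is emitted for a half-consumed stream.
        if completed:
            emit("failed" if failed else "completed")

assert list(wrap_stream(iter(range(3)))) == [0, 1, 2]  # prints "run finished: completed"
```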
```diff
@@ -272,6 +369,28 @@ class LangGraphAdapter(ToolAdapter):
             setattr(ToolNode, _PATCHED_TOOLNODE_ATTR, False)
         except ImportError:
             pass
+
+        # Restore graph execution methods
+        try:
+            from langgraph.graph.state import CompiledStateGraph
+
+            if hasattr(CompiledStateGraph, _ORIGINAL_GRAPH_INVOKE_ATTR):
+                CompiledStateGraph.invoke = getattr(CompiledStateGraph, _ORIGINAL_GRAPH_INVOKE_ATTR)
+            if hasattr(CompiledStateGraph, _ORIGINAL_GRAPH_AINVOKE_ATTR):
+                CompiledStateGraph.ainvoke = getattr(
+                    CompiledStateGraph, _ORIGINAL_GRAPH_AINVOKE_ATTR
+                )
+            if hasattr(CompiledStateGraph, _ORIGINAL_GRAPH_STREAM_ATTR):
+                CompiledStateGraph.stream = getattr(
+                    CompiledStateGraph, _ORIGINAL_GRAPH_STREAM_ATTR
+                )
+            if hasattr(CompiledStateGraph, _ORIGINAL_GRAPH_ASTREAM_ATTR):
+                CompiledStateGraph.astream = getattr(
+                    CompiledStateGraph, _ORIGINAL_GRAPH_ASTREAM_ATTR
+                )
+            setattr(CompiledStateGraph, _PATCHED_GRAPH_RUN_ATTR, False)
+        except ImportError:
+            pass
         except ImportError:
             pass
 
```
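Both adapters in this release follow the same idempotent patch/unpatch idiom: stash the original callable under a dunder attribute, mark the class with a sentinel so a second `patch()` is a no-op, and restore from the stash in `unpatch()`. A toy illustration of the idiom; the `Target` class and `traced` wrapper are invented for the example, only the pattern mirrors the real code:

```python
# Toy illustration of the stash-and-restore idiom used by the adapters.
_ORIG = "__demo_original_greet__"      # invented attribute names
_PATCHED = "__demo_patched__"

class Target:
    def greet(self) -> str:
        return "hello"

def patch() -> None:
    if getattr(Target, _PATCHED, False):  # sentinel makes repeat calls no-ops
        return
    if not hasattr(Target, _ORIG):
        setattr(Target, _ORIG, Target.greet)  # stash the original once
    original = getattr(Target, _ORIG)

    def traced(self):
        print("before call")
        return original(self)

    Target.greet = traced
    setattr(Target, _PATCHED, True)

def unpatch() -> None:
    if hasattr(Target, _ORIG):
        Target.greet = getattr(Target, _ORIG)  # restore from the stash
    setattr(Target, _PATCHED, False)

patch(); patch()                      # second call is a no-op
assert Target().greet() == "hello"    # wrapped, still returns the original value
unpatch()
```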
cortexhub/adapters/openai_agents.py
CHANGED

```diff
@@ -26,6 +26,13 @@ logger = structlog.get_logger(__name__)
 # Attribute names for storing originals
 _ORIGINAL_FUNCTION_TOOL_ATTR = "__cortexhub_original_function_tool__"
 _PATCHED_ATTR = "__cortexhub_patched__"
+_ORIGINAL_RUN_ATTR = "__cortexhub_original_run__"
+_ORIGINAL_RUN_SYNC_ATTR = "__cortexhub_original_run_sync__"
+_ORIGINAL_RUN_STREAMED_ATTR = "__cortexhub_original_run_streamed__"
+_PATCHED_RUN_ATTR = "__cortexhub_run_patched__"
+_ORIGINAL_RESPONSES_FETCH_ATTR = "__cortexhub_original_responses_fetch__"
+_ORIGINAL_CHAT_FETCH_ATTR = "__cortexhub_original_chat_fetch__"
+_PATCHED_LLM_ATTR = "__cortexhub_llm_patched__"
 
 
 class OpenAIAgentsAdapter(ToolAdapter):
```
```diff
@@ -132,18 +139,28 @@ class OpenAIAgentsAdapter(ToolAdapter):
 
         # Replace on_invoke_tool with governed version
         # FunctionTool is a dataclass, so we need to create a new instance
+        from dataclasses import fields
         from agents.tool import FunctionTool
-        … (11 removed lines not shown in the source extract)
+
+        field_names = {field.name for field in fields(FunctionTool)}
+        tool_kwargs = {
+            "name": tool.name,
+            "description": tool.description,
+            "params_json_schema": tool.params_json_schema,
+            "on_invoke_tool": governed_invoke,
+            "strict_json_schema": tool.strict_json_schema,
+            "is_enabled": tool.is_enabled,
+        }
+        if "tool_input_guardrails" in field_names:
+            tool_kwargs["tool_input_guardrails"] = getattr(
+                tool, "tool_input_guardrails", None
+            )
+        if "tool_output_guardrails" in field_names:
+            tool_kwargs["tool_output_guardrails"] = getattr(
+                tool, "tool_output_guardrails", None
+            )
+
+        governed_tool = FunctionTool(**tool_kwargs)
 
         return governed_tool
 
```
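Because `FunctionTool` is a dataclass whose fields differ across SDK versions, the rebuild above filters keyword arguments through `dataclasses.fields` so optional fields like `tool_input_guardrails` are only passed when they exist. A small sketch of that version-tolerant rebuild, using an invented `Tool` dataclass in place of `agents.tool.FunctionTool`:

```python
# Sketch of the version-tolerant rebuild: only pass kwargs the dataclass
# actually declares. `Tool` is an invented stand-in for FunctionTool.
from dataclasses import dataclass, fields

@dataclass
class Tool:
    name: str
    description: str
    # Newer SDK versions might add extra fields; older ones don't have them.

def rebuild(tool: Tool, **overrides):
    field_names = {f.name for f in fields(type(tool))}
    kwargs = {name: getattr(tool, name) for name in field_names}
    # Silently drop overrides the installed version doesn't know about.
    kwargs.update({k: v for k, v in overrides.items() if k in field_names})
    return type(tool)(**kwargs)

t = Tool(name="search", description="web search")
print(rebuild(t, description="governed web search"))
```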
```diff
@@ -158,6 +175,9 @@ class OpenAIAgentsAdapter(ToolAdapter):
             setattr(tool_module, _PATCHED_ATTR, True)
 
             logger.info("OpenAI Agents adapter patched successfully")
+
+            self._patch_run_completion(cortex_hub)
+            self._patch_llm_calls(cortex_hub)
 
         except ImportError:
             logger.debug("OpenAI Agents SDK not installed, skipping")
```
```diff
@@ -180,6 +200,35 @@ class OpenAIAgentsAdapter(ToolAdapter):
             setattr(tool_module, _PATCHED_ATTR, False)
 
             logger.info("OpenAI Agents adapter unpatched")
+
+            try:
+                from agents.run import Runner
+
+                if hasattr(Runner, _ORIGINAL_RUN_ATTR):
+                    Runner.run = getattr(Runner, _ORIGINAL_RUN_ATTR)
+                if hasattr(Runner, _ORIGINAL_RUN_SYNC_ATTR):
+                    Runner.run_sync = getattr(Runner, _ORIGINAL_RUN_SYNC_ATTR)
+                if hasattr(Runner, _ORIGINAL_RUN_STREAMED_ATTR):
+                    Runner.run_streamed = getattr(Runner, _ORIGINAL_RUN_STREAMED_ATTR)
+                setattr(Runner, _PATCHED_RUN_ATTR, False)
+            except ImportError:
+                pass
+            try:
+                from agents.models.openai_responses import OpenAIResponsesModel
+                from agents.models.openai_chatcompletions import OpenAIChatCompletionsModel
+
+                if hasattr(OpenAIResponsesModel, _ORIGINAL_RESPONSES_FETCH_ATTR):
+                    OpenAIResponsesModel._fetch_response = getattr(
+                        OpenAIResponsesModel, _ORIGINAL_RESPONSES_FETCH_ATTR
+                    )
+                if hasattr(OpenAIChatCompletionsModel, _ORIGINAL_CHAT_FETCH_ATTR):
+                    OpenAIChatCompletionsModel._fetch_response = getattr(
+                        OpenAIChatCompletionsModel, _ORIGINAL_CHAT_FETCH_ATTR
+                    )
+                setattr(OpenAIResponsesModel, _PATCHED_LLM_ATTR, False)
+                setattr(OpenAIChatCompletionsModel, _PATCHED_LLM_ATTR, False)
+            except ImportError:
+                pass
         except ImportError:
             pass
 
```
```diff
@@ -190,3 +239,278 @@ class OpenAIAgentsAdapter(ToolAdapter):
     def _discover_tools(self) -> list[dict[str, Any]]:
         """Discover tools from OpenAI Agents SDK (best-effort)."""
         return []
+
+    def _patch_run_completion(self, cortex_hub) -> None:
+        """Patch Runner run methods to emit run completion."""
+        try:
+            from agents.run import Runner
+
+            if getattr(Runner, _PATCHED_RUN_ATTR, False):
+                return
+
+            if not hasattr(Runner, _ORIGINAL_RUN_ATTR):
+                setattr(Runner, _ORIGINAL_RUN_ATTR, Runner.__dict__.get("run", Runner.run))
+            if not hasattr(Runner, _ORIGINAL_RUN_SYNC_ATTR):
+                setattr(Runner, _ORIGINAL_RUN_SYNC_ATTR, Runner.__dict__.get("run_sync", Runner.run_sync))
+            if not hasattr(Runner, _ORIGINAL_RUN_STREAMED_ATTR):
+                setattr(
+                    Runner,
+                    _ORIGINAL_RUN_STREAMED_ATTR,
+                    Runner.__dict__.get("run_streamed", Runner.run_streamed),
+                )
+
+            original_run_descriptor = getattr(Runner, _ORIGINAL_RUN_ATTR)
+            original_run_sync_descriptor = getattr(Runner, _ORIGINAL_RUN_SYNC_ATTR)
+            original_run_streamed_descriptor = getattr(Runner, _ORIGINAL_RUN_STREAMED_ATTR)
+            original_run = (
+                original_run_descriptor.__get__(None, Runner)
+                if hasattr(original_run_descriptor, "__get__")
+                else original_run_descriptor
+            )
+            original_run_sync = (
+                original_run_sync_descriptor.__get__(None, Runner)
+                if hasattr(original_run_sync_descriptor, "__get__")
+                else original_run_sync_descriptor
+            )
+            original_run_streamed = (
+                original_run_streamed_descriptor.__get__(None, Runner)
+                if hasattr(original_run_streamed_descriptor, "__get__")
+                else original_run_streamed_descriptor
+            )
+
+            @classmethod
+            async def patched_run(cls, *args, **kwargs):
+                status = "completed"
+                cortex_hub.start_run(framework="openai_agents")
+                try:
+                    return await original_run(*args, **kwargs)
+                except Exception:
+                    status = "failed"
+                    raise
+                finally:
+                    cortex_hub.finish_run(framework="openai_agents", status=status)
+
+            @classmethod
+            def patched_run_sync(cls, *args, **kwargs):
+                status = "completed"
+                cortex_hub.start_run(framework="openai_agents")
+                try:
+                    return original_run_sync(*args, **kwargs)
+                except Exception:
+                    status = "failed"
+                    raise
+                finally:
+                    cortex_hub.finish_run(framework="openai_agents", status=status)
+
+            @classmethod
+            def patched_run_streamed(cls, *args, **kwargs):
+                cortex_hub.start_run(framework="openai_agents")
+                try:
+                    result = original_run_streamed(*args, **kwargs)
+                except Exception:
+                    cortex_hub.finish_run(framework="openai_agents", status="failed")
+                    raise
+
+                original_stream_events = getattr(result, "stream_events", None)
+                if not callable(original_stream_events):
+                    return result
+
+                async def wrapped_stream_events(*stream_args, **stream_kwargs):
+                    completed = False
+                    failed = False
+                    try:
+                        async for event in original_stream_events(*stream_args, **stream_kwargs):
+                            yield event
+                        completed = True
+                    except Exception:
+                        failed = True
+                        completed = True
+                        raise
+                    finally:
+                        if completed:
+                            status = "failed" if failed else "completed"
+                            cortex_hub.finish_run(framework="openai_agents", status=status)
+
+                result.stream_events = wrapped_stream_events
+                return result
+
+            Runner.run = patched_run
+            Runner.run_sync = patched_run_sync
+            Runner.run_streamed = patched_run_streamed
+            setattr(Runner, _PATCHED_RUN_ATTR, True)
+            logger.info("OpenAI Agents run completion patched successfully")
+        except ImportError:
+            logger.debug("OpenAI Agents run completion patch skipped")
+
```
```diff
+    def _patch_llm_calls(self, cortex_hub: "CortexHub") -> None:
+        """Patch OpenAI Agents models to emit llm.call spans."""
+        try:
+            from agents.models.openai_responses import OpenAIResponsesModel
+            from agents.models.openai_chatcompletions import OpenAIChatCompletionsModel
+
+            if getattr(OpenAIResponsesModel, _PATCHED_LLM_ATTR, False):
+                return
+
+            if not hasattr(OpenAIResponsesModel, _ORIGINAL_RESPONSES_FETCH_ATTR):
+                setattr(
+                    OpenAIResponsesModel,
+                    _ORIGINAL_RESPONSES_FETCH_ATTR,
+                    OpenAIResponsesModel._fetch_response,
+                )
+            if not hasattr(OpenAIChatCompletionsModel, _ORIGINAL_CHAT_FETCH_ATTR):
+                setattr(
+                    OpenAIChatCompletionsModel,
+                    _ORIGINAL_CHAT_FETCH_ATTR,
+                    OpenAIChatCompletionsModel._fetch_response,
+                )
+
+            original_responses_fetch = getattr(
+                OpenAIResponsesModel, _ORIGINAL_RESPONSES_FETCH_ATTR
+            )
+            original_chat_fetch = getattr(
+                OpenAIChatCompletionsModel, _ORIGINAL_CHAT_FETCH_ATTR
+            )
+
+            def _with_system_prompt(system_instructions, input_payload):
+                if not system_instructions:
+                    return input_payload
+                if isinstance(input_payload, list):
+                    return [{"role": "system", "content": system_instructions}] + input_payload
+                return [{"role": "system", "content": system_instructions}, input_payload]
+
+            def _split_prompt_override(prompt_override, system_instructions, input_payload):
+                if prompt_override is None:
+                    return system_instructions, input_payload
+                if isinstance(prompt_override, list) and prompt_override:
+                    first = prompt_override[0]
+                    if (
+                        isinstance(first, dict)
+                        and first.get("role") == "system"
+                        and isinstance(first.get("content"), str)
+                    ):
+                        return first["content"], prompt_override[1:]
+                    return system_instructions, prompt_override
+                return system_instructions, prompt_override
+
+            async def patched_responses_fetch(
+                self,
+                system_instructions,
+                input,
+                model_settings,
+                tools,
+                output_schema,
+                handoffs,
+                previous_response_id,
+                stream=False,
+            ):
+                prompt = _with_system_prompt(system_instructions, input)
+                model_name = str(getattr(self, "model", "unknown"))
+
+                async def call_original(prompt_override):
+                    new_system, new_input = _split_prompt_override(
+                        prompt_override,
+                        system_instructions,
+                        input,
+                    )
+                    return await original_responses_fetch(
+                        self,
+                        new_system,
+                        new_input,
+                        model_settings,
+                        tools,
+                        output_schema,
+                        handoffs,
+                        previous_response_id,
+                        stream=stream,
+                    )
+
+                llm_metadata = {
+                    "kind": "llm",
+                    "framework": "openai_agents",
+                    "model": model_name,
+                    "prompt": prompt,
+                    "call_original": call_original,
+                }
+                governed = govern_execution(
+                    tool_fn=lambda *a, **kw: original_responses_fetch(
+                        self,
+                        system_instructions,
+                        input,
+                        model_settings,
+                        tools,
+                        output_schema,
+                        handoffs,
+                        previous_response_id,
+                        stream=stream,
+                    ),
+                    tool_metadata=llm_metadata,
+                    cortex_hub=cortex_hub,
+                )
+                return await governed()
+
+            async def patched_chat_fetch(
+                self,
+                system_instructions,
+                input,
+                model_settings,
+                tools,
+                output_schema,
+                handoffs,
+                span,
+                tracing,
+                stream=False,
+            ):
+                prompt = _with_system_prompt(system_instructions, input)
+                model_name = str(getattr(self, "model", "unknown"))
+
+                async def call_original(prompt_override):
+                    new_system, new_input = _split_prompt_override(
+                        prompt_override,
+                        system_instructions,
+                        input,
+                    )
+                    return await original_chat_fetch(
+                        self,
+                        new_system,
+                        new_input,
+                        model_settings,
+                        tools,
+                        output_schema,
+                        handoffs,
+                        span,
+                        tracing,
+                        stream=stream,
+                    )
+
+                llm_metadata = {
+                    "kind": "llm",
+                    "framework": "openai_agents",
+                    "model": model_name,
+                    "prompt": prompt,
+                    "call_original": call_original,
+                }
+                governed = govern_execution(
+                    tool_fn=lambda *a, **kw: original_chat_fetch(
+                        self,
+                        system_instructions,
+                        input,
+                        model_settings,
+                        tools,
+                        output_schema,
+                        handoffs,
+                        span,
+                        tracing,
+                        stream=stream,
+                    ),
+                    tool_metadata=llm_metadata,
+                    cortex_hub=cortex_hub,
+                )
+                return await governed()
+
+            OpenAIResponsesModel._fetch_response = patched_responses_fetch
+            OpenAIChatCompletionsModel._fetch_response = patched_chat_fetch
+            setattr(OpenAIResponsesModel, _PATCHED_LLM_ATTR, True)
+            setattr(OpenAIChatCompletionsModel, _PATCHED_LLM_ATTR, True)
+            logger.info("OpenAI Agents LLM interception patched successfully")
+        except Exception as e:
+            logger.debug("OpenAI Agents LLM interception skipped", reason=str(e))
```
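`_with_system_prompt` folds the system instructions and the input payload into a single message list for policy evaluation, and `_split_prompt_override` undoes the fold so a governed prompt override can be passed back through the original `_fetch_response` signature. The two helpers restated standalone, with a round-trip check:

```python
# The fold/unfold helpers from the hunk above, restated standalone.
def with_system_prompt(system_instructions, input_payload):
    if not system_instructions:
        return input_payload
    if isinstance(input_payload, list):
        return [{"role": "system", "content": system_instructions}] + input_payload
    return [{"role": "system", "content": system_instructions}, input_payload]

def split_prompt_override(prompt_override, system_instructions, input_payload):
    if prompt_override is None:
        return system_instructions, input_payload
    if isinstance(prompt_override, list) and prompt_override:
        first = prompt_override[0]
        if (
            isinstance(first, dict)
            and first.get("role") == "system"
            and isinstance(first.get("content"), str)
        ):
            return first["content"], prompt_override[1:]  # unfold the system message
        return system_instructions, prompt_override
    return system_instructions, prompt_override

# Round trip: fold, then split back into (system, input).
folded = with_system_prompt("be brief", [{"role": "user", "content": "hi"}])
system, rest = split_prompt_override(folded, "be brief", [{"role": "user", "content": "hi"}])
assert system == "be brief" and rest == [{"role": "user", "content": "hi"}]
```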
cortexhub/client.py
CHANGED
```diff
@@ -26,6 +26,7 @@ Privacy Mode:
     - privacy=False: Raw data included (for testing policies in dev/staging)
 """
 
+import contextvars
 import json
 import os
 import time
@@ -65,6 +66,11 @@ from cortexhub.policy.models import (
 from cortexhub.version import __version__
 
 logger = structlog.get_logger(__name__)
+
+_run_depth: contextvars.ContextVar[int] = contextvars.ContextVar("cortexhub_run_depth", default=0)
+_session_id_var: contextvars.ContextVar[str | None] = contextvars.ContextVar(
+    "cortexhub_session_id",
+    default=None,
+)
 
 
 class CortexHub:
```
```diff
@@ -138,7 +144,7 @@ class CortexHub:
         # Internal state
         self._project_id: str | None = None
         self._sdk_config = None  # SDKConfig from backend
-        self.
+        self._session_id = self._generate_session_id()
 
         # Initialize OpenTelemetry
         self._tracer_provider = None
```
```diff
@@ -1998,6 +2004,83 @@ class CortexHub:
             self._tracer_provider.force_flush()
         return True
 
+    @property
+    def session_id(self) -> str:
+        """Return the current session ID for this execution context."""
+        return _session_id_var.get() or self._session_id
+
+    def finish_run(
+        self,
+        *,
+        framework: str | None = None,
+        status: str = "completed",
+        metadata: dict[str, Any] | None = None,
+    ) -> None:
+        """Emit a run completion span and flush telemetry."""
+        depth = _run_depth.get()
+        if depth > 0:
+            _run_depth.set(depth - 1)
+        if depth > 1:
+            return
+
+        normalized_status = "failed" if status == "failed" else "completed"
+        span_name = "run.failed" if normalized_status == "failed" else "run.completed"
+
+        with self._tracer.start_as_current_span(
+            name=span_name,
+            kind=trace.SpanKind.INTERNAL,
+        ) as span:
+            span.set_attribute("cortexhub.session.id", self.session_id)
+            span.set_attribute("cortexhub.agent.id", self.agent_id)
+            span.set_attribute("cortexhub.run.status", normalized_status)
+
+            if framework:
+                span.set_attribute("cortexhub.run.framework", framework)
+            if metadata:
+                span.set_attribute(
+                    "cortexhub.run.metadata",
+                    json.dumps(metadata, default=str),
+                )
+
+            if normalized_status == "failed":
+                span.set_status(Status(StatusCode.ERROR, "Run failed"))
+            else:
+                span.set_status(Status(StatusCode.OK))
+
+        self.export_telemetry()
+
+    def start_run(
+        self,
+        *,
+        framework: str | None = None,
+        metadata: dict[str, Any] | None = None,
+    ) -> None:
+        """Emit a run started span."""
+        depth = _run_depth.get()
+        if depth == 0:
+            _session_id_var.set(self._generate_session_id())
+        _run_depth.set(depth + 1)
+        if depth > 0:
+            return
+
+        with self._tracer.start_as_current_span(
+            name="run.started",
+            kind=trace.SpanKind.INTERNAL,
+        ) as span:
+            span.set_attribute("cortexhub.session.id", self.session_id)
+            span.set_attribute("cortexhub.agent.id", self.agent_id)
+            span.set_attribute("cortexhub.run.status", "running")
+
+            if framework:
+                span.set_attribute("cortexhub.run.framework", framework)
+            if metadata:
+                span.set_attribute(
+                    "cortexhub.run.metadata",
+                    json.dumps(metadata, default=str),
+                )
+
+            span.set_status(Status(StatusCode.OK))
+
     def has_policies(self) -> bool:
         """Check if enforcement mode is active (policies loaded from CortexHub).
 
```
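`start_run`/`finish_run` use a `ContextVar` as a per-context depth counter, so nested runs (for example, a governed graph invoked from inside another governed run) emit a single `run.started`/`run.completed` pair. A minimal sketch of the guard, with `print` standing in for the real span emission:

```python
# Minimal sketch of the run-depth guard, mirroring the logic above.
import contextvars

_depth: contextvars.ContextVar[int] = contextvars.ContextVar("depth", default=0)

def start_run() -> None:
    depth = _depth.get()
    _depth.set(depth + 1)
    if depth == 0:
        print("run.started")      # only the outermost call emits

def finish_run(status: str = "completed") -> None:
    depth = _depth.get()
    if depth > 0:
        _depth.set(depth - 1)
    if depth > 1:
        return                    # inner frames stay silent
    print(f"run.{status}")

start_run()    # -> run.started
start_run()    # nested: silent
finish_run()   # nested: silent
finish_run()   # -> run.completed
```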
{cortexhub-0.1.3.dist-info → cortexhub-0.1.4.dist-info}/METADATA
CHANGED

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: cortexhub
-Version: 0.1.3
+Version: 0.1.4
 Summary: CortexHub Python SDK - Policy-as-Code for AI Agents
 Project-URL: Homepage, https://cortexhub.ai
 Project-URL: Documentation, https://docs.cortexhub.ai
```
```diff
@@ -39,12 +39,14 @@ Requires-Dist: crewai>=0.50.0; extra == 'all'
 Requires-Dist: langchain-core>=0.2.0; extra == 'all'
 Requires-Dist: langchain-openai>=0.1.0; extra == 'all'
 Requires-Dist: langgraph>=0.2.0; extra == 'all'
+Requires-Dist: litellm>=1.81.5; extra == 'all'
 Requires-Dist: openai-agents>=0.0.3; extra == 'all'
 Provides-Extra: claude-agents
 Requires-Dist: anthropic>=0.40.0; extra == 'claude-agents'
 Requires-Dist: claude-agent-sdk>=0.0.1; extra == 'claude-agents'
 Provides-Extra: crewai
 Requires-Dist: crewai>=0.50.0; extra == 'crewai'
+Requires-Dist: litellm>=1.81.5; extra == 'crewai'
 Provides-Extra: dev
 Requires-Dist: mypy>=1.10.0; extra == 'dev'
 Requires-Dist: pytest-asyncio>=0.24.0; extra == 'dev'
```
````diff
@@ -105,6 +107,16 @@ from langgraph.prebuilt import create_react_agent
 | OpenAI Agents | `Framework.OPENAI_AGENTS` | `pip install cortexhub[openai-agents]` |
 | Claude Agents | `Framework.CLAUDE_AGENTS` | `pip install cortexhub[claude-agents]` |
 
+## Tracing Coverage
+
+All frameworks emit `run.started` and `run.completed`/`run.failed` for each run.
+Tool spans (`tool.invoke`) and model spans (`llm.call`) vary by SDK:
+
+- **LangGraph**: tool calls via `BaseTool.invoke`, LLM calls via `BaseChatModel.invoke/ainvoke`
+- **CrewAI**: tool calls via `CrewStructuredTool.invoke`/`BaseTool.run`, LLM calls via LiteLLM and `BaseLLM.call/acall`
+- **OpenAI Agents**: tool calls via `function_tool`, LLM calls via `OpenAIResponsesModel` and `OpenAIChatCompletionsModel`
+- **Claude Agents**: tool calls via `@tool` and built-in tool hooks; LLM calls run inside the Claude Code CLI and are not intercepted by the Python SDK
+
 ## Configuration
 
 ```bash
````