cortexhub 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cortexhub/__init__.py +143 -0
- cortexhub/adapters/__init__.py +5 -0
- cortexhub/adapters/base.py +131 -0
- cortexhub/adapters/claude_agents.py +322 -0
- cortexhub/adapters/crewai.py +297 -0
- cortexhub/adapters/langgraph.py +386 -0
- cortexhub/adapters/openai_agents.py +192 -0
- cortexhub/audit/__init__.py +25 -0
- cortexhub/audit/events.py +165 -0
- cortexhub/auto_protect.py +128 -0
- cortexhub/backend/__init__.py +5 -0
- cortexhub/backend/client.py +348 -0
- cortexhub/client.py +2149 -0
- cortexhub/config.py +37 -0
- cortexhub/context/__init__.py +5 -0
- cortexhub/context/enricher.py +172 -0
- cortexhub/errors.py +123 -0
- cortexhub/frameworks.py +83 -0
- cortexhub/guardrails/__init__.py +3 -0
- cortexhub/guardrails/injection.py +180 -0
- cortexhub/guardrails/pii.py +378 -0
- cortexhub/guardrails/secrets.py +206 -0
- cortexhub/interceptors/__init__.py +3 -0
- cortexhub/interceptors/llm.py +62 -0
- cortexhub/interceptors/mcp.py +96 -0
- cortexhub/pipeline.py +92 -0
- cortexhub/policy/__init__.py +6 -0
- cortexhub/policy/effects.py +87 -0
- cortexhub/policy/evaluator.py +267 -0
- cortexhub/policy/loader.py +158 -0
- cortexhub/policy/models.py +123 -0
- cortexhub/policy/sync.py +183 -0
- cortexhub/telemetry/__init__.py +40 -0
- cortexhub/telemetry/otel.py +481 -0
- cortexhub/version.py +3 -0
- cortexhub-0.1.0.dist-info/METADATA +275 -0
- cortexhub-0.1.0.dist-info/RECORD +38 -0
- cortexhub-0.1.0.dist-info/WHEEL +4 -0
|
@@ -0,0 +1,192 @@
|
|
|
1
|
+
"""OpenAI Agents SDK adapter for tool interception.
|
|
2
|
+
|
|
3
|
+
Intercepts tool execution by wrapping the function_tool decorator.
|
|
4
|
+
|
|
5
|
+
Architectural rules:
|
|
6
|
+
- Adapter is DUMB plumbing
|
|
7
|
+
- Adapter calls ONE SDK entrypoint: govern_execution()
|
|
8
|
+
- SDK orchestrates everything
|
|
9
|
+
- No governance logic in adapter
|
|
10
|
+
"""
|
|
11
|
+
|
|
12
|
+
import json
|
|
13
|
+
from functools import wraps
|
|
14
|
+
from typing import TYPE_CHECKING, Any, Callable
|
|
15
|
+
|
|
16
|
+
import structlog
|
|
17
|
+
|
|
18
|
+
from cortexhub.adapters.base import ToolAdapter
|
|
19
|
+
from cortexhub.pipeline import govern_execution
|
|
20
|
+
|
|
21
|
+
if TYPE_CHECKING:
|
|
22
|
+
from cortexhub.client import CortexHub
|
|
23
|
+
|
|
24
|
+
# Module-level structlog logger bound to this module's name.
logger = structlog.get_logger(__name__)

# Attribute names used to stash patch state on the patched `agents.tool`
# module: the pristine `function_tool` decorator, and an idempotency flag.
_ORIGINAL_FUNCTION_TOOL_ATTR = "__cortexhub_original_function_tool__"
_PATCHED_ATTR = "__cortexhub_patched__"
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
class OpenAIAgentsAdapter(ToolAdapter):
    """Adapter for OpenAI Agents SDK.

    Wraps the ``function_tool`` decorator to intercept tool creation
    and wrap the ``on_invoke_tool`` method for governance.

    Key properties:
    - Adapter is dumb plumbing
    - Calls SDK entrypoint, doesn't implement governance
    - Wraps decorator to intercept all tools
    - Async-safe via SDK
    """

    @property
    def framework_name(self) -> str:
        """Framework identifier used in inventory and event metadata."""
        return "openai_agents"

    def _get_framework_modules(self) -> list[str]:
        """Module names whose presence indicates this framework is loaded."""
        return ["agents", "openai_agents"]

    def patch(self) -> None:
        """Patch OpenAI Agents by wrapping the function_tool decorator.

        Best-effort: a missing SDK is logged at debug level; any other
        failure is logged as an error without raising so the host
        application never crashes because governance could not install.
        """
        try:
            import agents
            import agents.tool as tool_module

            # Idempotency guard - re-patching would wrap the wrapper.
            if getattr(tool_module, _PATCHED_ATTR, False):
                logger.info("OpenAI Agents already patched")
                return

            cortex_hub = self.cortex_hub
            tools = self._discover_tools()
            if tools:
                cortex_hub.backend.register_tool_inventory(
                    agent_id=cortex_hub.agent_id,
                    framework=self.framework_name,
                    tools=tools,
                )

            # Stash the pristine decorator exactly once so repeated
            # patch()/unpatch() cycles always restore the true original.
            if not hasattr(tool_module, _ORIGINAL_FUNCTION_TOOL_ATTR):
                setattr(tool_module, _ORIGINAL_FUNCTION_TOOL_ATTR, tool_module.function_tool)

            original_function_tool = getattr(tool_module, _ORIGINAL_FUNCTION_TOOL_ATTR)

            def patched_function_tool(
                func: Callable | None = None,
                *,
                name_override: str | None = None,
                description_override: str | None = None,
                use_docstring_info: bool = True,
                failure_error_function: Callable | None = None,
                strict_mode: bool = True,
                is_enabled: bool | Callable = True,
            ):
                """Wrapped function_tool that adds CortexHub governance."""

                def decorator(fn: Callable) -> Any:
                    # Build the real FunctionTool first, then swap its
                    # invocation hook for a governed one.
                    tool = original_function_tool(
                        fn,
                        name_override=name_override,
                        description_override=description_override,
                        use_docstring_info=use_docstring_info,
                        failure_error_function=failure_error_function,
                        strict_mode=strict_mode,
                        is_enabled=is_enabled,
                    )

                    # Capture everything governed_invoke needs up front so
                    # the closure does not depend on the (replaced) tool.
                    original_invoke = tool.on_invoke_tool
                    tool_name = tool.name
                    tool_description = tool.description

                    async def governed_invoke(ctx, input_json: str) -> Any:
                        """Governed tool invocation."""
                        # Parse args so governance can inspect them; fall
                        # back to the raw payload on malformed JSON.
                        try:
                            args = json.loads(input_json) if input_json else {}
                        except json.JSONDecodeError:
                            args = {"_raw": input_json}

                        tool_metadata = {
                            "name": tool_name,
                            "description": tool_description,
                            "framework": "openai_agents",
                        }

                        # The governed callable deliberately ignores the
                        # kwargs it receives and replays the original
                        # (ctx, input_json) pair; args exist only for
                        # policy/guardrail inspection.
                        governed_fn = govern_execution(
                            tool_fn=lambda **kw: original_invoke(ctx, input_json),
                            tool_metadata=tool_metadata,
                            cortex_hub=cortex_hub,
                        )

                        result = governed_fn(**args)
                        # govern_execution may return an awaitable when the
                        # underlying tool is async - await it transparently.
                        if hasattr(result, '__await__'):
                            result = await result
                        return result

                    # FunctionTool is a dataclass. Use dataclasses.replace()
                    # instead of re-listing every field so this keeps working
                    # across SDK versions (the explicit constructor broke on
                    # versions without tool_input_guardrails /
                    # tool_output_guardrails, and silently dropped any new
                    # fields the SDK added).
                    import dataclasses

                    return dataclasses.replace(tool, on_invoke_tool=governed_invoke)

                # Support both @function_tool and @function_tool(...) usage.
                if func is not None:
                    return decorator(func)
                return decorator

            # Apply patch at both access paths (agents.tool and agents).
            tool_module.function_tool = patched_function_tool
            agents.function_tool = patched_function_tool
            setattr(tool_module, _PATCHED_ATTR, True)

            logger.info("OpenAI Agents adapter patched successfully")

        except ImportError:
            logger.debug("OpenAI Agents SDK not installed, skipping")
        except Exception as e:
            logger.error("Failed to patch OpenAI Agents", error=str(e))

    def unpatch(self) -> None:
        """Restore original function_tool decorator.

        No-op when the SDK is absent or patch() was never applied.
        """
        try:
            import agents
            import agents.tool as tool_module

            if not hasattr(tool_module, _ORIGINAL_FUNCTION_TOOL_ATTR):
                logger.debug("OpenAI Agents not patched, nothing to restore")
                return

            original = getattr(tool_module, _ORIGINAL_FUNCTION_TOOL_ATTR)
            tool_module.function_tool = original
            agents.function_tool = original
            setattr(tool_module, _PATCHED_ATTR, False)

            logger.info("OpenAI Agents adapter unpatched")
        except ImportError:
            pass

    def intercept(self, tool_fn, tool_name, args, **kwargs):
        """Not used - governance happens via wrapped decorator."""
        raise NotImplementedError("Use govern_execution via wrapped decorator")

    def _discover_tools(self) -> list[dict[str, Any]]:
        """Discover tools from OpenAI Agents SDK (best-effort).

        Currently returns an empty list; inventory registration in patch()
        is therefore skipped until discovery is implemented.
        """
        return []
|
|
@@ -0,0 +1,25 @@
|
|
|
1
|
+
"""Audit trail schemas for enforcement and compliance."""
|
|
2
|
+
|
|
3
|
+
from cortexhub.audit.events import (
|
|
4
|
+
AgentDecisionEvent,
|
|
5
|
+
ApprovalRequestEvent,
|
|
6
|
+
BaseEvent,
|
|
7
|
+
ComplianceEvent,
|
|
8
|
+
GuardrailViolationEvent,
|
|
9
|
+
LLMCallEvent,
|
|
10
|
+
PolicyDecisionEvent,
|
|
11
|
+
ToolExecutionEvent,
|
|
12
|
+
ToolInvocationEvent,
|
|
13
|
+
)
|
|
14
|
+
|
|
15
|
+
__all__ = [
|
|
16
|
+
"BaseEvent",
|
|
17
|
+
"ToolInvocationEvent",
|
|
18
|
+
"PolicyDecisionEvent",
|
|
19
|
+
"GuardrailViolationEvent",
|
|
20
|
+
"ApprovalRequestEvent",
|
|
21
|
+
"ToolExecutionEvent",
|
|
22
|
+
"LLMCallEvent",
|
|
23
|
+
"AgentDecisionEvent",
|
|
24
|
+
"ComplianceEvent",
|
|
25
|
+
]
|
|
@@ -0,0 +1,165 @@
|
|
|
1
|
+
"""Audit event schemas for governance telemetry.
|
|
2
|
+
|
|
3
|
+
All events include trace_id for traceability across spans and debugging.
|
|
4
|
+
Uses Pydantic for type-safe, structured events.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from datetime import datetime, timezone
|
|
8
|
+
from typing import Any
|
|
9
|
+
|
|
10
|
+
from pydantic import BaseModel, Field
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class BaseEvent(BaseModel):
    """Base event with common fields for all audit events."""

    # Discriminator string (e.g. "tool.invocation"); overridden by subclasses.
    event_type: str
    # Trace identifier for correlating events across spans.
    trace_id: str
    # Session identifier; None when no session context exists.
    session_id: str | None
    # Creation time, timezone-aware UTC. datetime.utcnow() is deprecated
    # (Python 3.12+) and returned naive timestamps, which serialize without
    # an offset and compare incorrectly against aware datetimes.
    timestamp: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
    sequence: int  # Monotonic sequence number per session for replay ordering
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
class ToolInvocationEvent(BaseEvent):
    """Event logged when a tool is invoked.

    SDK is DUMB - just sends metadata, no classifications or counts.
    Backend aggregates and uses LLM for analysis.

    NOTE: No guardrail findings here - tools NEED the sensitive data to work.
    NOTE: Argument VALUES only sent when privacy=False (for dev/testing).
    """

    event_type: str = "tool.invocation"
    tool_name: str  # Name the framework registered the tool under
    tool_description: str | None = None  # Human-readable description from framework
    arg_names: list[str] = Field(default_factory=list)  # Argument names only (NOT values)
    framework: str  # "langchain", "openai_agents", etc.
    agent_id: str | None = None  # Agent identifier (from cortexhub.init)

    # Only populated when privacy=False (for testing policies in dev/staging)
    args: dict[str, Any] | None = None  # Raw argument values (NEVER in production!)
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
class PolicyDecisionEvent(BaseEvent):
    """Event logged for policy evaluation results."""

    event_type: str = "policy.decision"
    effect: str  # "allow", "deny", "escalate"
    policy_id: str | None  # Matched policy; None when no policy applied
    reasoning: str  # Human-readable explanation of the decision
    latency_ms: float  # Time taken to evaluate policy
    agent_id: str | None = None  # Agent identifier
    tool_name: str | None = None  # Tool being evaluated
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
class GuardrailViolationEvent(BaseEvent):
    """Event logged when a guardrail detects a violation."""

    event_type: str = "guardrail.violation"
    guardrail_type: str  # "pii", "secrets", "injection"
    findings: list[dict[str, Any]]  # Detailed findings (entities, locations, scores)
    blocked: bool  # Whether execution was blocked
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
class ApprovalRequestEvent(BaseEvent):
    """Event logged for approval requests (ESCALATE flow)."""

    event_type: str = "approval.request"
    tool_name: str  # Tool whose execution requires approval
    args: dict[str, Any]  # Arguments shown to the approver
    approved: bool | None  # None if pending, True/False after decision
    approver: str | None  # Who approved/denied (None for auto-approve/deny)
    approval_mode: str  # "auto-approve", "auto-deny", "cli-prompt"
|
|
74
|
+
|
|
75
|
+
|
|
76
|
+
class ToolExecutionEvent(BaseEvent):
    """Event logged after tool execution completes."""

    event_type: str = "tool.execution"
    tool_name: str  # Tool that was executed
    success: bool  # True when the tool returned without raising
    error: str | None  # Error message if execution failed
    latency_ms: float  # Time taken to execute tool
    agent_id: str | None = None  # Agent identifier

    # Only populated when privacy=False (for testing policies in dev/staging)
    result: Any | None = None  # Raw result (NEVER in production!)
|
|
88
|
+
|
|
89
|
+
|
|
90
|
+
class LLMGuardrailFindings(BaseModel):
    """Guardrail findings for LLM calls.

    THIS is where guardrails matter - sensitive data should NOT go to LLMs.
    """

    # Each bucket defaults to an "empty findings" dict. The factories are
    # lambdas so every event instance receives its own mutable dict.
    pii_in_prompt: dict[str, Any] = Field(
        default_factory=lambda: {
            "detected": False,
            "count": 0,
            "types": [],     # e.g. ["email_address", "person", "ssn"]
            "findings": [],  # e.g. [{"type": "email", "score": 0.95}]
        }
    )
    secrets_in_prompt: dict[str, Any] = Field(
        default_factory=lambda: {
            "detected": False,
            "count": 0,
            "types": [],     # e.g. ["api_key", "password"]
            "findings": [],
        }
    )
    pii_in_response: dict[str, Any] = Field(
        default_factory=lambda: {
            "detected": False,
            "count": 0,
            "types": [],
            "findings": [],
        }
    )
    prompt_manipulation: dict[str, Any] = Field(
        default_factory=lambda: {
            "detected": False,
            "count": 0,
            "patterns": [],  # e.g. ["ignore_instructions", "jailbreak"]
            "findings": [],
        }
    )
|
|
119
|
+
|
|
120
|
+
|
|
121
|
+
class LLMCallEvent(BaseEvent):
    """Event logged for LLM API calls.

    Guardrails ARE relevant here - sensitive data flowing to LLMs is a risk.
    """

    event_type: str = "llm.call"
    model: str  # "gpt-4", "claude-3", etc.
    prompt_tokens: int | None = None  # Token count of the prompt, if known
    completion_tokens: int | None = None  # Token count of the completion, if known
    latency_ms: float = 0.0  # Round-trip latency of the LLM call
    cost_estimate: float | None = None  # Estimated cost in account currency, if computed
    agent_id: str | None = None  # Agent identifier (same as tool calls)

    # Rich guardrail findings - THIS is where guardrails matter
    guardrail_findings: LLMGuardrailFindings = Field(default_factory=LLMGuardrailFindings)

    # Only populated when privacy=False (for testing policies in dev/staging)
    prompt: str | None = None  # Raw prompt content (NEVER in production!)
    response: str | None = None  # Raw response content (NEVER in production!)
|
|
141
|
+
|
|
142
|
+
|
|
143
|
+
class AgentDecisionEvent(BaseEvent):
    """Event logged for agent decision-making."""

    event_type: str = "agent.decision"
    agent_id: str  # Agent making the decision (required here, unlike other events)
    agent_role: str | None  # Role/persona of the agent, if the framework exposes one
    decision: str  # What the agent decided to do
    reasoning: str | None  # Why the agent made this decision (from LLM output)
    alternatives_considered: list[str] | None  # Other options the agent considered
    confidence: float | None  # Confidence score (0-1)
    context_used: dict[str, Any]  # What context the agent had
|
|
154
|
+
|
|
155
|
+
|
|
156
|
+
class ComplianceEvent(BaseEvent):
    """Event logged for regulatory compliance tracking."""

    event_type: str = "compliance.audit"
    regulation: str  # "HIPAA", "SOX", "GDPR"
    regulation_section: str | None  # e.g. "164.312(a)(1)"
    access_justification: str  # Why data was accessed (HIPAA minimum necessary)
    data_classification: str | None  # "PHI", "PII", "confidential"
    compliant: bool  # Whether action was compliant
    violations: list[str] | None  # Any violations detected
|
|
@@ -0,0 +1,128 @@
|
|
|
1
|
+
"""Framework auto-detection and automatic protection.
|
|
2
|
+
|
|
3
|
+
Detects imported frameworks and applies appropriate adapters automatically.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
import sys
|
|
7
|
+
from typing import TYPE_CHECKING
|
|
8
|
+
|
|
9
|
+
import structlog
|
|
10
|
+
|
|
11
|
+
if TYPE_CHECKING:
|
|
12
|
+
from cortexhub.client import CortexHub
|
|
13
|
+
|
|
14
|
+
logger = structlog.get_logger(__name__)
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def auto_protect_frameworks(
    cortex_hub: "CortexHub",
    *,
    enable_llm: bool = True,
    enable_tools: bool = True,
) -> None:
    """Auto-detect and patch supported frameworks.

    Checks sys.modules to see which frameworks are loaded, then applies
    appropriate adapters. Each adapter is applied best-effort: a failure is
    logged and never raised, so one broken integration cannot disable the
    others.

    Args:
        cortex_hub: CortexHub instance
        enable_llm: Patch LLM-client frameworks (LiteLLM) when detected.
        enable_tools: Patch tool frameworks (LangChain, OpenAI Agents,
            CrewAI, LlamaIndex) when detected.
    """
    # Local import keeps module import-time side effects minimal.
    import importlib

    # One spec per supported framework, replacing five copy-pasted
    # detect/import/patch stanzas:
    # (enabled-flag, detector, human label, adapter module, adapter class)
    adapter_specs = [
        (enable_tools, _is_langchain_available, "LangChain",
         "cortexhub.adapters.langchain", "LangChainAdapter"),
        (enable_tools, _is_openai_agents_available, "OpenAI Agents",
         "cortexhub.adapters.openai_agents", "OpenAIAgentsAdapter"),
        (enable_tools, _is_crewai_available, "CrewAI",
         "cortexhub.adapters.crewai", "CrewAIAdapter"),
        (enable_tools, _is_llamaindex_available, "LlamaIndex",
         "cortexhub.adapters.llamaindex", "LlamaIndexAdapter"),
        (enable_llm, _is_litellm_available, "LiteLLM",
         "cortexhub.adapters.litellm", "LiteLLMAdapter"),
    ]

    protected_count = 0
    for enabled, detect, label, module_path, class_name in adapter_specs:
        if not enabled or not detect():
            continue
        logger.info(f"{label} detected - applying adapter")
        try:
            # Import lazily so missing optional adapters only fail here.
            adapter_cls = getattr(importlib.import_module(module_path), class_name)
            adapter_cls(cortex_hub).patch()
            protected_count += 1
        except Exception as e:
            logger.error(f"Failed to apply {label} adapter", error=str(e))

    if protected_count == 0:
        logger.warning(
            "No supported frameworks detected. "
            "Make sure you import your framework before calling auto_protect()"
        )
    else:
        logger.info(f"Auto-protection enabled for {protected_count} framework(s)")
|
|
99
|
+
|
|
100
|
+
|
|
101
|
+
def _is_langchain_available() -> bool:
    """Return True if any LangChain entry module has already been imported."""
    wanted = ("langchain", "langchain_core", "langchain.tools")
    for module_name in wanted:
        if module_name in sys.modules:
            return True
    return False
|
|
109
|
+
|
|
110
|
+
|
|
111
|
+
def _is_openai_agents_available() -> bool:
    """Return True if the OpenAI Agents SDK has already been imported."""
    loaded = sys.modules
    return any(name in loaded for name in ("openai_agents", "agents"))
|
|
114
|
+
|
|
115
|
+
|
|
116
|
+
def _is_crewai_available() -> bool:
    """Return True if CrewAI has already been imported."""
    return any(name == "crewai" for name in sys.modules)
|
|
119
|
+
|
|
120
|
+
|
|
121
|
+
def _is_llamaindex_available() -> bool:
    """Return True if LlamaIndex has already been imported."""
    loaded = sys.modules
    return "llama_index" in loaded or "llama_index.core" in loaded
|
|
124
|
+
|
|
125
|
+
|
|
126
|
+
def _is_litellm_available() -> bool:
    """Return True if LiteLLM has already been imported."""
    return any(name == "litellm" for name in sys.modules)
|