aip-agents-binary 0.5.21__py3-none-macosx_13_0_arm64.whl → 0.6.8__py3-none-macosx_13_0_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aip_agents/agent/__init__.py +44 -4
- aip_agents/agent/base_langgraph_agent.py +169 -74
- aip_agents/agent/base_langgraph_agent.pyi +3 -2
- aip_agents/agent/langgraph_memory_enhancer_agent.py +368 -34
- aip_agents/agent/langgraph_memory_enhancer_agent.pyi +3 -2
- aip_agents/agent/langgraph_react_agent.py +424 -35
- aip_agents/agent/langgraph_react_agent.pyi +46 -2
- aip_agents/examples/{hello_world_langgraph_bosa_twitter.py → hello_world_langgraph_gl_connector_twitter.py} +10 -7
- aip_agents/examples/hello_world_langgraph_gl_connector_twitter.pyi +5 -0
- aip_agents/examples/hello_world_ptc.py +49 -0
- aip_agents/examples/hello_world_ptc.pyi +5 -0
- aip_agents/examples/hello_world_ptc_custom_tools.py +83 -0
- aip_agents/examples/hello_world_ptc_custom_tools.pyi +7 -0
- aip_agents/examples/hello_world_sentry.py +2 -2
- aip_agents/examples/hello_world_tool_output_client.py +9 -0
- aip_agents/examples/tools/multiply_tool.py +43 -0
- aip_agents/examples/tools/multiply_tool.pyi +18 -0
- aip_agents/guardrails/__init__.py +83 -0
- aip_agents/guardrails/__init__.pyi +6 -0
- aip_agents/guardrails/engines/__init__.py +69 -0
- aip_agents/guardrails/engines/__init__.pyi +4 -0
- aip_agents/guardrails/engines/base.py +90 -0
- aip_agents/guardrails/engines/base.pyi +61 -0
- aip_agents/guardrails/engines/nemo.py +101 -0
- aip_agents/guardrails/engines/nemo.pyi +46 -0
- aip_agents/guardrails/engines/phrase_matcher.py +113 -0
- aip_agents/guardrails/engines/phrase_matcher.pyi +48 -0
- aip_agents/guardrails/exceptions.py +39 -0
- aip_agents/guardrails/exceptions.pyi +23 -0
- aip_agents/guardrails/manager.py +163 -0
- aip_agents/guardrails/manager.pyi +42 -0
- aip_agents/guardrails/middleware.py +199 -0
- aip_agents/guardrails/middleware.pyi +87 -0
- aip_agents/guardrails/schemas.py +63 -0
- aip_agents/guardrails/schemas.pyi +43 -0
- aip_agents/guardrails/utils.py +45 -0
- aip_agents/guardrails/utils.pyi +19 -0
- aip_agents/mcp/client/__init__.py +38 -2
- aip_agents/mcp/client/connection_manager.py +36 -1
- aip_agents/mcp/client/connection_manager.pyi +3 -0
- aip_agents/mcp/client/persistent_session.py +318 -65
- aip_agents/mcp/client/persistent_session.pyi +9 -0
- aip_agents/mcp/client/transports.py +52 -4
- aip_agents/mcp/client/transports.pyi +9 -0
- aip_agents/memory/adapters/base_adapter.py +98 -0
- aip_agents/memory/adapters/base_adapter.pyi +25 -0
- aip_agents/middleware/base.py +8 -0
- aip_agents/middleware/base.pyi +4 -0
- aip_agents/middleware/manager.py +22 -0
- aip_agents/middleware/manager.pyi +4 -0
- aip_agents/ptc/__init__.py +87 -0
- aip_agents/ptc/__init__.pyi +14 -0
- aip_agents/ptc/custom_tools.py +473 -0
- aip_agents/ptc/custom_tools.pyi +184 -0
- aip_agents/ptc/custom_tools_payload.py +400 -0
- aip_agents/ptc/custom_tools_payload.pyi +31 -0
- aip_agents/ptc/custom_tools_templates/__init__.py +1 -0
- aip_agents/ptc/custom_tools_templates/__init__.pyi +0 -0
- aip_agents/ptc/custom_tools_templates/custom_build_function.py.template +23 -0
- aip_agents/ptc/custom_tools_templates/custom_init.py.template +15 -0
- aip_agents/ptc/custom_tools_templates/custom_invoke.py.template +60 -0
- aip_agents/ptc/custom_tools_templates/custom_registry.py.template +87 -0
- aip_agents/ptc/custom_tools_templates/custom_sources_init.py.template +7 -0
- aip_agents/ptc/custom_tools_templates/custom_wrapper.py.template +19 -0
- aip_agents/ptc/doc_gen.py +122 -0
- aip_agents/ptc/doc_gen.pyi +40 -0
- aip_agents/ptc/exceptions.py +57 -0
- aip_agents/ptc/exceptions.pyi +37 -0
- aip_agents/ptc/executor.py +261 -0
- aip_agents/ptc/executor.pyi +99 -0
- aip_agents/ptc/mcp/__init__.py +45 -0
- aip_agents/ptc/mcp/__init__.pyi +7 -0
- aip_agents/ptc/mcp/sandbox_bridge.py +668 -0
- aip_agents/ptc/mcp/sandbox_bridge.pyi +47 -0
- aip_agents/ptc/mcp/templates/__init__.py +1 -0
- aip_agents/ptc/mcp/templates/__init__.pyi +0 -0
- aip_agents/ptc/mcp/templates/mcp_client.py.template +239 -0
- aip_agents/ptc/naming.py +196 -0
- aip_agents/ptc/naming.pyi +85 -0
- aip_agents/ptc/payload.py +26 -0
- aip_agents/ptc/payload.pyi +15 -0
- aip_agents/ptc/prompt_builder.py +673 -0
- aip_agents/ptc/prompt_builder.pyi +59 -0
- aip_agents/ptc/ptc_helper.py +16 -0
- aip_agents/ptc/ptc_helper.pyi +1 -0
- aip_agents/ptc/sandbox_bridge.py +256 -0
- aip_agents/ptc/sandbox_bridge.pyi +38 -0
- aip_agents/ptc/template_utils.py +33 -0
- aip_agents/ptc/template_utils.pyi +13 -0
- aip_agents/ptc/templates/__init__.py +1 -0
- aip_agents/ptc/templates/__init__.pyi +0 -0
- aip_agents/ptc/templates/ptc_helper.py.template +134 -0
- aip_agents/ptc/tool_def_helpers.py +101 -0
- aip_agents/ptc/tool_def_helpers.pyi +38 -0
- aip_agents/ptc/tool_enrichment.py +163 -0
- aip_agents/ptc/tool_enrichment.pyi +60 -0
- aip_agents/sandbox/__init__.py +43 -0
- aip_agents/sandbox/__init__.pyi +5 -0
- aip_agents/sandbox/defaults.py +205 -0
- aip_agents/sandbox/defaults.pyi +30 -0
- aip_agents/sandbox/e2b_runtime.py +295 -0
- aip_agents/sandbox/e2b_runtime.pyi +57 -0
- aip_agents/sandbox/template_builder.py +131 -0
- aip_agents/sandbox/template_builder.pyi +36 -0
- aip_agents/sandbox/types.py +24 -0
- aip_agents/sandbox/types.pyi +14 -0
- aip_agents/sandbox/validation.py +50 -0
- aip_agents/sandbox/validation.pyi +20 -0
- aip_agents/sentry/__init__.py +1 -1
- aip_agents/sentry/sentry.py +33 -12
- aip_agents/sentry/sentry.pyi +5 -4
- aip_agents/tools/__init__.py +20 -3
- aip_agents/tools/__init__.pyi +4 -2
- aip_agents/tools/browser_use/browser_use_tool.py +8 -0
- aip_agents/tools/browser_use/streaming.py +2 -0
- aip_agents/tools/code_sandbox/e2b_cloud_sandbox_extended.py +80 -31
- aip_agents/tools/code_sandbox/e2b_cloud_sandbox_extended.pyi +25 -9
- aip_agents/tools/code_sandbox/e2b_sandbox_tool.py +6 -6
- aip_agents/tools/constants.py +24 -12
- aip_agents/tools/constants.pyi +14 -11
- aip_agents/tools/date_range_tool.py +554 -0
- aip_agents/tools/date_range_tool.pyi +21 -0
- aip_agents/tools/execute_ptc_code.py +357 -0
- aip_agents/tools/execute_ptc_code.pyi +90 -0
- aip_agents/tools/gl_connector/__init__.py +1 -1
- aip_agents/tools/gl_connector/tool.py +62 -30
- aip_agents/tools/gl_connector/tool.pyi +3 -3
- aip_agents/tools/gl_connector_tools.py +119 -0
- aip_agents/tools/gl_connector_tools.pyi +39 -0
- aip_agents/tools/memory_search/__init__.py +8 -1
- aip_agents/tools/memory_search/__init__.pyi +3 -3
- aip_agents/tools/memory_search/mem0.py +114 -1
- aip_agents/tools/memory_search/mem0.pyi +11 -1
- aip_agents/tools/memory_search/schema.py +33 -0
- aip_agents/tools/memory_search/schema.pyi +10 -0
- aip_agents/tools/memory_search_tool.py +8 -0
- aip_agents/tools/memory_search_tool.pyi +2 -2
- aip_agents/utils/langgraph/tool_managers/delegation_tool_manager.py +26 -1
- aip_agents/utils/langgraph/tool_output_management.py +80 -0
- aip_agents/utils/langgraph/tool_output_management.pyi +37 -0
- {aip_agents_binary-0.5.21.dist-info → aip_agents_binary-0.6.8.dist-info}/METADATA +14 -22
- {aip_agents_binary-0.5.21.dist-info → aip_agents_binary-0.6.8.dist-info}/RECORD +144 -58
- {aip_agents_binary-0.5.21.dist-info → aip_agents_binary-0.6.8.dist-info}/WHEEL +1 -1
- aip_agents/examples/demo_memory_recall.py +0 -401
- aip_agents/examples/demo_memory_recall.pyi +0 -58
- aip_agents/examples/hello_world_langgraph_bosa_twitter.pyi +0 -5
- aip_agents/tools/bosa_tools.py +0 -105
- aip_agents/tools/bosa_tools.pyi +0 -37
- {aip_agents_binary-0.5.21.dist-info → aip_agents_binary-0.6.8.dist-info}/top_level.txt +0 -0
aip_agents/guardrails/middleware.py
@@ -0,0 +1,199 @@
+"""GuardrailMiddleware for integrating guardrails into agent execution.
+
+This module provides GuardrailMiddleware that hooks into the agent execution
+flow to automatically check content before and after model invocations.
+
+Authors:
+    Reinhart Linanda (reinhart.linanda@gdplabs.id)
+"""
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Any
+
+from langchain_core.messages import AIMessage, HumanMessage
+
+from aip_agents.guardrails.exceptions import GuardrailViolationError
+from aip_agents.guardrails.schemas import GuardrailInput
+from aip_agents.middleware.base import AgentMiddleware, ModelRequest
+
+if TYPE_CHECKING:
+    from aip_agents.guardrails.manager import GuardrailManager
+
+
+class GuardrailMiddleware(AgentMiddleware):
+    """Middleware that integrates guardrails into agent execution.
+
+    This middleware wraps a GuardrailManager and automatically checks content
+    at appropriate points during agent execution:
+
+    - Before model invocation: checks user input from messages
+    - After model invocation: checks AI output from messages
+
+    If unsafe content is detected, raises GuardrailViolationError to stop execution.
+
+    Attributes:
+        guardrail_manager: The GuardrailManager to use for content checking
+    """
+
+    def __init__(self, guardrail_manager: GuardrailManager) -> None:
+        """Initialize the GuardrailMiddleware.
+
+        Args:
+            guardrail_manager: GuardrailManager instance to use for checking
+        """
+        self.guardrail_manager = guardrail_manager
+
+    @property
+    def tools(self) -> list:
+        """Guardrails are passive filters and don't contribute tools."""
+        return []
+
+    @property
+    def system_prompt_additions(self) -> str | None:
+        """Guardrails are passive filters and don't modify system prompts."""
+        return None
+
+    async def abefore_model(self, state: dict[str, Any]) -> dict[str, Any]:
+        """Asynchronously check user input before model invocation.
+
+        Extracts the last user message from state and checks it with guardrails.
+        If unsafe, raises GuardrailViolationError to stop execution.
+
+        Args:
+            state: Current agent state containing messages and context
+
+        Returns:
+            Empty dict (no state modifications needed)
+
+        Raises:
+            GuardrailViolationError: If user input violates safety policies
+        """
+        # Extract last user message from state
+        messages = state.get("messages", [])
+        user_input = self._extract_last_user_message(messages)
+
+        if user_input is not None:
+            # Check input content
+            result = await self.guardrail_manager.check_content(user_input)
+
+            if not result.is_safe:
+                raise GuardrailViolationError(result)
+
+        return {}
+
+    def before_model(self, state: dict[str, Any]) -> dict[str, Any]:
+        """Check user input before model invocation (synchronous wrapper).
+
+        Note:
+            This is a synchronous wrapper for the async `abefore_model()` method.
+            LangGraph agents primarily use `abefore_model()` in async contexts.
+            This method should rarely be called directly. If called from an async
+            context with a running event loop, it will attempt to handle it,
+            but `abefore_model()` should be preferred.
+
+        Args:
+            state: Current agent state containing messages and context
+
+        Returns:
+            Empty dict (no state modifications needed)
+
+        Raises:
+            GuardrailViolationError: If user input violates safety policies
+        """
+        import asyncio
+
+        user_input = self._extract_last_user_message(state.get("messages", []))
+        if user_input is None:
+            return {}
+
+        # Check if we're in an async context with a running loop
+        try:
+            loop = asyncio.get_running_loop()
+            if loop.is_running():
+                # We're in an async context with a running loop
+                # Use nest_asyncio to allow nested event loops
+                # This enables calling asyncio.run() from within a running loop
+                import nest_asyncio
+
+                nest_asyncio.apply()
+                result = asyncio.run(self.guardrail_manager.check_content(user_input))
+            else:
+                # Loop exists but not running - safe to use asyncio.run()
+                result = asyncio.run(self.guardrail_manager.check_content(user_input))
+        except RuntimeError:
+            # No running loop - safe to use asyncio.run()
+            result = asyncio.run(self.guardrail_manager.check_content(user_input))
+
+        if not result.is_safe:
+            raise GuardrailViolationError(result)
+
+        return {}
+
+    def modify_model_request(self, request: ModelRequest, state: dict[str, Any]) -> ModelRequest:
+        """Guardrails don't modify model requests."""
+        return request
+
+    async def aafter_model(self, state: dict[str, Any]) -> dict[str, Any]:
+        """Asynchronously check AI output after model invocation.
+
+        Extracts the last AI message from state and checks it with guardrails.
+        If unsafe, raises GuardrailViolationError to stop execution.
+
+        Args:
+            state: Current agent state after model invocation
+
+        Returns:
+            Empty dict (no state modifications needed)
+
+        Raises:
+            GuardrailViolationError: If AI output violates safety policies
+        """
+        # Extract last AI message from state
+        messages = state.get("messages", [])
+        ai_output = self._extract_last_ai_message(messages)
+
+        if ai_output is not None:
+            # Check output content
+            result = await self.guardrail_manager.check_content(GuardrailInput(input=None, output=ai_output))
+
+            if not result.is_safe:
+                raise GuardrailViolationError(result)
+
+        return {}
+
+    def after_model(self, state: dict[str, Any]) -> dict[str, Any]:
+        """Check AI output after model invocation (synchronous wrapper)."""
+        return {}
+
+    def _extract_last_user_message(self, messages: list) -> str | None:
+        """Extract the last user message from a list of messages.
+
+        Searches backwards through messages to find the most recent HumanMessage.
+
+        Args:
+            messages: List of message objects
+
+        Returns:
+            Content of the last user message, or None if not found
+        """
+        for message in reversed(messages):
+            if isinstance(message, HumanMessage) and message.content:
+                return str(message.content)
+        return None
+
+    def _extract_last_ai_message(self, messages: list) -> str | None:
+        """Extract the last AI message from a list of messages.
+
+        Searches backwards through messages to find the most recent AIMessage.
+
+        Args:
+            messages: List of message objects
+
+        Returns:
+            Content of the last AI message, or None if not found
+        """
+        for message in reversed(messages):
+            if isinstance(message, AIMessage) and message.content:
+                return str(message.content)
+        return None
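
The middleware above only assumes an object with an async check_content() method whose result carries is_safe. A minimal usage sketch, assuming the public API shown in this diff and substituting a hypothetical duck-typed _BlocklistManager for a real GuardrailManager (which would normally come from aip_agents.guardrails.manager):

import asyncio

from langchain_core.messages import HumanMessage

from aip_agents.guardrails.exceptions import GuardrailViolationError
from aip_agents.guardrails.middleware import GuardrailMiddleware
from aip_agents.guardrails.schemas import GuardrailResult


class _BlocklistManager:
    """Hypothetical stand-in for GuardrailManager: blocks any content mentioning 'ssn'."""

    async def check_content(self, content) -> GuardrailResult:
        # abefore_model passes a str; aafter_model passes a GuardrailInput.
        text = content if isinstance(content, str) else (content.output or "")
        if "ssn" in text.lower():
            return GuardrailResult(is_safe=False, reason="PII detected")
        return GuardrailResult(is_safe=True)


async def main() -> None:
    middleware = GuardrailMiddleware(_BlocklistManager())  # duck-typed manager for illustration
    state = {"messages": [HumanMessage(content="What is my SSN on file?")]}
    try:
        await middleware.abefore_model(state)  # checks the last HumanMessage
    except GuardrailViolationError as err:
        print(f"Blocked before model call: {err}")


asyncio.run(main())

The same pattern applies on the output side via aafter_model(), which wraps the last AI message in GuardrailInput(input=None, output=...) before checking.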

aip_agents/guardrails/middleware.pyi
@@ -0,0 +1,87 @@
+from _typeshed import Incomplete
+from aip_agents.guardrails.exceptions import GuardrailViolationError as GuardrailViolationError
+from aip_agents.guardrails.manager import GuardrailManager as GuardrailManager
+from aip_agents.guardrails.schemas import GuardrailInput as GuardrailInput
+from aip_agents.middleware.base import AgentMiddleware as AgentMiddleware, ModelRequest as ModelRequest
+from typing import Any
+
+class GuardrailMiddleware(AgentMiddleware):
+    """Middleware that integrates guardrails into agent execution.
+
+    This middleware wraps a GuardrailManager and automatically checks content
+    at appropriate points during agent execution:
+
+    - Before model invocation: checks user input from messages
+    - After model invocation: checks AI output from messages
+
+    If unsafe content is detected, raises GuardrailViolationError to stop execution.
+
+    Attributes:
+        guardrail_manager: The GuardrailManager to use for content checking
+    """
+    guardrail_manager: Incomplete
+    def __init__(self, guardrail_manager: GuardrailManager) -> None:
+        """Initialize the GuardrailMiddleware.
+
+        Args:
+            guardrail_manager: GuardrailManager instance to use for checking
+        """
+    @property
+    def tools(self) -> list:
+        """Guardrails are passive filters and don't contribute tools."""
+    @property
+    def system_prompt_additions(self) -> str | None:
+        """Guardrails are passive filters and don't modify system prompts."""
+    async def abefore_model(self, state: dict[str, Any]) -> dict[str, Any]:
+        """Asynchronously check user input before model invocation.
+
+        Extracts the last user message from state and checks it with guardrails.
+        If unsafe, raises GuardrailViolationError to stop execution.
+
+        Args:
+            state: Current agent state containing messages and context
+
+        Returns:
+            Empty dict (no state modifications needed)
+
+        Raises:
+            GuardrailViolationError: If user input violates safety policies
+        """
+    def before_model(self, state: dict[str, Any]) -> dict[str, Any]:
+        """Check user input before model invocation (synchronous wrapper).
+
+        Note:
+            This is a synchronous wrapper for the async `abefore_model()` method.
+            LangGraph agents primarily use `abefore_model()` in async contexts.
+            This method should rarely be called directly. If called from an async
+            context with a running event loop, it will attempt to handle it,
+            but `abefore_model()` should be preferred.
+
+        Args:
+            state: Current agent state containing messages and context
+
+        Returns:
+            Empty dict (no state modifications needed)
+
+        Raises:
+            GuardrailViolationError: If user input violates safety policies
+        """
+    def modify_model_request(self, request: ModelRequest, state: dict[str, Any]) -> ModelRequest:
+        """Guardrails don't modify model requests."""
+    async def aafter_model(self, state: dict[str, Any]) -> dict[str, Any]:
+        """Asynchronously check AI output after model invocation.
+
+        Extracts the last AI message from state and checks it with guardrails.
+        If unsafe, raises GuardrailViolationError to stop execution.
+
+        Args:
+            state: Current agent state after model invocation
+
+        Returns:
+            Empty dict (no state modifications needed)
+
+        Raises:
+            GuardrailViolationError: If AI output violates safety policies
+        """
+    def after_model(self, state: dict[str, Any]) -> dict[str, Any]:
+        """Check AI output after model invocation (synchronous wrapper)."""

aip_agents/guardrails/schemas.py
@@ -0,0 +1,63 @@
+"""Schemas for guardrail input, output, and configuration.
+
+This module defines the data structures used throughout the guardrails system,
+including input/output schemas and configuration objects.
+
+Authors:
+    Reinhart Linanda (reinhart.linanda@gdplabs.id)
+"""
+
+from enum import StrEnum
+
+from pydantic import BaseModel, ConfigDict
+
+
+class GuardrailMode(StrEnum):
+    """Modes determining what content an engine checks."""
+
+    INPUT_ONLY = "input_only"
+    OUTPUT_ONLY = "output_only"
+    INPUT_OUTPUT = "input_output"
+    DISABLED = "disabled"
+
+
+class GuardrailInput(BaseModel):
+    """Input schema for guardrail checks.
+
+    Attributes:
+        input: User input content to check (queries, prompts, context)
+        output: AI output content to check (responses, generated text)
+    """
+
+    model_config = ConfigDict(extra="forbid")
+
+    input: str | None = None
+    output: str | None = None
+
+
+class GuardrailResult(BaseModel):
+    """Result schema returned by guardrail engines and managers.
+
+    Attributes:
+        is_safe: Whether the content passed all checks
+        reason: Explanation when content is blocked (None if safe)
+        filtered_content: Cleaned/sanitized content if engine provides it
+    """
+
+    model_config = ConfigDict(extra="forbid")
+
+    is_safe: bool
+    reason: str | None = None
+    filtered_content: str | None = None
+
+
+class BaseGuardrailEngineConfig(BaseModel):
+    """Base configuration for guardrail engines.
+
+    Attributes:
+        guardrail_mode: What content this engine should check
+    """
+
+    model_config = ConfigDict(extra="forbid")
+
+    guardrail_mode: GuardrailMode = GuardrailMode.INPUT_OUTPUT
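
For reference, a short sketch of how these pydantic models behave; extra="forbid" means misspelled fields raise a ValidationError instead of being silently dropped (field names and defaults are taken from the diff above):

from pydantic import ValidationError

from aip_agents.guardrails.schemas import (
    BaseGuardrailEngineConfig,
    GuardrailInput,
    GuardrailMode,
    GuardrailResult,
)

# Check a user prompt only; `output` stays None until the model has responded.
payload = GuardrailInput(input="Summarize this contract for me.")
print(payload.model_dump())  # {'input': 'Summarize this contract for me.', 'output': None}

# Engines default to checking both directions.
config = BaseGuardrailEngineConfig()
assert config.guardrail_mode is GuardrailMode.INPUT_OUTPUT

# extra="forbid" makes typos fail fast.
try:
    GuardrailResult(is_safe=True, reasons="typo")  # 'reasons' is not a field
except ValidationError as err:
    print(err.error_count(), "validation error")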

aip_agents/guardrails/schemas.pyi
@@ -0,0 +1,43 @@
+from _typeshed import Incomplete
+from enum import StrEnum
+from pydantic import BaseModel
+
+class GuardrailMode(StrEnum):
+    """Modes determining what content an engine checks."""
+    INPUT_ONLY: str
+    OUTPUT_ONLY: str
+    INPUT_OUTPUT: str
+    DISABLED: str
+
+class GuardrailInput(BaseModel):
+    """Input schema for guardrail checks.
+
+    Attributes:
+        input: User input content to check (queries, prompts, context)
+        output: AI output content to check (responses, generated text)
+    """
+    model_config: Incomplete
+    input: str | None
+    output: str | None
+
+class GuardrailResult(BaseModel):
+    """Result schema returned by guardrail engines and managers.
+
+    Attributes:
+        is_safe: Whether the content passed all checks
+        reason: Explanation when content is blocked (None if safe)
+        filtered_content: Cleaned/sanitized content if engine provides it
+    """
+    model_config: Incomplete
+    is_safe: bool
+    reason: str | None
+    filtered_content: str | None
+
+class BaseGuardrailEngineConfig(BaseModel):
+    """Base configuration for guardrail engines.
+
+    Attributes:
+        guardrail_mode: What content this engine should check
+    """
+    model_config: Incomplete
+    guardrail_mode: GuardrailMode

aip_agents/guardrails/utils.py
@@ -0,0 +1,45 @@
+"""Utility functions for guardrail mode conversion.
+
+This module provides utilities for converting between aip-agents GuardrailMode
+and gllm-guardrail GuardrailMode enums.
+
+Authors:
+    Reinhart Linanda (reinhart.linanda@gdplabs.id)
+"""
+
+from typing import Any
+
+from aip_agents.guardrails.schemas import GuardrailMode
+
+
+def convert_guardrail_mode_to_gl_sdk(mode: GuardrailMode) -> Any:
+    """Convert aip-agents GuardrailMode to gllm-guardrail GuardrailMode.
+
+    This function performs lazy import of gllm-guardrail to support optional
+    dependencies. The conversion is necessary because we maintain our own
+    GuardrailMode enum for API consistency while wrapping the external library.
+
+    Args:
+        mode: The aip-agents GuardrailMode to convert
+
+    Returns:
+        The corresponding gllm-guardrail GuardrailMode enum value
+
+    Raises:
+        ImportError: If gllm-guardrail is not installed
+    """
+    try:
+        from gllm_guardrail.constants import GuardrailMode as GLGuardrailMode  # pragma: no cover
+    except ImportError as e:  # pragma: no cover
+        raise ImportError(
+            "gllm-guardrail is required for guardrails. Install with: pip install 'aip-agents[guardrails]'"
+        ) from e  # pragma: no cover
+
+    mode_mapping = {
+        GuardrailMode.INPUT_ONLY: GLGuardrailMode.INPUT_ONLY,
+        GuardrailMode.OUTPUT_ONLY: GLGuardrailMode.OUTPUT_ONLY,
+        GuardrailMode.INPUT_OUTPUT: GLGuardrailMode.BOTH,
+        GuardrailMode.DISABLED: GLGuardrailMode.DISABLED,
+    }
+
+    return mode_mapping[mode]
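
A hedged usage sketch of the converter above; gllm-guardrail is an optional dependency, so the call is guarded the same way the function guards its own lazy import:

from aip_agents.guardrails.schemas import GuardrailMode
from aip_agents.guardrails.utils import convert_guardrail_mode_to_gl_sdk

try:
    # INPUT_OUTPUT maps to the SDK's BOTH value, per the mapping in the diff above.
    gl_mode = convert_guardrail_mode_to_gl_sdk(GuardrailMode.INPUT_OUTPUT)
    print(gl_mode)
except ImportError as err:
    # Raised when the optional guardrails extra is not installed.
    print(f"guardrails extra missing: {err}")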

aip_agents/guardrails/utils.pyi
@@ -0,0 +1,19 @@
+from aip_agents.guardrails.schemas import GuardrailMode as GuardrailMode
+from typing import Any
+
+def convert_guardrail_mode_to_gl_sdk(mode: GuardrailMode) -> Any:
+    """Convert aip-agents GuardrailMode to gllm-guardrail GuardrailMode.
+
+    This function performs lazy import of gllm-guardrail to support optional
+    dependencies. The conversion is necessary because we maintain our own
+    GuardrailMode enum for API consistency while wrapping the external library.
+
+    Args:
+        mode: The aip-agents GuardrailMode to convert
+
+    Returns:
+        The corresponding gllm-guardrail GuardrailMode enum value
+
+    Raises:
+        ImportError: If gllm-guardrail is not installed
+    """

aip_agents/mcp/client/__init__.py
@@ -7,8 +7,44 @@ Authors:
     Putu Ravindra Wiguna (putu.r.wiguna@gdplabs.id)
 """
 
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Any
+
 from aip_agents.mcp.client.base_mcp_client import BaseMCPClient
-
-
+
+if TYPE_CHECKING:
+    from aip_agents.mcp.client.google_adk.client import GoogleADKMCPClient
+    from aip_agents.mcp.client.langchain.client import LangchainMCPClient
 
 __all__ = ["GoogleADKMCPClient", "LangchainMCPClient", "BaseMCPClient"]
+
+
+def __getattr__(name: str) -> Any:
+    """Lazy import of MCP client implementations.
+
+    This avoids importing heavy dependencies (Google ADK, Vertex AI, etc.)
+    when they are not needed.
+
+    Args:
+        name: Attribute name to import.
+
+    Returns:
+        The requested class.
+
+    Raises:
+        AttributeError: If attribute is not found.
+    """
+    if name == "GoogleADKMCPClient":
+        from aip_agents.mcp.client.google_adk.client import (
+            GoogleADKMCPClient as _GoogleADKMCPClient,
+        )
+
+        return _GoogleADKMCPClient
+    elif name == "LangchainMCPClient":
+        from aip_agents.mcp.client.langchain.client import (
+            LangchainMCPClient as _LangchainMCPClient,
+        )
+
+        return _LangchainMCPClient
+    raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
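
The module-level __getattr__ hook above is the lazy-import mechanism from PEP 562. A minimal, self-contained sketch of the same pattern using hypothetical names (lazy_pkg, heavy_backend, HeavyClient), not part of aip-agents:

# lazy_pkg/__init__.py — sketch of a PEP 562 lazy-import package
from typing import TYPE_CHECKING, Any

if TYPE_CHECKING:
    # Only seen by type checkers; no runtime import cost.
    from lazy_pkg.heavy_backend import HeavyClient

__all__ = ["HeavyClient"]


def __getattr__(name: str) -> Any:
    """Import HeavyClient only when the attribute is actually accessed."""
    if name == "HeavyClient":
        from lazy_pkg.heavy_backend import HeavyClient as _HeavyClient

        return _HeavyClient
    raise AttributeError(f"module '{__name__}' has no attribute '{name}'")

With this in place, `from lazy_pkg import HeavyClient` pays the cost of importing heavy_backend only at attribute resolution, which is presumably why GoogleADKMCPClient and LangchainMCPClient are now resolved lazily here.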

aip_agents/mcp/client/connection_manager.py
@@ -60,6 +60,9 @@ class MCPConnectionManager:
     async def start(self) -> tuple[Any, Any]:
         """Start connection in background task.
 
+        For HTTP/SSE transports, establishes connection directly to avoid anyio context issues.
+        For stdio transport, uses background task to manage subprocess lifecycle.
+
         Returns:
             tuple[Any, Any]: Tuple of (read_stream, write_stream) for ClientSession
 
@@ -67,6 +70,17 @@ class MCPConnectionManager:
             Exception: If connection establishment fails
         """
         logger.debug(f"Starting connection manager for {self.server_name}")
+
+        # Determine transport type first
+        self.transport_type = self._get_transport_type()
+
+        # For HTTP/SSE: connect directly (no background task needed)
+        # This avoids anyio.BrokenResourceError when streams cross task boundaries
+        if self.transport_type in (TransportType.HTTP, TransportType.SSE):
+            await self._establish_connection()
+            return self._connection
+
+        # For stdio: use background task to manage subprocess
         self._task = asyncio.create_task(self._connection_task())
         await self._ready_event.wait()
 
@@ -78,6 +92,20 @@ class MCPConnectionManager:
     async def stop(self) -> None:
         """Stop connection gracefully."""
         logger.debug(f"Stopping connection manager for {self.server_name}")
+
+        # For HTTP/SSE (no background task), just close transport
+        if self.transport_type in (TransportType.HTTP, TransportType.SSE):
+            if self._transport:
+                try:
+                    close_result = self._transport.close()
+                    if inspect.isawaitable(close_result):
+                        await close_result
+                except Exception as exc:
+                    logger.warning(f"Failed to close transport cleanly for {self.server_name}: {exc}")
+            self._connection = None
+            return
+
+        # For stdio (with background task), wait for task to finish
         if self._task and not self._task.done():
             self._stop_event.set()
             try:
@@ -94,6 +122,11 @@ class MCPConnectionManager:
         Returns:
             bool: True if connected, False otherwise
         """
+        # For HTTP/SSE (no background task), just check if connection exists
+        if self.transport_type in (TransportType.HTTP, TransportType.SSE):
+            return self._connection is not None
+
+        # For stdio (with background task), check task status too
         return (
             self._connection is not None
            and self._task is not None
@@ -144,7 +177,9 @@ class MCPConnectionManager:
         Raises:
             ConnectionError: If all connection attempts fail
         """
-
+        # transport_type may already be set by start() for HTTP/SSE
+        if not self.transport_type:
+            self.transport_type = self._get_transport_type()
         details = f"URL: {self.config.get('url', 'N/A')}, Command: {self.config.get('command', 'N/A')}"
         logger.info(f"Establishing connection to {self.server_name} via {self.transport_type} ({details})")
 
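
The connection-manager hunks above combine two idioms: branching between a direct connection (HTTP/SSE) and a background task that owns a subprocess lifecycle (stdio), and tolerating transports whose close() may be synchronous or a coroutine via inspect.isawaitable(). A generic, self-contained sketch of those idioms under those assumptions, not the actual MCPConnectionManager internals:

import asyncio
import inspect
from typing import Any


async def close_transport(transport: Any) -> None:
    """Close a transport whose close() may be synchronous or a coroutine."""
    result = transport.close()
    if inspect.isawaitable(result):
        await result


class _StdioStyleManager:
    """Sketch of the stdio path: a background task owns the connection lifecycle."""

    def __init__(self) -> None:
        self._ready = asyncio.Event()
        self._stop = asyncio.Event()
        self._task: asyncio.Task | None = None
        self.connection = None

    async def _connection_task(self) -> None:
        # Pretend to spawn a subprocess and hand back its streams.
        self.connection = ("read_stream", "write_stream")
        self._ready.set()
        await self._stop.wait()  # keep the "subprocess" alive until stop()
        self.connection = None

    async def start(self):
        self._task = asyncio.create_task(self._connection_task())
        await self._ready.wait()  # start() returns once the streams exist
        return self.connection

    async def stop(self) -> None:
        self._stop.set()
        if self._task:
            await self._task


async def main() -> None:
    manager = _StdioStyleManager()
    print(await manager.start())
    await manager.stop()


asyncio.run(main())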

aip_agents/mcp/client/connection_manager.pyi
@@ -31,6 +31,9 @@ class MCPConnectionManager:
     async def start(self) -> tuple[Any, Any]:
         """Start connection in background task.
 
+        For HTTP/SSE transports, establishes connection directly to avoid anyio context issues.
+        For stdio transport, uses background task to manage subprocess lifecycle.
+
         Returns:
             tuple[Any, Any]: Tuple of (read_stream, write_stream) for ClientSession
 