aip-agents-binary 0.5.25b8__py3-none-any.whl → 0.6.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aip_agents/__init__.pyi +19 -0
- aip_agents/a2a/__init__.pyi +3 -0
- aip_agents/a2a/server/__init__.pyi +4 -0
- aip_agents/a2a/server/base_executor.pyi +73 -0
- aip_agents/a2a/server/google_adk_executor.pyi +51 -0
- aip_agents/a2a/server/langflow_executor.pyi +43 -0
- aip_agents/a2a/server/langgraph_executor.pyi +47 -0
- aip_agents/a2a/types.pyi +132 -0
- aip_agents/agent/__init__.pyi +9 -0
- aip_agents/agent/base_agent.pyi +221 -0
- aip_agents/agent/base_langgraph_agent.py +137 -68
- aip_agents/agent/base_langgraph_agent.pyi +233 -0
- aip_agents/agent/google_adk_agent.pyi +141 -0
- aip_agents/agent/google_adk_constants.pyi +3 -0
- aip_agents/agent/hitl/__init__.pyi +6 -0
- aip_agents/agent/hitl/config.pyi +15 -0
- aip_agents/agent/hitl/langgraph_hitl_mixin.pyi +42 -0
- aip_agents/agent/hitl/manager.pyi +200 -0
- aip_agents/agent/hitl/models.pyi +3 -0
- aip_agents/agent/hitl/prompt/__init__.pyi +4 -0
- aip_agents/agent/hitl/prompt/base.pyi +24 -0
- aip_agents/agent/hitl/prompt/deferred.pyi +30 -0
- aip_agents/agent/hitl/registry.pyi +101 -0
- aip_agents/agent/interface.pyi +81 -0
- aip_agents/agent/interfaces.pyi +44 -0
- aip_agents/agent/langflow_agent.pyi +133 -0
- aip_agents/agent/langgraph_memory_enhancer_agent.pyi +49 -0
- aip_agents/agent/langgraph_react_agent.py +58 -14
- aip_agents/agent/langgraph_react_agent.pyi +131 -0
- aip_agents/agent/system_instruction_context.pyi +13 -0
- aip_agents/clients/__init__.pyi +4 -0
- aip_agents/clients/langflow/__init__.pyi +4 -0
- aip_agents/clients/langflow/client.pyi +140 -0
- aip_agents/clients/langflow/types.pyi +7 -0
- aip_agents/constants.pyi +7 -0
- aip_agents/examples/__init__.pyi +0 -0
- aip_agents/examples/compare_streaming_client.py +2 -2
- aip_agents/examples/compare_streaming_client.pyi +48 -0
- aip_agents/examples/compare_streaming_server.py +1 -1
- aip_agents/examples/compare_streaming_server.pyi +18 -0
- aip_agents/examples/demo_memory_recall.pyi +58 -0
- aip_agents/examples/hello_world_a2a_google_adk_client.pyi +9 -0
- aip_agents/examples/hello_world_a2a_google_adk_client_agent.pyi +9 -0
- aip_agents/examples/hello_world_a2a_google_adk_client_streaming.pyi +9 -0
- aip_agents/examples/hello_world_a2a_google_adk_server.pyi +15 -0
- aip_agents/examples/hello_world_a2a_langchain_client.pyi +5 -0
- aip_agents/examples/hello_world_a2a_langchain_client_agent.pyi +5 -0
- aip_agents/examples/hello_world_a2a_langchain_client_lm_invoker.pyi +5 -0
- aip_agents/examples/hello_world_a2a_langchain_client_streaming.pyi +5 -0
- aip_agents/examples/hello_world_a2a_langchain_reference_client_streaming.pyi +5 -0
- aip_agents/examples/hello_world_a2a_langchain_reference_server.pyi +15 -0
- aip_agents/examples/hello_world_a2a_langchain_server.pyi +15 -0
- aip_agents/examples/hello_world_a2a_langchain_server_lm_invoker.pyi +15 -0
- aip_agents/examples/hello_world_a2a_langflow_client.pyi +9 -0
- aip_agents/examples/hello_world_a2a_langflow_server.pyi +14 -0
- aip_agents/examples/hello_world_a2a_langgraph_artifact_client.pyi +5 -0
- aip_agents/examples/hello_world_a2a_langgraph_artifact_client_streaming.pyi +5 -0
- aip_agents/examples/hello_world_a2a_langgraph_artifact_server.pyi +16 -0
- aip_agents/examples/hello_world_a2a_langgraph_client.pyi +9 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_agent.pyi +9 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_agent_lm_invoker.pyi +2 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_streaming.pyi +9 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_streaming_lm_invoker.pyi +5 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_streaming_tool_streaming.pyi +5 -0
- aip_agents/examples/hello_world_a2a_langgraph_server.pyi +14 -0
- aip_agents/examples/hello_world_a2a_langgraph_server_lm_invoker.pyi +15 -0
- aip_agents/examples/hello_world_a2a_langgraph_server_tool_streaming.pyi +15 -0
- aip_agents/examples/hello_world_a2a_mcp_langgraph.pyi +48 -0
- aip_agents/examples/hello_world_a2a_three_level_agent_hierarchy_client.pyi +48 -0
- aip_agents/examples/hello_world_a2a_three_level_agent_hierarchy_server.pyi +45 -0
- aip_agents/examples/hello_world_a2a_with_metadata_langchain_client.pyi +5 -0
- aip_agents/examples/hello_world_a2a_with_metadata_langchain_server_lm_invoker.pyi +15 -0
- aip_agents/examples/hello_world_google_adk.pyi +5 -0
- aip_agents/examples/hello_world_google_adk_mcp_http.pyi +5 -0
- aip_agents/examples/hello_world_google_adk_mcp_http_stream.pyi +5 -0
- aip_agents/examples/hello_world_google_adk_mcp_sse.pyi +5 -0
- aip_agents/examples/hello_world_google_adk_mcp_sse_stream.pyi +5 -0
- aip_agents/examples/hello_world_google_adk_mcp_stdio.pyi +5 -0
- aip_agents/examples/hello_world_google_adk_mcp_stdio_stream.pyi +5 -0
- aip_agents/examples/hello_world_google_adk_stream.pyi +5 -0
- aip_agents/examples/hello_world_langchain.pyi +5 -0
- aip_agents/examples/hello_world_langchain_lm_invoker.pyi +2 -0
- aip_agents/examples/hello_world_langchain_mcp_http.pyi +5 -0
- aip_agents/examples/hello_world_langchain_mcp_http_interactive.pyi +16 -0
- aip_agents/examples/hello_world_langchain_mcp_http_stream.pyi +5 -0
- aip_agents/examples/hello_world_langchain_mcp_multi_server.pyi +18 -0
- aip_agents/examples/hello_world_langchain_mcp_sse.pyi +5 -0
- aip_agents/examples/hello_world_langchain_mcp_sse_stream.pyi +5 -0
- aip_agents/examples/hello_world_langchain_mcp_stdio.pyi +5 -0
- aip_agents/examples/hello_world_langchain_mcp_stdio_stream.pyi +5 -0
- aip_agents/examples/hello_world_langchain_stream.pyi +5 -0
- aip_agents/examples/hello_world_langchain_stream_lm_invoker.pyi +5 -0
- aip_agents/examples/hello_world_langflow_agent.pyi +35 -0
- aip_agents/examples/hello_world_langgraph.pyi +5 -0
- aip_agents/examples/hello_world_langgraph_gl_connector_twitter.pyi +5 -0
- aip_agents/examples/hello_world_langgraph_mcp_http.pyi +5 -0
- aip_agents/examples/hello_world_langgraph_mcp_http_stream.pyi +5 -0
- aip_agents/examples/hello_world_langgraph_mcp_sse.pyi +5 -0
- aip_agents/examples/hello_world_langgraph_mcp_sse_stream.pyi +5 -0
- aip_agents/examples/hello_world_langgraph_mcp_stdio.pyi +5 -0
- aip_agents/examples/hello_world_langgraph_mcp_stdio_stream.pyi +5 -0
- aip_agents/examples/hello_world_langgraph_stream.pyi +5 -0
- aip_agents/examples/hello_world_langgraph_stream_lm_invoker.pyi +5 -0
- aip_agents/examples/hello_world_model_switch_cli.pyi +30 -0
- aip_agents/examples/hello_world_multi_agent_adk.pyi +6 -0
- aip_agents/examples/hello_world_multi_agent_langchain.pyi +5 -0
- aip_agents/examples/hello_world_multi_agent_langgraph.pyi +5 -0
- aip_agents/examples/hello_world_multi_agent_langgraph_lm_invoker.pyi +5 -0
- aip_agents/examples/hello_world_pii_logger.pyi +5 -0
- aip_agents/examples/hello_world_sentry.pyi +21 -0
- aip_agents/examples/hello_world_step_limits.pyi +17 -0
- aip_agents/examples/hello_world_stock_a2a_server.pyi +17 -0
- aip_agents/examples/hello_world_tool_output_client.py +9 -0
- aip_agents/examples/hello_world_tool_output_client.pyi +5 -0
- aip_agents/examples/hello_world_tool_output_server.pyi +19 -0
- aip_agents/examples/hitl_demo.pyi +67 -0
- aip_agents/examples/pii_demo_langgraph_client.pyi +5 -0
- aip_agents/examples/pii_demo_langgraph_server.pyi +20 -0
- aip_agents/examples/pii_demo_multi_agent_client.pyi +5 -0
- aip_agents/examples/pii_demo_multi_agent_server.pyi +40 -0
- aip_agents/examples/todolist_planning_a2a_langchain_client.py +2 -2
- aip_agents/examples/todolist_planning_a2a_langchain_client.pyi +5 -0
- aip_agents/examples/todolist_planning_a2a_langgraph_server.py +1 -1
- aip_agents/examples/todolist_planning_a2a_langgraph_server.pyi +19 -0
- aip_agents/examples/tools/__init__.pyi +9 -0
- aip_agents/examples/tools/adk_arithmetic_tools.pyi +24 -0
- aip_agents/examples/tools/adk_weather_tool.pyi +18 -0
- aip_agents/examples/tools/data_generator_tool.pyi +15 -0
- aip_agents/examples/tools/data_visualization_tool.pyi +19 -0
- aip_agents/examples/tools/image_artifact_tool.pyi +26 -0
- aip_agents/examples/tools/langchain_arithmetic_tools.pyi +17 -0
- aip_agents/examples/tools/langchain_currency_exchange_tool.pyi +20 -0
- aip_agents/examples/tools/langchain_graph_artifact_tool.pyi +25 -0
- aip_agents/examples/tools/langchain_weather_tool.pyi +19 -0
- aip_agents/examples/tools/langgraph_streaming_tool.pyi +43 -0
- aip_agents/examples/tools/mock_retrieval_tool.pyi +13 -0
- aip_agents/examples/tools/pii_demo_tools.pyi +54 -0
- aip_agents/examples/tools/random_chart_tool.pyi +20 -0
- aip_agents/examples/tools/serper_tool.pyi +16 -0
- aip_agents/examples/tools/stock_tools.pyi +36 -0
- aip_agents/examples/tools/table_generator_tool.pyi +22 -0
- aip_agents/examples/tools/time_tool.pyi +15 -0
- aip_agents/examples/tools/weather_forecast_tool.pyi +14 -0
- aip_agents/guardrails/__init__.pyi +6 -0
- aip_agents/guardrails/engines/__init__.pyi +4 -0
- aip_agents/guardrails/engines/base.py +6 -6
- aip_agents/guardrails/engines/base.pyi +61 -0
- aip_agents/guardrails/engines/nemo.pyi +46 -0
- aip_agents/guardrails/engines/phrase_matcher.pyi +48 -0
- aip_agents/guardrails/exceptions.pyi +23 -0
- aip_agents/guardrails/manager.pyi +42 -0
- aip_agents/guardrails/middleware.pyi +87 -0
- aip_agents/guardrails/schemas.pyi +43 -0
- aip_agents/guardrails/utils.pyi +19 -0
- aip_agents/mcp/__init__.pyi +0 -0
- aip_agents/mcp/client/__init__.pyi +5 -0
- aip_agents/mcp/client/base_mcp_client.pyi +148 -0
- aip_agents/mcp/client/connection_manager.py +36 -1
- aip_agents/mcp/client/connection_manager.pyi +51 -0
- aip_agents/mcp/client/google_adk/__init__.pyi +3 -0
- aip_agents/mcp/client/google_adk/client.pyi +75 -0
- aip_agents/mcp/client/langchain/__init__.pyi +3 -0
- aip_agents/mcp/client/langchain/client.pyi +48 -0
- aip_agents/mcp/client/persistent_session.py +318 -68
- aip_agents/mcp/client/persistent_session.pyi +122 -0
- aip_agents/mcp/client/session_pool.pyi +101 -0
- aip_agents/mcp/client/transports.py +33 -2
- aip_agents/mcp/client/transports.pyi +132 -0
- aip_agents/mcp/utils/__init__.pyi +0 -0
- aip_agents/mcp/utils/config_validator.pyi +82 -0
- aip_agents/memory/__init__.pyi +5 -0
- aip_agents/memory/adapters/__init__.pyi +4 -0
- aip_agents/memory/adapters/base_adapter.pyi +150 -0
- aip_agents/memory/adapters/mem0.pyi +22 -0
- aip_agents/memory/base.pyi +60 -0
- aip_agents/memory/constants.pyi +25 -0
- aip_agents/memory/factory.pyi +24 -0
- aip_agents/memory/guidance.pyi +3 -0
- aip_agents/memory/simple_memory.pyi +23 -0
- aip_agents/middleware/__init__.pyi +5 -0
- aip_agents/middleware/base.pyi +75 -0
- aip_agents/middleware/manager.pyi +84 -0
- aip_agents/middleware/todolist.pyi +125 -0
- aip_agents/schema/__init__.pyi +9 -0
- aip_agents/schema/a2a.pyi +40 -0
- aip_agents/schema/agent.pyi +65 -0
- aip_agents/schema/hitl.pyi +89 -0
- aip_agents/schema/langgraph.pyi +28 -0
- aip_agents/schema/model_id.pyi +54 -0
- aip_agents/schema/step_limit.pyi +63 -0
- aip_agents/schema/storage.pyi +21 -0
- aip_agents/sentry/__init__.pyi +3 -0
- aip_agents/sentry/sentry.pyi +48 -0
- aip_agents/storage/__init__.pyi +8 -0
- aip_agents/storage/base.pyi +58 -0
- aip_agents/storage/clients/__init__.pyi +3 -0
- aip_agents/storage/clients/minio_client.pyi +137 -0
- aip_agents/storage/config.pyi +29 -0
- aip_agents/storage/providers/__init__.pyi +5 -0
- aip_agents/storage/providers/base.pyi +88 -0
- aip_agents/storage/providers/memory.pyi +79 -0
- aip_agents/storage/providers/object_storage.pyi +98 -0
- aip_agents/tools/__init__.pyi +9 -0
- aip_agents/tools/browser_use/__init__.pyi +14 -0
- aip_agents/tools/browser_use/action_parser.pyi +18 -0
- aip_agents/tools/browser_use/browser_use_tool.py +8 -0
- aip_agents/tools/browser_use/browser_use_tool.pyi +50 -0
- aip_agents/tools/browser_use/llm_config.pyi +52 -0
- aip_agents/tools/browser_use/minio_storage.pyi +109 -0
- aip_agents/tools/browser_use/schemas.pyi +32 -0
- aip_agents/tools/browser_use/session.pyi +4 -0
- aip_agents/tools/browser_use/session_errors.pyi +53 -0
- aip_agents/tools/browser_use/steel_session_recording.pyi +63 -0
- aip_agents/tools/browser_use/streaming.py +2 -0
- aip_agents/tools/browser_use/streaming.pyi +81 -0
- aip_agents/tools/browser_use/structured_data_parser.pyi +86 -0
- aip_agents/tools/browser_use/structured_data_recovery.pyi +43 -0
- aip_agents/tools/browser_use/types.pyi +45 -0
- aip_agents/tools/code_sandbox/__init__.pyi +3 -0
- aip_agents/tools/code_sandbox/constant.pyi +4 -0
- aip_agents/tools/code_sandbox/e2b_cloud_sandbox_extended.pyi +102 -0
- aip_agents/tools/code_sandbox/e2b_sandbox_tool.pyi +29 -0
- aip_agents/tools/constants.pyi +138 -0
- aip_agents/tools/document_loader/__init__.pyi +7 -0
- aip_agents/tools/document_loader/base_reader.pyi +75 -0
- aip_agents/tools/document_loader/docx_reader_tool.pyi +10 -0
- aip_agents/tools/document_loader/excel_reader_tool.pyi +26 -0
- aip_agents/tools/document_loader/pdf_reader_tool.pyi +11 -0
- aip_agents/tools/document_loader/pdf_splitter.pyi +18 -0
- aip_agents/tools/gl_connector/__init__.pyi +3 -0
- aip_agents/tools/gl_connector/tool.pyi +74 -0
- aip_agents/tools/gl_connector_tools.pyi +39 -0
- aip_agents/tools/memory_search/__init__.pyi +5 -0
- aip_agents/tools/memory_search/base.pyi +69 -0
- aip_agents/tools/memory_search/mem0.pyi +19 -0
- aip_agents/tools/memory_search/schema.pyi +15 -0
- aip_agents/tools/memory_search_tool.pyi +3 -0
- aip_agents/tools/time_tool.pyi +16 -0
- aip_agents/tools/tool_config_injector.pyi +26 -0
- aip_agents/tools/web_search/__init__.pyi +3 -0
- aip_agents/tools/web_search/serper_tool.pyi +19 -0
- aip_agents/types/__init__.pyi +36 -0
- aip_agents/types/a2a_events.pyi +3 -0
- aip_agents/utils/__init__.pyi +11 -0
- aip_agents/utils/a2a_connector.pyi +146 -0
- aip_agents/utils/artifact_helpers.pyi +203 -0
- aip_agents/utils/constants.pyi +10 -0
- aip_agents/utils/datetime/__init__.pyi +4 -0
- aip_agents/utils/datetime/normalization.pyi +95 -0
- aip_agents/utils/datetime/timezone.pyi +48 -0
- aip_agents/utils/env_loader.pyi +10 -0
- aip_agents/utils/event_handler_registry.pyi +23 -0
- aip_agents/utils/file_prompt_utils.pyi +21 -0
- aip_agents/utils/final_response_builder.pyi +34 -0
- aip_agents/utils/formatter_llm_client.pyi +71 -0
- aip_agents/utils/langgraph/__init__.pyi +3 -0
- aip_agents/utils/langgraph/converter.pyi +49 -0
- aip_agents/utils/langgraph/tool_managers/__init__.pyi +5 -0
- aip_agents/utils/langgraph/tool_managers/a2a_tool_manager.pyi +35 -0
- aip_agents/utils/langgraph/tool_managers/base_tool_manager.pyi +48 -0
- aip_agents/utils/langgraph/tool_managers/delegation_tool_manager.py +26 -1
- aip_agents/utils/langgraph/tool_managers/delegation_tool_manager.pyi +56 -0
- aip_agents/utils/langgraph/tool_output_management.py +80 -0
- aip_agents/utils/langgraph/tool_output_management.pyi +329 -0
- aip_agents/utils/logger.pyi +60 -0
- aip_agents/utils/metadata/__init__.pyi +5 -0
- aip_agents/utils/metadata/activity_metadata_helper.pyi +25 -0
- aip_agents/utils/metadata/activity_narrative/__init__.pyi +7 -0
- aip_agents/utils/metadata/activity_narrative/builder.pyi +35 -0
- aip_agents/utils/metadata/activity_narrative/constants.pyi +10 -0
- aip_agents/utils/metadata/activity_narrative/context.pyi +32 -0
- aip_agents/utils/metadata/activity_narrative/formatters.pyi +48 -0
- aip_agents/utils/metadata/activity_narrative/utils.pyi +12 -0
- aip_agents/utils/metadata/schemas/__init__.pyi +4 -0
- aip_agents/utils/metadata/schemas/activity_schema.pyi +18 -0
- aip_agents/utils/metadata/schemas/thinking_schema.pyi +20 -0
- aip_agents/utils/metadata/thinking_metadata_helper.pyi +4 -0
- aip_agents/utils/metadata_helper.pyi +117 -0
- aip_agents/utils/name_preprocessor/__init__.pyi +6 -0
- aip_agents/utils/name_preprocessor/base_name_preprocessor.pyi +52 -0
- aip_agents/utils/name_preprocessor/google_name_preprocessor.pyi +38 -0
- aip_agents/utils/name_preprocessor/name_preprocessor.pyi +41 -0
- aip_agents/utils/name_preprocessor/openai_name_preprocessor.pyi +34 -0
- aip_agents/utils/pii/__init__.pyi +5 -0
- aip_agents/utils/pii/pii_handler.pyi +96 -0
- aip_agents/utils/pii/pii_helper.pyi +78 -0
- aip_agents/utils/pii/uuid_deanonymizer_mapping.pyi +73 -0
- aip_agents/utils/reference_helper.pyi +81 -0
- aip_agents/utils/sse_chunk_transformer.pyi +166 -0
- aip_agents/utils/step_limit_manager.pyi +112 -0
- aip_agents/utils/token_usage_helper.pyi +60 -0
- {aip_agents_binary-0.5.25b8.dist-info → aip_agents_binary-0.6.0.dist-info}/METADATA +51 -48
- aip_agents_binary-0.6.0.dist-info/RECORD +566 -0
- aip_agents_binary-0.5.25b8.dist-info/RECORD +0 -290
- {aip_agents_binary-0.5.25b8.dist-info → aip_agents_binary-0.6.0.dist-info}/WHEEL +0 -0
- {aip_agents_binary-0.5.25b8.dist-info → aip_agents_binary-0.6.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,75 @@
+from langchain_core.tools import BaseTool as BaseTool
+from typing import Any, Protocol, TypedDict
+
+class ModelRequest(TypedDict, total=False):
+    """Represents parameters for a model invocation that middleware can modify.
+
+    This TypedDict defines the structure of requests passed to the LLM, allowing
+    middleware to add tools or modify prompts before each invocation.
+
+    Attributes:
+        messages: List of messages in the conversation.
+        tools: List of tools available to the model.
+        system_prompt: System-level instruction for the model.
+    """
+    messages: list[Any]
+    tools: list[BaseTool]
+    system_prompt: str
+
+class AgentMiddleware(Protocol):
+    """Protocol defining the interface for composable agent middleware.
+
+    Middleware components can contribute tools, enhance system prompts, and provide
+    lifecycle hooks that execute before, during, and after model invocations.
+
+    All middleware must implement this protocol to be compatible with MiddlewareManager.
+
+    Attributes:
+        tools: List of tools contributed by this middleware.
+        system_prompt_additions: Optional text to append to the agent's system prompt.
+    """
+    tools: list[BaseTool]
+    system_prompt_additions: str | None
+    def before_model(self, state: dict[str, Any]) -> dict[str, Any]:
+        """Hook executed before each model invocation.
+
+        Use this hook to prepare state, log context, or perform setup tasks
+        before the model is called.
+
+        Args:
+            state: Current agent state containing messages and other context.
+
+        Returns:
+            Dict of state updates to merge into the agent state. Return empty dict
+            if no updates are needed.
+        """
+    def modify_model_request(self, request: ModelRequest, state: dict[str, Any]) -> ModelRequest:
+        """Hook to modify the model request before invocation.
+
+        Use this hook to add tools, modify the system prompt, adjust model parameters,
+        or change tool selection strategy.
+
+        Args:
+            request: The model request that will be sent to the LLM.
+            state: Current agent state for context.
+
+        Returns:
+            Modified ModelRequest. Can return the same request if no changes needed.
+        """
+    def after_model(self, state: dict[str, Any]) -> dict[str, Any]:
+        """Hook executed after each model invocation.
+
+        Use this hook for cleanup, logging, state updates, or post-processing
+        of model outputs.
+
+        Args:
+            state: Current agent state after model invocation.
+
+        Returns:
+            Dict of state updates to merge into the agent state. Return empty dict
+            if no updates are needed.
+        """
+    async def abefore_model(self, state: dict[str, Any]) -> dict[str, Any]:
+        """Asynchronous version of before_model hook."""
+    async def aafter_model(self, state: dict[str, Any]) -> dict[str, Any]:
+        """Asynchronous version of after_model hook."""
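The 75 added lines above appear to correspond to the new `aip_agents/middleware/base.pyi` stub listed earlier (+75). As a hedged sketch only, a middleware that satisfies the `AgentMiddleware` protocol might look like the class below; the class name, logging behavior, and prompt text are hypothetical and not part of the package, only the attribute and hook signatures come from the stub:

```python
from typing import Any

from langchain_core.tools import BaseTool

from aip_agents.middleware.base import ModelRequest


class LoggingMiddleware:
    """Hypothetical middleware; a structural match for the AgentMiddleware protocol."""

    def __init__(self) -> None:
        self.tools: list[BaseTool] = []  # this middleware contributes no tools
        self.system_prompt_additions: str | None = "Keep answers concise."

    def before_model(self, state: dict[str, Any]) -> dict[str, Any]:
        # Inspect state before the LLM call; return {} when no updates are needed.
        print(f"calling model with {len(state.get('messages', []))} messages")
        return {}

    def modify_model_request(self, request: ModelRequest, state: dict[str, Any]) -> ModelRequest:
        # Pass the request through unchanged; a real middleware could add tools here.
        return request

    def after_model(self, state: dict[str, Any]) -> dict[str, Any]:
        return {}

    async def abefore_model(self, state: dict[str, Any]) -> dict[str, Any]:
        return self.before_model(state)

    async def aafter_model(self, state: dict[str, Any]) -> dict[str, Any]:
        return self.after_model(state)
```

Because `AgentMiddleware` is declared as a `Protocol`, no inheritance should be required; any object with this shape presumably satisfies the interface.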
@@ -0,0 +1,84 @@
+from _typeshed import Incomplete
+from aip_agents.middleware.base import AgentMiddleware as AgentMiddleware, ModelRequest as ModelRequest
+from langchain_core.tools import BaseTool as BaseTool
+from typing import Any
+
+class MiddlewareManager:
+    """Orchestrates multiple middleware components and manages hook execution.
+
+    The manager collects tools from all middleware, builds enhanced system prompts,
+    and executes lifecycle hooks in the correct order (forward for setup, reverse
+    for cleanup).
+
+    Attributes:
+        middleware: List of middleware components in registration order.
+    """
+    middleware: Incomplete
+    def __init__(self, middleware: list[AgentMiddleware]) -> None:
+        """Initialize the middleware manager.
+
+        Args:
+            middleware: List of middleware components to manage. Order matters:
+                hooks execute forward (first to last) for before/modify,
+                and reverse (last to first) for after.
+        """
+    def get_all_tools(self) -> list[BaseTool]:
+        """Collect tools from all registered middleware.
+
+        Returns:
+            Combined list of all tools contributed by all middleware components.
+            Empty list if no middleware or if middleware provide no tools.
+        """
+    def build_system_prompt(self, base_instruction: str) -> str:
+        """Build enhanced system prompt by concatenating base instruction with middleware additions.
+
+        Args:
+            base_instruction: The base system prompt for the agent.
+
+        Returns:
+            Enhanced system prompt with all middleware additions appended.
+            If no middleware provide additions, returns base_instruction unchanged.
+        """
+    def before_model(self, state: dict[str, Any]) -> dict[str, Any]:
+        """Execute before_model hooks for all middleware in forward order.
+
+        Hooks run first to last, allowing earlier middleware to prepare state
+        for later middleware.
+
+        Args:
+            state: Current agent state.
+
+        Returns:
+            Merged dictionary of all state updates from all middleware.
+            Updates are accumulated in order of execution.
+        """
+    def modify_model_request(self, request: ModelRequest, state: dict[str, Any]) -> ModelRequest:
+        """Execute modify_model_request hooks for all middleware in forward order.
+
+        Each middleware receives the request modified by previous middleware,
+        allowing them to build on each other's changes.
+
+        Args:
+            request: The model request to be modified.
+            state: Current agent state for context.
+
+        Returns:
+            Final modified request after all middleware have processed it.
+        """
+    def after_model(self, state: dict[str, Any]) -> dict[str, Any]:
+        """Execute after_model hooks for all middleware in reverse order.
+
+        Hooks run last to first (reverse of registration order), allowing
+        proper cleanup and unwinding of middleware operations.
+
+        Args:
+            state: Current agent state after model invocation.
+
+        Returns:
+            Merged dictionary of all state updates from all middleware.
+            Updates are accumulated in reverse order of execution.
+        """
+    async def abefore_model(self, state: dict[str, Any]) -> dict[str, Any]:
+        """Asynchronously execute before_model hooks for all middleware."""
+    async def aafter_model(self, state: dict[str, Any]) -> dict[str, Any]:
+        """Asynchronously execute after_model hooks for all middleware in reverse order."""
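This 84-line hunk appears to be `aip_agents/middleware/manager.pyi` (+84). A minimal usage sketch, taking the docstrings at face value and reusing the hypothetical `LoggingMiddleware` from the previous example:

```python
from aip_agents.middleware.manager import MiddlewareManager

manager = MiddlewareManager(middleware=[LoggingMiddleware()])

tools = manager.get_all_tools()  # combined tools from every middleware
prompt = manager.build_system_prompt("You are a helpful agent.")

state: dict = {"messages": []}
state.update(manager.before_model(state))   # hooks run first-to-last
# ... invoke the model with `prompt` and `tools` here ...
state.update(manager.after_model(state))    # hooks unwind last-to-first
```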
@@ -0,0 +1,125 @@
+import threading
+from _typeshed import Incomplete
+from aip_agents.middleware.base import ModelRequest as ModelRequest
+from aip_agents.utils.logger import get_logger as get_logger
+from enum import StrEnum
+from langchain_core.tools import BaseTool
+from pydantic import BaseModel, SkipValidation
+from typing import Any
+
+logger: Incomplete
+
+class TodoStatus(StrEnum):
+    """Enumeration of possible todo item statuses."""
+    PENDING: str
+    IN_PROGRESS: str
+    COMPLETED: str
+
+WRITE_TODOS_SYSTEM_PROMPT: str
+EMPTY_TODO_REMINDER: str
+
+class TodoItem(BaseModel):
+    """Represents a single todo item in the agent's plan.
+
+    Attributes:
+        content: Human-readable description of the task.
+        active_form: Short imperative phrase for UI display.
+        status: Current status of the todo item.
+    """
+    content: str
+    active_form: str
+    status: TodoStatus
+
+class TodoList(BaseModel):
+    """Represents a complete todo list for a thread.
+
+    Attributes:
+        items: List of todo items in order.
+    """
+    items: list[TodoItem]
+
+WRITE_TODOS_TOOL_DESCRIPTION: str
+
+class WriteTodosInput(BaseModel):
+    """Input schema for the write_todos tool."""
+    todos: TodoList
+
+class WriteTodosTool(BaseTool):
+    """LangChain-compatible tool for managing todo lists via TodoListMiddleware."""
+    name: str
+    description: str
+    args_schema: type[BaseModel]
+    storage: SkipValidation[dict[str, TodoList]]
+    storage_lock: SkipValidation[threading.RLock]
+
+class TodoListMiddleware:
+    """Middleware that provides planning capabilities via todo list management.
+
+    Adds the write_todos tool and enhances the system prompt with planning
+    instructions, encouraging agents to break down complex tasks.
+
+    This middleware maintains thread-isolated todo lists, ensuring that
+    different conversation threads don't interfere with each other.
+
+    Each middleware instance has its own storage, preventing race conditions
+    when multiple agent instances are used concurrently.
+    """
+    tools: Incomplete
+    system_prompt_additions: Incomplete
+    def __init__(self) -> None:
+        """Initialize the TodoList middleware with planning tools and instructions."""
+    def before_model(self, state: dict[str, Any]) -> dict[str, Any]:
+        """Hook executed before model invocation.
+
+        Syncs todos FROM state TO internal storage for LangGraph agents.
+        This allows todos to be persisted via LangGraph checkpointer.
+
+        Args:
+            state: Current agent state (may contain 'todos' key).
+
+        Returns:
+            Empty dict (no state updates needed).
+        """
+    def modify_model_request(self, request: ModelRequest, state: dict[str, Any]) -> ModelRequest:
+        """Hook to modify model request before invocation.
+
+        Injects current todo list status into the system prompt, ensuring
+        the agent has visibility into its current plan on every turn.
+        This follows Claude Code's pattern of injecting system reminders.
+
+        Args:
+            request: The model request.
+            state: Current agent state.
+
+        Returns:
+            Modified request with todo status injected into system prompt.
+        """
+    def after_model(self, state: dict[str, Any]) -> dict[str, Any]:
+        """Hook executed after model invocation.
+
+        Syncs todos FROM internal storage TO state for LangGraph agents.
+        This ensures any tool updates are reflected in the checkpointed state.
+
+        Args:
+            state: Current agent state.
+
+        Returns:
+            Dict with 'todos' key containing updated TodoList, or empty dict.
+        """
+    def get_todos(self, thread_id: str = 'default') -> TodoList:
+        '''Retrieve the todo list for a specific thread.
+
+        Args:
+            thread_id: Thread identifier. Defaults to "default".
+
+        Returns:
+            TodoList for the thread, or empty list if none exists.
+        '''
+    def clear_todos(self, thread_id: str = 'default') -> None:
+        '''Clear the todo list for a specific thread.
+
+        Useful for cleanup between test runs or conversation resets.
+
+        Args:
+            thread_id: Thread identifier. Defaults to "default".
+        '''
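The 125-line hunk above matches `aip_agents/middleware/todolist.pyi` (+125). A hedged sketch of wiring the planning middleware into the manager and inspecting its per-thread plan; the base instruction text is illustrative, and only documented methods and defaults are used:

```python
from aip_agents.middleware.manager import MiddlewareManager
from aip_agents.middleware.todolist import TodoListMiddleware

todo_mw = TodoListMiddleware()
manager = MiddlewareManager(middleware=[todo_mw])

# The middleware contributes the write_todos tool and planning instructions.
tools = manager.get_all_tools()
system_prompt = manager.build_system_prompt("You are a project assistant.")

# After (or between) agent runs, the plan can be read or reset per thread.
current_plan = todo_mw.get_todos(thread_id="default")
todo_mw.clear_todos(thread_id="default")
```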
@@ -0,0 +1,9 @@
+from aip_agents.schema.a2a import A2AEvent as A2AEvent, A2AStreamEventType as A2AStreamEventType, ToolCallInfo as ToolCallInfo, ToolResultInfo as ToolResultInfo
+from aip_agents.schema.agent import A2AClientConfig as A2AClientConfig, AgentConfig as AgentConfig, BaseAgentConfig as BaseAgentConfig, CredentialType as CredentialType, HttpxClientOptions as HttpxClientOptions, LangflowAgentConfig as LangflowAgentConfig, StreamMode as StreamMode
+from aip_agents.schema.hitl import ApprovalDecision as ApprovalDecision, ApprovalDecisionType as ApprovalDecisionType, ApprovalLogEntry as ApprovalLogEntry, ApprovalRequest as ApprovalRequest, HitlMetadata as HitlMetadata
+from aip_agents.schema.langgraph import ToolCallResult as ToolCallResult, ToolStorageParams as ToolStorageParams
+from aip_agents.schema.model_id import ModelId as ModelId, ModelProvider as ModelProvider
+from aip_agents.schema.step_limit import MaxDelegationDepthExceededError as MaxDelegationDepthExceededError, MaxStepsExceededError as MaxStepsExceededError, StepLimitConfig as StepLimitConfig, StepLimitError as StepLimitError, StepLimitErrorResponse as StepLimitErrorResponse, StepLimitErrorType as StepLimitErrorType
+from aip_agents.schema.storage import OBJECT_STORAGE_PREFIX as OBJECT_STORAGE_PREFIX, StorageConfig as StorageConfig, StorageType as StorageType
+
+__all__ = ['A2AEvent', 'A2AStreamEventType', 'ToolCallInfo', 'ToolResultInfo', 'A2AClientConfig', 'AgentConfig', 'BaseAgentConfig', 'CredentialType', 'HttpxClientOptions', 'LangflowAgentConfig', 'StreamMode', 'ApprovalDecision', 'ApprovalDecisionType', 'ApprovalLogEntry', 'ApprovalRequest', 'HitlMetadata', 'ToolCallResult', 'ToolStorageParams', 'ModelId', 'ModelProvider', 'OBJECT_STORAGE_PREFIX', 'StorageConfig', 'StorageType', 'MaxDelegationDepthExceededError', 'MaxStepsExceededError', 'StepLimitConfig', 'StepLimitError', 'StepLimitErrorResponse', 'StepLimitErrorType']
@@ -0,0 +1,40 @@
+from enum import Enum
+from typing import Any
+from typing_extensions import TypedDict
+
+__all__ = ['A2AStreamEventType', 'A2AEvent', 'ToolCallInfo', 'ToolResultInfo']
+
+class A2AStreamEventType(Enum):
+    """Semantic event types for A2A agent-executor communication."""
+    STATUS_UPDATE: str
+    CONTENT_CHUNK: str
+    FINAL_RESPONSE: str
+    TOOL_CALL: str
+    TOOL_RESULT: str
+    ERROR: str
+    STEP_LIMIT_EXCEEDED: str
+
+class A2AEvent(TypedDict):
+    """Structured event data used by the A2A connector."""
+    event_type: A2AStreamEventType
+    content: str
+    metadata: dict[str, Any]
+    tool_info: dict[str, Any] | None
+    is_final: bool
+    artifacts: list[dict[str, Any]] | None
+    references: list[Any] | None
+    step_usage: dict[str, Any] | None
+    total_usage: dict[str, Any] | None
+    thinking_and_activity_info: dict[str, Any] | None
+
+class ToolCallInfo(TypedDict):
+    """Structured information for tool invocation events."""
+    tool_calls: list[dict[str, Any]]
+    status: str
+
+class ToolResultInfo(TypedDict):
+    """Structured information for tool completion events."""
+    name: str
+    args: dict[str, Any]
+    output: str
+    execution_time: float | None
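This 40-line hunk lines up with `aip_agents/schema/a2a.pyi` (+40). Since `A2AEvent` is a plain `TypedDict`, a content-chunk event presumably takes the literal form below; the string values are illustrative only:

```python
from aip_agents.schema.a2a import A2AEvent, A2AStreamEventType

chunk_event: A2AEvent = {
    "event_type": A2AStreamEventType.CONTENT_CHUNK,
    "content": "Partial answer text...",   # placeholder content
    "metadata": {},
    "tool_info": None,
    "is_final": False,
    "artifacts": None,
    "references": None,
    "step_usage": None,
    "total_usage": None,
    "thinking_and_activity_info": None,
}
```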
@@ -0,0 +1,65 @@
+from _typeshed import Incomplete
+from a2a.types import AgentCard
+from enum import StrEnum
+from gllm_core.utils.retry import RetryConfig
+from pydantic import BaseModel
+from typing import Any
+
+__all__ = ['CredentialType', 'StreamMode', 'HttpxClientOptions', 'A2AClientConfig', 'BaseAgentConfig', 'AgentConfig', 'LangflowAgentConfig']
+
+class CredentialType(StrEnum):
+    """Credential type enumeration for type safety and better developer experience."""
+    API_KEY: str
+    FILE: str
+    DICT: str
+
+class StreamMode(StrEnum):
+    """LangGraph stream modes for astream operations."""
+    VALUES: str
+    CUSTOM: str
+    MESSAGES: str
+
+class HttpxClientOptions(BaseModel):
+    """Options for the HTTP client."""
+    timeout: float
+    trust_env: bool
+    follow_redirects: bool
+    model_config: Incomplete
+    class Config:
+        """Pydantic v1 fallback config for HttpxClientOptions."""
+        extra: str
+
+class A2AClientConfig(BaseModel):
+    """Configuration for A2A client."""
+    discovery_urls: list[str] | None
+    known_agents: dict[str, AgentCard]
+    httpx_client_options: HttpxClientOptions | None
+
+class BaseAgentConfig(BaseModel):
+    """Base configuration for agent implementations."""
+    tools: list[Any] | None
+    default_hyperparameters: dict[str, Any] | None
+    model_config: Incomplete
+    class Config:
+        """Pydantic v1 fallback config for BaseAgentConfig."""
+        extra: str
+
+class AgentConfig(BaseAgentConfig):
+    """Configuration for agent implementations with language model settings."""
+    lm_name: str | None
+    lm_hyperparameters: dict[str, Any] | None
+    lm_provider: str | None
+    lm_base_url: str | None
+    lm_api_key: str | None
+    lm_credentials: str | dict[str, Any] | None
+    lm_retry_config: RetryConfig | None
+
+class LangflowAgentConfig(BaseAgentConfig):
+    """Configuration for Langflow agent implementations."""
+    flow_id: str
+    base_url: str | None
+    api_key: str | None
+    model_config: Incomplete
+    class Config:
+        """Pydantic v1 fallback config for LangflowAgentConfig."""
+        extra: str
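These 65 lines correspond to `aip_agents/schema/agent.pyi` (+65). The stub hides default values, so the sketch below passes every declared field explicitly; the model name, provider string, key, and hyperparameters are placeholders, not recommendations:

```python
from aip_agents.schema.agent import AgentConfig, HttpxClientOptions

config = AgentConfig(
    tools=None,
    default_hyperparameters={"temperature": 0.0},
    lm_name="gpt-4o",         # placeholder model name
    lm_hyperparameters=None,
    lm_provider="openai",     # placeholder provider string
    lm_base_url=None,
    lm_api_key="<api-key>",   # placeholder credential
    lm_credentials=None,
    lm_retry_config=None,
)

http_options = HttpxClientOptions(timeout=30.0, trust_env=True, follow_redirects=True)
```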
@@ -0,0 +1,89 @@
+from _typeshed import Incomplete
+from dataclasses import dataclass
+from datetime import datetime
+from enum import StrEnum
+from pydantic import BaseModel
+from typing import Any
+
+__all__ = ['ApprovalDecisionType', 'ApprovalRequest', 'ApprovalDecision', 'ApprovalLogEntry', 'HitlMetadata']
+
+class ApprovalDecisionType(StrEnum):
+    """Enumeration of possible approval decision types."""
+    APPROVED: str
+    REJECTED: str
+    SKIPPED: str
+    TIMEOUT_SKIP: str
+    PENDING: str
+
+@dataclass
+class ApprovalRequest:
+    """Represents an in-flight prompt shown to the operator."""
+    request_id: str
+    tool_name: str
+    arguments_preview: str
+    context: dict[str, str] | None = ...
+    created_at: datetime | None = ...
+    timeout_at: datetime | None = ...
+    def __post_init__(self) -> None:
+        """Initialize timestamps if not provided."""
+    @classmethod
+    def create(cls, tool_name: str, arguments_preview: str, context: dict[str, str] | None = None) -> ApprovalRequest:
+        """Create a new approval request with generated request_id.
+
+        Args:
+            tool_name (str): The name of the tool requiring approval.
+            arguments_preview (str): A preview of the arguments for display.
+            context (dict[str, str] | None, optional): Additional context information.
+
+        Returns:
+            ApprovalRequest: A new approval request instance.
+        """
+
+@dataclass
+class ApprovalDecision:
+    """Captures the operator outcome."""
+    request_id: str
+    decision: ApprovalDecisionType
+    operator_input: str
+    decided_at: datetime | None = ...
+    latency_ms: int | None = ...
+    def __post_init__(self) -> None:
+        """Initialize timestamp if not provided."""
+
+@dataclass
+class ApprovalLogEntry:
+    """Structured log entry for HITL decisions."""
+    request_id: str
+    tool_name: str
+    decision: str
+    event: str = ...
+    agent_id: str | None = ...
+    thread_id: str | None = ...
+    additional_context: dict[str, Any] | None = ...
+    timestamp: datetime | None = ...
+    def __post_init__(self) -> None:
+        """Initialize timestamp if not provided."""
+
+class HitlMetadata(BaseModel):
+    """Structured metadata payload included in agent streaming events."""
+    required: bool
+    decision: ApprovalDecisionType
+    request_id: str
+    timeout_seconds: int | None
+    timeout_at: datetime | None
+    model_config: Incomplete
+    def as_payload(self) -> dict[str, Any]:
+        """Return a JSON-ready metadata payload."""
+    @classmethod
+    def from_decision(cls, decision: ApprovalDecision, *, required: bool = True, timeout_seconds: int | None = None, timeout_at: datetime | None = None) -> HitlMetadata:
+        """Build metadata from an ``ApprovalDecision``.
+
+        Args:
+            decision (ApprovalDecision): The approval decision to build metadata from.
+            required (bool, optional): Whether approval is required. Defaults to True.
+            timeout_seconds (int | None, optional): Timeout in seconds for the decision.
+            timeout_at (datetime | None, optional): Specific timeout datetime.
+
+        Returns:
+            HitlMetadata: The constructed metadata instance.
+        """
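This 89-line hunk appears to be `aip_agents/schema/hitl.pyi` (+89). A hedged walk through the documented request/decision/metadata flow; the tool name, argument preview, and operator input are hypothetical values:

```python
from aip_agents.schema.hitl import (
    ApprovalDecision,
    ApprovalDecisionType,
    ApprovalRequest,
    HitlMetadata,
)

# An approval prompt for a hypothetical tool call.
request = ApprovalRequest.create(
    tool_name="delete_file",
    arguments_preview='{"path": "/tmp/report.csv"}',
)

# The operator's outcome, tied back to the request via request_id.
decision = ApprovalDecision(
    request_id=request.request_id,
    decision=ApprovalDecisionType.APPROVED,
    operator_input="looks safe",
)

# Metadata attached to streaming events, as a JSON-ready payload.
metadata = HitlMetadata.from_decision(decision, required=True, timeout_seconds=60)
payload = metadata.as_payload()
```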
@@ -0,0 +1,28 @@
+from dataclasses import dataclass
+from gllm_core.schema import Chunk
+from langchain_core.messages import ToolMessage
+from langchain_core.messages.ai import UsageMetadata
+from langchain_core.tools import BaseTool
+from typing import Any
+
+__all__ = ['ToolCallResult', 'ToolStorageParams']
+
+@dataclass
+class ToolCallResult:
+    """Container for the results of a single tool call execution."""
+    messages: list[ToolMessage]
+    artifacts: list[dict[str, Any]]
+    metadata_delta: dict[str, Any]
+    references: list[Chunk]
+    step_usage: UsageMetadata | None
+    pii_mapping: dict[str, str] | None = ...
+
+@dataclass
+class ToolStorageParams:
+    """Parameters required for automatically storing tool outputs."""
+    tool: BaseTool
+    tool_output: Any
+    tool_call: dict[str, Any]
+    tool_call_id: str
+    resolved_args: dict[str, Any]
+    state: dict[str, Any]
@@ -0,0 +1,54 @@
+from _typeshed import Incomplete
+from enum import StrEnum
+
+__all__ = ['ModelId', 'ModelProvider']
+
+class ModelProvider(StrEnum):
+    """Enumeration of supported model providers for the AI agent platform."""
+    OPENAI: str
+    ANTHROPIC: str
+    AZURE_OPENAI: str
+    GOOGLE_GENAI: str
+    GROQ: str
+    TOGETHER_AI: str
+    DEEPINFRA: str
+    DEEPSEEK: str
+    OPENAI_COMPATIBLE: str
+
+class ModelId:
+    """Model identifier class for representing language models."""
+    provider: Incomplete
+    name: Incomplete
+    path: Incomplete
+    def __init__(self, provider: str, name: str, path: str | None = None) -> None:
+        """Initialize a ModelId.
+
+        Args:
+            provider: The model provider (e.g., 'openai', 'anthropic')
+            name: The specific model name
+            path: Optional path component for some providers
+        """
+    @classmethod
+    def from_string(cls, model_string: str) -> ModelId:
+        """Create a ModelId from a string representation.
+
+        Args:
+            model_string: String in format 'provider:name' or 'provider/path:name'
+
+        Returns:
+            ModelId instance
+
+        Raises:
+            ValueError: If the string format is invalid
+        """
+    def __eq__(self, other: object) -> bool:
+        """Check equality with another ModelId object.
+
+        Args:
+            other (object): The object to compare with.
+
+        Returns:
+            bool: True if the objects are equal, False otherwise.
+        """
+    def __hash__(self) -> int:
+        """Return hash of the ModelId."""
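The 54-line hunk above corresponds to `aip_agents/schema/model_id.pyi` (+54). A small sketch of the documented constructors; the concrete model and provider strings are placeholders, and the accepted provider spellings are not shown in the stub:

```python
from aip_agents.schema.model_id import ModelId

# Documented string formats: 'provider:name' or 'provider/path:name'.
chat_model = ModelId.from_string("openai:gpt-4o")

# Equivalent direct construction (provider and name are placeholder strings).
same_model = ModelId(provider="openai", name="gpt-4o")

# __eq__ and __hash__ are defined, so ModelId presumably supports value
# comparison and use as a dictionary key.
defaults = {same_model: {"temperature": 0.0}}
```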
@@ -0,0 +1,63 @@
+from _typeshed import Incomplete
+from dataclasses import dataclass, field
+from enum import StrEnum
+
+class StepLimitErrorType(StrEnum):
+    """Types of step-related limit violations."""
+    STEP_LIMIT_EXCEEDED: str
+    DELEGATION_DEPTH_EXCEEDED: str
+
+@dataclass
+class StepLimitConfig:
+    """Configuration for agent step and delegation limits.
+
+    Attributes:
+        max_steps: Maximum number of execution steps allowed per invocation.
+            Includes agent node (LLM call) and every tool call (parallel batches count per call).
+        max_delegation_depth: Maximum depth of delegation chain allowed.
+            Depth 0 means no delegation allowed.
+    """
+    max_steps: int = field(default_factory=Incomplete)
+    max_delegation_depth: int = field(default_factory=Incomplete)
+    def __post_init__(self) -> None:
+        """Validate configuration values and normalize range."""
+
+@dataclass
+class StepLimitErrorResponse:
+    """Structured error response for step limit violations.
+
+    Attributes:
+        error_type: The type of limit that was exceeded.
+        agent_name: Name of the agent that hit the limit.
+        current_value: Current step count or delegation depth.
+        configured_limit: The configured limit that was exceeded.
+        message: Human-readable error message.
+        delegation_chain: Full chain for delegation errors.
+        partial_result: Any output generated before hitting the limit.
+    """
+    error_type: StepLimitErrorType
+    agent_name: str
+    current_value: int
+    configured_limit: int
+    message: str
+    delegation_chain: list[str] | None = ...
+    partial_result: str | None = ...
+
+class StepLimitError(Exception):
+    """Base exception for step and delegation limit violations.
+
+    Attributes:
+        error_response: Structured error response with details.
+    """
+    error_response: Incomplete
+    def __init__(self, error_response: StepLimitErrorResponse) -> None:
+        """Initialize with error response.
+
+        Args:
+            error_response: Structured error details.
+        """
+
+class MaxStepsExceededError(StepLimitError):
+    """Raised when agent exceeds configured max_steps limit."""
+class MaxDelegationDepthExceededError(StepLimitError):
+    """Raised when delegation would exceed max_delegation_depth limit."""
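These 63 lines match `aip_agents/schema/step_limit.pyi` (+63). A hedged sketch of configuring limits and handling the resulting exception; `run_agent_with_limits` is a hypothetical helper, not part of the package, and the concrete default limit values are not visible in the stub:

```python
from aip_agents.schema.step_limit import MaxStepsExceededError, StepLimitConfig

# Both fields have default factories, so StepLimitConfig() is also valid.
limits = StepLimitConfig(max_steps=20, max_delegation_depth=1)

try:
    result = run_agent_with_limits(limits)  # hypothetical helper that runs an agent
except MaxStepsExceededError as exc:
    response = exc.error_response  # StepLimitErrorResponse with structured details
    print(f"{response.agent_name} stopped at {response.current_value}/{response.configured_limit}")
    print(response.partial_result or "no partial output")
```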
@@ -0,0 +1,21 @@
+from dataclasses import dataclass
+from enum import StrEnum
+
+__all__ = ['OBJECT_STORAGE_PREFIX', 'StorageType', 'StorageConfig']
+
+OBJECT_STORAGE_PREFIX: str
+
+class StorageType(StrEnum):
+    """Supported storage types."""
+    MEMORY: str
+    OBJECT_STORAGE: str
+
+@dataclass
+class StorageConfig:
+    """Configuration for storage providers."""
+    storage_type: StorageType = ...
+    object_prefix: str = ...
+    object_use_json: bool = ...
+    @classmethod
+    def from_env(cls) -> StorageConfig:
+        """Create StorageConfig from environment variables."""