aip-agents-binary 0.5.20__py3-none-any.whl → 0.5.22__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aip_agents/__init__.pyi +19 -0
- aip_agents/a2a/__init__.pyi +3 -0
- aip_agents/a2a/server/__init__.pyi +4 -0
- aip_agents/a2a/server/base_executor.pyi +73 -0
- aip_agents/a2a/server/google_adk_executor.pyi +51 -0
- aip_agents/a2a/server/langflow_executor.pyi +43 -0
- aip_agents/a2a/server/langgraph_executor.pyi +47 -0
- aip_agents/a2a/types.pyi +132 -0
- aip_agents/agent/__init__.pyi +9 -0
- aip_agents/agent/base_agent.pyi +221 -0
- aip_agents/agent/base_langgraph_agent.pyi +232 -0
- aip_agents/agent/google_adk_agent.pyi +141 -0
- aip_agents/agent/google_adk_constants.pyi +3 -0
- aip_agents/agent/hitl/__init__.pyi +6 -0
- aip_agents/agent/hitl/config.pyi +15 -0
- aip_agents/agent/hitl/langgraph_hitl_mixin.pyi +42 -0
- aip_agents/agent/hitl/manager.pyi +200 -0
- aip_agents/agent/hitl/models.pyi +3 -0
- aip_agents/agent/hitl/prompt/__init__.pyi +4 -0
- aip_agents/agent/hitl/prompt/base.pyi +24 -0
- aip_agents/agent/hitl/prompt/deferred.pyi +30 -0
- aip_agents/agent/hitl/registry.pyi +101 -0
- aip_agents/agent/interface.pyi +81 -0
- aip_agents/agent/interfaces.pyi +44 -0
- aip_agents/agent/langflow_agent.pyi +133 -0
- aip_agents/agent/langgraph_memory_enhancer_agent.pyi +49 -0
- aip_agents/agent/langgraph_react_agent.pyi +126 -0
- aip_agents/agent/system_instruction_context.pyi +13 -0
- aip_agents/clients/__init__.pyi +4 -0
- aip_agents/clients/langflow/__init__.pyi +4 -0
- aip_agents/clients/langflow/client.pyi +140 -0
- aip_agents/clients/langflow/types.pyi +7 -0
- aip_agents/constants.pyi +7 -0
- aip_agents/examples/__init__.pyi +0 -0
- aip_agents/examples/compare_streaming_client.pyi +48 -0
- aip_agents/examples/compare_streaming_server.pyi +18 -0
- aip_agents/examples/demo_memory_recall.pyi +58 -0
- aip_agents/examples/hello_world_a2a_google_adk_client.pyi +9 -0
- aip_agents/examples/hello_world_a2a_google_adk_client_agent.pyi +9 -0
- aip_agents/examples/hello_world_a2a_google_adk_client_streaming.pyi +9 -0
- aip_agents/examples/hello_world_a2a_google_adk_server.pyi +15 -0
- aip_agents/examples/hello_world_a2a_langchain_client.pyi +5 -0
- aip_agents/examples/hello_world_a2a_langchain_client_agent.pyi +5 -0
- aip_agents/examples/hello_world_a2a_langchain_client_lm_invoker.pyi +5 -0
- aip_agents/examples/hello_world_a2a_langchain_client_streaming.pyi +5 -0
- aip_agents/examples/hello_world_a2a_langchain_reference_client_streaming.pyi +5 -0
- aip_agents/examples/hello_world_a2a_langchain_reference_server.pyi +15 -0
- aip_agents/examples/hello_world_a2a_langchain_server.pyi +15 -0
- aip_agents/examples/hello_world_a2a_langchain_server_lm_invoker.pyi +15 -0
- aip_agents/examples/hello_world_a2a_langflow_client.pyi +9 -0
- aip_agents/examples/hello_world_a2a_langflow_server.pyi +14 -0
- aip_agents/examples/hello_world_a2a_langgraph_artifact_client.pyi +5 -0
- aip_agents/examples/hello_world_a2a_langgraph_artifact_client_streaming.pyi +5 -0
- aip_agents/examples/hello_world_a2a_langgraph_artifact_server.pyi +16 -0
- aip_agents/examples/hello_world_a2a_langgraph_client.pyi +9 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_agent.pyi +9 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_agent_lm_invoker.pyi +2 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_streaming.pyi +9 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_streaming_lm_invoker.pyi +5 -0
- aip_agents/examples/hello_world_a2a_langgraph_client_streaming_tool_streaming.pyi +5 -0
- aip_agents/examples/hello_world_a2a_langgraph_server.pyi +14 -0
- aip_agents/examples/hello_world_a2a_langgraph_server_lm_invoker.pyi +15 -0
- aip_agents/examples/hello_world_a2a_langgraph_server_tool_streaming.pyi +15 -0
- aip_agents/examples/hello_world_a2a_mcp_langgraph.pyi +48 -0
- aip_agents/examples/hello_world_a2a_three_level_agent_hierarchy_client.pyi +48 -0
- aip_agents/examples/hello_world_a2a_three_level_agent_hierarchy_server.pyi +45 -0
- aip_agents/examples/hello_world_a2a_with_metadata_langchain_client.pyi +5 -0
- aip_agents/examples/hello_world_a2a_with_metadata_langchain_server_lm_invoker.pyi +15 -0
- aip_agents/examples/hello_world_google_adk.pyi +5 -0
- aip_agents/examples/hello_world_google_adk_mcp_http.pyi +5 -0
- aip_agents/examples/hello_world_google_adk_mcp_http_stream.pyi +5 -0
- aip_agents/examples/hello_world_google_adk_mcp_sse.pyi +5 -0
- aip_agents/examples/hello_world_google_adk_mcp_sse_stream.pyi +5 -0
- aip_agents/examples/hello_world_google_adk_mcp_stdio.pyi +5 -0
- aip_agents/examples/hello_world_google_adk_mcp_stdio_stream.pyi +5 -0
- aip_agents/examples/hello_world_google_adk_stream.pyi +5 -0
- aip_agents/examples/hello_world_langchain.pyi +5 -0
- aip_agents/examples/hello_world_langchain_lm_invoker.pyi +2 -0
- aip_agents/examples/hello_world_langchain_mcp_http.pyi +5 -0
- aip_agents/examples/hello_world_langchain_mcp_http_interactive.pyi +16 -0
- aip_agents/examples/hello_world_langchain_mcp_http_stream.pyi +5 -0
- aip_agents/examples/hello_world_langchain_mcp_multi_server.pyi +18 -0
- aip_agents/examples/hello_world_langchain_mcp_sse.pyi +5 -0
- aip_agents/examples/hello_world_langchain_mcp_sse_stream.pyi +5 -0
- aip_agents/examples/hello_world_langchain_mcp_stdio.pyi +5 -0
- aip_agents/examples/hello_world_langchain_mcp_stdio_stream.pyi +5 -0
- aip_agents/examples/hello_world_langchain_stream.pyi +5 -0
- aip_agents/examples/hello_world_langchain_stream_lm_invoker.pyi +5 -0
- aip_agents/examples/hello_world_langflow_agent.pyi +35 -0
- aip_agents/examples/hello_world_langgraph.pyi +5 -0
- aip_agents/examples/{hello_world_langgraph_bosa_twitter.py → hello_world_langgraph_gl_connector_twitter.py} +10 -8
- aip_agents/examples/hello_world_langgraph_gl_connector_twitter.pyi +5 -0
- aip_agents/examples/hello_world_langgraph_mcp_http.pyi +5 -0
- aip_agents/examples/hello_world_langgraph_mcp_http_stream.pyi +5 -0
- aip_agents/examples/hello_world_langgraph_mcp_sse.pyi +5 -0
- aip_agents/examples/hello_world_langgraph_mcp_sse_stream.pyi +5 -0
- aip_agents/examples/hello_world_langgraph_mcp_stdio.pyi +5 -0
- aip_agents/examples/hello_world_langgraph_mcp_stdio_stream.pyi +5 -0
- aip_agents/examples/hello_world_langgraph_stream.pyi +5 -0
- aip_agents/examples/hello_world_langgraph_stream_lm_invoker.pyi +5 -0
- aip_agents/examples/hello_world_model_switch_cli.pyi +30 -0
- aip_agents/examples/hello_world_multi_agent_adk.pyi +6 -0
- aip_agents/examples/hello_world_multi_agent_langchain.pyi +5 -0
- aip_agents/examples/hello_world_multi_agent_langgraph.pyi +5 -0
- aip_agents/examples/hello_world_multi_agent_langgraph_lm_invoker.pyi +5 -0
- aip_agents/examples/hello_world_pii_logger.pyi +5 -0
- aip_agents/examples/hello_world_sentry.py +8 -7
- aip_agents/examples/hello_world_sentry.pyi +21 -0
- aip_agents/examples/hello_world_step_limits.pyi +17 -0
- aip_agents/examples/hello_world_stock_a2a_server.pyi +17 -0
- aip_agents/examples/hello_world_tool_output_client.pyi +5 -0
- aip_agents/examples/hello_world_tool_output_server.pyi +19 -0
- aip_agents/examples/hitl_demo.pyi +67 -0
- aip_agents/examples/pii_demo_langgraph_client.pyi +5 -0
- aip_agents/examples/pii_demo_langgraph_server.pyi +20 -0
- aip_agents/examples/pii_demo_multi_agent_client.pyi +5 -0
- aip_agents/examples/pii_demo_multi_agent_server.pyi +40 -0
- aip_agents/examples/todolist_planning_a2a_langchain_client.pyi +5 -0
- aip_agents/examples/todolist_planning_a2a_langgraph_server.pyi +19 -0
- aip_agents/examples/tools/__init__.pyi +9 -0
- aip_agents/examples/tools/adk_arithmetic_tools.pyi +24 -0
- aip_agents/examples/tools/adk_weather_tool.pyi +18 -0
- aip_agents/examples/tools/data_generator_tool.pyi +15 -0
- aip_agents/examples/tools/data_visualization_tool.pyi +19 -0
- aip_agents/examples/tools/image_artifact_tool.pyi +26 -0
- aip_agents/examples/tools/langchain_arithmetic_tools.pyi +17 -0
- aip_agents/examples/tools/langchain_currency_exchange_tool.pyi +20 -0
- aip_agents/examples/tools/langchain_graph_artifact_tool.pyi +25 -0
- aip_agents/examples/tools/langchain_weather_tool.pyi +19 -0
- aip_agents/examples/tools/langgraph_streaming_tool.pyi +43 -0
- aip_agents/examples/tools/mock_retrieval_tool.pyi +13 -0
- aip_agents/examples/tools/pii_demo_tools.pyi +54 -0
- aip_agents/examples/tools/random_chart_tool.pyi +20 -0
- aip_agents/examples/tools/serper_tool.pyi +16 -0
- aip_agents/examples/tools/stock_tools.pyi +36 -0
- aip_agents/examples/tools/table_generator_tool.pyi +22 -0
- aip_agents/examples/tools/time_tool.pyi +15 -0
- aip_agents/examples/tools/weather_forecast_tool.pyi +14 -0
- aip_agents/mcp/__init__.pyi +0 -0
- aip_agents/mcp/client/__init__.pyi +5 -0
- aip_agents/mcp/client/base_mcp_client.pyi +148 -0
- aip_agents/mcp/client/connection_manager.pyi +48 -0
- aip_agents/mcp/client/google_adk/__init__.pyi +3 -0
- aip_agents/mcp/client/google_adk/client.pyi +75 -0
- aip_agents/mcp/client/langchain/__init__.pyi +3 -0
- aip_agents/mcp/client/langchain/client.pyi +48 -0
- aip_agents/mcp/client/persistent_session.pyi +113 -0
- aip_agents/mcp/client/session_pool.pyi +101 -0
- aip_agents/mcp/client/transports.pyi +123 -0
- aip_agents/mcp/utils/__init__.pyi +0 -0
- aip_agents/mcp/utils/config_validator.pyi +82 -0
- aip_agents/memory/__init__.pyi +5 -0
- aip_agents/memory/adapters/__init__.pyi +4 -0
- aip_agents/memory/adapters/base_adapter.pyi +150 -0
- aip_agents/memory/adapters/mem0.pyi +22 -0
- aip_agents/memory/base.pyi +60 -0
- aip_agents/memory/constants.pyi +25 -0
- aip_agents/memory/factory.pyi +24 -0
- aip_agents/memory/guidance.pyi +3 -0
- aip_agents/memory/simple_memory.pyi +23 -0
- aip_agents/middleware/__init__.pyi +5 -0
- aip_agents/middleware/base.pyi +71 -0
- aip_agents/middleware/manager.pyi +80 -0
- aip_agents/middleware/todolist.pyi +125 -0
- aip_agents/schema/__init__.pyi +9 -0
- aip_agents/schema/a2a.pyi +40 -0
- aip_agents/schema/agent.pyi +65 -0
- aip_agents/schema/hitl.pyi +89 -0
- aip_agents/schema/langgraph.pyi +28 -0
- aip_agents/schema/model_id.pyi +54 -0
- aip_agents/schema/step_limit.pyi +63 -0
- aip_agents/schema/storage.pyi +21 -0
- aip_agents/sentry/__init__.py +1 -1
- aip_agents/sentry/__init__.pyi +3 -0
- aip_agents/sentry/sentry.py +17 -10
- aip_agents/sentry/sentry.pyi +48 -0
- aip_agents/storage/__init__.pyi +8 -0
- aip_agents/storage/base.pyi +58 -0
- aip_agents/storage/clients/__init__.pyi +3 -0
- aip_agents/storage/clients/minio_client.pyi +137 -0
- aip_agents/storage/config.pyi +29 -0
- aip_agents/storage/providers/__init__.pyi +5 -0
- aip_agents/storage/providers/base.pyi +88 -0
- aip_agents/storage/providers/memory.pyi +79 -0
- aip_agents/storage/providers/object_storage.pyi +98 -0
- aip_agents/tools/__init__.py +26 -6
- aip_agents/tools/__init__.pyi +9 -0
- aip_agents/tools/browser_use/__init__.pyi +14 -0
- aip_agents/tools/browser_use/action_parser.pyi +18 -0
- aip_agents/tools/browser_use/browser_use_tool.pyi +50 -0
- aip_agents/tools/browser_use/llm_config.pyi +52 -0
- aip_agents/tools/browser_use/minio_storage.pyi +109 -0
- aip_agents/tools/browser_use/schemas.pyi +32 -0
- aip_agents/tools/browser_use/session.pyi +4 -0
- aip_agents/tools/browser_use/session_errors.pyi +53 -0
- aip_agents/tools/browser_use/steel_session_recording.pyi +63 -0
- aip_agents/tools/browser_use/streaming.pyi +81 -0
- aip_agents/tools/browser_use/structured_data_parser.pyi +86 -0
- aip_agents/tools/browser_use/structured_data_recovery.pyi +43 -0
- aip_agents/tools/browser_use/types.pyi +45 -0
- aip_agents/tools/code_sandbox/__init__.pyi +3 -0
- aip_agents/tools/code_sandbox/constant.pyi +4 -0
- aip_agents/tools/code_sandbox/e2b_cloud_sandbox_extended.pyi +86 -0
- aip_agents/tools/code_sandbox/e2b_sandbox_tool.pyi +29 -0
- aip_agents/tools/constants.py +26 -12
- aip_agents/tools/constants.pyi +138 -0
- aip_agents/tools/document_loader/__init__.pyi +7 -0
- aip_agents/tools/document_loader/base_reader.pyi +75 -0
- aip_agents/tools/document_loader/docx_reader_tool.pyi +10 -0
- aip_agents/tools/document_loader/excel_reader_tool.pyi +26 -0
- aip_agents/tools/document_loader/pdf_reader_tool.pyi +11 -0
- aip_agents/tools/document_loader/pdf_splitter.pyi +18 -0
- aip_agents/tools/gl_connector/__init__.py +1 -1
- aip_agents/tools/gl_connector/__init__.pyi +3 -0
- aip_agents/tools/gl_connector/tool.py +104 -45
- aip_agents/tools/gl_connector/tool.pyi +74 -0
- aip_agents/tools/gl_connector_tools.py +122 -0
- aip_agents/tools/gl_connector_tools.pyi +39 -0
- aip_agents/tools/memory_search/__init__.pyi +5 -0
- aip_agents/tools/memory_search/base.pyi +69 -0
- aip_agents/tools/memory_search/mem0.pyi +19 -0
- aip_agents/tools/memory_search/schema.pyi +15 -0
- aip_agents/tools/memory_search_tool.pyi +3 -0
- aip_agents/tools/time_tool.pyi +16 -0
- aip_agents/tools/tool_config_injector.pyi +26 -0
- aip_agents/tools/web_search/__init__.pyi +3 -0
- aip_agents/tools/web_search/serper_tool.pyi +19 -0
- aip_agents/types/__init__.pyi +36 -0
- aip_agents/types/a2a_events.pyi +3 -0
- aip_agents/utils/__init__.pyi +11 -0
- aip_agents/utils/a2a_connector.pyi +146 -0
- aip_agents/utils/artifact_helpers.pyi +203 -0
- aip_agents/utils/constants.pyi +10 -0
- aip_agents/utils/datetime/__init__.pyi +4 -0
- aip_agents/utils/datetime/normalization.pyi +95 -0
- aip_agents/utils/datetime/timezone.pyi +48 -0
- aip_agents/utils/env_loader.pyi +10 -0
- aip_agents/utils/event_handler_registry.pyi +23 -0
- aip_agents/utils/file_prompt_utils.pyi +21 -0
- aip_agents/utils/final_response_builder.pyi +34 -0
- aip_agents/utils/formatter_llm_client.pyi +71 -0
- aip_agents/utils/langgraph/__init__.pyi +3 -0
- aip_agents/utils/langgraph/converter.pyi +49 -0
- aip_agents/utils/langgraph/tool_managers/__init__.pyi +5 -0
- aip_agents/utils/langgraph/tool_managers/a2a_tool_manager.pyi +35 -0
- aip_agents/utils/langgraph/tool_managers/base_tool_manager.pyi +48 -0
- aip_agents/utils/langgraph/tool_managers/delegation_tool_manager.pyi +56 -0
- aip_agents/utils/langgraph/tool_output_management.pyi +292 -0
- aip_agents/utils/logger.pyi +60 -0
- aip_agents/utils/metadata/__init__.pyi +5 -0
- aip_agents/utils/metadata/activity_metadata_helper.pyi +25 -0
- aip_agents/utils/metadata/activity_narrative/__init__.pyi +7 -0
- aip_agents/utils/metadata/activity_narrative/builder.pyi +35 -0
- aip_agents/utils/metadata/activity_narrative/constants.pyi +10 -0
- aip_agents/utils/metadata/activity_narrative/context.pyi +32 -0
- aip_agents/utils/metadata/activity_narrative/formatters.pyi +48 -0
- aip_agents/utils/metadata/activity_narrative/utils.pyi +12 -0
- aip_agents/utils/metadata/schemas/__init__.pyi +4 -0
- aip_agents/utils/metadata/schemas/activity_schema.pyi +18 -0
- aip_agents/utils/metadata/schemas/thinking_schema.pyi +20 -0
- aip_agents/utils/metadata/thinking_metadata_helper.pyi +4 -0
- aip_agents/utils/metadata_helper.pyi +117 -0
- aip_agents/utils/name_preprocessor/__init__.pyi +6 -0
- aip_agents/utils/name_preprocessor/base_name_preprocessor.pyi +52 -0
- aip_agents/utils/name_preprocessor/google_name_preprocessor.pyi +38 -0
- aip_agents/utils/name_preprocessor/name_preprocessor.pyi +41 -0
- aip_agents/utils/name_preprocessor/openai_name_preprocessor.pyi +34 -0
- aip_agents/utils/pii/__init__.pyi +5 -0
- aip_agents/utils/pii/pii_handler.pyi +96 -0
- aip_agents/utils/pii/pii_helper.pyi +78 -0
- aip_agents/utils/pii/uuid_deanonymizer_mapping.pyi +73 -0
- aip_agents/utils/reference_helper.pyi +81 -0
- aip_agents/utils/sse_chunk_transformer.pyi +166 -0
- aip_agents/utils/step_limit_manager.pyi +112 -0
- aip_agents/utils/token_usage_helper.pyi +60 -0
- {aip_agents_binary-0.5.20.dist-info → aip_agents_binary-0.5.22.dist-info}/METADATA +3 -3
- aip_agents_binary-0.5.22.dist-info/RECORD +546 -0
- aip_agents/tools/bosa_tools.py +0 -105
- aip_agents_binary-0.5.20.dist-info/RECORD +0 -280
- {aip_agents_binary-0.5.20.dist-info → aip_agents_binary-0.5.22.dist-info}/WHEEL +0 -0
- {aip_agents_binary-0.5.20.dist-info → aip_agents_binary-0.5.22.dist-info}/top_level.txt +0 -0
aip_agents/utils/pii/pii_handler.pyi
@@ -0,0 +1,96 @@
+from _typeshed import Incomplete
+from aip_agents.utils.logger import LoggerManager as LoggerManager
+from aip_agents.utils.pii.uuid_deanonymizer_mapping import UUIDDeanonymizerMapping as UUIDDeanonymizerMapping
+from enum import Enum
+from gllm_privacy.pii_detector import TextAnalyzer, TextAnonymizer
+from typing import Any
+
+TextAnalyzer = Any
+TextAnonymizer = Any
+GDPLabsNerApiRemoteRecognizer = Any
+
+class _Operation(str, Enum):
+    ANONYMIZE: str
+    DEANONYMIZE: str
+
+logger: Incomplete
+NER_API_URL_ENV_VAR: str
+NER_API_KEY_ENV_VAR: str
+NER_API_TIMEOUT: int
+EXCLUDED_ENTITIES: Incomplete
+DEFAULT_SUPPORTED_ENTITIES: Incomplete
+
+class ToolPIIHandler:
+    """Handles PII masking/demasking for tool calling.
+
+    Tag replacement based on runner-provided mappings always works. Optional
+    NER-powered masking/de-masking is only enabled when NER_API_URL and
+    NER_API_KEY environment variables are set.
+
+    Attributes:
+        flat_pii_mapping: Flat mapping from runner service (tag → value)
+        text_analyzer: GLLM Privacy TextAnalyzer instance
+        text_anonymizer: GLLM Privacy TextAnonymizer instance
+        enable_ner: Whether NER is enabled
+    """
+    flat_pii_mapping: dict[str, str]
+    enable_ner: bool
+    text_analyzer: TextAnalyzer | None
+    text_anonymizer: TextAnonymizer | None
+    def __init__(self, pii_mapping: dict[str, str] | None = None, ner_api_url: str | None = None, ner_api_key: str | None = None) -> None:
+        """Initialize PII handler (private - use create_if_enabled() instead).
+
+        Initializes GLLM Privacy components (TextAnalyzer, TextAnonymizer) if NER credentials
+        are provided. Creates dual recognizers for Indonesian and English languages.
+        Pre-loads any existing PII mappings into the anonymizer's internal state.
+
+        Args:
+            pii_mapping: Existing PII mapping from runner service (flat format: tag -> value)
+            ner_api_url: NER API endpoint URL
+            ner_api_key: NER API authentication key
+        """
+    @classmethod
+    def create_if_enabled(cls, pii_mapping: dict[str, str] | None = None) -> ToolPIIHandler | None:
+        """Create ToolPIIHandler when mappings or NER configuration exist.
+
+        Args:
+            pii_mapping: Existing PII mapping from runner service
+
+        Returns:
+            ToolPIIHandler instance when mapping or NER config is available, None otherwise
+        """
+    @classmethod
+    def create_mapping_only(cls, pii_mapping: dict[str, str] | None = None) -> ToolPIIHandler | None:
+        """Create ToolPIIHandler in mapping-only mode (no NER).
+
+        Args:
+            pii_mapping: Existing PII mapping from runner service
+
+        Returns:
+            ToolPIIHandler instance when mapping exists, None otherwise
+        """
+    def deanonymize_tool_args(self, args: dict[str, Any]) -> dict[str, Any]:
+        """Replace PII tags in tool arguments with real values.
+
+        Recursively processes dictionaries, lists, and strings to replace all PII tags
+        (e.g., '<EMAIL_1>') with their corresponding real values from flat_pii_mapping.
+
+        Args:
+            args: Tool arguments that may contain PII tags
+
+        Returns:
+            Arguments with tags replaced by real values
+        """
+    def anonymize_tool_output(self, output: Any) -> tuple[Any, dict[str, str]]:
+        """Mask PII values in tool output.
+
+        Handles string and dictionary outputs. For strings, uses two-phase anonymization:
+        first masks known PII, then detects new PII via NER. For dictionaries, recursively
+        processes all string values. Returns updated mapping with any newly discovered PII.
+
+        Args:
+            output: Tool output that may contain PII values (string, dict, or other)
+
+        Returns:
+            Tuple of (anonymized_output, updated_flat_pii_mapping)
+        """
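The stub above only documents behaviour. As a rough illustration of the recursive tag replacement that `deanonymize_tool_args` describes, a standalone sketch (hypothetical free function, not the packaged implementation) could look like this:

```python
from typing import Any

def deanonymize_tool_args(args: dict[str, Any], flat_pii_mapping: dict[str, str]) -> dict[str, Any]:
    """Recursively swap PII tags such as '<EMAIL_1>' for their real values."""

    def _replace(value: Any) -> Any:
        if isinstance(value, str):
            # Substitute every known tag that appears anywhere in the string.
            for tag, real_value in flat_pii_mapping.items():
                value = value.replace(tag, real_value)
            return value
        if isinstance(value, dict):
            return {key: _replace(item) for key, item in value.items()}
        if isinstance(value, list):
            return [_replace(item) for item in value]
        return value  # Non-string leaves pass through unchanged.

    return _replace(args)

mapping = {"<EMAIL_1>": "jane@example.com"}
print(deanonymize_tool_args({"to": "<EMAIL_1>", "cc": ["<EMAIL_1>"]}, mapping))
# {'to': 'jane@example.com', 'cc': ['jane@example.com']}
```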
aip_agents/utils/pii/pii_helper.pyi
@@ -0,0 +1,78 @@
+from _typeshed import Incomplete
+from aip_agents.utils.logger import LoggerManager as LoggerManager
+from typing import Any
+
+logger: Incomplete
+
+def normalize_enable_pii(enable_pii: Any) -> bool | None:
+    """Normalize enable_pii value from agent configuration.
+
+    Args:
+        enable_pii: Raw enable_pii value from agent configuration.
+
+    Returns:
+        The normalized enable_pii flag when explicitly set (True/False), otherwise None.
+    """
+def add_pii_mappings(left: dict[str, str] | None, right: dict[str, str] | None) -> dict[str, str]:
+    """Reducer function to merge PII mappings from multiple sources.
+
+    This is a LangGraph reducer function that merges PII mappings from:
+    - Parent agent's initial mapping
+    - Tool outputs with newly discovered PII
+    - Subagent responses with their discovered PII
+
+    Args:
+        left: Existing PII mapping (or None)
+        right: New PII mapping to merge (or None)
+
+    Returns:
+        Merged PII mapping dictionary
+
+    Note:
+        - Right (new) mappings take precedence over left (existing)
+        - Handles None/non-dict cases gracefully
+        - Preserves all unique PII tags
+        - Returns empty dict if both inputs are None/empty
+    """
+def extract_pii_mapping_from_agent_response(result: Any) -> dict[str, str] | None:
+    """Extract PII mapping from subagent response.
+
+    Used by DelegationToolManager to propagate PII mappings from subagents
+    back to parent agents.
+
+    Args:
+        result: The result returned by the delegated agent
+
+    Returns:
+        PII mapping dictionary if found, None otherwise
+
+    Note:
+        - Checks if result is a dict
+        - Extracts 'full_final_state' from result
+        - Extracts 'pii_mapping' from full_final_state
+        - Validates mapping is a non-empty dict
+        - Returns None if any step fails
+    """
+def deanonymize_final_response_content(content: str, is_final_response: bool, metadata: dict[str, Any] | None) -> str:
+    """Deanonymize final response content using PII mapping from metadata.
+
+    Args:
+        content: Final response content that may contain PII tags.
+        is_final_response: Flag indicating whether this message is a final response.
+        metadata: Optional metadata dict (or event payload containing ``metadata``) with
+            ``pii_mapping`` tag-to-value mapping.
+
+    Returns:
+        Content string with PII tags replaced by real values when applicable.
+    """
+def anonymize_final_response_content(content: str, metadata: dict[str, Any] | None) -> str:
+    """Anonymize final response content using PII mapping from metadata.
+
+    Args:
+        content: Final response content that may contain real PII values.
+        metadata: Metadata dict (or event payload containing ``metadata``) with
+            ``pii_mapping`` tag-to-value mapping.
+
+    Returns:
+        Content string with real PII values replaced by their PII tags when mapping is present.
+    """
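Going by the `Note` block above, the merge semantics of `add_pii_mappings` can be sketched as a minimal stand-alone version, assuming plain `dict.update` precedence (this is an illustration, not the shipped code):

```python
def add_pii_mappings(left: dict[str, str] | None, right: dict[str, str] | None) -> dict[str, str]:
    """Merge two PII mappings; newer (right) entries win on key collisions."""
    merged: dict[str, str] = {}
    if isinstance(left, dict):
        merged.update(left)   # start from the existing mapping
    if isinstance(right, dict):
        merged.update(right)  # new tags are added, duplicate keys overwritten
    return merged             # empty dict when both inputs are None/non-dict

assert add_pii_mappings({"<NAME_1>": "Alice"}, {"<NAME_1>": "Bob", "<EMAIL_1>": "b@x.io"}) == {
    "<NAME_1>": "Bob",
    "<EMAIL_1>": "b@x.io",
}
```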
aip_agents/utils/pii/uuid_deanonymizer_mapping.pyi
@@ -0,0 +1,73 @@
+from _typeshed import Incomplete
+from aip_agents.utils.constants import DEFAULT_PII_TAG_NAMESPACE as DEFAULT_PII_TAG_NAMESPACE
+from gllm_privacy.pii_detector.utils.deanonymizer_mapping import DeanonymizerMapping, MappingDataType
+from typing import Any
+
+MappingDataType = dict[str, dict[str, str]]
+
+class DeanonymizerMapping:
+    """Fallback deanonymizer mapping when optional dependency is missing.
+
+    This class exists only to keep the module importable when `gllm-privacy`
+    is not installed.
+    """
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        """Initialize the mapping.
+
+        Raises:
+            ImportError: Always raised because `gllm-privacy` is required.
+        """
+
+logger: Incomplete
+
+def format_operator_with_uuid(operator_name: str, uuid_suffix: str) -> str:
+    """Format the operator name with a UUID suffix.
+
+    Args:
+        operator_name: The operator name.
+        uuid_suffix: The UUID suffix to append.
+
+    Returns:
+        The formatted operator name with UUID suffix.
+    """
+
+class UUIDDeanonymizerMapping(DeanonymizerMapping):
+    """Class to store the deanonymizer mapping with UUID suffixes.
+
+    This class extends DeanonymizerMapping to use UUID suffixes instead of
+    sequential numbers for differentiating multiple entities of the same type.
+
+    Attributes:
+        mapping: The deanonymizer mapping.
+        skip_format_duplicates: Whether to skip formatting duplicated operators.
+        uuid_length: The length of the UUID suffix to use (default: 8).
+    """
+    mapping: Incomplete
+    skip_format_duplicates: Incomplete
+    uuid_length: Incomplete
+    def __init__(self, mapping: MappingDataType | None = None, skip_format_duplicates: bool = False, uuid_length: int = 8) -> None:
+        """Initialize UUIDDeanonymizerMapping.
+
+        Args:
+            mapping: The deanonymizer mapping. If None, creates an empty defaultdict(dict).
+            skip_format_duplicates: Whether to skip formatting duplicated operators.
+            uuid_length: The length of the UUID suffix to use (default: 8).
+        """
+    def update(self, new_mapping: MappingDataType, use_uuid_suffix: bool | None = None) -> None:
+        """Update the deanonymizer mapping with new values using UUID suffixes.
+
+        Duplicated values will not be added. If there are multiple entities of the same type,
+        the mapping will include a UUID suffix to differentiate them. For example, if there are
+        two names in the input text, the mapping will include NAME_<uuid1> and NAME_<uuid2>.
+
+        Args:
+            new_mapping: The new mapping to be added to the existing deanonymizer mapping.
+            use_uuid_suffix: Whether to apply UUID suffixes to keys.
+                If True, keys will always be formatted with UUID suffixes.
+                If False, keys will be used as-is without UUID formatting.
+                If None, behavior falls back to the instance configuration via
+                skip_format_duplicates (preserving existing behavior).
+
+        Returns:
+            None
+        """
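A hedged illustration of the UUID-suffix scheme these docstrings describe; the `NAME_<uuid>` tag shape and the underscore separator are assumptions taken from the `update` example, not confirmed implementation details:

```python
import uuid

def format_operator_with_uuid(operator_name: str, uuid_suffix: str) -> str:
    # e.g. ("NAME", "3f9c1ab2") -> "NAME_3f9c1ab2"; the separator is an assumption.
    return f"{operator_name}_{uuid_suffix}"

def new_tag(entity_type: str, uuid_length: int = 8) -> str:
    # Two names in the same text get NAME_<uuid1> and NAME_<uuid2>
    # rather than NAME_1 and NAME_2, so tags stay unique across merges.
    return format_operator_with_uuid(entity_type, uuid.uuid4().hex[:uuid_length])

print(new_tag("NAME"), new_tag("NAME"))  # e.g. NAME_3f9c1ab2 NAME_7d40e6c1
```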
aip_agents/utils/reference_helper.pyi
@@ -0,0 +1,81 @@
+from _typeshed import Incomplete
+from aip_agents.utils.logger import get_logger as get_logger
+from aip_agents.utils.metadata_helper import MetadataFieldKeys as MetadataFieldKeys
+from gllm_core.schema import Chunk
+from langgraph.types import Command
+from typing import Any
+
+logger: Incomplete
+SAVE_OUTPUT_HISTORY_ATTR: str
+FORMAT_AGENT_REFERENCE: str
+
+def extract_references_from_tool(tool: Any, tool_output: Any) -> list[Chunk]:
+    """Extract reference data from tools that support it.
+
+    Extraction priority:
+    1. Direct tool references via _format_agent_reference (preferred)
+    2. Command.update references (fallback for delegation tools)
+
+    Args:
+        tool: The tool instance to extract references from
+        tool_output: The output from the tool execution
+    Returns:
+        List of deduplicated Chunk objects containing reference data
+
+    Note:
+        - Never raises exceptions; logs warnings for issues
+        - Direct tool references take precedence over Command references
+    """
+def extract_references_from_command_update(command: Command) -> list[Any]:
+    """Extract references from a Command object's update dictionary.
+
+    Args:
+        command: A Command object potentially containing references in its update dict.
+
+    Returns:
+        List of reference data (not yet validated/deduplicated)
+
+    Note:
+        - Never raises exceptions; logs warnings for malformed data
+        - Skips non-Chunk items with warning log
+    """
+def validate_references(references: list[Any]) -> list[Chunk]:
+    """Validate and deduplicate reference data.
+
+    Args:
+        references: List of reference data (expected to be Chunk objects).
+
+    Returns:
+        List of validated, deduplicated Chunk objects by content.
+    """
+def serialize_references_for_metadata(references: list[Any]) -> list[dict[str, Any]]:
+    """Serialize references for inclusion in A2A metadata.
+
+    Args:
+        references: List of reference objects (typically Chunk objects).
+
+    Returns:
+        List of serialized reference dictionaries.
+    """
+def add_references_chunks(left: list[Chunk], right: list[Chunk]) -> list[Chunk]:
+    """Reducer function to accumulate reference data from multiple tool calls.
+
+    This is a LangGraph reducer function that should be forgiving and handle
+    edge cases gracefully. Non-Chunk items are filtered out.
+
+    Args:
+        left: Existing list of reference chunks (or None/non-list)
+        right: New list of reference chunks to add (or None/non-list)
+
+    Returns:
+        Combined list of valid Chunk objects
+    """
+def extract_references_from_agent_response(result: Any) -> list[dict[str, Any]] | None:
+    """Extract references from agent response for delegation tools.
+
+    Args:
+        result: The result returned by the delegated agent.
+
+    Returns:
+        List of reference chunks if found, None otherwise.
+    """
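The validation and reducer behaviour documented above (keep only `Chunk` items, deduplicate by content) could be approximated as below; the `Chunk` placeholder stands in for `gllm_core.schema.Chunk`, and content-based deduplication is an assumption drawn from the `validate_references` docstring:

```python
from dataclasses import dataclass
from typing import Any

@dataclass(frozen=True)
class Chunk:
    """Tiny placeholder standing in for gllm_core.schema.Chunk."""
    content: str

def add_references_chunks(left: Any, right: Any) -> list[Chunk]:
    """Combine two (possibly None/non-list) inputs, keeping only Chunk items."""
    combined: list[Chunk] = []
    for value in (left, right):
        if isinstance(value, list):
            combined.extend(item for item in value if isinstance(item, Chunk))
    return combined

def validate_references(references: list[Any]) -> list[Chunk]:
    """Keep Chunk objects only, deduplicated by content (first occurrence wins)."""
    seen: set[str] = set()
    unique: list[Chunk] = []
    for item in references:
        if isinstance(item, Chunk) and item.content not in seen:
            seen.add(item.content)
            unique.append(item)
    return unique

refs = add_references_chunks([Chunk("doc-a")], [Chunk("doc-a"), Chunk("doc-b"), "junk"])
print(validate_references(refs))  # [Chunk(content='doc-a'), Chunk(content='doc-b')]
```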
aip_agents/utils/sse_chunk_transformer.pyi
@@ -0,0 +1,166 @@
+from _typeshed import Incomplete
+from aip_agents.schema.a2a import A2AEvent
+from collections.abc import AsyncGenerator
+from enum import StrEnum
+from typing import Any
+
+__all__ = ['SSEChunkTransformer', 'TaskState', 'ChunkStatus', 'ChunkReason', 'ChunkFieldKeys']
+
+class TaskState(StrEnum):
+    """Task state values for SSE chunks."""
+    WORKING: str
+    COMPLETED: str
+    FAILED: str
+
+class ChunkStatus(StrEnum):
+    """Status values for SSE chunks."""
+    SUCCESS: str
+    ERROR: str
+
+class ChunkReason(StrEnum):
+    """Reason codes for special chunk states."""
+    EMPTY_PAYLOAD: str
+
+class ChunkFieldKeys(StrEnum):
+    """Field name constants for SSE chunk structure."""
+    STATUS: str
+    TASK_STATE: str
+    CONTENT: str
+    EVENT_TYPE: str
+    FINAL: str
+    METADATA: str
+    TIMESTAMP: str
+    TASK_ID: str
+    CONTEXT_ID: str
+    ARTIFACTS: str
+    REASON: str
+
+class SSEChunkTransformer:
+    '''Transforms A2AEvent stream to SSE-compatible output.
+
+    This class converts events from arun_a2a_stream into the normalized dict format
+    matching A2AConnector.astream_to_agent output.
+
+    Lifecycle:
+        Single-stream instance. Must NOT be reused across concurrent streams.
+        Each arun_sse_stream call creates a fresh instance.
+
+    Attributes:
+        task_id: Optional task identifier for the stream.
+        context_id: Optional context identifier for the stream.
+
+    Example:
+        >>> transformer = SSEChunkTransformer(task_id="task-123")
+        >>> async for chunk in transformer.transform_stream(agent.arun_a2a_stream("query")):
+        ...     print(chunk)
+    '''
+    task_id: Incomplete
+    context_id: Incomplete
+    def __init__(self, task_id: str | None = None, context_id: str | None = None, pii_mapping: dict[str, str] | None = None) -> None:
+        """Initialize the transformer with optional task and context IDs.
+
+        Args:
+            task_id: Optional task identifier for the stream.
+            context_id: Optional context identifier for the stream.
+            pii_mapping: Optional PII mapping to inject into each chunk's metadata.
+        """
+    @staticmethod
+    def normalize_metadata_enums(data: Any) -> Any:
+        """Recursively convert enum keys/values to their string values.
+
+        This is a pure normalization utility that converts any enum instances
+        (MetadataFieldKeys, Kind, Status, etc.) to their .value strings.
+
+        Args:
+            data: Dict, list, or value that may contain enum keys/values.
+
+        Returns:
+            Normalized data with all enums converted to their .value strings.
+        """
+    @staticmethod
+    def normalize_event_type_value(event_type: Any) -> str | None:
+        """Convert A2AStreamEventType enum to string.
+
+        Args:
+            event_type: Event type (enum, string, or None).
+
+        Returns:
+            String value of the event type, or None if invalid.
+        """
+    @staticmethod
+    def create_artifact_hash(artifact: dict[str, Any]) -> str:
+        """Create a stable hash for artifact deduplication.
+
+        Uses name, content_type, mime_type, and file_data for hashing,
+        excluding artifact_id which may be randomly generated.
+
+        Args:
+            artifact: Artifact dict with name, content_type, mime_type, and optionally file_data.
+
+        Returns:
+            SHA256 hexdigest hash string for deduplication.
+        """
+    @staticmethod
+    def extract_tool_outputs(tool_calls: list[dict[str, Any]]) -> list[str]:
+        """Extract human-readable output strings from tool calls.
+
+        Args:
+            tool_calls: List of tool call dictionaries.
+
+        Returns:
+            List of human-readable output strings.
+        """
+    @staticmethod
+    def format_tool_output(output: Any, tool_name: str) -> str:
+        """Format a single tool output for display.
+
+        Args:
+            output: The tool output to format.
+            tool_name: The name of the tool.
+
+        Returns:
+            The formatted output string.
+        """
+    @staticmethod
+    def apply_hitl_content_override(content: str | None, event_type_str: str, metadata: dict[str, Any]) -> str | None:
+        """Apply HITL content override when HITL is active and tool results are available.
+
+        This method overrides the content with human-readable tool output when HITL
+        is active, matching A2AConnector behavior.
+
+        Args:
+            content: The original content/status message.
+            event_type_str: The type of event being processed (normalized string).
+            metadata: The metadata dictionary containing tool_info and hitl flag.
+
+        Returns:
+            The original content or human-readable tool output if HITL is active.
+        """
+    def transform_event(self, event: A2AEvent) -> dict[str, Any]:
+        """Transform a single A2AEvent to SSE chunk format.
+
+        Converts the A2AEvent structure to the normalized SSE chunk format,
+        relocating fields like tool_info and thinking_and_activity_info into
+        metadata, and normalizing enum values to strings.
+
+        Args:
+            event: Single A2AEvent dict from arun_a2a_stream.
+
+        Returns:
+            SSEChunk dict with normalized structure.
+        """
+    async def transform_stream(self, stream: AsyncGenerator[A2AEvent, None]) -> AsyncGenerator[dict[str, Any], None]:
+        """Transform A2AEvent stream to SSE-compatible chunks.
+
+        Wraps the input stream and transforms each event, handling artifact
+        deduplication and time tracking across the stream.
+
+        Args:
+            stream: Async generator yielding A2AEvent dicts.
+
+        Yields:
+            SSEChunk dicts with normalized structure.
+
+        Raises:
+            Exceptions from underlying stream propagate to caller.
+        """
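`create_artifact_hash` is documented as a SHA-256 digest over `name`, `content_type`, `mime_type`, and `file_data`, deliberately ignoring `artifact_id`. A plausible sketch follows, with the exact field-joining scheme assumed rather than taken from the package:

```python
import hashlib
from typing import Any

def create_artifact_hash(artifact: dict[str, Any]) -> str:
    # Only stable fields participate; artifact_id is excluded because it may be
    # regenerated for every streamed event carrying the same artifact.
    parts = [
        str(artifact.get("name", "")),
        str(artifact.get("content_type", "")),
        str(artifact.get("mime_type", "")),
        str(artifact.get("file_data", "")),
    ]
    return hashlib.sha256("|".join(parts).encode("utf-8")).hexdigest()

a = {"artifact_id": "id-1", "name": "chart.png", "mime_type": "image/png", "file_data": "aGVsbG8="}
b = {**a, "artifact_id": "id-2"}
assert create_artifact_hash(a) == create_artifact_hash(b)  # same artifact, different ids
```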
aip_agents/utils/step_limit_manager.pyi
@@ -0,0 +1,112 @@
+from _typeshed import Incomplete
+from aip_agents.schema.step_limit import MaxDelegationDepthExceededError as MaxDelegationDepthExceededError, MaxStepsExceededError as MaxStepsExceededError, StepLimitConfig as StepLimitConfig, StepLimitErrorResponse as StepLimitErrorResponse, StepLimitErrorType as StepLimitErrorType
+from dataclasses import dataclass, field
+from typing import Any
+
+@dataclass
+class StepExecutionContext:
+    """Runtime context for tracking step execution and delegation depth.
+
+    Attributes:
+        current_step: Current step number (0-indexed).
+        delegation_depth: Current depth in delegation chain (0 for root agent).
+        remaining_step_budget: Steps remaining before limit is hit.
+        delegation_chain: List of agent names in the delegation chain.
+    """
+    current_step: int = ...
+    delegation_depth: int = ...
+    remaining_step_budget: int | None = ...
+    delegation_chain: list[str] = field(default_factory=list)
+
+class StepLimitManager:
+    """Manages step and delegation limit enforcement during agent execution.
+
+    This manager integrates with LangGraph's existing step mechanisms and adds
+    delegation depth tracking and budget propagation.
+
+    Attributes:
+        config: Step limit configuration.
+        context: Current execution context.
+    """
+    config: Incomplete
+    context: Incomplete
+    def __init__(self, config: StepLimitConfig | None = None, initial_delegation_depth: int = 0, parent_step_budget: int | None = None) -> None:
+        """Initialize step limit manager.
+
+        Args:
+            config: Optional step limit configuration. Uses defaults if None.
+            initial_delegation_depth: Starting delegation depth (from parent).
+            parent_step_budget: Remaining step budget inherited from parent agent.
+        """
+    def check_step_limit(self, agent_name: str = 'agent', count: int = 1) -> None:
+        """Check if taking 'count' steps would exceed limit.
+
+        Args:
+            agent_name: Name of the agent to identify in error message.
+            count: Number of steps to check (useful for parallel tool batches).
+
+        Raises:
+            MaxStepsExceededError: If max_steps limit is exceeded.
+        """
+    def check_delegation_depth(self, target_agent_name: str) -> None:
+        """Check if delegation to target agent would exceed depth limit.
+
+        Args:
+            target_agent_name: Name of the agent to delegate to.
+
+        Raises:
+            MaxDelegationDepthExceededError: If delegation depth limit exceeded.
+        """
+    def increment_step(self, count: int = 1) -> None:
+        """Increment step counter and update remaining budget.
+
+        Args:
+            count: Number of steps to consume (defaults to 1).
+        """
+    def get_child_budget(self, child_max_steps: int | None = None) -> int:
+        """Calculate step budget to allocate to child agent.
+
+        Algorithm:
+        1. If remaining_step_budget is None (root with no limit), use config.max_steps - 1
+        2. If remaining_step_budget <= 1, return 0 (no budget left for child)
+        3. Calculate child_budget = remaining_step_budget - 1 (reserve 1 for parent)
+        4. If child has own max_steps config, return min(child_budget, child.max_steps)
+        5. Otherwise return child_budget
+
+        Args:
+            child_max_steps: Optional child agent's own max_steps limit.
+
+        Returns:
+            Step budget for child agent, accounting for parent's continuation.
+            Returns 0 if no budget available for child.
+
+        Edge Cases:
+            - remaining=1: Returns 0 (parent needs the last step)
+            - remaining=None: Uses config.max_steps - 1
+            - child has own limit: Returns min(calculated_budget, child_limit)
+        """
+    def add_to_delegation_chain(self, agent_name: str) -> None:
+        """Add agent to delegation chain for tracking.
+
+        Args:
+            agent_name: Name of the agent being delegated to.
+        """
+    @classmethod
+    def from_state(cls, state: dict[str, Any], config: StepLimitConfig | None = None) -> StepLimitManager:
+        """Create manager from LangGraph state.
+
+        Args:
+            state: LangGraph agent state containing remaining_steps, etc.
+            config: Optional step limit configuration.
+
+        Returns:
+            Initialized step limit manager.
+        """
+    def set_context(self) -> None:
+        """Set context variables for downstream consumption (e.g. by delegation tools)."""
+    def to_state_update(self) -> dict[str, Any]:
+        """Convert current context to LangGraph state update.
+
+        Returns:
+            Dictionary of state fields to update.
+        """
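The numbered algorithm in the `get_child_budget` docstring translates almost directly into code. The sketch below is written as a free function with `config.max_steps` passed in explicitly, purely to illustrate the budget arithmetic:

```python
def get_child_budget(
    remaining_step_budget: int | None,
    config_max_steps: int,
    child_max_steps: int | None = None,
) -> int:
    # 1. Root agent with no tracked budget: derive from the configured max_steps.
    if remaining_step_budget is None:
        child_budget = config_max_steps - 1
    # 2. One step (or less) left: the parent needs it, so the child gets nothing.
    elif remaining_step_budget <= 1:
        return 0
    # 3. Reserve one step for the parent to process the child's answer.
    else:
        child_budget = remaining_step_budget - 1
    # 4./5. Respect the child's own max_steps when it is stricter.
    if child_max_steps is not None:
        return min(child_budget, child_max_steps)
    return child_budget

print(get_child_budget(remaining_step_budget=10, config_max_steps=25))                       # 9
print(get_child_budget(remaining_step_budget=1, config_max_steps=25))                        # 0
print(get_child_budget(remaining_step_budget=None, config_max_steps=25, child_max_steps=5))  # 5
```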
aip_agents/utils/token_usage_helper.pyi
@@ -0,0 +1,60 @@
+from langchain_core.messages.ai import AIMessage, UsageMetadata
+from langchain_core.messages.tool import ToolMessage as ToolMessage
+from langgraph.types import Command
+from typing import Any
+
+USAGE_METADATA_KEY: str
+TOTAL_USAGE_KEY: str
+STEP_USAGE_KEY: str
+
+def add_usage_metadata(cur_accumulated_token_usage: UsageMetadata | None, new_token_usage: UsageMetadata | None) -> UsageMetadata | None:
+    """Reducer function to accumulate UsageMetadata across agent runs.
+
+    Args:
+        cur_accumulated_token_usage: The current accumulated token usage metadata.
+        new_token_usage: New token usage metadata to add.
+
+    Returns:
+        Accumulated usage metadata or None if both inputs are None.
+    """
+def extract_and_update_token_usage_from_ai_message(ai_message: AIMessage) -> dict[str, Any]:
+    """Extract token usage from AI message and prepare state update.
+
+    Args:
+        ai_message: The AI message containing usage metadata.
+
+    Returns:
+        Dictionary with accumulated_usage_metadata update if usage metadata is available.
+    """
+def extract_token_usage_from_tool_output(tool_output: Any) -> UsageMetadata | None:
+    """Extract token usage from various tool output formats.
+
+    Supports multiple tool output formats:
+    1. Dictionary with 'usage_metadata' field
+    2. Command with 'usage_metadata' attribute
+    3. Any object with 'usage_metadata' attribute
+
+    Args:
+        tool_output: The output from a tool execution.
+
+    Returns:
+        UsageMetadata if found, None otherwise.
+    """
+def extract_token_usage_from_command(command: Command) -> UsageMetadata | None:
+    """Extract token usage from Command object.
+
+    Args:
+        command: The Command object to extract token usage from.
+
+    Returns:
+        UsageMetadata if found, None otherwise.
+    """
+def extract_token_usage_from_agent_response(agent_response: dict[str, Any]) -> UsageMetadata | None:
+    """Extract accumulated token usage from agent response.
+
+    Args:
+        agent_response: The agent response to extract token usage from.
+
+    Returns:
+        UsageMetadata if found, None otherwise.
+    """
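A minimal sketch of the accumulation `add_usage_metadata` describes, summing only the three core `UsageMetadata` counters; the shipped reducer may also merge the optional token-detail fields:

```python
from langchain_core.messages.ai import UsageMetadata

def add_usage_metadata(
    cur: UsageMetadata | None, new: UsageMetadata | None
) -> UsageMetadata | None:
    if cur is None:
        return new
    if new is None:
        return cur
    # Sum the core counters; per-modality detail breakdowns are ignored in this sketch.
    return UsageMetadata(
        input_tokens=cur["input_tokens"] + new["input_tokens"],
        output_tokens=cur["output_tokens"] + new["output_tokens"],
        total_tokens=cur["total_tokens"] + new["total_tokens"],
    )

a = UsageMetadata(input_tokens=10, output_tokens=5, total_tokens=15)
b = UsageMetadata(input_tokens=3, output_tokens=2, total_tokens=5)
print(add_usage_metadata(a, b))  # {'input_tokens': 13, 'output_tokens': 7, 'total_tokens': 20}
```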
{aip_agents_binary-0.5.20.dist-info → aip_agents_binary-0.5.22.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: aip-agents-binary
-Version: 0.5.20
+Version: 0.5.22
 Summary: A library for managing agents in Gen AI applications.
 Author-email: Raymond Christopher <raymond.christopher@gdplabs.id>
 Requires-Python: <3.13,>=3.11
@@ -159,9 +159,9 @@ Navigate to the library's root directory (e.g., `python/aip-agents` if you clone
 python aip_agents/examples/hello_world_langgraph.py
 ```
 
-**LangGraph with
+**LangGraph with GL Connectors (OpenAI):**
 ```bash
-python aip_agents/examples/
+python aip_agents/examples/hello_world_langgraph_gl_connector_twitter.py
 ```
 
 **LangGraph Streaming (OpenAI):**