aip-agents-binary 0.5.25b9__py3-none-any.whl → 0.6.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (73)
  1. aip_agents/agent/base_langgraph_agent.py +137 -68
  2. aip_agents/agent/base_langgraph_agent.pyi +3 -2
  3. aip_agents/agent/langgraph_react_agent.py +252 -16
  4. aip_agents/agent/langgraph_react_agent.pyi +40 -1
  5. aip_agents/examples/compare_streaming_client.py +2 -2
  6. aip_agents/examples/compare_streaming_server.py +1 -1
  7. aip_agents/examples/hello_world_ptc.py +51 -0
  8. aip_agents/examples/hello_world_ptc.pyi +5 -0
  9. aip_agents/examples/hello_world_tool_output_client.py +9 -0
  10. aip_agents/examples/todolist_planning_a2a_langchain_client.py +2 -2
  11. aip_agents/examples/todolist_planning_a2a_langgraph_server.py +1 -1
  12. aip_agents/guardrails/engines/base.py +6 -6
  13. aip_agents/mcp/client/connection_manager.py +36 -1
  14. aip_agents/mcp/client/connection_manager.pyi +3 -0
  15. aip_agents/mcp/client/persistent_session.py +318 -68
  16. aip_agents/mcp/client/persistent_session.pyi +9 -0
  17. aip_agents/mcp/client/transports.py +33 -2
  18. aip_agents/mcp/client/transports.pyi +9 -0
  19. aip_agents/ptc/__init__.py +48 -0
  20. aip_agents/ptc/__init__.pyi +10 -0
  21. aip_agents/ptc/doc_gen.py +122 -0
  22. aip_agents/ptc/doc_gen.pyi +40 -0
  23. aip_agents/ptc/exceptions.py +39 -0
  24. aip_agents/ptc/exceptions.pyi +22 -0
  25. aip_agents/ptc/executor.py +143 -0
  26. aip_agents/ptc/executor.pyi +73 -0
  27. aip_agents/ptc/mcp/__init__.py +45 -0
  28. aip_agents/ptc/mcp/__init__.pyi +7 -0
  29. aip_agents/ptc/mcp/sandbox_bridge.py +668 -0
  30. aip_agents/ptc/mcp/sandbox_bridge.pyi +47 -0
  31. aip_agents/ptc/mcp/templates/__init__.py +1 -0
  32. aip_agents/ptc/mcp/templates/__init__.pyi +0 -0
  33. aip_agents/ptc/mcp/templates/mcp_client.py.template +239 -0
  34. aip_agents/ptc/naming.py +184 -0
  35. aip_agents/ptc/naming.pyi +76 -0
  36. aip_agents/ptc/payload.py +26 -0
  37. aip_agents/ptc/payload.pyi +15 -0
  38. aip_agents/ptc/prompt_builder.py +571 -0
  39. aip_agents/ptc/prompt_builder.pyi +55 -0
  40. aip_agents/ptc/ptc_helper.py +16 -0
  41. aip_agents/ptc/ptc_helper.pyi +1 -0
  42. aip_agents/ptc/sandbox_bridge.py +58 -0
  43. aip_agents/ptc/sandbox_bridge.pyi +25 -0
  44. aip_agents/ptc/template_utils.py +33 -0
  45. aip_agents/ptc/template_utils.pyi +13 -0
  46. aip_agents/ptc/templates/__init__.py +1 -0
  47. aip_agents/ptc/templates/__init__.pyi +0 -0
  48. aip_agents/ptc/templates/ptc_helper.py.template +134 -0
  49. aip_agents/sandbox/__init__.py +43 -0
  50. aip_agents/sandbox/__init__.pyi +5 -0
  51. aip_agents/sandbox/defaults.py +9 -0
  52. aip_agents/sandbox/defaults.pyi +2 -0
  53. aip_agents/sandbox/e2b_runtime.py +267 -0
  54. aip_agents/sandbox/e2b_runtime.pyi +51 -0
  55. aip_agents/sandbox/template_builder.py +131 -0
  56. aip_agents/sandbox/template_builder.pyi +36 -0
  57. aip_agents/sandbox/types.py +24 -0
  58. aip_agents/sandbox/types.pyi +14 -0
  59. aip_agents/sandbox/validation.py +50 -0
  60. aip_agents/sandbox/validation.pyi +20 -0
  61. aip_agents/tools/__init__.py +2 -0
  62. aip_agents/tools/__init__.pyi +2 -1
  63. aip_agents/tools/browser_use/browser_use_tool.py +8 -0
  64. aip_agents/tools/browser_use/streaming.py +2 -0
  65. aip_agents/tools/execute_ptc_code.py +305 -0
  66. aip_agents/tools/execute_ptc_code.pyi +87 -0
  67. aip_agents/utils/langgraph/tool_managers/delegation_tool_manager.py +26 -1
  68. aip_agents/utils/langgraph/tool_output_management.py +80 -0
  69. aip_agents/utils/langgraph/tool_output_management.pyi +37 -0
  70. {aip_agents_binary-0.5.25b9.dist-info → aip_agents_binary-0.6.1.dist-info}/METADATA +51 -48
  71. {aip_agents_binary-0.5.25b9.dist-info → aip_agents_binary-0.6.1.dist-info}/RECORD +73 -27
  72. {aip_agents_binary-0.5.25b9.dist-info → aip_agents_binary-0.6.1.dist-info}/WHEEL +0 -0
  73. {aip_agents_binary-0.5.25b9.dist-info → aip_agents_binary-0.6.1.dist-info}/top_level.txt +0 -0
@@ -18,14 +18,14 @@ from collections.abc import Awaitable, Callable, Sequence
  from dataclasses import asdict, dataclass
  from functools import reduce
  from textwrap import dedent
- from typing import TYPE_CHECKING, Annotated, Any
+ from typing import TYPE_CHECKING, Annotated, Any, cast

- from deprecated import deprecated
+ from deprecated import deprecated  # type: ignore[import-untyped]

  if TYPE_CHECKING:
      from aip_agents.guardrails.manager import GuardrailManager
-     from gllm_core.event import EventEmitter
-     from gllm_core.schema import Chunk
+     from gllm_core.event import EventEmitter  # type: ignore[import-untyped]
+     from gllm_core.schema import Chunk  # type: ignore[import-untyped]
      from langchain_core.language_models import BaseChatModel
      from langchain_core.messages import (
          AIMessage,
@@ -87,6 +87,9 @@ from aip_agents.utils.token_usage_helper import (
      extract_token_usage_from_tool_output,
  )

+ if TYPE_CHECKING:
+     from aip_agents.ptc import PTCSandboxConfig
+
  logger = get_logger(__name__)

  # Default instruction for ReAct agents
@@ -165,6 +168,7 @@ class LangGraphReactAgent(LangGraphHitLMixin, BaseLangGraphAgent):
          middlewares: Sequence[AgentMiddleware] | None = None,
          guardrail: GuardrailManager | None = None,
          step_limit_config: StepLimitConfig | None = None,
+         ptc_config: PTCSandboxConfig | None = None,
          **kwargs: Any,
      ):
          """Initialize the LangGraph ReAct Agent.
@@ -193,6 +197,11 @@ class LangGraphReactAgent(LangGraphHitLMixin, BaseLangGraphAgent):
                  input/output filtering during agent execution.
              enable_pii: Optional toggle to enable PII handling for tool inputs and outputs.
              step_limit_config: Optional configuration for step limits and delegation depth.
+             ptc_config: Optional configuration for PTC sandbox execution. See PTCSandboxConfig
+                 for available options including enabled flag, sandbox timeout, and template settings.
+                 PTC is enabled when ptc_config is not None and ptc_config.enabled is True.
+                 When enabled, prompt guidance is automatically injected into the agent's instruction.
+                 PTC runs in a sandbox only; there is no in-process trusted PTC path.
              **kwargs: Additional keyword arguments passed to BaseLangGraphAgent.
          """
          # Use LangGraph's standard AgentState for ReAct
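For orientation, a minimal sketch of the new ptc_config parameter at the call site, based on the constructor signature above and the hello_world_ptc.py example added later in this diff; the agent name and model choice are illustrative:

    # Sketch only: enabling PTC at construction time.
    from langchain_openai import ChatOpenAI

    from aip_agents.agent import LangGraphReactAgent
    from aip_agents.ptc import PTCSandboxConfig

    agent = LangGraphReactAgent(
        name="ptc_demo",  # illustrative name
        instruction="You are a helpful assistant with access to execute_ptc_code.",
        model=ChatOpenAI(model="gpt-4o"),  # illustrative model choice
        ptc_config=PTCSandboxConfig(enabled=True),  # PTC is active only when enabled is True
    )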
@@ -212,6 +221,12 @@ class LangGraphReactAgent(LangGraphHitLMixin, BaseLangGraphAgent):
              **kwargs,
          )

+         if self.model is None and self.lm_invoker is None:
+             logger.warning(
+                 "Agent '%s': Model and LM invoker are both unset. Calls that require a model will fail.",
+                 self.name,
+             )
+
          # Handle tool output management
          self.tool_output_manager = tool_output_manager
          self._pii_handlers_by_thread: dict[str, ToolPIIHandler] = {}
@@ -233,6 +248,18 @@ class LangGraphReactAgent(LangGraphHitLMixin, BaseLangGraphAgent):

          self.step_limit_config = step_limit_config

+         # Initialize PTC state (Programmatic Tool Calling)
+         self._ptc_config: PTCSandboxConfig | None = None
+         self._ptc_tool_synced = False
+         self._ptc_tool: BaseTool | None = None
+         self._ptc_prompt_hash: str = ""
+         # Capture instruction after middleware setup so middleware prompts are preserved
+         self._original_instruction: str = self.instruction
+
+         # Enable PTC if requested via constructor
+         if ptc_config is not None and ptc_config.enabled:
+             self.enable_ptc(ptc_config)
+
      def _setup_middleware(
          self,
          planning: bool,
@@ -256,7 +283,7 @@ class LangGraphReactAgent(LangGraphHitLMixin, BaseLangGraphAgent):

          # Auto-configure TodoListMiddleware if planning enabled
          if planning:
-             middleware_list.append(TodoListMiddleware())
+             middleware_list.append(cast(AgentMiddleware, TodoListMiddleware()))

          # Auto-configure GuardrailMiddleware if guardrail provided
          if guardrail:
@@ -419,9 +446,9 @@ class LangGraphReactAgent(LangGraphHitLMixin, BaseLangGraphAgent):
          )

      def _rebuild_resolved_tools(self) -> None:
-         """Rebuild resolved tools including middleware tools.
+         """Rebuild resolved tools including middleware and PTC tools.

-         Overrides base class to ensure middleware tools are preserved
+         Overrides base class to ensure middleware tools and the PTC tool are preserved
          when tools are rebuilt (e.g., after update_regular_tools).
          """
          # Call base class to rebuild with regular, a2a, delegation, and mcp tools
@@ -431,6 +458,10 @@ class LangGraphReactAgent(LangGraphHitLMixin, BaseLangGraphAgent):
          if hasattr(self, "_middleware_tools") and self._middleware_tools:
              self.resolved_tools.extend(self._middleware_tools)

+         # Add PTC tool if synced
+         if hasattr(self, "_ptc_tool") and self._ptc_tool is not None:
+             self.resolved_tools.append(self._ptc_tool)
+
      def _handle_tool_artifacts(
          self, tool_output: Any, pending_artifacts: list[dict[str, Any]]
      ) -> tuple[str, list[dict[str, Any]]]:
@@ -732,7 +763,7 @@ class LangGraphReactAgent(LangGraphHitLMixin, BaseLangGraphAgent):
          pending_artifacts: list[dict[str, Any]] = state.get("artifacts") or []
          reference_updates: list[Chunk] = []
          tool_map = {tool.name: tool for tool in self.resolved_tools}
-         pii_mapping = {}
+         pii_mapping: dict[str, str] = {}

          aggregated_metadata_delta: dict[str, Any] = {}
          total_tools_token_usage: list[UsageMetadata] = []
@@ -756,7 +787,8 @@ class LangGraphReactAgent(LangGraphHitLMixin, BaseLangGraphAgent):
              ),
          )

-         tasks = [asyncio.create_task(run_tool(tc)) for tc in last_message.tool_calls]
+         normalized_tool_calls = [self._normalize_tool_call(tc) for tc in last_message.tool_calls]
+         tasks = [asyncio.create_task(run_tool(tc)) for tc in normalized_tool_calls]

          for coro in asyncio.as_completed(tasks):
              tool_result = await coro
@@ -779,6 +811,31 @@ class LangGraphReactAgent(LangGraphHitLMixin, BaseLangGraphAgent):
              pii_mapping,
          )

+     def _normalize_tool_call(self, tool_call: Any) -> dict[str, Any]:
+         """Normalize tool call inputs into a dict with required keys."""
+         if isinstance(tool_call, dict):
+             normalized = dict(tool_call)
+         elif hasattr(tool_call, "model_dump"):
+             normalized = tool_call.model_dump()
+         elif hasattr(tool_call, "dict"):
+             normalized = tool_call.dict()
+         elif hasattr(tool_call, "name") and hasattr(tool_call, "args"):
+             normalized = {
+                 "id": getattr(tool_call, "id", None),
+                 "name": getattr(tool_call, "name", None),
+                 "args": getattr(tool_call, "args", None),
+             }
+         else:
+             raise TypeError("Tool call must be a dict-like object or ToolCall instance.")
+
+         if not isinstance(normalized, dict):
+             raise TypeError("Tool call normalization did not produce a dict.")
+
+         if "name" not in normalized or "args" not in normalized:
+             raise TypeError("Tool call must include 'name' and 'args' fields.")
+
+         return normalized
+
      def _accumulate_tool_result(  # noqa: PLR0913
          self,
          tool_result: Any,
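The _normalize_tool_call helper above exists because tool calls may reach the agent either as plain dicts or as objects exposing model_dump()/dict(). A small sketch of the two shapes it accepts; FakeToolCall is a made-up stand-in, not part of the package:

    # Sketch only: both shapes normalize to the same {"id", "name", "args"} mapping.
    from pydantic import BaseModel

    class FakeToolCall(BaseModel):  # hypothetical stand-in for an object-style tool call
        id: str
        name: str
        args: dict

    plain = {"id": "call_1", "name": "search", "args": {"query": "weather"}}
    objlike = FakeToolCall(id="call_2", name="search", args={"query": "news"})

    # agent._normalize_tool_call(plain)   -> copy of the dict
    # agent._normalize_tool_call(objlike) -> FakeToolCall.model_dump()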
@@ -787,7 +844,7 @@ class LangGraphReactAgent(LangGraphHitLMixin, BaseLangGraphAgent):
          aggregated_metadata_delta: dict[str, Any],
          reference_updates: list[Chunk],
          total_tools_token_usage: list[UsageMetadata],
-         pii_mapping: dict[str, str] | None,
+         pii_mapping: dict[str, str],
      ) -> None:  # noqa: PLR0913
          """Accumulate results from a single tool call.

@@ -1233,13 +1290,16 @@ class LangGraphReactAgent(LangGraphHitLMixin, BaseLangGraphAgent):

          # Create enhanced tool configuration with output management
          tool_config = self._create_enhanced_tool_config(config, state, tool_call["name"], tool_call_id)
+         if not isinstance(tool_config, dict):
+             raise TypeError("Tool configuration must be a dictionary.")
+         tool_config_runnable = tool_config

          arun_streaming_method = getattr(tool, TOOL_RUN_STREAMING_METHOD, None)

          if arun_streaming_method and callable(arun_streaming_method):
              tool_output = await self._execute_tool_with_streaming(tool, tool_call, tool_config)
          else:
-             tool_output = await tool.ainvoke(resolved_args, tool_config)
+             tool_output = await tool.ainvoke(resolved_args, tool_config_runnable)

          references = extract_references_from_tool(tool, tool_output)

@@ -1513,7 +1573,7 @@ class LangGraphReactAgent(LangGraphHitLMixin, BaseLangGraphAgent):
          tool_call: dict[str, Any],
          execution_time: float,
          pending_artifacts: list[dict[str, Any]],
-     ) -> tuple[list[BaseMessage], list[dict[str, Any]], dict[str, Any]]:
+     ) -> tuple[list[ToolMessage], list[dict[str, Any]], dict[str, Any]]:
          """Process tool output into messages, artifacts, and metadata.

          Args:
@@ -1541,7 +1601,7 @@ class LangGraphReactAgent(LangGraphHitLMixin, BaseLangGraphAgent):

      def _handle_command_output(
          self, tool_output: Command, tool_call: dict[str, Any], execution_time: float, metadata_delta: dict[str, Any]
-     ) -> tuple[list[BaseMessage], list[dict[str, Any]], dict[str, Any]]:
+     ) -> tuple[list[ToolMessage], list[dict[str, Any]], dict[str, Any]]:
          """Handle Command type tool outputs.

          Args:
@@ -1570,7 +1630,7 @@ class LangGraphReactAgent(LangGraphHitLMixin, BaseLangGraphAgent):

      def _handle_string_output(
          self, tool_output: str, tool_call: dict[str, Any], execution_time: float
-     ) -> tuple[list[BaseMessage], list[dict[str, Any]], dict[str, Any]]:
+     ) -> tuple[list[ToolMessage], list[dict[str, Any]], dict[str, Any]]:
          """Handle string type tool outputs.

          Args:
@@ -1596,7 +1656,7 @@ class LangGraphReactAgent(LangGraphHitLMixin, BaseLangGraphAgent):
          execution_time: float,
          pending_artifacts: list[dict[str, Any]],
          metadata_delta: dict[str, Any],
-     ) -> tuple[list[BaseMessage], list[dict[str, Any]], dict[str, Any]]:
+     ) -> tuple[list[ToolMessage], list[dict[str, Any]], dict[str, Any]]:
          """Handle legacy dict and other tool outputs.

          Args:
@@ -1694,8 +1754,11 @@ class LangGraphReactAgent(LangGraphHitLMixin, BaseLangGraphAgent):
          self._emit_default_tool_call_event(writer, tool_name, tool_call_id, tool_args)

          streaming_kwargs = self._build_streaming_kwargs(tool_args, tool_config)
+         arun_streaming_method = getattr(tool, TOOL_RUN_STREAMING_METHOD, None)
+         if not callable(arun_streaming_method):
+             raise RuntimeError(f"Tool '{tool_name}' does not implement streaming.")

-         async for chunk in tool.arun_streaming(**streaming_kwargs):
+         async for chunk in arun_streaming_method(**streaming_kwargs):
              final_output, saw_tool_result = self._handle_streaming_chunk(
                  chunk=chunk,
                  writer=writer,
@@ -2125,6 +2188,9 @@ class LangGraphReactAgent(LangGraphHitLMixin, BaseLangGraphAgent):

          effective_event_emitter = state.get("event_emitter") or self.event_emitter

+         if self.lm_invoker is None:
+             raise RuntimeError("LM invoker is required for this execution path.")
+
          if self.resolved_tools:
              self.lm_invoker.set_tools(self.resolved_tools)

@@ -2183,6 +2249,9 @@ class LangGraphReactAgent(LangGraphHitLMixin, BaseLangGraphAgent):
          ):
              langchain_prompt = [SystemMessage(content=enhanced_instruction)] + list(current_messages)

+             if self.model is None:
+                 raise RuntimeError("Model is required for this execution path.")
+
              model_with_tools = self.model.bind_tools(self.resolved_tools) if self.resolved_tools else self.model

              ai_message = await model_with_tools.ainvoke(langchain_prompt, config)
@@ -2576,6 +2645,173 @@ class LangGraphReactAgent(LangGraphHitLMixin, BaseLangGraphAgent):
          if current_thread_id:
              self._pii_handlers_by_thread.pop(current_thread_id, None)

+     # ==========================================================================
+     # Programmatic Tool Calling (PTC) Methods
+     # ==========================================================================
+
+     def add_mcp_server(self, mcp_config: dict[str, dict[str, Any]]) -> None:
+         """Add MCP servers and refresh PTC tool state if needed."""
+         super().add_mcp_server(mcp_config)
+
+         if not self._ptc_config or not self._ptc_config.enabled:
+             return
+
+         if self._ptc_tool is not None:
+             self._ptc_tool = None
+
+         self._ptc_tool_synced = False
+         logger.debug(f"Agent '{self.name}': PTC tool will resync after MCP changes")
+
+     def enable_ptc(self, config: PTCSandboxConfig | None = None) -> None:
+         """Enable Programmatic Tool Calling (PTC) for this agent.
+
+         PTC allows the LLM to execute Python code that calls MCP tools
+         programmatically inside a sandboxed environment. This is useful for
+         chaining multiple tool calls with local data processing.
+
+         The execute_ptc_code tool is automatically added to the agent's tools
+         after MCP servers are configured. If no MCP servers are configured,
+         the tool sync is deferred until servers are added.
+
+         Args:
+             config: Optional configuration for PTC sandbox execution.
+                 See PTCSandboxConfig for options like enabled flag and sandbox_timeout.
+                 If None is passed, a default config with enabled=True will be created.
+
+         Example:
+             agent.enable_ptc(PTCSandboxConfig(enabled=True))
+             agent.add_mcp_server({"yfinance": {...}})
+             # execute_ptc_code tool is now available
+
+         Note:
+             PTC can also be enabled via the constructor by passing
+             ptc_config=PTCSandboxConfig(enabled=True, ...).
+         """
+         # Lazy import to avoid circular dependencies
+         from aip_agents.ptc.executor import PTCSandboxConfig
+
+         self._ptc_config = config or PTCSandboxConfig()
+         self._ptc_config.enabled = True
+         self._ptc_tool_synced = False
+
+         logger.info(f"Agent '{self.name}': PTC enabled")
+
+         # Attempt to sync PTC tool if MCP client is available
+         self._sync_ptc_tool()
+
+     def _sync_ptc_tool(self) -> None:
+         """Build and register the execute_ptc_code tool when MCP servers are available.
+
+         This method is called after enable_ptc() and after MCP servers are added.
+         It creates the execute_ptc_code tool using the current MCP client
+         configuration and adds it to the agent's resolved tools.
+
+         The tool is only created once. Subsequent calls are no-ops if the tool
+         has already been synced.
+         """
+         if not self._ptc_config or not self._ptc_config.enabled:
+             return
+
+         if self._ptc_tool_synced:
+             return
+
+         # Check if we have MCP servers configured
+         if not self.mcp_config:
+             logger.debug(f"Agent '{self.name}': PTC tool sync deferred - no MCP servers configured")
+             return
+
+         if not self.mcp_client:
+             logger.debug(f"Agent '{self.name}': PTC tool sync deferred - no MCP client yet")
+             return
+
+         if not self.mcp_client.is_initialized:
+             logger.debug(f"Agent '{self.name}': PTC tool sync deferred - MCP client not initialized")
+             return
+
+         # Lazy import to avoid circular dependencies
+         from aip_agents.tools.execute_ptc_code import create_execute_ptc_code_tool
+
+         logger.info(f"Agent '{self.name}': Syncing PTC tool with MCP client")
+
+         # Create the execute_ptc_code tool with agent's tool configs
+         self._ptc_tool = create_execute_ptc_code_tool(
+             self.mcp_client, self._ptc_config, agent_tool_configs=self.tool_configs
+         )
+
+         # Rebuild graph to include PTC tool
+         self._rebuild_graph()
+
+         self._ptc_tool_synced = True
+         logger.info(f"Agent '{self.name}': PTC tool synced successfully")
+
+         # Sync PTC prompt guidance
+         self._sync_ptc_prompt()
+
+     def _sync_ptc_prompt(self) -> None:
+         """Sync PTC usage guidance into the agent instruction.
+
+         This method builds and injects a PTC usage block into the agent's
+         instruction when PTC is enabled. The prompt is refreshed when MCP
+         configuration changes (detected via hash).
+         """
+         if not self._ptc_config or not self._ptc_config.enabled:
+             return
+
+         if not self.mcp_client:
+             return
+
+         # Lazy import to avoid circular dependencies
+         from aip_agents.ptc.prompt_builder import build_ptc_prompt, compute_ptc_prompt_hash
+
+         # Get prompt config from PTC sandbox config
+         prompt_config = self._ptc_config.prompt if self._ptc_config else None
+
+         # Check if MCP config has changed
+         current_hash = compute_ptc_prompt_hash(self.mcp_client, config=prompt_config)
+         if current_hash == self._ptc_prompt_hash:
+             logger.debug(f"Agent '{self.name}': PTC prompt unchanged, skipping refresh")
+             return
+
+         # Build and inject the prompt
+         ptc_prompt = build_ptc_prompt(self.mcp_client, config=prompt_config)
+
+         # Rebuild instruction from original + PTC guidance
+         self.instruction = f"{self._original_instruction}\n\n{ptc_prompt}"
+         self._ptc_prompt_hash = current_hash
+
+         logger.info(f"Agent '{self.name}': PTC prompt guidance injected")
+
+     async def _register_mcp_tools(self) -> None:
+         """Override to sync PTC tool after MCP tools are registered.
+
+         This extends the base implementation to ensure the execute_ptc_code
+         tool is added after MCP servers are initialized.
+         """
+         await super()._register_mcp_tools()
+
+         # Sync PTC tool after MCP tools are registered
+         if self._ptc_config and self._ptc_config.enabled and not self._ptc_tool_synced:
+             self._sync_ptc_tool()
+
+     async def cleanup(self) -> None:
+         """Cleanup agent resources including PTC sandbox.
+
+         Extends base cleanup to also cleanup the PTC sandbox runtime if
+         execute_ptc_code tool was created.
+         """
+         # Cleanup PTC tool's sandbox runtime if present
+         if self._ptc_tool is not None:
+             try:
+                 cleanup_method = getattr(self._ptc_tool, "cleanup", None)
+                 if cleanup_method and callable(cleanup_method):
+                     await cleanup_method()
+                     logger.debug(f"Agent '{self.name}': PTC sandbox cleanup completed")
+             except Exception as e:
+                 logger.warning(f"Agent '{self.name}': Error during PTC sandbox cleanup: {e}")
+
+         # Call parent cleanup for MCP client
+         await super().cleanup()
+
      def _format_graph_output(self, final_state_result: dict[str, Any]) -> Any:
          """Convert final graph state to user-friendly output.

@@ -6,6 +6,7 @@ from aip_agents.guardrails.manager import GuardrailManager as GuardrailManager
  from aip_agents.middleware.base import AgentMiddleware as AgentMiddleware, ModelRequest as ModelRequest
  from aip_agents.middleware.manager import MiddlewareManager as MiddlewareManager
  from aip_agents.middleware.todolist import TodoList as TodoList, TodoListMiddleware as TodoListMiddleware
+ from aip_agents.ptc import PTCSandboxConfig as PTCSandboxConfig
  from aip_agents.schema.a2a import A2AStreamEventType as A2AStreamEventType
  from aip_agents.schema.hitl import ApprovalDecision as ApprovalDecision, HitlMetadata as HitlMetadata
  from aip_agents.schema.langgraph import ToolCallResult as ToolCallResult, ToolStorageParams as ToolStorageParams
@@ -86,7 +87,7 @@ class LangGraphReactAgent(LangGraphHitLMixin, BaseLangGraphAgent):
      """
      tool_output_manager: Incomplete
      step_limit_config: Incomplete
-     def __init__(self, name: str, instruction: str = ..., model: BaseChatModel | str | Any | None = None, tools: Sequence[BaseTool] | None = None, agents: Sequence[Any] | None = None, description: str | None = None, thread_id_key: str = 'thread_id', event_emitter: EventEmitter | None = None, tool_output_manager: ToolOutputManager | None = None, planning: bool = False, middlewares: Sequence[AgentMiddleware] | None = None, guardrail: GuardrailManager | None = None, step_limit_config: StepLimitConfig | None = None, **kwargs: Any) -> None:
+     def __init__(self, name: str, instruction: str = ..., model: BaseChatModel | str | Any | None = None, tools: Sequence[BaseTool] | None = None, agents: Sequence[Any] | None = None, description: str | None = None, thread_id_key: str = 'thread_id', event_emitter: EventEmitter | None = None, tool_output_manager: ToolOutputManager | None = None, planning: bool = False, middlewares: Sequence[AgentMiddleware] | None = None, guardrail: GuardrailManager | None = None, step_limit_config: StepLimitConfig | None = None, ptc_config: PTCSandboxConfig | None = None, **kwargs: Any) -> None:
          """Initialize the LangGraph ReAct Agent.

          Args:
@@ -113,6 +114,11 @@ class LangGraphReactAgent(LangGraphHitLMixin, BaseLangGraphAgent):
                  input/output filtering during agent execution.
              enable_pii: Optional toggle to enable PII handling for tool inputs and outputs.
              step_limit_config: Optional configuration for step limits and delegation depth.
+             ptc_config: Optional configuration for PTC sandbox execution. See PTCSandboxConfig
+                 for available options including enabled flag, sandbox timeout, and template settings.
+                 PTC is enabled when ptc_config is not None and ptc_config.enabled is True.
+                 When enabled, prompt guidance is automatically injected into the agent's instruction.
+                 PTC runs in a sandbox only; there is no in-process trusted PTC path.
              **kwargs: Additional keyword arguments passed to BaseLangGraphAgent.
          """
      def define_graph(self, graph_builder: StateGraph) -> CompiledStateGraph:
@@ -124,6 +130,39 @@ class LangGraphReactAgent(LangGraphHitLMixin, BaseLangGraphAgent):
          Returns:
              Compiled LangGraph ready for execution.
          """
+     def add_mcp_server(self, mcp_config: dict[str, dict[str, Any]]) -> None:
+         """Add MCP servers and refresh PTC tool state if needed."""
+     def enable_ptc(self, config: PTCSandboxConfig | None = None) -> None:
+         '''Enable Programmatic Tool Calling (PTC) for this agent.
+
+         PTC allows the LLM to execute Python code that calls MCP tools
+         programmatically inside a sandboxed environment. This is useful for
+         chaining multiple tool calls with local data processing.
+
+         The execute_ptc_code tool is automatically added to the agent\'s tools
+         after MCP servers are configured. If no MCP servers are configured,
+         the tool sync is deferred until servers are added.
+
+         Args:
+             config: Optional configuration for PTC sandbox execution.
+                 See PTCSandboxConfig for options like enabled flag and sandbox_timeout.
+                 If None is passed, a default config with enabled=True will be created.
+
+         Example:
+             agent.enable_ptc(PTCSandboxConfig(enabled=True))
+             agent.add_mcp_server({"yfinance": {...}})
+             # execute_ptc_code tool is now available
+
+         Note:
+             PTC can also be enabled via the constructor by passing
+             ptc_config=PTCSandboxConfig(enabled=True, ...).
+         '''
+     async def cleanup(self) -> None:
+         """Cleanup agent resources including PTC sandbox.
+
+         Extends base cleanup to also cleanup the PTC sandbox runtime if
+         execute_ptc_code tool was created.
+         """

  class LangGraphAgent(LangGraphReactAgent):
      """Alias for LangGraphReactAgent."""
@@ -8,10 +8,10 @@ This script demonstrates:

  Prerequisites:
      Start the server first:
-         uv run python -m aip_agents.examples.compare_streaming_server
+         poetry run python -m aip_agents.examples.compare_streaming_server

      Then run this client:
-         uv run python -m aip_agents.examples.compare_streaming_client
+         poetry run python -m aip_agents.examples.compare_streaming_client

  Authors:
      AI Agent Platform Team
@@ -6,7 +6,7 @@ This server provides an agent with:

  To run this server:
      cd libs/aip_agents
-     uv run python -m aip_agents.examples.compare_streaming_server
+     poetry run python -m aip_agents.examples.compare_streaming_server

  It will listen on http://localhost:18999 by default.

@@ -0,0 +1,51 @@
+ """Minimal PTC hello world example.
+
+ Required environment variables:
+ - OPENAI_API_KEY
+ - E2B_API_KEY
+ """
+
+ import asyncio
+
+ from langchain_openai import ChatOpenAI
+
+ from aip_agents.agent import LangGraphReactAgent
+ from aip_agents.ptc import PromptConfig, PTCSandboxConfig
+
+
+ async def main() -> None:
+     """Run a hello-world PTC flow."""
+     instruction = (
+         "You are a helpful assistant with access to execute_ptc_code. "
+         "Use execute_ptc_code to run Python and print output. "
+         "The tool returns JSON with ok/stdout/stderr/exit_code."
+     )
+
+     agent = LangGraphReactAgent(
+         name="ptc_hello_world",
+         instruction=instruction,
+         model=ChatOpenAI(model="gpt-5.2"),
+         ptc_config=PTCSandboxConfig(enabled=True, sandbox_timeout=180.0, prompt=PromptConfig(mode="index")),
+     )
+     agent.add_mcp_server(
+         {
+             "deepwiki": {
+                 "transport": "streamable-http",
+                 "url": "https://mcp.deepwiki.com/mcp",
+                 "headers": {},
+                 "timeout": 60.0,
+             }
+         }
+     )
+
+     try:
+         response = await agent.arun(
+             query="Use execute_ptc_code to print 'Hello, world!' and count the number of words in the output of deepwiki.read_wiki_structure('anthropics/claude-code')."
+         )
+         print("execute_ptc_code output:", response["output"])
+     finally:
+         await agent.cleanup()
+
+
+ if __name__ == "__main__":
+     asyncio.run(main())
@@ -0,0 +1,5 @@
+ from aip_agents.agent import LangGraphReactAgent as LangGraphReactAgent
+ from aip_agents.ptc import PTCSandboxConfig as PTCSandboxConfig, PromptConfig as PromptConfig
+
+ async def main() -> None:
+     """Run a hello-world PTC flow."""
@@ -39,6 +39,15 @@ async def main():
              print(chunk["content"], end="", flush=True)
          if chunk.get("metadata"):
              print(f"\nMetadata: {chunk['metadata']}", end="\n\n", flush=True)
+         tool_info = chunk.get("metadata", {}).get("tool_info") if isinstance(chunk.get("metadata"), dict) else None
+         if isinstance(tool_info, dict):
+             for tool_call in tool_info.get("tool_calls", []):
+                 if tool_call.get("name") == "data_visualizer":
+                     data_source = tool_call.get("args", {}).get("data_source")
+                     if not (isinstance(data_source, str) and data_source.startswith("$tool_output.")):
+                         raise RuntimeError(
+                             "Tool output sharing failed: expected data_source to reference $tool_output.<call_id>."
+                         )
      print("\n")

@@ -1,10 +1,10 @@
  """A2A client for the planning LangGraphReactAgent.

  Run the planning server first:
-     uv run python -m aip_agents.examples.todolist_planning_a2a_langgraph_server
+     poetry run python -m aip_agents.examples.todolist_planning_a2a_langgraph_server

  Then run this client:
-     uv run python -m aip_agents.examples.todolist_planning_a2a_langchain_client
+     poetry run python -m aip_agents.examples.todolist_planning_a2a_langchain_client

  You should see streaming output, including when write_todos_tool is called.
  """
@@ -1,7 +1,7 @@
  """A2A server exposing a LangGraphReactAgent with planning (TodoListMiddleware).

  Run:
-     uv run python -m aip_agents.examples.todolist_planning_a2a_langgraph_server \
+     poetry run python -m aip_agents.examples.todolist_planning_a2a_langgraph_server \
          --host localhost --port 8002

  Then connect with the matching A2A client to observe write_todos_tool calls.
@@ -39,7 +39,7 @@ class GuardrailEngine(Protocol):
          Returns:
              GuardrailResult indicating if content is safe
          """
-         ...
+         ...  # pragma: no cover

      @abstractmethod
      async def check_output(self, content: str) -> GuardrailResult:
@@ -51,12 +51,12 @@ class GuardrailEngine(Protocol):
          Returns:
              GuardrailResult indicating if content is safe
          """
-         ...
+         ...  # pragma: no cover

      @abstractmethod
      def model_dump(self) -> dict:
          """Serialize engine configuration into a JSON-compatible dictionary."""
-         ...
+         ...  # pragma: no cover


  class BaseGuardrailEngine(ABC):
@@ -77,14 +77,14 @@ class BaseGuardrailEngine(ABC):
      @abstractmethod
      async def check_input(self, content: str) -> GuardrailResult:
          """Check user input content for safety violations."""
-         ...
+         ...  # pragma: no cover

      @abstractmethod
      async def check_output(self, content: str) -> GuardrailResult:
          """Check AI output content for safety violations."""
-         ...
+         ...  # pragma: no cover

      @abstractmethod
      def model_dump(self) -> dict:
          """Serialize engine configuration into a JSON-compatible dictionary."""
-         ...
+         ...  # pragma: no cover