naas-abi-core 1.4.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (124)
  1. assets/favicon.ico +0 -0
  2. assets/logo.png +0 -0
  3. naas_abi_core/__init__.py +1 -0
  4. naas_abi_core/apps/api/api.py +245 -0
  5. naas_abi_core/apps/api/api_test.py +281 -0
  6. naas_abi_core/apps/api/openapi_doc.py +144 -0
  7. naas_abi_core/apps/mcp/Dockerfile.mcp +35 -0
  8. naas_abi_core/apps/mcp/mcp_server.py +243 -0
  9. naas_abi_core/apps/mcp/mcp_server_test.py +163 -0
  10. naas_abi_core/apps/terminal_agent/main.py +555 -0
  11. naas_abi_core/apps/terminal_agent/terminal_style.py +175 -0
  12. naas_abi_core/engine/Engine.py +87 -0
  13. naas_abi_core/engine/EngineProxy.py +109 -0
  14. naas_abi_core/engine/Engine_test.py +6 -0
  15. naas_abi_core/engine/IEngine.py +91 -0
  16. naas_abi_core/engine/conftest.py +45 -0
  17. naas_abi_core/engine/engine_configuration/EngineConfiguration.py +216 -0
  18. naas_abi_core/engine/engine_configuration/EngineConfiguration_Deploy.py +7 -0
  19. naas_abi_core/engine/engine_configuration/EngineConfiguration_GenericLoader.py +49 -0
  20. naas_abi_core/engine/engine_configuration/EngineConfiguration_ObjectStorageService.py +159 -0
  21. naas_abi_core/engine/engine_configuration/EngineConfiguration_ObjectStorageService_test.py +26 -0
  22. naas_abi_core/engine/engine_configuration/EngineConfiguration_SecretService.py +138 -0
  23. naas_abi_core/engine/engine_configuration/EngineConfiguration_SecretService_test.py +74 -0
  24. naas_abi_core/engine/engine_configuration/EngineConfiguration_TripleStoreService.py +224 -0
  25. naas_abi_core/engine/engine_configuration/EngineConfiguration_TripleStoreService_test.py +109 -0
  26. naas_abi_core/engine/engine_configuration/EngineConfiguration_VectorStoreService.py +76 -0
  27. naas_abi_core/engine/engine_configuration/EngineConfiguration_VectorStoreService_test.py +33 -0
  28. naas_abi_core/engine/engine_configuration/EngineConfiguration_test.py +9 -0
  29. naas_abi_core/engine/engine_configuration/utils/PydanticModelValidator.py +15 -0
  30. naas_abi_core/engine/engine_loaders/EngineModuleLoader.py +302 -0
  31. naas_abi_core/engine/engine_loaders/EngineOntologyLoader.py +16 -0
  32. naas_abi_core/engine/engine_loaders/EngineServiceLoader.py +47 -0
  33. naas_abi_core/integration/__init__.py +7 -0
  34. naas_abi_core/integration/integration.py +28 -0
  35. naas_abi_core/models/Model.py +198 -0
  36. naas_abi_core/models/OpenRouter.py +18 -0
  37. naas_abi_core/models/OpenRouter_test.py +36 -0
  38. naas_abi_core/module/Module.py +252 -0
  39. naas_abi_core/module/ModuleAgentLoader.py +50 -0
  40. naas_abi_core/module/ModuleUtils.py +20 -0
  41. naas_abi_core/modules/templatablesparqlquery/README.md +196 -0
  42. naas_abi_core/modules/templatablesparqlquery/__init__.py +39 -0
  43. naas_abi_core/modules/templatablesparqlquery/ontologies/TemplatableSparqlQueryOntology.ttl +116 -0
  44. naas_abi_core/modules/templatablesparqlquery/workflows/GenericWorkflow.py +48 -0
  45. naas_abi_core/modules/templatablesparqlquery/workflows/TemplatableSparqlQueryLoader.py +192 -0
  46. naas_abi_core/pipeline/__init__.py +6 -0
  47. naas_abi_core/pipeline/pipeline.py +70 -0
  48. naas_abi_core/services/__init__.py +0 -0
  49. naas_abi_core/services/agent/Agent.py +1619 -0
  50. naas_abi_core/services/agent/AgentMemory_test.py +28 -0
  51. naas_abi_core/services/agent/Agent_test.py +214 -0
  52. naas_abi_core/services/agent/IntentAgent.py +1179 -0
  53. naas_abi_core/services/agent/IntentAgent_test.py +139 -0
  54. naas_abi_core/services/agent/beta/Embeddings.py +181 -0
  55. naas_abi_core/services/agent/beta/IntentMapper.py +120 -0
  56. naas_abi_core/services/agent/beta/LocalModel.py +88 -0
  57. naas_abi_core/services/agent/beta/VectorStore.py +89 -0
  58. naas_abi_core/services/agent/test_agent_memory.py +278 -0
  59. naas_abi_core/services/agent/test_postgres_integration.py +145 -0
  60. naas_abi_core/services/cache/CacheFactory.py +31 -0
  61. naas_abi_core/services/cache/CachePort.py +63 -0
  62. naas_abi_core/services/cache/CacheService.py +246 -0
  63. naas_abi_core/services/cache/CacheService_test.py +85 -0
  64. naas_abi_core/services/cache/adapters/secondary/CacheFSAdapter.py +39 -0
  65. naas_abi_core/services/object_storage/ObjectStorageFactory.py +57 -0
  66. naas_abi_core/services/object_storage/ObjectStoragePort.py +47 -0
  67. naas_abi_core/services/object_storage/ObjectStorageService.py +41 -0
  68. naas_abi_core/services/object_storage/adapters/secondary/ObjectStorageSecondaryAdapterFS.py +52 -0
  69. naas_abi_core/services/object_storage/adapters/secondary/ObjectStorageSecondaryAdapterNaas.py +131 -0
  70. naas_abi_core/services/object_storage/adapters/secondary/ObjectStorageSecondaryAdapterS3.py +171 -0
  71. naas_abi_core/services/ontology/OntologyPorts.py +36 -0
  72. naas_abi_core/services/ontology/OntologyService.py +17 -0
  73. naas_abi_core/services/ontology/adaptors/secondary/OntologyService_SecondaryAdaptor_NERPort.py +37 -0
  74. naas_abi_core/services/secret/Secret.py +138 -0
  75. naas_abi_core/services/secret/SecretPorts.py +45 -0
  76. naas_abi_core/services/secret/Secret_test.py +65 -0
  77. naas_abi_core/services/secret/adaptors/secondary/Base64Secret.py +57 -0
  78. naas_abi_core/services/secret/adaptors/secondary/Base64Secret_test.py +39 -0
  79. naas_abi_core/services/secret/adaptors/secondary/NaasSecret.py +88 -0
  80. naas_abi_core/services/secret/adaptors/secondary/NaasSecret_test.py +25 -0
  81. naas_abi_core/services/secret/adaptors/secondary/dotenv_secret_secondaryadaptor.py +29 -0
  82. naas_abi_core/services/triple_store/TripleStoreFactory.py +116 -0
  83. naas_abi_core/services/triple_store/TripleStorePorts.py +223 -0
  84. naas_abi_core/services/triple_store/TripleStoreService.py +419 -0
  85. naas_abi_core/services/triple_store/adaptors/secondary/AWSNeptune.py +1300 -0
  86. naas_abi_core/services/triple_store/adaptors/secondary/AWSNeptune_test.py +284 -0
  87. naas_abi_core/services/triple_store/adaptors/secondary/Oxigraph.py +597 -0
  88. naas_abi_core/services/triple_store/adaptors/secondary/Oxigraph_test.py +1474 -0
  89. naas_abi_core/services/triple_store/adaptors/secondary/TripleStoreService__SecondaryAdaptor__Filesystem.py +223 -0
  90. naas_abi_core/services/triple_store/adaptors/secondary/TripleStoreService__SecondaryAdaptor__ObjectStorage.py +234 -0
  91. naas_abi_core/services/triple_store/adaptors/secondary/base/TripleStoreService__SecondaryAdaptor__FileBase.py +18 -0
  92. naas_abi_core/services/vector_store/IVectorStorePort.py +101 -0
  93. naas_abi_core/services/vector_store/IVectorStorePort_test.py +189 -0
  94. naas_abi_core/services/vector_store/VectorStoreFactory.py +47 -0
  95. naas_abi_core/services/vector_store/VectorStoreService.py +171 -0
  96. naas_abi_core/services/vector_store/VectorStoreService_test.py +185 -0
  97. naas_abi_core/services/vector_store/__init__.py +13 -0
  98. naas_abi_core/services/vector_store/adapters/QdrantAdapter.py +251 -0
  99. naas_abi_core/services/vector_store/adapters/QdrantAdapter_test.py +57 -0
  100. naas_abi_core/tests/test_services_imports.py +69 -0
  101. naas_abi_core/utils/Expose.py +55 -0
  102. naas_abi_core/utils/Graph.py +182 -0
  103. naas_abi_core/utils/JSON.py +49 -0
  104. naas_abi_core/utils/LazyLoader.py +44 -0
  105. naas_abi_core/utils/Logger.py +12 -0
  106. naas_abi_core/utils/OntologyReasoner.py +141 -0
  107. naas_abi_core/utils/OntologyYaml.py +681 -0
  108. naas_abi_core/utils/SPARQL.py +256 -0
  109. naas_abi_core/utils/Storage.py +33 -0
  110. naas_abi_core/utils/StorageUtils.py +398 -0
  111. naas_abi_core/utils/String.py +52 -0
  112. naas_abi_core/utils/Workers.py +114 -0
  113. naas_abi_core/utils/__init__.py +0 -0
  114. naas_abi_core/utils/onto2py/README.md +0 -0
  115. naas_abi_core/utils/onto2py/__init__.py +10 -0
  116. naas_abi_core/utils/onto2py/__main__.py +29 -0
  117. naas_abi_core/utils/onto2py/onto2py.py +611 -0
  118. naas_abi_core/utils/onto2py/tests/ttl2py_test.py +271 -0
  119. naas_abi_core/workflow/__init__.py +5 -0
  120. naas_abi_core/workflow/workflow.py +48 -0
  121. naas_abi_core-1.4.1.dist-info/METADATA +630 -0
  122. naas_abi_core-1.4.1.dist-info/RECORD +124 -0
  123. naas_abi_core-1.4.1.dist-info/WHEEL +4 -0
  124. naas_abi_core-1.4.1.dist-info/entry_points.txt +2 -0
naas_abi_core/services/agent/Agent.py
@@ -0,0 +1,1619 @@
+ from __future__ import annotations
+
+ # Standard library imports for type hints
+ import os
+ import re
+ import uuid
+
+ # Dataclass imports for configuration
+ from dataclasses import dataclass, field
+ from enum import Enum
+ from queue import Empty, Queue
+ from typing import (
+     TYPE_CHECKING,
+     Annotated,
+     Any,
+     Callable,
+     Dict,
+     Generator,
+     Literal,
+     Optional,
+     Sequence,
+     Union,
+     cast,
+ )
+
+ import pydash as pd
+ from langchain_core.language_models import BaseChatModel
+ from langchain_core.tools import BaseTool, StructuredTool, Tool, tool
+ from langgraph.prebuilt import InjectedState
+ from naas_abi_core.models.Model import ChatModel
+ from naas_abi_core.utils.Expose import Expose
+ from naas_abi_core.utils.Logger import logger
+
+ # Pydantic imports for schema validation (keep - it's already loaded by other modules)
+ from pydantic import BaseModel, Field
+
+ ###
+
+
+ # Only import heavy modules for type checking
+ if TYPE_CHECKING:
+     from fastapi import APIRouter
+     from langchain_core.runnables import Runnable
+     from langgraph.checkpoint.base import BaseCheckpointSaver
+     from langgraph.graph.state import CompiledStateGraph
+
+ from langchain_core.messages import (
+     AIMessage,
+     AnyMessage,
+     BaseMessage,
+     HumanMessage,
+     SystemMessage,
+     ToolCall,
+     ToolMessage,
+ )
+ from langgraph.checkpoint.memory import MemorySaver
+ from langgraph.graph import START, StateGraph
+ from langgraph.graph.message import MessagesState
+ from langgraph.types import Command
+ from sse_starlette.sse import EventSourceResponse
+
+
+ def create_checkpointer() -> BaseCheckpointSaver:
+     """Create a checkpointer based on environment configuration.
+
+     Returns a PostgreSQL-backed checkpointer if POSTGRES_URL is set,
+     otherwise returns an in-memory checkpointer.
+     """
+     postgres_url = os.getenv("POSTGRES_URL")
+
+     if postgres_url:
+         try:
+             import time
+
+             from langgraph.checkpoint.postgres import PostgresSaver
+             from psycopg import Connection
+             from psycopg.rows import dict_row
+
+             logger.debug(
+                 f"Using PostgreSQL checkpointer for persistent memory: {postgres_url}"
+             )
+
+             # Try connection with retries (PostgreSQL might still be starting)
+             max_retries = 3
+             for attempt in range(max_retries):
+                 try:
+                     # Create connection with proper configuration (matching from_conn_string)
+                     conn = Connection.connect(
+                         postgres_url,
+                         autocommit=True,
+                         prepare_threshold=0,
+                         row_factory=dict_row,
+                     )
+                     checkpointer = PostgresSaver(conn)
+
+                     # Setup tables if they don't exist
+                     checkpointer.setup()
+                     logger.debug("PostgreSQL checkpointer tables initialized")
+
+                     return checkpointer
+
+                 except Exception as conn_error:
+                     if attempt < max_retries - 1:
+                         logger.warning(
+                             f"PostgreSQL connection attempt {attempt + 1} failed, retrying in 2 seconds..."
+                         )
+                         time.sleep(2)
+                     else:
+                         raise conn_error
+
+         except ImportError:
+             logger.error(
+                 "PostgreSQL checkpointer requested but langgraph.checkpoint.postgres not available. Falling back to in-memory."
+             )
+         except Exception as e:
+             # Provide more helpful error messages
+             error_msg = str(e)
+             if "nodename nor servname provided" in error_msg:
+                 logger.error(
+                     f"PostgreSQL connection failed - cannot resolve hostname. Check if PostgreSQL is running and hostname is correct in POSTGRES_URL: {postgres_url}"
+                 )
+                 logger.error(
+                     "Hint: If running outside Docker, use 'localhost' instead of 'postgres' in POSTGRES_URL"
+                 )
+             else:
+                 logger.error(f"Failed to initialize PostgreSQL checkpointer: {e}")
+             logger.error("Falling back to in-memory checkpointer")
+
+         # Fallback to in-memory checkpointer
+         return MemorySaver()
+     else:
+         logger.debug(
+             "Using in-memory checkpointer (set POSTGRES_URL for persistent memory)"
+         )
+         return MemorySaver()
+
+
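A minimal usage sketch for the factory above (the DSN is hypothetical; create_checkpointer falls back to MemorySaver when POSTGRES_URL is unset, the PostgreSQL extra is missing, or the connection cannot be established):

    import os

    os.environ["POSTGRES_URL"] = "postgresql://user:pass@localhost:5432/agent_memory"
    checkpointer = create_checkpointer()  # PostgresSaver on success, MemorySaver otherwise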
+ class AgentSharedState:
+     _thread_id: str
+     _current_active_agent: Optional[str]
+     _supervisor_agent: Optional[str]
+     _requesting_help: bool
+
+     def __init__(
+         self,
+         thread_id: str = "1",
+         current_active_agent: Optional[str] = None,
+         supervisor_agent: Optional[str] = None,
+     ):
+         assert isinstance(thread_id, str)
+
+         self._thread_id = thread_id
+         self._current_active_agent = current_active_agent
+         self._supervisor_agent = supervisor_agent
+         self._requesting_help = False
+
+     @property
+     def thread_id(self) -> str:
+         return self._thread_id
+
+     def set_thread_id(self, thread_id: str):
+         self._thread_id = thread_id
+
+     @property
+     def current_active_agent(self) -> Optional[str]:
+         return self._current_active_agent
+
+     def set_current_active_agent(self, agent_name: Optional[str]):
+         self._current_active_agent = agent_name
+
+     @property
+     def supervisor_agent(self) -> Optional[str]:
+         return self._supervisor_agent
+
+     def set_supervisor_agent(self, agent_name: Optional[str]):
+         self._supervisor_agent = agent_name
+
+     @property
+     def requesting_help(self) -> bool:
+         return self._requesting_help
+
+     def set_requesting_help(self, requesting_help: bool):
+         self._requesting_help = requesting_help
+
+
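AgentSharedState is a plain holder that several agents can share so they agree on the conversation thread and on who is active. A small sketch (the agent names are hypothetical):

    shared = AgentSharedState(thread_id="42", supervisor_agent="supervisor")
    shared.set_current_active_agent("support_agent")
    assert shared.thread_id == "42" and shared.requesting_help is False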
+ @dataclass
+ class Event:
+     payload: Any
+
+
+ @dataclass
+ class ToolUsageEvent(Event):
+     pass
+
+
+ @dataclass
+ class ToolResponseEvent(Event):
+     pass
+
+
+ @dataclass
+ class AIMessageEvent(Event):
+     agent_name: str
+
+
+ @dataclass
+ class FinalStateEvent(Event):
+     pass
+
+
+ @dataclass
+ class AgentConfiguration:
+     on_tool_usage: Callable[[AnyMessage], None] = field(
+         default_factory=lambda: lambda _: None
+     )
+     on_tool_response: Callable[[AnyMessage], None] = field(
+         default_factory=lambda: lambda _: None
+     )
+     on_ai_message: Callable[[AnyMessage, str], None] = field(
+         default_factory=lambda: lambda _, __: None
+     )
+     system_prompt: str = field(
+         default="You are a helpful assistant. If a tool you used did not return the result you wanted, look for another tool that might be able to help you. If you don't find a suitable tool, just output 'I DONT KNOW'"
+     )
+
+
+ class CompletionQuery(BaseModel):
+     prompt: str = Field(..., description="The prompt to send to the agent")
+     thread_id: str | int = Field(
+         ..., description="The thread ID to use for the conversation"
+     )
+
+
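The configuration dataclass wires the observability callbacks and the system prompt. A sketch of a custom configuration (the print handlers are placeholders):

    config = AgentConfiguration(
        on_tool_usage=lambda message: print("tool call:", message),
        on_tool_response=lambda message: print("tool result:", message),
        on_ai_message=lambda message, agent_name: print(f"[{agent_name}]", message),
        system_prompt="You are a terse assistant.",
    )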
+ class Agent(Expose):
+     """An Agent class that orchestrates interactions between a language model and tools.
+
+     Performance Features:
+     • Lazy Initialization: Efficient resource utilization through lazy loading
+     • Connection Pooling: Optimized database connections for memory backends
+     • Parallel Execution: Concurrent tool execution where dependencies allow
+     • Caching: Intelligent caching of tool results and model responses
+     • Resource Management: Proper cleanup and resource management
+
+     Attributes:
+         _name (str): Unique identifier for the agent
+         _description (str): Human-readable description of the agent's purpose
+         _system_prompt (str): System prompt that defines the agent's behavior
+         _chat_model (BaseChatModel): The underlying language model
+         _chat_model_with_tools (Runnable): Language model configured with available tools
+         _tools (list[Union[Tool, Agent]]): Original list of provided tools and agents
+         _structured_tools (list[Union[Tool, BaseTool]]): Processed and validated tools
+         _tools_by_name (dict[str, Union[Tool, BaseTool]]): Tool lookup dictionary
+         _native_tools (list[dict]): Native tools compatible with the language model
+         _agents (list[Agent]): List of sub-agents for delegation
+         _checkpointer (BaseCheckpointSaver): Memory backend for conversation persistence
+         _state (AgentSharedState): Shared state management for conversation threads
+         graph (CompiledStateGraph): Compiled workflow graph for conversation execution
+         _configuration (AgentConfiguration): Configuration settings for agent behavior
+         _event_queue (Queue): Event queue for real-time event streaming
+         _chat_model_output_version (str|None): Version identifier for model output format
+     """
+
+     _name: str
+     _description: str
+     _system_prompt: str
+
+     _chat_model: BaseChatModel
+     _chat_model_with_tools: Runnable[
+         Any
+         | str
+         | Sequence[BaseMessage | list[str] | tuple[str, str] | str | dict[str, Any]],
+         BaseMessage,
+     ]
+     _tools: list[Union[Tool, BaseTool, "Agent"]]
+     _original_tools: list[Union[Tool, BaseTool, "Agent"]]
+     _tools_by_name: dict[str, Union[Tool, BaseTool]]
+     _native_tools: list[dict]
+
+     # An agent can have other agents.
+     # It will be responsible for loading them as tools.
+     _agents: list["Agent"] = []
+
+     _checkpointer: BaseCheckpointSaver
+     _state: AgentSharedState
+
+     graph: CompiledStateGraph
+     _workflow: StateGraph
+     _configuration: AgentConfiguration
+
+     _on_tool_usage: Callable[[AnyMessage], None]
+     _on_tool_response: Callable[[AnyMessage], None]
+     _on_ai_message: Callable[[AnyMessage, str], None]
+
+     # Event queue used to stream tool usage and responses.
+     _event_queue: Queue
+
+     _chat_model_output_version: Union[str, None] = None
+
+     @classmethod
+     def New(
+         cls,
+         agent_shared_state: Optional[AgentSharedState] = None,
+         agent_configuration: Optional[AgentConfiguration] = None,
+     ) -> "Agent":
+         """Create a new instance of the agent.
+
+         Args:
+             agent_shared_state: Optional[AgentSharedState]: The shared state of the agent.
+             agent_configuration: Optional[AgentConfiguration]: The configuration of the agent.
+
+         Returns:
+             Agent: A new instance of the agent.
+
+         Raises:
+             NotImplementedError: Must be implemented by subclasses.
+         """
+         raise NotImplementedError("This method is not implemented")
+
+     def __init__(
+         self,
+         name: str,
+         description: str,
+         chat_model: BaseChatModel | ChatModel,
+         tools: list[Union[Tool, BaseTool, "Agent"]] = [],
+         agents: list["Agent"] = [],
+         memory: BaseCheckpointSaver | None = None,
+         state: AgentSharedState = AgentSharedState(),
+         configuration: AgentConfiguration = AgentConfiguration(),
+         event_queue: Queue | None = None,
+         native_tools: list[dict] = [],
+     ):
+         """Initialize a new Agent instance.
+
+         Args:
+             chat_model (BaseChatModel): The language model to use for chat interactions.
+                 Should support tool binding.
+             tools (list[Tool]): List of tools to make available to the agent.
+             memory (BaseCheckpointSaver, optional): Component to save conversation state.
+                 If None, will use PostgreSQL if POSTGRES_URL env var is set, otherwise in-memory.
+         """
+         logger.debug(f"Initializing agent: {name}")
+         self._name = name
+         self._description = description
+         self._system_prompt = configuration.system_prompt
+         self._state = state
+         self._original_tools = tools
+         self._original_agents = agents
+
+         # We set the supervisor agent and current active agent before the default tools are injected.
+         if self._state.supervisor_agent is not None:
+             self._state.set_supervisor_agent(self._state.supervisor_agent)
+             logger.debug(f"Supervisor agent: {self._state.supervisor_agent}")
+
+         agent_names = [a.name for a in self._original_agents] + [name]
+         if (
+             self._state.current_active_agent is not None
+             and self._state.current_active_agent in agent_names
+         ):
+             self._state.set_current_active_agent(self._state.current_active_agent)
+             logger.debug(f"Current active agent: {self._state.current_active_agent}")
+
+         # We inject default tools (re-assignment rather than +=, so the shared
+         # mutable default argument is not mutated across instances).
+         tools = tools + self.default_tools()
+
+         # We store the full tool list (defaults included); the original list of
+         # provided tools is kept in _original_tools, which is useful for duplication.
+         self._tools = tools
+         self._native_tools = native_tools
+
+         # Assertions
+         assert isinstance(name, str)
+         assert isinstance(description, str)
+         assert isinstance(chat_model, BaseChatModel | ChatModel)
+
+         # We assert agents
+         for agent in agents:
+             assert isinstance(agent, Agent)
+
+         # We store the provided tools in _structured_tools because we will need to know
+         # which ones are provided by the user and which ones are agents.
+         # This is needed when we duplicate the agent.
+         _structured_tools, _agents = self.prepare_tools(
+             cast(list[Union[Tool, BaseTool, "Agent"]], tools), agents
+         )
+         self._structured_tools = _structured_tools
+         self._agents = _agents
+
+         # We assert that the tools that are provided are valid.
+         for t in self._structured_tools:
+             assert isinstance(t, StructuredTool)
+             assert hasattr(t, "name")
+             assert hasattr(t, "description")
+             assert hasattr(t, "func")
+             assert hasattr(t, "args_schema")
+
+         self._tools_by_name: dict[str, Union[Tool, BaseTool]] = {
+             tool.name: tool for tool in self._structured_tools
+         }
+
+         base_chat_model: BaseChatModel = (
+             chat_model if isinstance(chat_model, BaseChatModel) else chat_model.model
+         )
+         assert isinstance(base_chat_model, BaseChatModel)
+
+         self._chat_model = base_chat_model
+         if hasattr(base_chat_model, "output_version"):
+             self._chat_model_output_version = base_chat_model.output_version
+
+         self._chat_model_with_tools = base_chat_model
+         if self._tools or self._native_tools:
+             tools_to_bind: list[Union[Tool, BaseTool, Dict]] = []
+             tools_to_bind.extend(self._structured_tools)
+             tools_to_bind.extend(self._native_tools)
+
+             # Test if the chat model can bind tools by trying with a default tool first
+             if self._can_bind_tools(base_chat_model):
+                 self._chat_model_with_tools = base_chat_model.bind_tools(tools_to_bind)
+             else:
+                 logger.warning(
+                     f"Chat model {type(base_chat_model).__name__} does not support tool calling. Tools will not be available for agent '{self._name}'."
+                 )
+                 # Keep the original model without tools
+                 self._chat_model_with_tools = base_chat_model
+
+         # Use provided memory or create based on environment
+         if memory is None:
+             self._checkpointer = create_checkpointer()
+         else:
+             self._checkpointer = memory
+
+         # Randomize the thread_id to prevent the same thread_id from being used by multiple agents.
+         if os.getenv("ENV") == "dev":
+             self._state.set_thread_id(str(uuid.uuid4()))
+
+         # We set the configuration.
+         self._configuration = configuration
+         self._on_tool_usage = configuration.on_tool_usage
+         self._on_tool_response = configuration.on_tool_response
+         self._on_ai_message = configuration.on_ai_message
+
+         # Initialize the event queue.
+         if event_queue is None:
+             self._event_queue = Queue()
+         else:
+             self._event_queue = event_queue
+
+         # We build the graph.
+         self.build_graph()
+
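A minimal construction sketch. It assumes langchain_openai is installed and an API key is configured; this module itself does not import it, so treat the model choice as illustrative:

    from langchain_openai import ChatOpenAI

    agent = Agent(
        name="demo_agent",
        description="A minimal demo agent.",
        chat_model=ChatOpenAI(model="gpt-4o-mini"),
        configuration=AgentConfiguration(system_prompt="You are a helpful assistant."),
    )  # memory defaults to create_checkpointer(); the built-in default tools are added automatically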
+     def _can_bind_tools(self, chat_model: BaseChatModel) -> bool:
+         """Test if the chat model can bind tools by attempting to bind the get_time_date default tool.
+
+         Args:
+             chat_model (BaseChatModel): The chat model to test
+
+         Returns:
+             bool: True if the model can bind tools, False otherwise
+         """
+         try:
+             # Create the get_time_date tool that's used in default_tools()
+             @tool(return_direct=True)
+             def get_time_date(timezone: str = "Europe/Paris") -> str:
+                 """Get the current time and date."""
+                 from datetime import datetime
+                 from zoneinfo import ZoneInfo
+
+                 return datetime.now(ZoneInfo(timezone)).strftime("%H:%M:%S %Y-%m-%d")
+
+             # Try to bind this single tool to test if the model supports tool binding
+             chat_model.bind_tools([get_time_date])
+
+             # If we get here without an exception, the model supports tool binding
+             # logger.debug(f"Chat model {type(chat_model).__name__} supports tool calling.")
+             return True
+
+         except Exception as e:
+             # If binding tools raises an exception, the model doesn't support tools
+             logger.debug(
+                 f"Chat model {type(chat_model).__name__} does not support tool calling: {e}"
+             )
+             return False
+
+     def default_tools(self) -> list[Tool | BaseTool]:
+         @tool(return_direct=True)
+         def request_help(reason: str):
+             """
+             Request help from the supervisor agent when you (the LLM) are uncertain about the next step or do not have the required capability to fulfill the user's request.
+
+             Use this tool if:
+             - You are unsure how to proceed.
+             - You lack the necessary knowledge or ability to complete the task.
+             - The user's request is outside your capabilities or unclear.
+
+             Args:
+                 reason (str): A brief explanation of why you are requesting help (e.g., "I am uncertain about the next step", "I do not have the required capability", "The user's request is unclear").
+
+             The supervisor agent will review your reason and provide assistance or take over the conversation.
+             """
+             logger.debug(f"'{self.name}' is requesting help from the supervisor agent")
+             return "Requesting help from the supervisor agent."
+
+         @tool(return_direct=False)
+         def get_time_date(timezone: str = "Europe/Paris") -> str:
+             """Returns the current date and time for a given timezone."""
+             from datetime import datetime
+             from zoneinfo import ZoneInfo
+
+             return datetime.now(ZoneInfo(timezone)).strftime("%H:%M:%S %Y-%m-%d")
+
+         @tool(return_direct=True)
+         def get_current_active_agent() -> str:
+             """Returns the current active agent."""
+             return "The current active agent is: " + (
+                 self._state.current_active_agent or self.name
+             )
+
+         @tool(return_direct=True)
+         def get_supervisor_agent() -> str:
+             """Returns the supervisor agent."""
+             if self._state.supervisor_agent is None:
+                 return "I don't have a supervisor agent."
+             return "The supervisor agent is: " + self._state.supervisor_agent
+
+         @tool(return_direct=True)
+         def list_tools_available() -> str:
+             """Displays a formatted list of all available tools."""
+             if (
+                 not hasattr(self, "_structured_tools")
+                 or len(self._structured_tools) == 0
+             ):
+                 return "I don't have any tools available to help you at the moment."
+
+             tools_text = "Here are the tools I can use to help you:\n\n"
+             for t in self._structured_tools:
+                 if not t.name.startswith("transfer_to"):
+                     tools_text += f"- `{t.name}`: {t.description.splitlines()[0]}\n"
+             return tools_text.rstrip()
+
+         @tool(return_direct=True)
+         def list_subagents_available() -> str:
+             """Displays a formatted list of all available sub-agents."""
+             if not hasattr(self, "_agents") or len(self._agents) == 0:
+                 return "I don't have any sub-agents that can assist me at the moment."
+
+             agents_text = "I can collaborate with these sub-agents:\n"
+             for agent in self._agents:
+                 agents_text += f"- `{agent.name}`: {agent.description}\n"
+             return agents_text.rstrip()
+
+         @tool(return_direct=True)
+         def list_intents_available() -> str:
+             """Displays a formatted list of all available intents."""
+             if not hasattr(self, "_intents") or len(self._intents) == 0:
+                 return "I haven't been configured with any specific intents yet."
+
+             from naas_abi_core.services.agent.IntentAgent import (
+                 Intent,
+                 IntentScope,
+                 IntentType,
+             )
+
+             # Group intents by scope and type
+             intents_by_scope: dict[
+                 Optional[IntentScope], dict[IntentType, list[Intent]]
+             ] = {}
+             for intent in self._intents:
+                 if intent.intent_scope not in intents_by_scope:
+                     intents_by_scope[intent.intent_scope] = {}
+                 if intent.intent_type not in intents_by_scope[intent.intent_scope]:
+                     intents_by_scope[intent.intent_scope][intent.intent_type] = []
+                 intents_by_scope[intent.intent_scope][intent.intent_type].append(intent)
+
+             intents_text = "Here are all the intents I'm configured with:\n\n"
+             for scope, types_dict in intents_by_scope.items():
+                 intents_text += f"### Intents for {str(scope)}\n\n"
+                 for intent_type, intents in types_dict.items():
+                     intents_text += f"#### {str(intent_type)}\n\n"
+                     intents_text += "| Intent | Target |\n"
+                     intents_text += "|--------|--------|\n"
+                     for intent in intents:
+                         # RAW intents render their target without backticks.
+                         if intent.intent_type == IntentType.RAW:
+                             intents_text += (
+                                 f"| {intent.intent_value} | {intent.intent_target} |\n"
+                             )
+                         else:
+                             intents_text += f"| {intent.intent_value} | `{intent.intent_target}` |\n"
+                     intents_text += "\n"
+             return intents_text.rstrip()
+
+         @tool(return_direct=False)
+         def read_makefile() -> str:
+             """Read the Makefile and return the content."""
+             try:
+                 with open("Makefile", "r") as f:
+                     makefile_content = f.read()
+
+                 return "Here are the make commands available:\n\n" + makefile_content
+
+             except FileNotFoundError:
+                 return "Could not find Makefile in the root directory."
+             except Exception as e:
+                 return f"Error reading Makefile: {str(e)}"
+
+         tools: list[Tool | BaseTool] = [
+             get_time_date,
+             list_tools_available,
+             list_subagents_available,
+             list_intents_available,
+         ]
+         if self.state.supervisor_agent and self.state.supervisor_agent != self.name:
+             tools.append(request_help)
+
+         if self.state.supervisor_agent is not None or len(self._agents) > 0:
+             tools.append(get_current_active_agent)
+             tools.append(get_supervisor_agent)
+
+         if self.state.supervisor_agent == self.name and os.getenv("ENV") == "dev":
+             tools.append(read_makefile)
+         return tools
+
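User-supplied tools are merged with these defaults at construction time. A sketch of passing a custom LangChain tool (the model choice is illustrative, as in the earlier sketch):

    from langchain_core.tools import tool
    from langchain_openai import ChatOpenAI

    @tool(return_direct=False)
    def add(a: int, b: int) -> int:
        """Add two integers."""
        return a + b

    math_agent = Agent(
        name="math_agent",
        description="Adds numbers.",
        chat_model=ChatOpenAI(model="gpt-4o-mini"),
        tools=[add],
    )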
+     @property
+     def system_prompt(self) -> str:
+         return self._system_prompt
+
+     def set_system_prompt(self, system_prompt: str):
+         self._system_prompt = system_prompt
+
+     @property
+     def structured_tools(self) -> list[Tool | BaseTool]:
+         return self._structured_tools
+
+     @property
+     def state(self) -> AgentSharedState:
+         return self._state
+
+     def validate_tool_name(self, tool: BaseTool) -> BaseTool:
+         if not re.match(r"^[a-zA-Z0-9_-]+$", tool.name):
+             # Replace invalid characters with '_'
+             valid_name = re.sub(r"[^a-zA-Z0-9_-]", "_", tool.name)
+             logger.warning(
+                 f"Tool name '{tool.name}' does not comply with '^[a-zA-Z0-9_-]+$'. Renaming to '{valid_name}'."
+             )
+             tool.name = valid_name
+         return tool
+
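validate_tool_name enforces the provider-side constraint that tool names match ^[a-zA-Z0-9_-]+$. A sketch of the renaming behavior (the tool itself is hypothetical):

    @tool
    def lookup(query: str) -> str:
        """Look up a record."""
        return query

    lookup.name = "lookup user.record"          # space and dot are invalid
    fixed = agent.validate_tool_name(lookup)
    assert fixed.name == "lookup_user_record"   # invalid characters replaced with '_'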
+     def prepare_tools(
+         self, tools: list[Union[Tool, BaseTool, "Agent"]], agents: list
+     ) -> tuple[list[Tool | BaseTool], list["Agent"]]:
+         """
+         If we have Agents in tools, we load them properly as handoff tools.
+         This effectively makes the 'self' agent a supervisor agent.
+
+         Ensures no duplicate tools or agents are added by tracking unique names/instances.
+         """
+         _tools: list[Tool | BaseTool] = []
+         _agents: list["Agent"] = []
+         _tool_names: set[str] = set()
+         _agent_names: set[str] = set()
+
+         # We process tools knowing that each can be either a StructuredTool or an Agent.
+         for t in tools:
+             if isinstance(t, Agent):
+                 # TODO: We might want to duplicate the agent first.
+                 # logger.debug(f"Agent passed as tool: {t}")
+                 if t.name not in _agent_names:
+                     _agents.append(t)
+                     _agent_names.add(t.name)
+                     for tool in t.as_tools():
+                         tool = self.validate_tool_name(tool)
+                         if tool.name not in _tool_names:
+                             _tools.append(tool)
+                             _tool_names.add(tool.name)
+             else:
+                 # Accept both Tool and BaseTool
+                 if hasattr(t, "name"):
+                     if t.name not in _tool_names:
+                         _tools.append(t)
+                         _tool_names.add(t.name)
+
+         # We process agents that are not provided in tools.
+         for agent in agents:
+             if agent.name not in _agent_names:
+                 _agents.append(agent)
+                 _agent_names.add(agent.name)
+                 for tool in agent.as_tools():
+                     tool = self.validate_tool_name(tool)
+                     if tool.name not in _tool_names:
+                         _tools.append(tool)
+                         _tool_names.add(tool.name)
+
+         return _tools, _agents
+
+     def as_tools(self, parent_graph: bool = False) -> list[BaseTool]:
+         return [make_handoff_tool(agent=self, parent_graph=parent_graph)]
+
+     def build_graph(self, patcher: Optional[Callable] = None):
+         graph = StateGraph(MessagesState)
+
+         graph.add_node(self.current_active_agent)
+         graph.add_edge(START, "current_active_agent")
+
+         graph.add_node(self.continue_conversation)
+
+         graph.add_node(self.call_model)
+
+         graph.add_node(self.call_tools)
+
+         for agent in self._agents:
+             graph.add_node(agent.name, agent.graph)
+
+         # A patcher is a callable that can be passed in to modify the graph before we
+         # compile it. This gives callers more flexibility over how the graph is built.
+         if patcher:
+             graph = patcher(graph)
+
+         self.graph = graph.compile(checkpointer=self._checkpointer)
+
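The patcher hook lets callers adjust the topology before compilation. A do-nothing sketch that only inspects the nodes (any callable taking and returning the StateGraph works):

    def log_nodes(graph: StateGraph) -> StateGraph:
        logger.debug(f"Graph nodes before compile: {list(graph.nodes)}")
        return graph

    agent.build_graph(patcher=log_nodes)  # recompiles self.graph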
+     def get_last_human_message(self, state: MessagesState) -> Any | None:
+         """Get the appropriate human message based on AI message context.
+
+         Args:
+             state (MessagesState): Current conversation state
+
+         Returns:
+             Any | None: The relevant human message
+         """
+         last_ai_message: Any | None = pd.find(
+             state["messages"][::-1], lambda m: isinstance(m, AIMessage)
+         )
+         if (
+             last_ai_message is not None
+             and last_ai_message.additional_kwargs.get("owner") == self.name
+         ):
+             return pd.find(
+                 state["messages"][::-1], lambda m: isinstance(m, HumanMessage)
+             )
+         elif (
+             last_ai_message is not None
+             and hasattr(last_ai_message, "additional_kwargs")
+             and last_ai_message.additional_kwargs is not None
+             and "owner" in last_ai_message.additional_kwargs
+         ):
+             return pd.filter_(
+                 state["messages"][::-1], lambda m: isinstance(m, HumanMessage)
+             )[1]
+         else:
+             return pd.find(
+                 state["messages"][::-1], lambda m: isinstance(m, HumanMessage)
+             )
+
+     def current_active_agent(self, state: MessagesState) -> Command:
+         """Go to the current active agent.
+
+         Args:
+             state (MessagesState): Current conversation state
+
+         Returns:
+             Command: Command to go to the current active agent
+         """
+         # Log the current active agent
+         logger.debug(f"😏 Supervisor agent: '{self.state.supervisor_agent}'")
+         logger.debug(f"🟢 Active agent: '{self.state.current_active_agent}'")
+         logger.debug(f"🤖 Current Agent: '{self.name}'")
+
+         # Get the last human message
+         last_human_message = self.get_last_human_message(state)
+
+         # Handle agent routing via @mention
+         if (
+             last_human_message is not None
+             and isinstance(last_human_message.content, str)
+             and last_human_message.content.startswith("@")
+             and last_human_message.content.split(" ")[0].split("@")[1] != self.name
+         ):
+             at_mention = last_human_message.content.split(" ")[0].split("@")[1]
+             logger.debug(f"🔀 Handle agent routing via @mention to '{at_mention}'")
+
+             # Check if we have an agent with this name.
+             agent = pd.find(
+                 self._agents, lambda a: a.name.lower() == at_mention.lower()
+             )
+
+             # Remove the mention from the last human message with re.sub
+             # (re is already imported at module level).
+             last_human_message.content = re.sub(
+                 r"^@[^ ]* ", "", last_human_message.content
+             )
+
+             if agent is not None:
+                 self._state.set_current_active_agent(agent.name)
+                 return Command(goto=agent.name, update={"messages": state["messages"]})
+             else:
+                 logger.debug(f"❌ Agent '{at_mention}' not found")
+
+         if (
+             self._state.current_active_agent is not None
+             and self._state.current_active_agent != self.name
+         ):
+             logger.debug(
+                 f"⏩ Continuing conversation with agent '{self._state.current_active_agent}'"
+             )
+             # Check if current active agent is in list of agents.
+             if self._state.current_active_agent in [a.name for a in self._agents]:
+                 self._state.set_current_active_agent(self._state.current_active_agent)
+                 return Command(goto=self._state.current_active_agent)
+             else:
+                 logger.debug(f"❌ Agent '{self._state.current_active_agent}' not found")
+
+         # self._state.set_current_active_agent(self.name)
+         logger.debug(f"💬 Starting chat with agent '{self.name}'")
+         if (
+             self.state.supervisor_agent != self.name
+             and "SUPERVISOR SYSTEM PROMPT" not in self._system_prompt
+         ):
+             # This agent is a subagent with a supervisor
+             subagent_prompt = f"""
+             SUPERVISOR SYSTEM PROMPT:
+
+             Remember, you are a specialized agent working under the supervision of {self.state.supervisor_agent}.
+
+             1. Stay focused on your specialized role and core capabilities.
+             2. Follow your system prompt instructions precisely.
+             3. For EVERY user message, first evaluate if you can handle it within your core capabilities.
+             4. If the user message is not clear, ask for clarification and evaluate again if you can handle it within your core capabilities.
+             5. If you encounter ANY of these situations:
+                 - You are uncertain about how to proceed
+                 - The task seems outside your core capabilities
+                 - You need clarification about requirements
+                 - You want to confirm a critical action
+                 - You are not 100% confident in your ability to handle the task
+                 Then you MUST use the `request_help` tool to ask your supervisor for help.
+                 IMPORTANT: Do NOT explain to the user that you cannot perform the task or say it is outside your scope. Instead, IMMEDIATELY and directly use `request_help`.
+             6. Do not attempt tasks beyond your defined role.
+             7. Always maintain consistency with your system prompt rules.
+             8. When in doubt, ALWAYS request help rather than risk mistakes. Do not type or explain your inability—just use the tool.
+
+             Your supervisor will help ensure you operate effectively within your role while providing guidance for complex scenarios.
+
+             --------------------------------
+
+             SUBAGENT SYSTEM PROMPT:
+
+             {self._system_prompt}
+             """
+             self.set_system_prompt(subagent_prompt)
+
+         if (
+             self.state.supervisor_agent == self.name
+             and os.getenv("ENV") == "dev"
+             and "DEVELOPER SYSTEM PROMPT" not in self._system_prompt
+         ):
+             dev_prompt = f"""
+             DEVELOPER SYSTEM PROMPT:
+
+             For any questions/commands related to the project, use tool: `read_makefile` to get the information you need.
+
+             --------------------------------
+
+             AGENT SYSTEM PROMPT:
+
+             {self._system_prompt}
+             """
+
+             self.set_system_prompt(dev_prompt)
+
+         if "CURRENT_DATE" not in self._system_prompt:
+             from datetime import datetime
+
+             current_date_str = f"CURRENT_DATE: The current date is {datetime.now().strftime('%Y-%m-%d')}\n"
+             self._system_prompt = self._system_prompt + "\n" + current_date_str
+             self.set_system_prompt(self._system_prompt)
+             return Command(goto="current_active_agent")
+
+         # logger.debug(f"💬 System prompt: {self._system_prompt}")
+         return Command(goto="continue_conversation")
+
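The @mention branch above means a leading "@<agent_name> " token routes the message to that sub-agent and is stripped before delivery. A sketch (the agent names are hypothetical):

    supervisor.invoke("@billing_agent refund order 1234")
    # routes to the sub-agent named 'billing_agent' with the prompt 'refund order 1234'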
+     def continue_conversation(self, state: MessagesState) -> Command:
+         return Command(goto="call_model")
+
+     def handle_openai_response_v1(self, response: BaseMessage) -> Command:
+         content_str: str = ""
+         tool_call: list[ToolCall] = []
+         logger.debug(f"Chat model output version is responses/v1: {response}")
+
+         if isinstance(response.content, list):
+             # Parse response content
+             for item in response.content:
+                 # Ensure item is a dict before accessing attributes
+                 if isinstance(item, dict):
+                     # Get text content
+                     if item.get("type") == "text":
+                         text_content = item.get("text", "")
+                         if isinstance(text_content, str):
+                             content_str += text_content
+
+                         # Add sources from annotations if any
+                         annotations = item.get("annotations", [])
+                         if isinstance(annotations, list) and len(annotations) > 0:
+                             content_str += "\n\n\n\n*Annotations:*\n"
+                             for annotation in annotations:
+                                 if (
+                                     isinstance(annotation, dict)
+                                     and annotation.get("type") == "url_citation"
+                                 ):
+                                     title = annotation.get("title", "")
+                                     url = annotation.get("url", "")
+                                     content_str += f"- [{title}]({url})\n"
+
+                     if "action" in item:
+                         tool_call.append(
+                             ToolCall(
+                                 name=item["type"],
+                                 args={"query": item["action"].get("query", "")},
+                                 id=item.get("id"),
+                                 type="tool_call",
+                             )
+                         )
+
+         # Create AIMessage with the content
+         usage_metadata = None
+         if hasattr(response, "usage_metadata"):
+             usage_metadata = response.usage_metadata
+         ai_message = AIMessage(content=content_str, usage_metadata=usage_metadata)
+
+         # If action was detected, notify tool usage
+         if len(tool_call) > 0:
+             # Use the ai_message which is already the correct type
+             ai_message.tool_calls = tool_call
+             self._notify_tool_usage(ai_message)
+             tool_message = ToolMessage(
+                 content=content_str, tool_call_id=tool_call[0].get("id")
+             )
+             self._notify_tool_response(tool_message)
+             return Command(
+                 goto="__end__", update={"messages": [tool_message, ai_message]}
+             )
+
+         return Command(goto="__end__", update={"messages": [ai_message]})
+
+     def call_model(
+         self,
+         state: MessagesState,
+     ) -> Command[Literal["call_tools", "__end__"]]:
+         self._state.set_current_active_agent(self.name)
+         logger.debug(f"🧠 Calling model on current active agent: {self.name}")
+
+         # Inserting system prompt before messages.
+         messages = state["messages"]
+         if self._system_prompt:
+             messages = [
+                 SystemMessage(content=self._system_prompt),
+             ] + messages
+         logger.debug(f"Messages before calling model: {messages}")
+
+         # Calling model
+         response: BaseMessage = self._chat_model_with_tools.invoke(messages)
+         logger.debug(
+             f"Model response content: {response.content if hasattr(response, 'content') else response}"
+         )
+
+         # Handle tool calls if present
+         if (
+             isinstance(response, AIMessage)
+             and hasattr(response, "tool_calls")
+             and len(response.tool_calls) > 0
+         ):
+             logger.debug("⏩ Calling tools")
+             # TODO: Rethink this.
+             # This is done to prevent the LLM from calling multiple tools at once.
+             # It matters because some tools are subgraphs that receive the full state,
+             # so a subgraph can modify the state. If the LLM called a tool like "add"
+             # and requested the multiplication agent at the same time, that agent could
+             # clobber the state and the result of the "add" tool would be lost.
+             #### -----> A solution would be to rebuild the state to make sure that the
+             # message following a tool call is the response of that call. If we do that,
+             # we should theoretically be able to call multiple tools at once, which
+             # would be more effective.
+             # response.tool_calls = [response.tool_calls[0]]
+
+             return Command(goto="call_tools", update={"messages": [response]})
+
+         elif self._chat_model_output_version == "responses/v1":
+             return self.handle_openai_response_v1(response)
+
+         return Command(goto="__end__", update={"messages": [response]})
+
+     def call_tools(self, state: MessagesState) -> list[Command]:
+         # Check if messages are present in the state.
+         if (
+             "messages" not in state
+             or not isinstance(state["messages"], list)
+             or len(state["messages"]) == 0
+         ):
+             logger.warning("No messages in state, cannot call tools")
+             return [Command(goto="__end__")]
+
+         # Check if the last message is an AIMessage and has tool calls.
+         last_message: AnyMessage = state["messages"][-1]
+         if (
+             not isinstance(last_message, AIMessage)
+             or not hasattr(last_message, "tool_calls")
+             or len(last_message.tool_calls) == 0
+         ):
+             logger.warning(
+                 f"No tool calls found in last message but call_tools was called: {last_message}"
+             )
+             return [Command(goto="__end__", update={"messages": [last_message]})]
+
+         # Get the tool calls from the last message.
+         tool_calls: list[ToolCall] = last_message.tool_calls
+         assert len(tool_calls) > 0, state["messages"][-1]
+
+         # Initialize the results list.
+         results: list[Command] = []
+
+         # Initialize the called tools list.
+         called_tools: list[BaseTool] = []
+         for tool_call in tool_calls:
+             tool_name: str = tool_call["name"]
+             logger.debug(f"🛠️ Calling tool: {tool_name}")
+             tool_: BaseTool = self._tools_by_name[tool_name]
+
+             tool_input_fields = tool_.get_input_schema().model_json_schema()[
+                 "properties"
+             ]
+
+             # For tools with InjectedToolCallId, we must pass the full ToolCall object
+             # according to LangChain's requirements
+             args: dict[str, Any] | ToolCall = tool_call
+
+             # Check if tool needs state injection
+             if "state" in tool_input_fields:
+                 args = {**tool_call, "state": state}  # inject state
+
+             # Check if tool is a handoff tool
+             is_handoff = tool_call["name"].startswith("transfer_to_")
+             if is_handoff is True:
+                 args = {"state": state, "tool_call": {**tool_call, "role": "tool_call"}}
+
+             # Try to invoke the tool.
+             try:
+                 logger.debug(f"🔧 Tool arguments: {args.get('args')}")
+                 tool_response = tool_.invoke(args)
+                 logger.debug(
+                     f"📦 Tool response: {tool_response.content if hasattr(tool_response, 'content') else tool_response}"
+                 )
+                 if (
+                     tool_response is not None
+                     and hasattr(tool_response, "name")
+                     and tool_response.name == "request_help"
+                     and self._state.supervisor_agent != self.name
+                 ):
+                     self._state.set_current_active_agent(self._state.supervisor_agent)
+                     self._state.set_requesting_help(True)
+                     results.append(
+                         Command(goto="current_active_agent", graph=Command.PARENT)
+                     )
+                     return results
+
+                 called_tools.append(tool_)
+
+                 # Handle tool response.
+                 if isinstance(tool_response, ToolMessage):
+                     results.append(Command(update={"messages": [tool_response]}))
+                 elif isinstance(tool_response, Command):
+                     results.append(tool_response)
+                 else:
+                     logger.warning(
+                         f"Tool call {tool_name} returned an unexpected type: {type(tool_response)}"
+                     )
+                     results.append(
+                         Command(
+                             goto="__end__",
+                             update={
+                                 "messages": [
+                                     ToolMessage(
+                                         content=str(tool_response),
+                                         tool_call_id=tool_call["id"],
+                                     )
+                                 ]
+                             },
+                         )
+                     )
+             except Exception as e:
+                 logger.error(f"🚨 Tool call {tool_name} failed: {e}")
+                 results.append(
+                     Command(
+                         goto="__end__",
+                         update={
+                             "messages": [
+                                 ToolMessage(
+                                     content=f"Tool call {tool_name} failed: {str(e)}",
+                                     tool_call_id=tool_call["id"],
+                                 )
+                             ]
+                         },
+                     )
+                 )
+
+         assert len(results) > 0, state
+
+         # Checking if every called tool has return_direct set to True.
+         # This is used to know if we can send the ToolMessage to the call_model node.
+         return_direct: bool = True
+         for t in called_tools:
+             if hasattr(t, "return_direct") and t.return_direct is False:
+                 return_direct = False
+                 break
+
+         # If the last response is a ToolMessage, we want the model to interpret it.
+         last_tool_response: ToolMessage | Command | None = pd.get(
+             results[-1], "update.messages[-1]", None
+         )
+         logger.debug(f"last_tool_response: {last_tool_response}")
+         if (
+             isinstance(last_tool_response, ToolMessage)
+             and hasattr(last_tool_response, "name")
+             and last_tool_response.name is not None
+             and not last_tool_response.name.startswith("transfer_to_")
+         ):
+             if return_direct is False:
+                 logger.debug("⏩ Calling model to interpret the tool response.")
+                 results.append(Command(goto="call_model"))
+             else:
+                 logger.debug(
+                     "📧 Injecting ToolMessage into AIMessage for the user to see."
+                 )
+                 results.append(
+                     Command(
+                         update={
+                             "messages": [AIMessage(content=last_tool_response.content)]
+                         }
+                     )
+                 )
+
+         logger.debug(f"✅ Tool results: {results}")
+         return results
+
+     @property
+     def workflow(self) -> StateGraph:
+         return self._workflow
+
+     def _notify_tool_usage(self, message: AnyMessage):
+         self._event_queue.put(ToolUsageEvent(payload=message))
+         self._on_tool_usage(message)
+
+     def _notify_tool_response(self, message: AnyMessage):
+         self._event_queue.put(ToolResponseEvent(payload=message))
+         self._on_tool_response(message)
+
+     def _notify_ai_message(self, message: AnyMessage, agent_name: str):
+         self._event_queue.put(AIMessageEvent(payload=message, agent_name=agent_name))
+         self._on_ai_message(message, agent_name)
+
+     def on_tool_usage(self, callback: Callable[[AnyMessage], None]):
+         """Register a callback to be called when a tool is used.
+
+         The callback will be invoked whenever the model makes a tool call,
+         before the tool is actually executed.
+
+         Args:
+             callback (Callable[[AnyMessage], None]): Function to call with the message
+                 containing the tool call
+         """
+         self._on_tool_usage = callback
+         # # Also set the callback on all sub-agents to ensure they notify properly
+         # for agent in self._agents:
+         #     agent.on_tool_usage(callback)
+
+     def on_tool_response(self, callback: Callable[[AnyMessage], None]):
+         """Register a callback to be called when a tool response is received.
+
+         The callback will be invoked whenever a tool response message is processed,
+         before passing the messages to the model.
+
+         Args:
+             callback (Callable[[AnyMessage], None]): Function to call with the message
+                 containing the tool response
+         """
+         self._on_tool_response = callback
+         # # Also set the callback on all sub-agents to ensure they notify properly
+         # for agent in self._agents:
+         #     agent.on_tool_response(callback)
+
+     def on_ai_message(self, callback: Callable[[AnyMessage, str], None]):
+         """Register a callback to be called when an AI message is received."""
+         self._on_ai_message = callback
+         # Also set the callback on all sub-agents to ensure they notify properly
+         for agent in self._agents:
+             agent.on_ai_message(callback)
+
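Callbacks can also be registered after construction. A sketch (the handlers are placeholders; note that on_ai_message propagates to sub-agents while the tool callbacks currently do not):

    agent.on_tool_usage(lambda message: print("calling:", message.tool_calls))
    agent.on_ai_message(lambda message, agent_name: print(f"{agent_name}: {message.content}"))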
+     @property
+     def app(self):
+         """Get the underlying LangChain app.
+         This property exposes the underlying LangChain app for advanced usage scenarios.
+         Users can call app.invoke() directly with custom message sequences and configurations
+         if they need more control than the standard invoke() method provides.
+
+         Returns:
+             RunnableSequence: The LangChain runnable sequence that processes messages
+         """
+         return self._app
+
+     def stream(self, prompt: str) -> Generator[dict[str, Any] | Any, None, None]:
+         """Process a user prompt through the agent and yield responses as they come.
+
+         Args:
+             prompt (str): The user's text prompt to process
+
+         Yields:
+             The raw chunks produced by the underlying graph stream.
+         """
+         notified = {}
+
+         for chunk in self.graph.stream(
+             {"messages": [HumanMessage(content=prompt)]},
+             config={"configurable": {"thread_id": self._state.thread_id}},
+             subgraphs=True,
+         ):
+             source, payload = chunk
+             agent_name = self._name if len(source) == 0 else source[0].split(":")[0]
+             if isinstance(payload, dict):
+                 last_messages = []
+                 v = list(payload.values())[0]
+
+                 if v is None:
+                     continue
+
+                 if isinstance(v, dict):
+                     if (
+                         "messages" in v
+                         and isinstance(v["messages"], list)
+                         and len(v["messages"]) > 0
+                     ):
+                         last_messages = [v["messages"][-1]]
+                     else:
+                         continue
+                 elif isinstance(v, list):
+                     last_messages = []
+                     for e in v:
+                         if (
+                             isinstance(e, dict)
+                             and "messages" in e
+                             and isinstance(e["messages"], list)
+                             and len(e["messages"]) > 0
+                         ):
+                             last_messages.append(e["messages"][-1])
+                 else:
+                     continue
+
+                 for last_message in last_messages:
+                     if isinstance(last_message, AIMessage):
+                         if pd.get(last_message, "additional_kwargs.tool_calls"):
+                             # This is a tool call.
+                             self._notify_tool_usage(last_message)
+                         else:
+                             # This filters the sources of AIMessage: an AI message is
+                             # notified only when it comes from one of the methods:
+                             # - call_model
+                             # - call_tools
+                             # If another method needs to return an AIMessage (or a
+                             # Command(..., update={"messages": [AIMessage(...)]})),
+                             # either add it to this list or have that method call
+                             # self._notify_ai_message directly.
+
+                             allowed_sources_of_ai_message = ["call_model", "call_tools"]
+                             if any(
+                                 source in payload
+                                 for source in allowed_sources_of_ai_message
+                             ):
+                                 self._notify_ai_message(last_message, agent_name)
+
+                     elif isinstance(last_message, ToolMessage):
+                         if last_message.id not in notified:
+                             self._notify_tool_response(last_message)
+                             notified[last_message.id] = True
+                     else:
+                         if "tool_call_id" in last_message:
+                             if last_message["tool_call_id"] not in notified:
+                                 self._notify_tool_response(last_message)
+                                 notified[last_message["tool_call_id"]] = True
+             yield chunk
+
+    def invoke(self, prompt: str) -> str:
+        """Process a user prompt through the agent and return the response.
+
+        This method takes a text prompt from the user, processes it through the
+        underlying LangChain app, and returns the model's response. The prompt is
+        wrapped in a HumanMessage and processed in a new message sequence.
+
+        Args:
+            prompt (str): The user's text prompt to process
+
+        Returns:
+            str: The model's response text
+        """
+
+        chunks: list[dict[str, Any]] = []
+        for chunk in self.stream(prompt):
+            if isinstance(chunk, tuple):
+                chunk = chunk[1]
+
+            assert isinstance(chunk, dict)
+
+            chunks.append(chunk)
+
+        if len(chunks) == 0:
+            return ""
+
+        last_chunk_values = list(chunks[-1].values())
+        if len(last_chunk_values) == 0:
+            return ""
+        value = last_chunk_values[0]
+        messages = []
+        if (
+            isinstance(value, dict)
+            and "messages" in value
+            and isinstance(value["messages"], list)
+        ):
+            messages = value["messages"]
+        elif isinstance(value, list) and len(value) > 0:
+            last_item = value[-1]
+            if (
+                isinstance(last_item, dict)
+                and "messages" in last_item
+                and isinstance(last_item["messages"], list)
+            ):
+                messages = last_item["messages"]
+
+        if len(messages) == 0:
+            return ""
+
+        last_message = messages[-1]
+        if hasattr(last_message, "content"):
+            content = last_message.content
+        else:
+            content = str(last_message) if last_message is not None else ""
+
+        return content
+
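In practice `invoke` reduces the stream to the final message content, returning an empty string when nothing usable came back. A short usage sketch (agent construction assumed to have happened elsewhere):

```python
# `agent` is assumed to be a configured Agent instance.
answer = agent.invoke("Summarize the open issues.")
if answer:
    print(answer)  # text content of the final AIMessage
else:
    print("Agent produced no final message.")
```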
+    def reset(self):
+        """Reset the agent's conversation state.
+
+        This method increments the internal thread ID counter, effectively starting a new
+        conversation thread. Any subsequent invocations will be processed as part of a
+        new conversation context.
+        """
+        try:
+            current_thread_id = int(self._state.thread_id)
+            self._state.set_thread_id(str(current_thread_id + 1))
+        except (ValueError, TypeError):
+            # If thread_id is not a valid integer, generate a new UUID
+            self._state.set_thread_id(str(uuid.uuid4()))
+
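Because conversation memory is keyed by `thread_id` in the checkpointer, changing the ID is all it takes to start over. A sketch of the effect (assuming the agent was built with a checkpointer):

```python
agent.invoke("My name is Ada.")
agent.invoke("What is my name?")   # same thread_id: the earlier turn is in context

agent.reset()                      # new thread_id
agent.invoke("What is my name?")   # fresh thread: the earlier turn is gone
```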
+    def duplicate(
+        self,
+        queue: Queue | None = None,
+        agent_shared_state: AgentSharedState | None = None,
+    ) -> "Agent":
+        """Create a new instance of the agent with the same configuration.
+
+        This method creates a deep copy of the agent with the same configuration
+        but with its own independent state. This is useful when you need to run
+        multiple instances of the same agent concurrently.
+
+        Args:
+            queue (Queue | None): Event queue for the new instance. A fresh Queue
+                is created when None.
+            agent_shared_state (AgentSharedState | None): Shared state for the new
+                instance. A fresh AgentSharedState is created when None.
+
+        Returns:
+            Agent: A new Agent instance with the same configuration
+        """
+        shared_state = agent_shared_state or AgentSharedState()
+
+        if queue is None:
+            queue = Queue()
+
+        # We duplicate each sub-agent and add them as tools; this happens
+        # recursively at every level of sub-agents.
+        agents: list[Agent] = [
+            agent.duplicate(queue, shared_state) for agent in self._original_agents
+        ]
+
+        new_agent = self.__class__(
+            name=self._name,
+            description=self._description,
+            chat_model=self._chat_model,
+            tools=self._original_tools,
+            agents=agents,
+            memory=self._checkpointer,
+            state=shared_state,  # Independent state for the new instance
+            configuration=self._configuration,
+            event_queue=queue,
+        )
+
+        return new_agent
+
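Since each copy gets its own queue, shared state, and recursively duplicated sub-agents, duplicates can safely serve requests in parallel. A hedged concurrency sketch (assuming `agent` exists):

```python
from threading import Thread

def worker(prompt: str) -> None:
    # Each worker operates on an independent copy of the agent tree.
    clone = agent.duplicate()
    print(clone.invoke(prompt))

threads = [Thread(target=worker, args=(p,)) for p in ("task A", "task B")]
for t in threads:
    t.start()
for t in threads:
    t.join()
```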
+    def as_api(
+        self,
+        router: APIRouter,
+        route_name: str = "",
+        name: str = "",
+        description: str = "",
+        description_stream: str = "",
+        tags: list[str | Enum] | None = None,
+    ) -> None:
+        """Adds API endpoints for this agent to the given router.
+
+        Args:
+            router (APIRouter): The router to add endpoints to
+            route_name (str): Optional prefix for route names. Defaults to ""
+            name (str): Optional name to add to the endpoints. Defaults to ""
+            description (str): Optional description to add to the endpoints. Defaults to ""
+            description_stream (str): Optional description to add to the stream endpoint. Defaults to ""
+            tags (list[str | Enum] | None): Optional list of tags to add to the endpoints. Defaults to None
+        """
+
+        route_name = route_name or self._name
+        name = name or self._name.capitalize().replace("_", " ")
+        description = description or self._description
+        description_stream = description_stream or self._description
+
+        @router.post(
+            f"/{route_name}/completion" if route_name else "/completion",
+            name=f"{name} completion",
+            description=description,
+            tags=tags,
+        )
+        def completion(query: CompletionQuery):
+            if isinstance(query.thread_id, int):
+                query.thread_id = str(query.thread_id)
+            logger.debug(
+                f"completion - current active agent: {self._state.current_active_agent}"
+            )
+            logger.debug(
+                f"completion - supervisor agent: {self._state.supervisor_agent}"
+            )
+
+            new_agent = self.duplicate(
+                queue=self._event_queue, agent_shared_state=self._state
+            )
+            new_agent.state.set_thread_id(query.thread_id)
+            return new_agent.invoke(query.prompt)
+
+        @router.post(
+            f"/{route_name}/stream-completion" if route_name else "/stream-completion",
+            name=f"{name} stream completion",
+            description=description_stream,
+            tags=tags,
+        )
+        async def stream_completion(query: CompletionQuery):
+            if isinstance(query.thread_id, int):
+                query.thread_id = str(query.thread_id)
+            logger.debug(
+                f"stream_completion - current active agent: {self._state.current_active_agent}"
+            )
+            logger.debug(
+                f"stream_completion - supervisor agent: {self._state.supervisor_agent}"
+            )
+
+            new_agent = self.duplicate(
+                queue=self._event_queue, agent_shared_state=self._state
+            )
+            new_agent.state.set_thread_id(query.thread_id)
+            return EventSourceResponse(
+                new_agent.stream_invoke(query.prompt),
+                media_type="text/event-stream; charset=utf-8",
+            )
+
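Mounted on a FastAPI application, this registers `POST /{route_name}/completion` and `POST /{route_name}/stream-completion`. A minimal mounting sketch (the app, router, and route name are illustrative):

```python
from fastapi import APIRouter, FastAPI

app = FastAPI()
router = APIRouter()

# Registers POST /my_agent/completion and POST /my_agent/stream-completion.
agent.as_api(router, route_name="my_agent", tags=["agents"])
app.include_router(router)
```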
+    def stream_invoke(self, prompt: str):
+        """Process a user prompt through the agent and yield responses as they come.
+
+        Args:
+            prompt (str): The user's text prompt to process
+
+        Yields:
+            dict: Event data formatted for SSE
+        """
+        from threading import Thread
+
+        # Run the blocking invoke in a worker thread; it signals completion by
+        # putting a FinalStateEvent on the shared event queue.
+        def run_invoke():
+            final_state = self.invoke(prompt)
+            self._event_queue.put(FinalStateEvent(payload=final_state))
+
+        thread = Thread(target=run_invoke)
+        thread.start()
+
+        final_state = None
+        while True:
+            try:
+                message = self._event_queue.get(timeout=0.05)
+                if isinstance(message, ToolUsageEvent):
+                    yield {
+                        "event": "tool_usage",
+                        "data": str(message.payload.tool_calls[0]["name"]),
+                    }
+                elif isinstance(message, ToolResponseEvent):
+                    yield {
+                        "event": "tool_response",
+                        "data": str(pd.get(message, "payload.content", "NULL")),
+                    }
+                elif isinstance(message, AIMessageEvent):
+                    yield {
+                        "event": "ai_message",
+                        "data": str(message.payload.content),
+                    }
+                elif isinstance(message, FinalStateEvent):
+                    final_state = message.payload
+                    break
+            except Empty:
+                # If the worker thread died without ever emitting a final state,
+                # fail instead of polling forever.
+                if (
+                    not thread.is_alive()
+                    and self._event_queue.empty()
+                    and final_state is None
+                ):
+                    raise Exception(
+                        "Agent thread has died and no final state event was received."
+                    )
+
+        response = final_state
+        logger.debug(f"Response: {response}")
+
+        # Buffer the response text and emit one SSE message per line
+        buffer = ""
+        for char in response:
+            buffer += char
+            if char in ["\n", "\r"]:
+                yield {"event": "message", "data": buffer.rstrip()}
+                buffer = ""
+
+        # Flush any remaining text
+        if buffer.strip():
+            yield {"event": "message", "data": buffer}
+
+        yield {"event": "done", "data": "[DONE]"}
+
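On the wire, the generator above is translated by `EventSourceResponse` into standard SSE frames with event names `tool_usage`, `tool_response`, `ai_message`, `message`, and `done`. A rough client sketch using `httpx` (the URL and payload shape are assumptions based on `CompletionQuery` as used above):

```python
import httpx

# Assumed endpoint and payload; CompletionQuery carries `prompt` and `thread_id`.
with httpx.stream(
    "POST",
    "http://localhost:8000/my_agent/stream-completion",
    json={"prompt": "Hello", "thread_id": "1"},
) as response:
    for line in response.iter_lines():
        if line:  # raw SSE frames: "event: ..." / "data: ..." pairs
            print(line)
```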
+    @property
+    def tools(self) -> list[Union[Tool, BaseTool]]:
+        """Get the list of tools available to the agent.
+
+        Returns:
+            list[Tool]: List of tools configured for this agent
+        """
+        return self._structured_tools
+
+    @property
+    def name(self) -> str:
+        """Get the name of the agent.
+
+        Returns:
+            str: The agent's name
+        """
+        return self._name
+
+    @property
+    def description(self) -> str:
+        """Get the description of the agent.
+
+        Returns:
+            str: The agent's description
+        """
+        return self._description
+
+    @property
+    def agents(self) -> list["Agent"]:
+        """Get the sub-agents attached to this agent.
+
+        Returns:
+            list[Agent]: The agent's sub-agents
+        """
+        return self._agents
+
+    @property
+    def chat_model(self) -> BaseChatModel:
+        """Get the chat model used by the agent.
+
+        Returns:
+            BaseChatModel: The agent's chat model
+        """
+        if isinstance(self._chat_model, ChatModel):
+            return self._chat_model.model
+        return self._chat_model
+
+    @property
+    def configuration(self) -> AgentConfiguration:
+        """Get the configuration used by the agent.
+
+        Returns:
+            AgentConfiguration: The agent's configuration
+        """
+        return self._configuration
+
+    def hello(self) -> str:
+        return "Hello"
+
+
+def make_handoff_tool(*, agent: Agent, parent_graph: bool = False) -> BaseTool:
+    """Create a tool that hands the conversation off to another agent via a Command."""
+    tool_name = f"transfer_to_{agent.name}"
+
+    @tool(tool_name)
+    def handoff_to_agent(
+        # Optionally pass the current graph state to the tool (ignored by the LLM)
+        state: Annotated[dict, InjectedState],
+        # Optionally pass the current tool call ID (ignored by the LLM)
+        tool_call: Annotated[ToolCall, ToolCall],
+    ):
+        """Ask another agent for help."""
+        agent_label = " ".join(
+            word.capitalize() for word in agent.name.replace("_", " ").split()
+        )
+
+        tool_message = ToolMessage(
+            content=f"Conversation transferred to {agent_label}",
+            name=tool_name,
+            tool_call_id=tool_call["id"],
+        )
+
+        agent.state.set_current_active_agent(agent.name)
+
+        return Command(
+            # Navigate to the target agent node, in the PARENT graph when
+            # parent_graph is True
+            goto=agent.name,
+            graph=Command.PARENT if parent_graph else None,
+            # This is the state update the target agent will see when it is invoked.
+            # We pass the agent's FULL internal message history AND append a tool
+            # message so that the resulting chat history stays valid.
+            update={"messages": state["messages"] + [tool_message]},
+        )
+
+    assert isinstance(handoff_to_agent, BaseTool)
+
+    return handoff_to_agent
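A typical use is to give a supervisor agent an explicit transfer tool per sub-agent; the tool surfaces to the LLM as `transfer_to_<agent name>` and, when called, returns a `Command` that reroutes the graph. A sketch with illustrative agent names:

```python
# Hypothetical wiring: `billing_agent` is an existing sub-agent.
handoff = make_handoff_tool(agent=billing_agent, parent_graph=True)

print(handoff.name)  # "transfer_to_billing_agent" for an agent named billing_agent
# The supervisor would include `handoff` in its tool list so the LLM can
# transfer the conversation to the billing agent node in the parent graph.
```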