alita-sdk 0.3.376__py3-none-any.whl → 0.3.435__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of alita-sdk has been flagged as potentially problematic; see the release advisory for more details.
- alita_sdk/configurations/bitbucket.py +95 -0
- alita_sdk/configurations/confluence.py +96 -1
- alita_sdk/configurations/gitlab.py +79 -0
- alita_sdk/configurations/jira.py +103 -0
- alita_sdk/configurations/testrail.py +88 -0
- alita_sdk/configurations/xray.py +93 -0
- alita_sdk/configurations/zephyr_enterprise.py +93 -0
- alita_sdk/configurations/zephyr_essential.py +75 -0
- alita_sdk/runtime/clients/client.py +9 -4
- alita_sdk/runtime/clients/mcp_discovery.py +342 -0
- alita_sdk/runtime/clients/mcp_manager.py +262 -0
- alita_sdk/runtime/clients/sandbox_client.py +8 -0
- alita_sdk/runtime/langchain/assistant.py +41 -38
- alita_sdk/runtime/langchain/constants.py +5 -1
- alita_sdk/runtime/langchain/document_loaders/AlitaDocxMammothLoader.py +315 -3
- alita_sdk/runtime/langchain/document_loaders/AlitaJSONLoader.py +4 -1
- alita_sdk/runtime/langchain/document_loaders/constants.py +28 -12
- alita_sdk/runtime/langchain/langraph_agent.py +91 -27
- alita_sdk/runtime/langchain/utils.py +24 -4
- alita_sdk/runtime/models/mcp_models.py +57 -0
- alita_sdk/runtime/toolkits/__init__.py +24 -0
- alita_sdk/runtime/toolkits/application.py +8 -1
- alita_sdk/runtime/toolkits/mcp.py +787 -0
- alita_sdk/runtime/toolkits/tools.py +98 -50
- alita_sdk/runtime/tools/__init__.py +7 -2
- alita_sdk/runtime/tools/application.py +7 -0
- alita_sdk/runtime/tools/function.py +20 -28
- alita_sdk/runtime/tools/graph.py +10 -4
- alita_sdk/runtime/tools/image_generation.py +104 -8
- alita_sdk/runtime/tools/llm.py +146 -114
- alita_sdk/runtime/tools/mcp_inspect_tool.py +284 -0
- alita_sdk/runtime/tools/mcp_server_tool.py +79 -10
- alita_sdk/runtime/tools/sandbox.py +166 -63
- alita_sdk/runtime/tools/vectorstore.py +3 -2
- alita_sdk/runtime/tools/vectorstore_base.py +4 -3
- alita_sdk/runtime/utils/streamlit.py +34 -3
- alita_sdk/runtime/utils/toolkit_utils.py +5 -2
- alita_sdk/runtime/utils/utils.py +1 -0
- alita_sdk/tools/__init__.py +48 -31
- alita_sdk/tools/ado/work_item/ado_wrapper.py +17 -8
- alita_sdk/tools/base_indexer_toolkit.py +75 -66
- alita_sdk/tools/chunkers/sematic/proposal_chunker.py +1 -1
- alita_sdk/tools/code_indexer_toolkit.py +13 -3
- alita_sdk/tools/confluence/api_wrapper.py +29 -7
- alita_sdk/tools/confluence/loader.py +10 -0
- alita_sdk/tools/elitea_base.py +7 -7
- alita_sdk/tools/gitlab/api_wrapper.py +11 -7
- alita_sdk/tools/jira/api_wrapper.py +1 -1
- alita_sdk/tools/openapi/__init__.py +10 -1
- alita_sdk/tools/qtest/api_wrapper.py +522 -74
- alita_sdk/tools/sharepoint/api_wrapper.py +104 -33
- alita_sdk/tools/sharepoint/authorization_helper.py +175 -1
- alita_sdk/tools/sharepoint/utils.py +8 -2
- alita_sdk/tools/utils/content_parser.py +27 -16
- alita_sdk/tools/vector_adapters/VectorStoreAdapter.py +19 -6
- {alita_sdk-0.3.376.dist-info → alita_sdk-0.3.435.dist-info}/METADATA +1 -1
- {alita_sdk-0.3.376.dist-info → alita_sdk-0.3.435.dist-info}/RECORD +60 -55
- {alita_sdk-0.3.376.dist-info → alita_sdk-0.3.435.dist-info}/WHEEL +0 -0
- {alita_sdk-0.3.376.dist-info → alita_sdk-0.3.435.dist-info}/licenses/LICENSE +0 -0
- {alita_sdk-0.3.376.dist-info → alita_sdk-0.3.435.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,262 @@
|
|
|
1
|
+
"""
|
|
2
|
+
MCP Manager - Unified interface for both static and dynamic MCP tool discovery.
|
|
3
|
+
Provides a single API that can work with both registry-based and live discovery.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
import asyncio
|
|
7
|
+
import logging
|
|
8
|
+
from typing import Dict, List, Optional, Any, Union
|
|
9
|
+
from enum import Enum
|
|
10
|
+
|
|
11
|
+
from ..models.mcp_models import McpConnectionConfig, McpToolMetadata
|
|
12
|
+
from .mcp_discovery import McpDiscoveryService, get_discovery_service
|
|
13
|
+
|
|
14
|
+
logger = logging.getLogger(__name__)
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class DiscoveryMode(Enum):
    """Enumeration of the supported MCP tool discovery strategies."""

    # Read tool definitions from the alita.get_mcp_toolkits() registry snapshot.
    STATIC = "static"
    # Query the MCP server live over the protocol.
    DYNAMIC = "dynamic"
    # Attempt dynamic discovery first, falling back to the static registry.
    HYBRID = "hybrid"
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class McpManager:
    """Unified manager for MCP tool discovery supporting multiple modes.

    Depending on the selected :class:`DiscoveryMode`, tools are discovered
    live from the MCP server (dynamic), from the alita registry (static),
    or via dynamic discovery with a static-registry fallback (hybrid).
    All strategies share a single :class:`McpDiscoveryService` instance.
    """

    def __init__(
        self,
        default_mode: DiscoveryMode = DiscoveryMode.DYNAMIC,
        discovery_service: Optional[McpDiscoveryService] = None
    ):
        self.default_mode = default_mode
        # Use the shared process-wide discovery service unless one is injected.
        self.discovery_service = discovery_service or get_discovery_service()
        # Whether hybrid mode may fall back to the static registry.
        self._static_fallback_enabled = True

    async def discover_server_tools(
        self,
        server_name: str,
        connection_config: Optional[McpConnectionConfig] = None,
        alita_client=None,
        mode: Optional[DiscoveryMode] = None,
        **kwargs
    ) -> List[McpToolMetadata]:
        """
        Discover tools from an MCP server using the requested mode.

        Args:
            server_name: Name of the MCP server
            connection_config: Connection configuration (required for dynamic mode)
            alita_client: Alita client (required for static mode)
            mode: Discovery mode to use (defaults to manager's default)
            **kwargs: Additional options (currently unused)

        Returns:
            List of discovered tool metadata

        Raises:
            ValueError: If the mode is unrecognized, or a required argument
                for the chosen mode is missing.
        """
        selected = mode or self.default_mode

        if selected == DiscoveryMode.DYNAMIC:
            return await self._discover_dynamic(server_name, connection_config)
        if selected == DiscoveryMode.STATIC:
            return await self._discover_static(server_name, alita_client)
        if selected == DiscoveryMode.HYBRID:
            return await self._discover_hybrid(server_name, connection_config, alita_client)
        raise ValueError(f"Unknown discovery mode: {selected}")

    async def _discover_dynamic(
        self,
        server_name: str,
        connection_config: Optional[McpConnectionConfig]
    ) -> List[McpToolMetadata]:
        """Discover tools live via the MCP protocol; logs and re-raises on failure."""
        if not connection_config:
            raise ValueError("Connection configuration required for dynamic discovery")

        try:
            # The discovery service must be running before servers can register.
            await self.discovery_service.start()
            await self.discovery_service.register_server(server_name, connection_config)
            discovered = await self.discovery_service.get_server_tools(server_name)
            logger.info(f"Dynamic discovery found {len(discovered)} tools from {server_name}")
            return discovered
        except Exception as exc:
            logger.error(f"Dynamic discovery failed for {server_name}: {exc}")
            raise

    async def _discover_static(
        self,
        server_name: str,
        alita_client
    ) -> List[McpToolMetadata]:
        """Discover tools from the static alita registry; logs and re-raises on failure."""
        if not alita_client or not hasattr(alita_client, 'get_mcp_toolkits'):
            raise ValueError("Alita client with get_mcp_toolkits() required for static discovery")

        try:
            # Look the server up in the registry snapshot.
            registry = alita_client.get_mcp_toolkits()
            toolkit = next((entry for entry in registry if entry.get('name') == server_name), None)
            if not toolkit:
                logger.warning(f"Static registry: Server {server_name} not found")
                return []

            # Translate registry entries into the common metadata model.
            converted = [
                McpToolMetadata(
                    name=info.get('name', ''),
                    description=info.get('description', ''),
                    server=server_name,
                    input_schema=info.get('inputSchema', {}),
                    enabled=True,
                )
                for info in toolkit.get('tools', [])
            ]
            logger.info(f"Static discovery found {len(converted)} tools from {server_name}")
            return converted
        except Exception as exc:
            logger.error(f"Static discovery failed for {server_name}: {exc}")
            raise

    async def _discover_hybrid(
        self,
        server_name: str,
        connection_config: Optional[McpConnectionConfig],
        alita_client
    ) -> List[McpToolMetadata]:
        """Try dynamic discovery first, then fall back to the static registry.

        Returns an empty list when every applicable strategy fails.
        """
        # Dynamic discovery is preferred when a connection config is supplied.
        if connection_config:
            try:
                return await self._discover_dynamic(server_name, connection_config)
            except Exception as exc:
                logger.warning(f"Dynamic discovery failed for {server_name}, trying static: {exc}")

        # Static registry fallback, if enabled and a client is available.
        if self._static_fallback_enabled and alita_client:
            try:
                return await self._discover_static(server_name, alita_client)
            except Exception as exc:
                logger.error(f"Static fallback also failed for {server_name}: {exc}")

        logger.error(f"All discovery methods failed for {server_name}")
        return []

    async def get_server_health(
        self,
        server_name: Optional[str] = None
    ) -> Dict[str, Any]:
        """Return health info for one server, or for all servers when no name is given."""
        try:
            health = self.discovery_service.get_server_health()
            if server_name:
                # Unknown servers report a sentinel status instead of raising.
                return health.get(server_name, {"status": "unknown"})
            return health
        except Exception as exc:
            logger.error(f"Failed to get server health: {exc}")
            return {"status": "error", "error": str(exc)}

    async def refresh_server(self, server_name: str):
        """Force a refresh of one server's tools; failures are logged, not raised."""
        try:
            await self.discovery_service.refresh_server(server_name)
        except Exception as exc:
            logger.error(f"Failed to refresh server {server_name}: {exc}")

    async def start(self):
        """Start the underlying discovery service."""
        await self.discovery_service.start()

    async def stop(self):
        """Stop the underlying discovery service."""
        await self.discovery_service.stop()

    def set_static_fallback(self, enabled: bool):
        """Enable or disable the static-registry fallback used by hybrid mode."""
        self._static_fallback_enabled = enabled
|
|
194
|
+
|
|
195
|
+
|
|
196
|
+
# Global manager instance, created lazily by get_mcp_manager().
_mcp_manager: Optional[McpManager] = None


def get_mcp_manager(mode: DiscoveryMode = DiscoveryMode.HYBRID) -> McpManager:
    """Return the global MCP manager, creating it on first use.

    NOTE(review): ``mode`` only takes effect on the call that creates the
    singleton; subsequent calls return the existing instance unchanged.
    """
    global _mcp_manager
    if _mcp_manager is None:
        _mcp_manager = McpManager(default_mode=mode)
    return _mcp_manager
|
|
206
|
+
|
|
207
|
+
|
|
208
|
+
async def discover_mcp_tools(
    server_name: str,
    connection_config: Optional[McpConnectionConfig] = None,
    alita_client=None,
    mode: Optional[DiscoveryMode] = None
) -> List[McpToolMetadata]:
    """
    Convenience function for discovering MCP tools via the global manager.

    Args:
        server_name: Name of the MCP server
        connection_config: Connection config (for dynamic discovery)
        alita_client: Alita client (for static discovery)
        mode: Discovery mode (defaults to HYBRID)

    Returns:
        List of discovered tool metadata
    """
    effective_mode = mode or DiscoveryMode.HYBRID
    return await get_mcp_manager().discover_server_tools(
        server_name=server_name,
        connection_config=connection_config,
        alita_client=alita_client,
        mode=effective_mode,
    )
|
|
233
|
+
|
|
234
|
+
|
|
235
|
+
async def init_mcp_manager(mode: DiscoveryMode = DiscoveryMode.HYBRID):
    """Create (if needed) and start the global MCP manager."""
    await get_mcp_manager(mode).start()
|
|
239
|
+
|
|
240
|
+
|
|
241
|
+
async def shutdown_mcp_manager():
    """Stop and discard the global MCP manager, if one exists."""
    global _mcp_manager
    if _mcp_manager:
        await _mcp_manager.stop()
        # Drop the singleton so a later init creates a fresh manager.
        _mcp_manager = None
|
|
247
|
+
|
|
248
|
+
|
|
249
|
+
# Configuration helpers
def create_discovery_config(
    mode: str = "hybrid",
    discovery_interval: int = 300,
    enable_static_fallback: bool = True,
    **kwargs
) -> Dict[str, Any]:
    """Build a discovery configuration dictionary.

    Extra keyword arguments are merged in last and may override the base keys.
    """
    config: Dict[str, Any] = {
        "discovery_mode": mode,
        "discovery_interval": discovery_interval,
        "enable_static_fallback": enable_static_fallback,
    }
    config.update(kwargs)
    return config
|
|
@@ -143,6 +143,7 @@ class SandboxClient:
|
|
|
143
143
|
self.configurations_url = f'{self.base_url}{self.api_path}/integrations/integrations/default/{self.project_id}?section=configurations&unsecret=true'
|
|
144
144
|
self.ai_section_url = f'{self.base_url}{self.api_path}/integrations/integrations/default/{self.project_id}?section=ai'
|
|
145
145
|
self.image_generation_url = f'{self.base_url}{self.llm_path}/images/generations'
|
|
146
|
+
self.auth_user_url = f'{self.base_url}{self.api_path}/auth/user'
|
|
146
147
|
self.configurations: list = configurations or []
|
|
147
148
|
self.model_timeout = kwargs.get('model_timeout', 120)
|
|
148
149
|
self.model_image_generation = kwargs.get('model_image_generation')
|
|
@@ -363,3 +364,10 @@ class SandboxClient:
|
|
|
363
364
|
url = f'{self.artifact_url}/{bucket_name}'
|
|
364
365
|
data = requests.delete(url, headers=self.headers, verify=False, params={'filename': quote(artifact_name)})
|
|
365
366
|
return self._process_requst(data)
|
|
367
|
+
|
|
368
|
+
def get_user_data(self) -> Dict[str, Any]:
|
|
369
|
+
resp = requests.get(self.auth_user_url, headers=self.headers, verify=False)
|
|
370
|
+
if resp.ok:
|
|
371
|
+
return resp.json()
|
|
372
|
+
logger.error(f'Failed to fetch user data: {resp.status_code} - {resp.text}')
|
|
373
|
+
raise ApiDetailsRequestError(f'Failed to fetch user data with status code {resp.status_code}.')
|
|
@@ -17,6 +17,7 @@ from .constants import REACT_ADDON, REACT_VARS, XML_ADDON
|
|
|
17
17
|
from .chat_message_template import Jinja2TemplatedChatMessagesTemplate
|
|
18
18
|
from ..tools.echo import EchoTool
|
|
19
19
|
from langchain_core.tools import BaseTool, ToolException
|
|
20
|
+
from jinja2 import Environment, DebugUndefined
|
|
20
21
|
|
|
21
22
|
logger = logging.getLogger(__name__)
|
|
22
23
|
|
|
@@ -29,7 +30,8 @@ class Assistant:
|
|
|
29
30
|
app_type: str = "openai",
|
|
30
31
|
tools: Optional[list] = [],
|
|
31
32
|
memory: Optional[Any] = None,
|
|
32
|
-
store: Optional[BaseStore] = None
|
|
33
|
+
store: Optional[BaseStore] = None,
|
|
34
|
+
debug_mode: Optional[bool] = False):
|
|
33
35
|
|
|
34
36
|
self.app_type = app_type
|
|
35
37
|
self.memory = memory
|
|
@@ -77,11 +79,17 @@ class Assistant:
|
|
|
77
79
|
else:
|
|
78
80
|
# For predict agents, initialize memory store to None since they don't use memory
|
|
79
81
|
self.store = None
|
|
80
|
-
|
|
82
|
+
|
|
81
83
|
# Lazy import to avoid circular dependency
|
|
82
84
|
from ..toolkits.tools import get_tools
|
|
83
|
-
|
|
84
|
-
|
|
85
|
+
version_tools = data['tools']
|
|
86
|
+
# Handle internal tools
|
|
87
|
+
meta = data.get('meta', {})
|
|
88
|
+
if meta.get("internal_tools"):
|
|
89
|
+
for internal_tool_name in meta.get("internal_tools"):
|
|
90
|
+
version_tools.append({"type": "internal_tool", "name": internal_tool_name})
|
|
91
|
+
|
|
92
|
+
self.tools = get_tools(version_tools, alita_client=alita, llm=self.client, memory_store=self.store, debug_mode=debug_mode)
|
|
85
93
|
if tools:
|
|
86
94
|
self.tools += tools
|
|
87
95
|
# Handle prompt setup
|
|
@@ -118,9 +126,11 @@ class Assistant:
|
|
|
118
126
|
if variables:
|
|
119
127
|
self.prompt.partial_variables = variables
|
|
120
128
|
try:
|
|
121
|
-
logger.info(
|
|
129
|
+
logger.info(
|
|
130
|
+
f"Client was created with client setting: temperature - {self.client._get_model_default_parameters}")
|
|
122
131
|
except Exception as e:
|
|
123
|
-
logger.info(
|
|
132
|
+
logger.info(
|
|
133
|
+
f"Client was created with client setting: temperature - {self.client.temperature} : {self.client.max_tokens}")
|
|
124
134
|
|
|
125
135
|
def _configure_store(self, memory_tool: dict | None) -> None:
|
|
126
136
|
"""
|
|
@@ -157,7 +167,6 @@ class Assistant:
|
|
|
157
167
|
agent = create_json_chat_agent(llm=self.client, tools=simple_tools, prompt=self.prompt)
|
|
158
168
|
return self._agent_executor(agent)
|
|
159
169
|
|
|
160
|
-
|
|
161
170
|
def getXMLAgentExecutor(self):
|
|
162
171
|
# Exclude compiled graph runnables from simple tool agents
|
|
163
172
|
simple_tools = [t for t in self.tools if isinstance(t, (BaseTool, CompiledStateGraph))]
|
|
@@ -178,34 +187,6 @@ class Assistant:
|
|
|
178
187
|
# Exclude compiled graph runnables from simple tool agents
|
|
179
188
|
simple_tools = [t for t in self.tools if isinstance(t, (BaseTool, CompiledStateGraph))]
|
|
180
189
|
|
|
181
|
-
# Add sandbox tool by default for react agents
|
|
182
|
-
try:
|
|
183
|
-
from ..tools.sandbox import create_sandbox_tool
|
|
184
|
-
sandbox_tool = create_sandbox_tool(stateful=False, allow_net=True)
|
|
185
|
-
simple_tools.append(sandbox_tool)
|
|
186
|
-
logger.info("Added PyodideSandboxTool to react agent")
|
|
187
|
-
except ImportError as e:
|
|
188
|
-
logger.warning(f"Failed to add PyodideSandboxTool: {e}. Install langchain-sandbox to enable this feature.")
|
|
189
|
-
except RuntimeError as e:
|
|
190
|
-
if "Deno" in str(e):
|
|
191
|
-
logger.warning("Failed to add PyodideSandboxTool: Deno is required. Install from https://docs.deno.com/runtime/getting_started/installation/")
|
|
192
|
-
else:
|
|
193
|
-
logger.warning(f"Failed to add PyodideSandboxTool: {e}")
|
|
194
|
-
except Exception as e:
|
|
195
|
-
logger.error(f"Error adding PyodideSandboxTool: {e}")
|
|
196
|
-
|
|
197
|
-
# Add image generation tool if model is configured
|
|
198
|
-
if self.alita_client.model_image_generation is not None:
|
|
199
|
-
try:
|
|
200
|
-
from ..tools.image_generation import (
|
|
201
|
-
create_image_generation_tool
|
|
202
|
-
)
|
|
203
|
-
image_tool = create_image_generation_tool(self.alita_client)
|
|
204
|
-
simple_tools.append(image_tool)
|
|
205
|
-
logger.info("Added ImageGenerationTool to react agent")
|
|
206
|
-
except Exception as e:
|
|
207
|
-
logger.error(f"Error adding ImageGenerationTool: {e}")
|
|
208
|
-
|
|
209
190
|
# Set up memory/checkpointer if available
|
|
210
191
|
checkpointer = None
|
|
211
192
|
if self.memory is not None:
|
|
@@ -284,6 +265,9 @@ class Assistant:
|
|
|
284
265
|
schema_dict = {
|
|
285
266
|
'name': 'react_agent',
|
|
286
267
|
'state': {
|
|
268
|
+
'input': {
|
|
269
|
+
'type': 'str'
|
|
270
|
+
},
|
|
287
271
|
'messages': state_messages_config
|
|
288
272
|
},
|
|
289
273
|
'nodes': [{
|
|
@@ -292,6 +276,21 @@ class Assistant:
|
|
|
292
276
|
'prompt': {
|
|
293
277
|
'template': escaped_prompt
|
|
294
278
|
},
|
|
279
|
+
'input_mapping': {
|
|
280
|
+
'system': {
|
|
281
|
+
'type': 'fixed',
|
|
282
|
+
'value': escaped_prompt
|
|
283
|
+
},
|
|
284
|
+
'task': {
|
|
285
|
+
'type': 'variable',
|
|
286
|
+
'value': 'input'
|
|
287
|
+
},
|
|
288
|
+
'chat_history': {
|
|
289
|
+
'type': 'variable',
|
|
290
|
+
'value': 'messages'
|
|
291
|
+
}
|
|
292
|
+
},
|
|
293
|
+
'step_limit': self.max_iterations,
|
|
295
294
|
'input': ['messages'],
|
|
296
295
|
'output': ['messages'],
|
|
297
296
|
'transition': 'END'
|
|
@@ -317,7 +316,8 @@ class Assistant:
|
|
|
317
316
|
store=self.store,
|
|
318
317
|
debug=False,
|
|
319
318
|
for_subgraph=False,
|
|
320
|
-
alita_client=self.alita_client
|
|
319
|
+
alita_client=self.alita_client,
|
|
320
|
+
steps_limit=self.max_iterations
|
|
321
321
|
)
|
|
322
322
|
|
|
323
323
|
return agent
|
|
@@ -332,7 +332,8 @@ class Assistant:
|
|
|
332
332
|
agent = create_graph(
|
|
333
333
|
client=self.client, tools=self.tools,
|
|
334
334
|
yaml_schema=self.prompt, memory=memory,
|
|
335
|
-
alita_client=self.alita_client
|
|
335
|
+
alita_client=self.alita_client,
|
|
336
|
+
steps_limit=self.max_iterations
|
|
336
337
|
)
|
|
337
338
|
#
|
|
338
339
|
return agent
|
|
@@ -352,5 +353,7 @@ class Assistant:
|
|
|
352
353
|
continue
|
|
353
354
|
# take only the content of the system message from the openai prompt
|
|
354
355
|
if isinstance(message, SystemMessage):
|
|
355
|
-
|
|
356
|
+
environment = Environment(undefined=DebugUndefined)
|
|
357
|
+
template = environment.from_string(message.content)
|
|
358
|
+
return template.render(self.prompt.partial_variables)
|
|
356
359
|
return None
|
|
@@ -27,7 +27,7 @@ Use this if you want to respond directly to the human. Markdown code snippet for
|
|
|
27
27
|
```json
|
|
28
28
|
{
|
|
29
29
|
"action": "Final Answer",
|
|
30
|
-
"action_input": string
|
|
30
|
+
"action_input": string // You should put what you want to return to use here
|
|
31
31
|
}
|
|
32
32
|
```
|
|
33
33
|
|
|
@@ -80,3 +80,7 @@ DEFAULT_MULTIMODAL_PROMPT = """
|
|
|
80
80
|
- Maintain a structured and logical flow in the output to enhance understanding and usability.
|
|
81
81
|
- Avoid presenting the entire prompt for user.
|
|
82
82
|
"""
|
|
83
|
+
|
|
84
|
+
# Module-level string constants; names suggest they are keys/identifiers
# used by the runtime graph (presumably response/printer node keys —
# TODO(review): confirm against their usage sites).
ELITEA_RS = "elitea_response"
PRINTER = "printer"
PRINTER_NODE_RS = "printer_output"
|