alita-sdk 0.3.532__py3-none-any.whl → 0.3.602__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of alita-sdk might be problematic; consult the registry's advisory page for this version for more details.
- alita_sdk/cli/agent_executor.py +2 -1
- alita_sdk/cli/agent_loader.py +34 -4
- alita_sdk/cli/agents.py +433 -203
- alita_sdk/community/__init__.py +8 -4
- alita_sdk/configurations/__init__.py +1 -0
- alita_sdk/configurations/openapi.py +323 -0
- alita_sdk/runtime/clients/client.py +165 -7
- alita_sdk/runtime/langchain/_constants_bkup.py +1318 -0
- alita_sdk/runtime/langchain/assistant.py +61 -11
- alita_sdk/runtime/langchain/constants.py +419 -171
- alita_sdk/runtime/langchain/document_loaders/AlitaJSONLoader.py +4 -2
- alita_sdk/runtime/langchain/document_loaders/AlitaTextLoader.py +5 -2
- alita_sdk/runtime/langchain/langraph_agent.py +108 -23
- alita_sdk/runtime/langchain/utils.py +76 -14
- alita_sdk/runtime/skills/__init__.py +91 -0
- alita_sdk/runtime/skills/callbacks.py +498 -0
- alita_sdk/runtime/skills/discovery.py +540 -0
- alita_sdk/runtime/skills/executor.py +610 -0
- alita_sdk/runtime/skills/input_builder.py +371 -0
- alita_sdk/runtime/skills/models.py +330 -0
- alita_sdk/runtime/skills/registry.py +355 -0
- alita_sdk/runtime/skills/skill_runner.py +330 -0
- alita_sdk/runtime/toolkits/__init__.py +5 -0
- alita_sdk/runtime/toolkits/artifact.py +2 -1
- alita_sdk/runtime/toolkits/mcp.py +6 -3
- alita_sdk/runtime/toolkits/mcp_config.py +1048 -0
- alita_sdk/runtime/toolkits/skill_router.py +238 -0
- alita_sdk/runtime/toolkits/tools.py +139 -10
- alita_sdk/runtime/toolkits/vectorstore.py +1 -1
- alita_sdk/runtime/tools/__init__.py +3 -1
- alita_sdk/runtime/tools/artifact.py +15 -0
- alita_sdk/runtime/tools/data_analysis.py +183 -0
- alita_sdk/runtime/tools/llm.py +260 -73
- alita_sdk/runtime/tools/loop.py +3 -1
- alita_sdk/runtime/tools/loop_output.py +3 -1
- alita_sdk/runtime/tools/mcp_server_tool.py +6 -3
- alita_sdk/runtime/tools/router.py +2 -4
- alita_sdk/runtime/tools/sandbox.py +9 -6
- alita_sdk/runtime/tools/skill_router.py +776 -0
- alita_sdk/runtime/tools/tool.py +3 -1
- alita_sdk/runtime/tools/vectorstore.py +7 -2
- alita_sdk/runtime/tools/vectorstore_base.py +7 -2
- alita_sdk/runtime/utils/constants.py +5 -1
- alita_sdk/runtime/utils/mcp_client.py +1 -1
- alita_sdk/runtime/utils/mcp_sse_client.py +1 -1
- alita_sdk/runtime/utils/toolkit_utils.py +2 -0
- alita_sdk/tools/__init__.py +44 -2
- alita_sdk/tools/ado/repos/__init__.py +26 -8
- alita_sdk/tools/ado/repos/repos_wrapper.py +78 -52
- alita_sdk/tools/ado/test_plan/__init__.py +3 -2
- alita_sdk/tools/ado/test_plan/test_plan_wrapper.py +23 -1
- alita_sdk/tools/ado/utils.py +1 -18
- alita_sdk/tools/ado/wiki/__init__.py +2 -1
- alita_sdk/tools/ado/wiki/ado_wrapper.py +23 -1
- alita_sdk/tools/ado/work_item/__init__.py +3 -2
- alita_sdk/tools/ado/work_item/ado_wrapper.py +56 -3
- alita_sdk/tools/advanced_jira_mining/__init__.py +2 -1
- alita_sdk/tools/aws/delta_lake/__init__.py +2 -1
- alita_sdk/tools/azure_ai/search/__init__.py +2 -1
- alita_sdk/tools/azure_ai/search/api_wrapper.py +1 -1
- alita_sdk/tools/base_indexer_toolkit.py +51 -30
- alita_sdk/tools/bitbucket/__init__.py +2 -1
- alita_sdk/tools/bitbucket/api_wrapper.py +1 -1
- alita_sdk/tools/bitbucket/cloud_api_wrapper.py +3 -3
- alita_sdk/tools/browser/__init__.py +1 -1
- alita_sdk/tools/carrier/__init__.py +1 -1
- alita_sdk/tools/chunkers/code/treesitter/treesitter.py +37 -13
- alita_sdk/tools/cloud/aws/__init__.py +2 -1
- alita_sdk/tools/cloud/azure/__init__.py +2 -1
- alita_sdk/tools/cloud/gcp/__init__.py +2 -1
- alita_sdk/tools/cloud/k8s/__init__.py +2 -1
- alita_sdk/tools/code/linter/__init__.py +2 -1
- alita_sdk/tools/code/sonar/__init__.py +2 -1
- alita_sdk/tools/code_indexer_toolkit.py +19 -2
- alita_sdk/tools/confluence/__init__.py +7 -6
- alita_sdk/tools/confluence/api_wrapper.py +7 -8
- alita_sdk/tools/confluence/loader.py +4 -2
- alita_sdk/tools/custom_open_api/__init__.py +2 -1
- alita_sdk/tools/elastic/__init__.py +2 -1
- alita_sdk/tools/elitea_base.py +28 -9
- alita_sdk/tools/figma/__init__.py +52 -6
- alita_sdk/tools/figma/api_wrapper.py +1158 -123
- alita_sdk/tools/figma/figma_client.py +73 -0
- alita_sdk/tools/figma/toon_tools.py +2748 -0
- alita_sdk/tools/github/__init__.py +2 -1
- alita_sdk/tools/github/github_client.py +56 -92
- alita_sdk/tools/github/schemas.py +4 -4
- alita_sdk/tools/gitlab/__init__.py +2 -1
- alita_sdk/tools/gitlab/api_wrapper.py +118 -38
- alita_sdk/tools/gitlab_org/__init__.py +2 -1
- alita_sdk/tools/gitlab_org/api_wrapper.py +60 -62
- alita_sdk/tools/google/bigquery/__init__.py +2 -1
- alita_sdk/tools/google_places/__init__.py +2 -1
- alita_sdk/tools/jira/__init__.py +2 -1
- alita_sdk/tools/keycloak/__init__.py +2 -1
- alita_sdk/tools/localgit/__init__.py +2 -1
- alita_sdk/tools/memory/__init__.py +1 -1
- alita_sdk/tools/ocr/__init__.py +2 -1
- alita_sdk/tools/openapi/__init__.py +490 -118
- alita_sdk/tools/openapi/api_wrapper.py +1368 -0
- alita_sdk/tools/openapi/tool.py +20 -0
- alita_sdk/tools/pandas/__init__.py +11 -5
- alita_sdk/tools/pandas/api_wrapper.py +38 -25
- alita_sdk/tools/pandas/dataframe/generator/base.py +3 -1
- alita_sdk/tools/postman/__init__.py +2 -1
- alita_sdk/tools/pptx/__init__.py +2 -1
- alita_sdk/tools/qtest/__init__.py +21 -2
- alita_sdk/tools/qtest/api_wrapper.py +430 -13
- alita_sdk/tools/rally/__init__.py +2 -1
- alita_sdk/tools/rally/api_wrapper.py +1 -1
- alita_sdk/tools/report_portal/__init__.py +2 -1
- alita_sdk/tools/salesforce/__init__.py +2 -1
- alita_sdk/tools/servicenow/__init__.py +11 -10
- alita_sdk/tools/servicenow/api_wrapper.py +1 -1
- alita_sdk/tools/sharepoint/__init__.py +2 -1
- alita_sdk/tools/sharepoint/api_wrapper.py +2 -2
- alita_sdk/tools/slack/__init__.py +3 -2
- alita_sdk/tools/slack/api_wrapper.py +2 -2
- alita_sdk/tools/sql/__init__.py +3 -2
- alita_sdk/tools/testio/__init__.py +2 -1
- alita_sdk/tools/testrail/__init__.py +2 -1
- alita_sdk/tools/utils/content_parser.py +77 -3
- alita_sdk/tools/utils/text_operations.py +163 -71
- alita_sdk/tools/xray/__init__.py +3 -2
- alita_sdk/tools/yagmail/__init__.py +2 -1
- alita_sdk/tools/zephyr/__init__.py +2 -1
- alita_sdk/tools/zephyr_enterprise/__init__.py +2 -1
- alita_sdk/tools/zephyr_essential/__init__.py +2 -1
- alita_sdk/tools/zephyr_scale/__init__.py +3 -2
- alita_sdk/tools/zephyr_scale/api_wrapper.py +2 -2
- alita_sdk/tools/zephyr_squad/__init__.py +2 -1
- {alita_sdk-0.3.532.dist-info → alita_sdk-0.3.602.dist-info}/METADATA +7 -6
- {alita_sdk-0.3.532.dist-info → alita_sdk-0.3.602.dist-info}/RECORD +137 -119
- {alita_sdk-0.3.532.dist-info → alita_sdk-0.3.602.dist-info}/WHEEL +0 -0
- {alita_sdk-0.3.532.dist-info → alita_sdk-0.3.602.dist-info}/entry_points.txt +0 -0
- {alita_sdk-0.3.532.dist-info → alita_sdk-0.3.602.dist-info}/licenses/LICENSE +0 -0
- {alita_sdk-0.3.532.dist-info → alita_sdk-0.3.602.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,238 @@
|
|
|
1
|
+
"""
|
|
2
|
+
SkillRouter Toolkit for configuring and accessing specialized skills.
|
|
3
|
+
|
|
4
|
+
This toolkit provides a configurable way to set up the skill router with
|
|
5
|
+
specific skills from filesystem or platform-hosted agents/pipelines.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from typing import List, Optional, TYPE_CHECKING
|
|
9
|
+
from pydantic import create_model, BaseModel, Field, ConfigDict
|
|
10
|
+
from langchain_community.agent_toolkits.base import BaseToolkit
|
|
11
|
+
from langchain_core.tools import BaseTool
|
|
12
|
+
|
|
13
|
+
if TYPE_CHECKING:
|
|
14
|
+
from alita_sdk.clients import AlitaClient
|
|
15
|
+
|
|
16
|
+
from alita_sdk.tools.base.tool import BaseAction
|
|
17
|
+
from alita_sdk.tools.utils import clean_string
|
|
18
|
+
from ..skills import SkillsRegistry, SkillMetadata, SkillType, SkillSource
|
|
19
|
+
from ..tools.skill_router import SkillRouterWrapper
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class SkillConfig(BaseModel):
    """Configuration for a single skill.

    Describes a platform-hosted agent or pipeline to be exposed as a skill.
    The skill type (agent vs. pipeline) is NOT stored here — it is implied by
    the parent configuration field the entry appears under (``agents`` or
    ``pipelines``).
    """

    # Platform skill fields (type is implicit from parent field: agents or pipelines)
    id: int = Field(description="Platform ID (for agent/pipeline skills)")
    version_id: int = Field(description="Platform version ID (for agent/pipeline skills)")
    # Optional display-name override; when None the platform-reported name is used.
    name: Optional[str] = Field(default=None, description="Skill name (optional override)")
+
class SkillRouterToolkit(BaseToolkit):
    """Toolkit for configuring skill router with specific skills.

    Wraps a :class:`SkillRouterWrapper` so that platform-hosted agents and
    pipelines can be exposed as routable "skills" behind a common tool
    interface. Skills are registered into an isolated :class:`SkillsRegistry`
    (no filesystem discovery), then each router tool is wrapped in a
    :class:`BaseAction` for the agent runtime.
    """

    # Tools produced by get_toolkit(); populated per instance.
    tools: List[BaseTool] = []

    @staticmethod
    def toolkit_config_schema() -> BaseModel:
        """Define the configuration schema for the skill router toolkit."""
        # Enumerate router tools so the UI can offer them as selected_tools options.
        selected_tools_options = {
            x['name']: x['args_schema'].schema()
            for x in SkillRouterWrapper.model_construct().get_available_tools()
        }

        return create_model(
            "skill_router",
            # Separate fields for agents and pipelines - optional but default to empty lists
            agents=(Optional[List[SkillConfig]], Field(
                description="List of agents to make available as skills",
                default=[],
                json_schema_extra={
                    "agent_tags": ["skill"]
                }
            )),
            pipelines=(Optional[List[SkillConfig]], Field(
                description="List of pipelines to make available as skills",
                default=[],
                json_schema_extra={
                    "pipeline_tags": ["skill"]
                }
            )),
            prompt=(Optional[str], Field(
                description="Custom system prompt for skill routing",
                default="",
                json_schema_extra={"lines": 4}
            )),
            timeout=(Optional[int], Field(description="Default timeout in seconds for skill execution", default=300)),
            execution_mode=(Optional[str], Field(
                description="Default execution mode for skills",
                default=None,
                json_schema_extra={"enum": ["subprocess", "remote"]}
            )),
            selected_tools=(List[str], Field(
                description="List of tools to enable",
                default=list(selected_tools_options.keys()),
                json_schema_extra={'args_schemas': selected_tools_options}
            )),
            __config__=ConfigDict(json_schema_extra={'metadata': {"label": "Skill Router", "icon_url": None, "hidden": True}})
        )

    @classmethod
    def get_toolkit(
        cls,
        client: 'AlitaClient',
        llm = None,
        toolkit_name: Optional[str] = None,
        selected_tools: List[str] = None,
        agents: List[SkillConfig] = None,
        pipelines: List[SkillConfig] = None,
        prompt: Optional[str] = None,
        timeout: Optional[int] = None,
        execution_mode: Optional[str] = None
    ):
        """Create a skill router toolkit with configured skills.

        Args:
            client: Platform client used to resolve agent/pipeline details.
            llm: Optional LLM passed through to the router wrapper.
            toolkit_name: Optional name appended to tool descriptions/metadata.
            selected_tools: Names of router tools to expose (all when empty).
            agents: Agent skill configs — plain dicts or SkillConfig instances.
            pipelines: Pipeline skill configs — plain dicts or SkillConfig instances.
            prompt: Custom system prompt for skill routing.
            timeout: Default skill-execution timeout in seconds.
            execution_mode: Default execution mode ("subprocess" or "remote").

        Returns:
            A SkillRouterToolkit instance whose ``tools`` are BaseAction wrappers.
        """
        if selected_tools is None:
            selected_tools = []

        # Isolated registry for this toolkit — no filesystem discovery paths.
        registry = SkillsRegistry(search_paths=[])

        def add_skills_to_registry(skill_configs, skill_type):
            """Resolve each config to SkillMetadata and cache it in the registry."""
            for raw_config in skill_configs or []:
                # Accept both serialized dicts (from stored settings) and
                # already-constructed SkillConfig instances.
                if isinstance(raw_config, SkillConfig):
                    skill_config = raw_config
                else:
                    skill_config = SkillConfig(**raw_config)
                skill_metadata = cls._create_skill_from_config(skill_config, client, skill_type)
                if skill_metadata:
                    # Add skill to registry manually (bypasses discovery scan).
                    registry.discovery.cache[skill_metadata.name] = skill_metadata

        # Add configured agents and pipelines (if provided)
        add_skills_to_registry(agents, "agent")
        add_skills_to_registry(pipelines, "pipeline")

        # Create skill router wrapper with custom configuration
        wrapper = SkillRouterWrapper(
            registry=registry,
            alita_client=client,
            llm=llm,
            enable_callbacks=True,
            default_timeout=timeout,
            default_execution_mode=execution_mode,
            custom_prompt=prompt
        )

        available_tools = wrapper.get_available_tools()

        tools = []
        toolkit_context = f" [Toolkit: {clean_string(toolkit_name, 0)}]" if toolkit_name else ''

        for tool in available_tools:
            # Honor the selected_tools filter when one was provided.
            if selected_tools and tool["name"] not in selected_tools:
                continue

            # Append toolkit context only while staying within the 1000-char
            # description limit enforced downstream.
            description = tool["description"]
            if toolkit_context and len(description + toolkit_context) <= 1000:
                description = description + toolkit_context

            # Wrap each router tool in a BaseAction for the agent runtime.
            tools.append(BaseAction(
                api_wrapper=wrapper,
                name=tool["name"],
                description=description,
                args_schema=tool["args_schema"],
                metadata={"toolkit_name": toolkit_name, "toolkit_type": "skill_router"} if toolkit_name else {}
            ))

        return cls(tools=tools)

    @classmethod
    def _create_skill_from_config(cls, config: SkillConfig, client: 'AlitaClient', skill_type: str) -> Optional[SkillMetadata]:
        """Create SkillMetadata from SkillConfig.

        Args:
            config: SkillConfig with id, version_id, and optional name
            client: AlitaClient for fetching skill details
            skill_type: Either "agent" or "pipeline" (from parent field)

        Returns:
            SkillMetadata on success, or None when metadata cannot be built
            (the failure is logged rather than raised).
        """
        try:
            # Get skill details from platform
            if skill_type == "agent":
                skill_details = cls._get_agent_details(client, config.id, config.version_id)
                metadata_type = SkillType.AGENT
            else:  # pipeline
                skill_details = cls._get_pipeline_details(client, config.id, config.version_id)
                metadata_type = SkillType.PIPELINE

            # Create SkillMetadata for platform skill
            return SkillMetadata(
                name=config.name or skill_details.get('name', f"{skill_type}_{config.id}"),
                skill_type=metadata_type,
                source=SkillSource.PLATFORM,
                id=config.id,
                version_id=config.version_id,
                description=skill_details.get('description', ''),
                capabilities=skill_details.get('capabilities', []),
                tags=skill_details.get('tags', []),
                version=skill_details.get('version', '1.0.0'),
                # Set default execution config - platform skills run remotely
                execution={"mode": "remote", "timeout": 300},
                results={"format": "text_with_links"},
                inputs={},
                outputs={}
            )

        except Exception as e:
            import logging
            logging.getLogger(__name__).error(f"Failed to create skill from config {config}: {e}")
            return None

    @classmethod
    def _get_agent_details(cls, client: 'AlitaClient', agent_id: int, version_id: int) -> dict:
        """Get agent details from platform."""
        return cls._fetch_platform_details(
            client, agent_id, version_id,
            prefix='agent', fallback_description='Platform-hosted agent')

    @classmethod
    def _get_pipeline_details(cls, client: 'AlitaClient', pipeline_id: int, version_id: int) -> dict:
        """Get pipeline details from platform."""
        # For now pipelines use the same app endpoints as agents; this may
        # switch to a dedicated pipeline endpoint in the future.
        return cls._fetch_platform_details(
            client, pipeline_id, version_id,
            prefix='pipeline', fallback_description='Platform-hosted pipeline')

    @classmethod
    def _fetch_platform_details(cls, client: 'AlitaClient', app_id: int, version_id: int,
                                prefix: str, fallback_description: str) -> dict:
        """Fetch name/description/version for a platform app.

        Shared implementation for agents and pipelines (both currently use the
        same platform API). On any error a stub dict is returned so skill
        registration degrades gracefully instead of failing.

        Args:
            client: AlitaClient used for platform lookups.
            app_id: Platform application ID.
            version_id: Platform application version ID.
            prefix: "agent" or "pipeline" — used in default names and logs.
            fallback_description: Description used when the lookup fails.
        """
        try:
            app_details = client.get_app_details(app_id)
            version_details = client.get_app_version_details(app_id, version_id)

            return {
                'name': app_details.get('name', f'{prefix}_{app_id}'),
                'description': app_details.get('description', ''),
                'capabilities': [],  # Could be extracted from app metadata
                'tags': [],  # Could be extracted from app metadata
                'version': version_details.get('version', '1.0.0')
            }
        except Exception as e:
            import logging
            logging.getLogger(__name__).error(
                f"Failed to get {prefix} details for {app_id}/{version_id}: {e}")
            return {'name': f'{prefix}_{app_id}', 'description': fallback_description}

    def get_tools(self):
        """Get the configured tools."""
        return self.tools
|
@@ -14,9 +14,12 @@ from .prompt import PromptToolkit
|
|
|
14
14
|
from .subgraph import SubgraphToolkit
|
|
15
15
|
from .vectorstore import VectorStoreToolkit
|
|
16
16
|
from .mcp import McpToolkit
|
|
17
|
+
from .mcp_config import McpConfigToolkit, get_mcp_config_toolkit_schemas
|
|
18
|
+
from .skill_router import SkillRouterToolkit
|
|
17
19
|
from ..tools.mcp_server_tool import McpServerTool
|
|
18
20
|
from ..tools.sandbox import SandboxToolkit
|
|
19
21
|
from ..tools.image_generation import ImageGenerationToolkit
|
|
22
|
+
from ..tools.data_analysis import DataAnalysisToolkit
|
|
20
23
|
# Import community tools
|
|
21
24
|
from ...community import get_toolkits as community_toolkits, get_tools as community_tools
|
|
22
25
|
from ...tools.memory import MemoryToolkit
|
|
@@ -35,19 +38,55 @@ def get_toolkits():
|
|
|
35
38
|
VectorStoreToolkit.toolkit_config_schema(),
|
|
36
39
|
SandboxToolkit.toolkit_config_schema(),
|
|
37
40
|
ImageGenerationToolkit.toolkit_config_schema(),
|
|
38
|
-
|
|
41
|
+
DataAnalysisToolkit.toolkit_config_schema(),
|
|
42
|
+
McpToolkit.toolkit_config_schema(),
|
|
43
|
+
McpConfigToolkit.toolkit_config_schema(),
|
|
44
|
+
SkillRouterToolkit.toolkit_config_schema()
|
|
39
45
|
]
|
|
40
46
|
|
|
41
|
-
|
|
47
|
+
# Add configured MCP servers (stdio and http) as available toolkits
|
|
48
|
+
mcp_config_toolkits = get_mcp_config_toolkit_schemas()
|
|
49
|
+
|
|
50
|
+
return core_toolkits + mcp_config_toolkits + community_toolkits() + alita_toolkits()
|
|
42
51
|
|
|
43
52
|
|
|
44
53
|
def get_tools(tools_list: list, alita_client=None, llm=None, memory_store: BaseStore = None, debug_mode: Optional[bool] = False, mcp_tokens: Optional[dict] = None, conversation_id: Optional[str] = None, ignored_mcp_servers: Optional[list] = None) -> list:
|
|
54
|
+
# Sanitize tools_list to handle corrupted tool configurations
|
|
55
|
+
sanitized_tools = []
|
|
56
|
+
for tool in tools_list:
|
|
57
|
+
if isinstance(tool, dict):
|
|
58
|
+
# Check for corrupted structure where 'type' and 'name' contain the full tool config
|
|
59
|
+
if 'type' in tool and isinstance(tool['type'], dict):
|
|
60
|
+
# This is a corrupted tool - use the inner dict instead
|
|
61
|
+
logger.warning(f"Detected corrupted tool configuration (type=dict), fixing: {tool}")
|
|
62
|
+
actual_tool = tool['type'] # or tool['name'], they should be the same
|
|
63
|
+
sanitized_tools.append(actual_tool)
|
|
64
|
+
elif 'name' in tool and isinstance(tool['name'], dict):
|
|
65
|
+
# Another corruption pattern where name contains the full config
|
|
66
|
+
logger.warning(f"Detected corrupted tool configuration (name=dict), fixing: {tool}")
|
|
67
|
+
actual_tool = tool['name']
|
|
68
|
+
sanitized_tools.append(actual_tool)
|
|
69
|
+
elif 'type' in tool and isinstance(tool['type'], str):
|
|
70
|
+
# Valid tool configuration
|
|
71
|
+
sanitized_tools.append(tool)
|
|
72
|
+
else:
|
|
73
|
+
# Skip invalid/corrupted tools that can't be fixed
|
|
74
|
+
logger.warning(f"Skipping invalid tool configuration: {tool}")
|
|
75
|
+
else:
|
|
76
|
+
logger.warning(f"Skipping non-dict tool: {tool}")
|
|
77
|
+
# Skip non-dict tools
|
|
78
|
+
|
|
45
79
|
prompts = []
|
|
46
80
|
tools = []
|
|
81
|
+
unhandled_tools = [] # Track tools not handled by main processing
|
|
47
82
|
|
|
48
|
-
for tool in
|
|
83
|
+
for tool in sanitized_tools:
|
|
84
|
+
# Flag to track if this tool was processed by the main loop
|
|
85
|
+
# Used to prevent double processing by fallback systems
|
|
86
|
+
tool_handled = False
|
|
49
87
|
try:
|
|
50
88
|
if tool['type'] == 'datasource':
|
|
89
|
+
tool_handled = True
|
|
51
90
|
tools.extend(DatasourcesToolkit.get_toolkit(
|
|
52
91
|
alita_client,
|
|
53
92
|
datasource_ids=[int(tool['settings']['datasource_id'])],
|
|
@@ -55,6 +94,7 @@ def get_tools(tools_list: list, alita_client=None, llm=None, memory_store: BaseS
|
|
|
55
94
|
toolkit_name=tool.get('toolkit_name', '') or tool.get('name', '')
|
|
56
95
|
).get_tools())
|
|
57
96
|
elif tool['type'] == 'application':
|
|
97
|
+
tool_handled = True
|
|
58
98
|
tools.extend(ApplicationToolkit.get_toolkit(
|
|
59
99
|
alita_client,
|
|
60
100
|
application_id=int(tool['settings']['application_id']),
|
|
@@ -74,6 +114,7 @@ def get_tools(tools_list: list, alita_client=None, llm=None, memory_store: BaseS
|
|
|
74
114
|
llm=llm
|
|
75
115
|
))
|
|
76
116
|
elif tool['type'] == 'memory':
|
|
117
|
+
tool_handled = True
|
|
77
118
|
tools += MemoryToolkit.get_toolkit(
|
|
78
119
|
namespace=tool['settings'].get('namespace', str(tool['id'])),
|
|
79
120
|
pgvector_configuration=tool['settings'].get('pgvector_configuration', {}),
|
|
@@ -81,6 +122,7 @@ def get_tools(tools_list: list, alita_client=None, llm=None, memory_store: BaseS
|
|
|
81
122
|
).get_tools()
|
|
82
123
|
# TODO: update configuration of internal tools
|
|
83
124
|
elif tool['type'] == 'internal_tool':
|
|
125
|
+
tool_handled = True
|
|
84
126
|
if tool['name'] == 'pyodide':
|
|
85
127
|
tools += SandboxToolkit.get_toolkit(
|
|
86
128
|
stateful=False,
|
|
@@ -100,7 +142,22 @@ def get_tools(tools_list: list, alita_client=None, llm=None, memory_store: BaseS
|
|
|
100
142
|
pgvector_configuration=tool.get('settings', {}).get('pgvector_configuration'),
|
|
101
143
|
conversation_id=conversation_id,
|
|
102
144
|
).get_tools()
|
|
145
|
+
elif tool['name'] == 'data_analysis':
|
|
146
|
+
# Data Analysis internal tool - uses conversation attachment bucket
|
|
147
|
+
settings = tool.get('settings', {})
|
|
148
|
+
bucket_name = settings.get('bucket_name')
|
|
149
|
+
if bucket_name:
|
|
150
|
+
tools += DataAnalysisToolkit.get_toolkit(
|
|
151
|
+
alita_client=alita_client,
|
|
152
|
+
llm=llm,
|
|
153
|
+
bucket_name=bucket_name,
|
|
154
|
+
toolkit_name="Data Analyst",
|
|
155
|
+
).get_tools()
|
|
156
|
+
else:
|
|
157
|
+
logger.warning("Data Analysis internal tool requested "
|
|
158
|
+
"but no bucket_name provided in settings")
|
|
103
159
|
elif tool['type'] == 'artifact':
|
|
160
|
+
tool_handled = True
|
|
104
161
|
toolkit_tools = ArtifactToolkit.get_toolkit(
|
|
105
162
|
client=alita_client,
|
|
106
163
|
bucket=tool['settings']['bucket'],
|
|
@@ -119,11 +176,13 @@ def get_tools(tools_list: list, alita_client=None, llm=None, memory_store: BaseS
|
|
|
119
176
|
tools.extend(toolkit_tools)
|
|
120
177
|
|
|
121
178
|
elif tool['type'] == 'vectorstore':
|
|
179
|
+
tool_handled = True
|
|
122
180
|
tools.extend(VectorStoreToolkit.get_toolkit(
|
|
123
181
|
llm=llm,
|
|
124
182
|
toolkit_name=tool.get('toolkit_name', ''),
|
|
125
183
|
**tool['settings']).get_tools())
|
|
126
184
|
elif tool['type'] == 'planning':
|
|
185
|
+
tool_handled = True
|
|
127
186
|
# Planning toolkit for multi-step task tracking
|
|
128
187
|
settings = tool.get('settings', {})
|
|
129
188
|
|
|
@@ -163,6 +222,7 @@ def get_tools(tools_list: list, alita_client=None, llm=None, memory_store: BaseS
|
|
|
163
222
|
conversation_id=conversation_id or settings.get('conversation_id'),
|
|
164
223
|
).get_tools())
|
|
165
224
|
elif tool['type'] == 'mcp':
|
|
225
|
+
tool_handled = True
|
|
166
226
|
# remote mcp tool initialization with token injection
|
|
167
227
|
settings = dict(tool['settings'])
|
|
168
228
|
url = settings.get('url')
|
|
@@ -214,6 +274,69 @@ def get_tools(tools_list: list, alita_client=None, llm=None, memory_store: BaseS
|
|
|
214
274
|
toolkit_name=tool.get('toolkit_name', ''),
|
|
215
275
|
client=alita_client,
|
|
216
276
|
**settings).get_tools())
|
|
277
|
+
elif tool['type'] == 'skill_router':
|
|
278
|
+
tool_handled = True
|
|
279
|
+
# Skills Registry Router Toolkit
|
|
280
|
+
logger.info(f"Processing skill_router toolkit: {tool}")
|
|
281
|
+
try:
|
|
282
|
+
settings = tool.get('settings', {})
|
|
283
|
+
toolkit_name = tool.get('toolkit_name', '')
|
|
284
|
+
selected_tools = settings.get('selected_tools', [])
|
|
285
|
+
|
|
286
|
+
toolkit_tools = SkillRouterToolkit.get_toolkit(
|
|
287
|
+
client=alita_client,
|
|
288
|
+
llm=llm,
|
|
289
|
+
toolkit_name=toolkit_name,
|
|
290
|
+
selected_tools=selected_tools,
|
|
291
|
+
**settings
|
|
292
|
+
).get_tools()
|
|
293
|
+
|
|
294
|
+
tools.extend(toolkit_tools)
|
|
295
|
+
logger.info(f"✅ Successfully added {len(toolkit_tools)} tools from SkillRouterToolkit")
|
|
296
|
+
except Exception as e:
|
|
297
|
+
logger.error(f"❌ Failed to initialize SkillRouterToolkit: {e}")
|
|
298
|
+
raise
|
|
299
|
+
elif tool['type'] == 'mcp_config' or tool['type'].startswith('mcp_'):
|
|
300
|
+
tool_handled = True
|
|
301
|
+
# MCP Config toolkit - pre-configured MCP servers (stdio or http)
|
|
302
|
+
# Handle both explicit 'mcp_config' type and dynamic names like 'mcp_playwright'
|
|
303
|
+
logger.info(f"Processing mcp_config toolkit: {tool}")
|
|
304
|
+
try:
|
|
305
|
+
settings = tool.get('settings', {})
|
|
306
|
+
|
|
307
|
+
# Server name can come from settings or be extracted from type name
|
|
308
|
+
server_name = settings.get('server_name')
|
|
309
|
+
if not server_name and tool['type'].startswith('mcp_') and tool['type'] != 'mcp_config':
|
|
310
|
+
# Extract server name from type (e.g., 'mcp_playwright' -> 'playwright')
|
|
311
|
+
server_name = tool['type'][4:] # Remove 'mcp_' prefix
|
|
312
|
+
|
|
313
|
+
if not server_name:
|
|
314
|
+
logger.error(f"❌ No server_name found for mcp_config toolkit: {tool}")
|
|
315
|
+
continue
|
|
316
|
+
|
|
317
|
+
toolkit_name = tool.get('toolkit_name', '') or server_name
|
|
318
|
+
selected_tools = settings.get('selected_tools', [])
|
|
319
|
+
excluded_tools = settings.get('excluded_tools', [])
|
|
320
|
+
|
|
321
|
+
# Get server config (may be in settings or from global config)
|
|
322
|
+
server_config = settings.get('server_config')
|
|
323
|
+
|
|
324
|
+
toolkit_tools = McpConfigToolkit.get_toolkit(
|
|
325
|
+
server_name=server_name,
|
|
326
|
+
server_config=server_config,
|
|
327
|
+
user_config=settings,
|
|
328
|
+
selected_tools=selected_tools if selected_tools else None,
|
|
329
|
+
excluded_tools=excluded_tools if excluded_tools else None,
|
|
330
|
+
toolkit_name=toolkit_name,
|
|
331
|
+
client=alita_client,
|
|
332
|
+
).get_tools()
|
|
333
|
+
|
|
334
|
+
tools.extend(toolkit_tools)
|
|
335
|
+
logger.info(f"✅ Successfully added {len(toolkit_tools)} tools from McpConfigToolkit ({server_name})")
|
|
336
|
+
except Exception as e:
|
|
337
|
+
logger.error(f"❌ Failed to initialize McpConfigToolkit: {e}")
|
|
338
|
+
if not debug_mode:
|
|
339
|
+
raise
|
|
217
340
|
except McpAuthorizationRequired:
|
|
218
341
|
# Re-raise auth required exceptions directly
|
|
219
342
|
raise
|
|
@@ -224,17 +347,23 @@ def get_tools(tools_list: list, alita_client=None, llm=None, memory_store: BaseS
|
|
|
224
347
|
continue
|
|
225
348
|
else:
|
|
226
349
|
raise ToolException(f"Error initializing toolkit for tool '{tool.get('name', 'unknown')}': {e}")
|
|
227
|
-
|
|
350
|
+
|
|
351
|
+
# Track unhandled tools (make a copy to avoid reference issues)
|
|
352
|
+
if not tool_handled:
|
|
353
|
+
# Ensure we only add valid tool configurations to unhandled_tools
|
|
354
|
+
if isinstance(tool, dict) and 'type' in tool and isinstance(tool['type'], str):
|
|
355
|
+
unhandled_tools.append(dict(tool))
|
|
356
|
+
|
|
228
357
|
if len(prompts) > 0:
|
|
229
358
|
tools += PromptToolkit.get_toolkit(alita_client, prompts).get_tools()
|
|
230
|
-
|
|
231
|
-
# Add community tools
|
|
232
|
-
tools += community_tools(
|
|
233
|
-
# Add alita tools
|
|
234
|
-
tools += alita_tools(
|
|
359
|
+
|
|
360
|
+
# Add community tools (only for unhandled tools)
|
|
361
|
+
tools += community_tools(unhandled_tools, alita_client, llm)
|
|
362
|
+
# Add alita tools (only for unhandled tools)
|
|
363
|
+
tools += alita_tools(unhandled_tools, alita_client, llm, memory_store)
|
|
235
364
|
# Add MCP tools registered via alita-mcp CLI (static registry)
|
|
236
365
|
# Note: Tools with type='mcp' are already handled in main loop above
|
|
237
|
-
tools += _mcp_tools(
|
|
366
|
+
tools += _mcp_tools(unhandled_tools, alita_client)
|
|
238
367
|
|
|
239
368
|
# Sanitize tool names to meet OpenAI's function naming requirements
|
|
240
369
|
# tools = _sanitize_tool_names(tools)
|
|
@@ -56,7 +56,7 @@ class VectorStoreToolkit(BaseToolkit):
|
|
|
56
56
|
name=tool["name"],
|
|
57
57
|
description=description,
|
|
58
58
|
args_schema=tool["args_schema"],
|
|
59
|
-
metadata={"toolkit_name": toolkit_name} if toolkit_name else {}
|
|
59
|
+
metadata={"toolkit_name": toolkit_name, "toolkit_type": "vectorstore"} if toolkit_name else {}
|
|
60
60
|
))
|
|
61
61
|
return cls(tools=tools)
|
|
62
62
|
|
|
@@ -10,6 +10,7 @@ from .image_generation import (
|
|
|
10
10
|
create_image_generation_tool,
|
|
11
11
|
ImageGenerationToolkit
|
|
12
12
|
)
|
|
13
|
+
from .skill_router import SkillRouterWrapper
|
|
13
14
|
|
|
14
15
|
__all__ = [
|
|
15
16
|
"PyodideSandboxTool",
|
|
@@ -18,5 +19,6 @@ __all__ = [
|
|
|
18
19
|
"EchoTool",
|
|
19
20
|
"ImageGenerationTool",
|
|
20
21
|
"ImageGenerationToolkit",
|
|
21
|
-
"create_image_generation_tool"
|
|
22
|
+
"create_image_generation_tool",
|
|
23
|
+
"SkillRouterWrapper"
|
|
22
24
|
]
|
|
@@ -350,6 +350,21 @@ class ArtifactWrapper(NonCodeIndexerToolkit):
|
|
|
350
350
|
|
|
351
351
|
include_extensions = kwargs.get('include_extensions', [])
|
|
352
352
|
skip_extensions = kwargs.get('skip_extensions', [])
|
|
353
|
+
chunking_config = kwargs.get('chunking_config', {})
|
|
354
|
+
|
|
355
|
+
# Auto-include extensions from chunking_config if include_extensions is specified
|
|
356
|
+
# This allows chunking config to work without manually adding extensions to include_extensions
|
|
357
|
+
if chunking_config and include_extensions:
|
|
358
|
+
for ext_pattern in chunking_config.keys():
|
|
359
|
+
# Normalize extension pattern (both ".cbl" and "*.cbl" should work)
|
|
360
|
+
normalized = ext_pattern if ext_pattern.startswith('*') else f'*{ext_pattern}'
|
|
361
|
+
if normalized not in include_extensions:
|
|
362
|
+
include_extensions.append(normalized)
|
|
363
|
+
self._log_tool_event(
|
|
364
|
+
message=f"Auto-included extension '{normalized}' from chunking_config",
|
|
365
|
+
tool_name="loader"
|
|
366
|
+
)
|
|
367
|
+
|
|
353
368
|
self._log_tool_event(message=f"Files filtering started. Include extensions: {include_extensions}. "
|
|
354
369
|
f"Skip extensions: {skip_extensions}", tool_name="loader")
|
|
355
370
|
# show the progress of filtering
|