alita-sdk 0.3.263__py3-none-any.whl → 0.3.499__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package contents as they appear in the public registry.
- alita_sdk/cli/__init__.py +10 -0
- alita_sdk/cli/__main__.py +17 -0
- alita_sdk/cli/agent/__init__.py +5 -0
- alita_sdk/cli/agent/default.py +258 -0
- alita_sdk/cli/agent_executor.py +155 -0
- alita_sdk/cli/agent_loader.py +215 -0
- alita_sdk/cli/agent_ui.py +228 -0
- alita_sdk/cli/agents.py +3601 -0
- alita_sdk/cli/callbacks.py +647 -0
- alita_sdk/cli/cli.py +168 -0
- alita_sdk/cli/config.py +306 -0
- alita_sdk/cli/context/__init__.py +30 -0
- alita_sdk/cli/context/cleanup.py +198 -0
- alita_sdk/cli/context/manager.py +731 -0
- alita_sdk/cli/context/message.py +285 -0
- alita_sdk/cli/context/strategies.py +289 -0
- alita_sdk/cli/context/token_estimation.py +127 -0
- alita_sdk/cli/formatting.py +182 -0
- alita_sdk/cli/input_handler.py +419 -0
- alita_sdk/cli/inventory.py +1256 -0
- alita_sdk/cli/mcp_loader.py +315 -0
- alita_sdk/cli/toolkit.py +327 -0
- alita_sdk/cli/toolkit_loader.py +85 -0
- alita_sdk/cli/tools/__init__.py +43 -0
- alita_sdk/cli/tools/approval.py +224 -0
- alita_sdk/cli/tools/filesystem.py +1751 -0
- alita_sdk/cli/tools/planning.py +389 -0
- alita_sdk/cli/tools/terminal.py +414 -0
- alita_sdk/community/__init__.py +64 -8
- alita_sdk/community/inventory/__init__.py +224 -0
- alita_sdk/community/inventory/config.py +257 -0
- alita_sdk/community/inventory/enrichment.py +2137 -0
- alita_sdk/community/inventory/extractors.py +1469 -0
- alita_sdk/community/inventory/ingestion.py +3172 -0
- alita_sdk/community/inventory/knowledge_graph.py +1457 -0
- alita_sdk/community/inventory/parsers/__init__.py +218 -0
- alita_sdk/community/inventory/parsers/base.py +295 -0
- alita_sdk/community/inventory/parsers/csharp_parser.py +907 -0
- alita_sdk/community/inventory/parsers/go_parser.py +851 -0
- alita_sdk/community/inventory/parsers/html_parser.py +389 -0
- alita_sdk/community/inventory/parsers/java_parser.py +593 -0
- alita_sdk/community/inventory/parsers/javascript_parser.py +629 -0
- alita_sdk/community/inventory/parsers/kotlin_parser.py +768 -0
- alita_sdk/community/inventory/parsers/markdown_parser.py +362 -0
- alita_sdk/community/inventory/parsers/python_parser.py +604 -0
- alita_sdk/community/inventory/parsers/rust_parser.py +858 -0
- alita_sdk/community/inventory/parsers/swift_parser.py +832 -0
- alita_sdk/community/inventory/parsers/text_parser.py +322 -0
- alita_sdk/community/inventory/parsers/yaml_parser.py +370 -0
- alita_sdk/community/inventory/patterns/__init__.py +61 -0
- alita_sdk/community/inventory/patterns/ast_adapter.py +380 -0
- alita_sdk/community/inventory/patterns/loader.py +348 -0
- alita_sdk/community/inventory/patterns/registry.py +198 -0
- alita_sdk/community/inventory/presets.py +535 -0
- alita_sdk/community/inventory/retrieval.py +1403 -0
- alita_sdk/community/inventory/toolkit.py +173 -0
- alita_sdk/community/inventory/visualize.py +1370 -0
- alita_sdk/configurations/__init__.py +10 -0
- alita_sdk/configurations/ado.py +4 -2
- alita_sdk/configurations/azure_search.py +1 -1
- alita_sdk/configurations/bigquery.py +1 -1
- alita_sdk/configurations/bitbucket.py +94 -2
- alita_sdk/configurations/browser.py +18 -0
- alita_sdk/configurations/carrier.py +19 -0
- alita_sdk/configurations/confluence.py +96 -1
- alita_sdk/configurations/delta_lake.py +1 -1
- alita_sdk/configurations/figma.py +0 -5
- alita_sdk/configurations/github.py +65 -1
- alita_sdk/configurations/gitlab.py +79 -0
- alita_sdk/configurations/google_places.py +17 -0
- alita_sdk/configurations/jira.py +103 -0
- alita_sdk/configurations/postman.py +1 -1
- alita_sdk/configurations/qtest.py +1 -3
- alita_sdk/configurations/report_portal.py +19 -0
- alita_sdk/configurations/salesforce.py +19 -0
- alita_sdk/configurations/service_now.py +1 -12
- alita_sdk/configurations/sharepoint.py +19 -0
- alita_sdk/configurations/sonar.py +18 -0
- alita_sdk/configurations/sql.py +20 -0
- alita_sdk/configurations/testio.py +18 -0
- alita_sdk/configurations/testrail.py +88 -0
- alita_sdk/configurations/xray.py +94 -1
- alita_sdk/configurations/zephyr_enterprise.py +94 -1
- alita_sdk/configurations/zephyr_essential.py +95 -0
- alita_sdk/runtime/clients/artifact.py +12 -2
- alita_sdk/runtime/clients/client.py +235 -66
- alita_sdk/runtime/clients/mcp_discovery.py +342 -0
- alita_sdk/runtime/clients/mcp_manager.py +262 -0
- alita_sdk/runtime/clients/sandbox_client.py +373 -0
- alita_sdk/runtime/langchain/assistant.py +123 -17
- alita_sdk/runtime/langchain/constants.py +8 -1
- alita_sdk/runtime/langchain/document_loaders/AlitaDocxMammothLoader.py +315 -3
- alita_sdk/runtime/langchain/document_loaders/AlitaExcelLoader.py +209 -31
- alita_sdk/runtime/langchain/document_loaders/AlitaImageLoader.py +1 -1
- alita_sdk/runtime/langchain/document_loaders/AlitaJSONLoader.py +8 -2
- alita_sdk/runtime/langchain/document_loaders/AlitaMarkdownLoader.py +66 -0
- alita_sdk/runtime/langchain/document_loaders/AlitaPDFLoader.py +79 -10
- alita_sdk/runtime/langchain/document_loaders/AlitaPowerPointLoader.py +52 -15
- alita_sdk/runtime/langchain/document_loaders/AlitaPythonLoader.py +9 -0
- alita_sdk/runtime/langchain/document_loaders/AlitaTableLoader.py +1 -4
- alita_sdk/runtime/langchain/document_loaders/AlitaTextLoader.py +15 -2
- alita_sdk/runtime/langchain/document_loaders/ImageParser.py +30 -0
- alita_sdk/runtime/langchain/document_loaders/constants.py +187 -40
- alita_sdk/runtime/langchain/interfaces/llm_processor.py +4 -2
- alita_sdk/runtime/langchain/langraph_agent.py +406 -91
- alita_sdk/runtime/langchain/utils.py +51 -8
- alita_sdk/runtime/llms/preloaded.py +2 -6
- alita_sdk/runtime/models/mcp_models.py +61 -0
- alita_sdk/runtime/toolkits/__init__.py +26 -0
- alita_sdk/runtime/toolkits/application.py +9 -2
- alita_sdk/runtime/toolkits/artifact.py +19 -7
- alita_sdk/runtime/toolkits/datasource.py +13 -6
- alita_sdk/runtime/toolkits/mcp.py +780 -0
- alita_sdk/runtime/toolkits/planning.py +178 -0
- alita_sdk/runtime/toolkits/subgraph.py +11 -6
- alita_sdk/runtime/toolkits/tools.py +214 -60
- alita_sdk/runtime/toolkits/vectorstore.py +9 -4
- alita_sdk/runtime/tools/__init__.py +22 -0
- alita_sdk/runtime/tools/application.py +16 -4
- alita_sdk/runtime/tools/artifact.py +312 -19
- alita_sdk/runtime/tools/function.py +100 -4
- alita_sdk/runtime/tools/graph.py +81 -0
- alita_sdk/runtime/tools/image_generation.py +212 -0
- alita_sdk/runtime/tools/llm.py +539 -180
- alita_sdk/runtime/tools/mcp_inspect_tool.py +284 -0
- alita_sdk/runtime/tools/mcp_remote_tool.py +181 -0
- alita_sdk/runtime/tools/mcp_server_tool.py +3 -1
- alita_sdk/runtime/tools/planning/__init__.py +36 -0
- alita_sdk/runtime/tools/planning/models.py +246 -0
- alita_sdk/runtime/tools/planning/wrapper.py +607 -0
- alita_sdk/runtime/tools/router.py +2 -1
- alita_sdk/runtime/tools/sandbox.py +375 -0
- alita_sdk/runtime/tools/vectorstore.py +62 -63
- alita_sdk/runtime/tools/vectorstore_base.py +156 -85
- alita_sdk/runtime/utils/AlitaCallback.py +106 -20
- alita_sdk/runtime/utils/mcp_client.py +465 -0
- alita_sdk/runtime/utils/mcp_oauth.py +244 -0
- alita_sdk/runtime/utils/mcp_sse_client.py +405 -0
- alita_sdk/runtime/utils/mcp_tools_discovery.py +124 -0
- alita_sdk/runtime/utils/streamlit.py +41 -14
- alita_sdk/runtime/utils/toolkit_utils.py +28 -9
- alita_sdk/runtime/utils/utils.py +14 -0
- alita_sdk/tools/__init__.py +78 -35
- alita_sdk/tools/ado/__init__.py +0 -1
- alita_sdk/tools/ado/repos/__init__.py +10 -6
- alita_sdk/tools/ado/repos/repos_wrapper.py +12 -11
- alita_sdk/tools/ado/test_plan/__init__.py +10 -7
- alita_sdk/tools/ado/test_plan/test_plan_wrapper.py +56 -23
- alita_sdk/tools/ado/wiki/__init__.py +10 -11
- alita_sdk/tools/ado/wiki/ado_wrapper.py +114 -28
- alita_sdk/tools/ado/work_item/__init__.py +10 -11
- alita_sdk/tools/ado/work_item/ado_wrapper.py +63 -10
- alita_sdk/tools/advanced_jira_mining/__init__.py +10 -7
- alita_sdk/tools/aws/delta_lake/__init__.py +13 -11
- alita_sdk/tools/azure_ai/search/__init__.py +11 -7
- alita_sdk/tools/base_indexer_toolkit.py +392 -86
- alita_sdk/tools/bitbucket/__init__.py +18 -11
- alita_sdk/tools/bitbucket/api_wrapper.py +52 -9
- alita_sdk/tools/bitbucket/cloud_api_wrapper.py +5 -5
- alita_sdk/tools/browser/__init__.py +40 -16
- alita_sdk/tools/browser/crawler.py +3 -1
- alita_sdk/tools/browser/utils.py +15 -6
- alita_sdk/tools/carrier/__init__.py +17 -17
- alita_sdk/tools/carrier/backend_reports_tool.py +8 -4
- alita_sdk/tools/carrier/excel_reporter.py +8 -4
- alita_sdk/tools/chunkers/__init__.py +3 -1
- alita_sdk/tools/chunkers/code/codeparser.py +1 -1
- alita_sdk/tools/chunkers/sematic/json_chunker.py +1 -0
- alita_sdk/tools/chunkers/sematic/markdown_chunker.py +97 -6
- alita_sdk/tools/chunkers/sematic/proposal_chunker.py +1 -1
- alita_sdk/tools/chunkers/universal_chunker.py +270 -0
- alita_sdk/tools/cloud/aws/__init__.py +9 -6
- alita_sdk/tools/cloud/azure/__init__.py +9 -6
- alita_sdk/tools/cloud/gcp/__init__.py +9 -6
- alita_sdk/tools/cloud/k8s/__init__.py +9 -6
- alita_sdk/tools/code/linter/__init__.py +7 -7
- alita_sdk/tools/code/loaders/codesearcher.py +3 -2
- alita_sdk/tools/code/sonar/__init__.py +18 -12
- alita_sdk/tools/code_indexer_toolkit.py +199 -0
- alita_sdk/tools/confluence/__init__.py +14 -11
- alita_sdk/tools/confluence/api_wrapper.py +198 -58
- alita_sdk/tools/confluence/loader.py +10 -0
- alita_sdk/tools/custom_open_api/__init__.py +9 -4
- alita_sdk/tools/elastic/__init__.py +8 -7
- alita_sdk/tools/elitea_base.py +543 -64
- alita_sdk/tools/figma/__init__.py +10 -8
- alita_sdk/tools/figma/api_wrapper.py +352 -153
- alita_sdk/tools/github/__init__.py +13 -11
- alita_sdk/tools/github/api_wrapper.py +9 -26
- alita_sdk/tools/github/github_client.py +75 -12
- alita_sdk/tools/github/schemas.py +2 -1
- alita_sdk/tools/gitlab/__init__.py +11 -10
- alita_sdk/tools/gitlab/api_wrapper.py +135 -45
- alita_sdk/tools/gitlab_org/__init__.py +11 -9
- alita_sdk/tools/google/bigquery/__init__.py +12 -13
- alita_sdk/tools/google_places/__init__.py +18 -10
- alita_sdk/tools/jira/__init__.py +14 -8
- alita_sdk/tools/jira/api_wrapper.py +315 -168
- alita_sdk/tools/keycloak/__init__.py +8 -7
- alita_sdk/tools/localgit/local_git.py +56 -54
- alita_sdk/tools/memory/__init__.py +27 -11
- alita_sdk/tools/non_code_indexer_toolkit.py +7 -2
- alita_sdk/tools/ocr/__init__.py +8 -7
- alita_sdk/tools/openapi/__init__.py +10 -1
- alita_sdk/tools/pandas/__init__.py +8 -7
- alita_sdk/tools/pandas/api_wrapper.py +7 -25
- alita_sdk/tools/postman/__init__.py +8 -10
- alita_sdk/tools/postman/api_wrapper.py +19 -8
- alita_sdk/tools/postman/postman_analysis.py +8 -1
- alita_sdk/tools/pptx/__init__.py +8 -9
- alita_sdk/tools/qtest/__init__.py +19 -13
- alita_sdk/tools/qtest/api_wrapper.py +1784 -88
- alita_sdk/tools/rally/__init__.py +10 -9
- alita_sdk/tools/report_portal/__init__.py +20 -15
- alita_sdk/tools/salesforce/__init__.py +19 -15
- alita_sdk/tools/servicenow/__init__.py +14 -11
- alita_sdk/tools/sharepoint/__init__.py +14 -13
- alita_sdk/tools/sharepoint/api_wrapper.py +179 -39
- alita_sdk/tools/sharepoint/authorization_helper.py +191 -1
- alita_sdk/tools/sharepoint/utils.py +8 -2
- alita_sdk/tools/slack/__init__.py +10 -7
- alita_sdk/tools/sql/__init__.py +19 -18
- alita_sdk/tools/sql/api_wrapper.py +71 -23
- alita_sdk/tools/testio/__init__.py +18 -12
- alita_sdk/tools/testrail/__init__.py +10 -10
- alita_sdk/tools/testrail/api_wrapper.py +213 -45
- alita_sdk/tools/utils/__init__.py +28 -4
- alita_sdk/tools/utils/content_parser.py +181 -61
- alita_sdk/tools/utils/text_operations.py +254 -0
- alita_sdk/tools/vector_adapters/VectorStoreAdapter.py +83 -27
- alita_sdk/tools/xray/__init__.py +12 -7
- alita_sdk/tools/xray/api_wrapper.py +58 -113
- alita_sdk/tools/zephyr/__init__.py +9 -6
- alita_sdk/tools/zephyr_enterprise/__init__.py +13 -8
- alita_sdk/tools/zephyr_enterprise/api_wrapper.py +17 -7
- alita_sdk/tools/zephyr_essential/__init__.py +13 -9
- alita_sdk/tools/zephyr_essential/api_wrapper.py +289 -47
- alita_sdk/tools/zephyr_essential/client.py +6 -4
- alita_sdk/tools/zephyr_scale/__init__.py +10 -7
- alita_sdk/tools/zephyr_scale/api_wrapper.py +6 -2
- alita_sdk/tools/zephyr_squad/__init__.py +9 -6
- {alita_sdk-0.3.263.dist-info → alita_sdk-0.3.499.dist-info}/METADATA +180 -33
- alita_sdk-0.3.499.dist-info/RECORD +433 -0
- alita_sdk-0.3.499.dist-info/entry_points.txt +2 -0
- alita_sdk-0.3.263.dist-info/RECORD +0 -342
- {alita_sdk-0.3.263.dist-info → alita_sdk-0.3.499.dist-info}/WHEEL +0 -0
- {alita_sdk-0.3.263.dist-info → alita_sdk-0.3.499.dist-info}/licenses/LICENSE +0 -0
- {alita_sdk-0.3.263.dist-info → alita_sdk-0.3.499.dist-info}/top_level.txt +0 -0
alita_sdk/runtime/toolkits/planning.py (new file, +178 lines):

@@ -0,0 +1,178 @@
+"""
+PlanningToolkit - Runtime toolkit for agent plan management.
+
+Provides tools for creating, tracking, and completing multi-step execution plans.
+Supports two storage backends:
+1. PostgreSQL - when pgvector_configuration with connection_string is provided
+2. Filesystem - when no connection string (local CLI usage)
+"""
+
+from typing import ClassVar, List, Any, Literal, Optional, Callable
+
+from langchain_community.agent_toolkits.base import BaseToolkit
+from langchain_core.tools import BaseTool
+from pydantic import create_model, BaseModel, ConfigDict, Field
+from pydantic.fields import FieldInfo
+
+from ..tools.planning import PlanningWrapper
+from ...tools.base.tool import BaseAction
+from ...tools.utils import clean_string, get_max_toolkit_length
+
+
+class PlanningToolkit(BaseToolkit):
+    """
+    Toolkit for agent plan management.
+
+    Provides tools for creating, updating, and tracking execution plans.
+    Supports PostgreSQL (production) and filesystem (local) storage backends.
+    Plans are scoped by conversation_id.
+    """
+    tools: List[BaseTool] = []
+    _toolkit_max_length: ClassVar[int] = 50  # Use ClassVar to avoid Pydantic treating it as field
+
+    @staticmethod
+    def toolkit_config_schema() -> BaseModel:
+        """
+        Returns the configuration schema for the Planning toolkit.
+
+        Used by the UI to generate the toolkit configuration form.
+        """
+        # Define available tools
+        selected_tools = {
+            'update_plan': {
+                'title': 'UpdatePlanInput',
+                'type': 'object',
+                'properties': {
+                    'title': {'type': 'string', 'description': "Title for the plan"},
+                    'steps': {'type': 'array', 'items': {'type': 'string'}, 'description': "List of step descriptions"},
+                    'conversation_id': {'type': 'string', 'description': "Conversation ID (auto-injected)"}
+                },
+                'required': ['title', 'steps', 'conversation_id']
+            },
+            'complete_step': {
+                'title': 'CompleteStepInput',
+                'type': 'object',
+                'properties': {
+                    'step_number': {'type': 'integer', 'description': "Step number to complete (1-indexed)"},
+                    'conversation_id': {'type': 'string', 'description': "Conversation ID (auto-injected)"}
+                },
+                'required': ['step_number', 'conversation_id']
+            },
+            'get_plan_status': {
+                'title': 'GetPlanStatusInput',
+                'type': 'object',
+                'properties': {
+                    'conversation_id': {'type': 'string', 'description': "Conversation ID (auto-injected)"}
+                },
+                'required': ['conversation_id']
+            },
+            'delete_plan': {
+                'title': 'DeletePlanInput',
+                'type': 'object',
+                'properties': {
+                    'conversation_id': {'type': 'string', 'description': "Conversation ID (auto-injected)"}
+                },
+                'required': ['conversation_id']
+            }
+        }
+
+        PlanningToolkit._toolkit_max_length = get_max_toolkit_length(selected_tools)
+
+        return create_model(
+            "planning",
+            # Tool selection
+            selected_tools=(
+                List[Literal[tuple(selected_tools)]],
+                Field(
+                    default=list(selected_tools.keys()),
+                    json_schema_extra={'args_schemas': selected_tools}
+                )
+            ),
+            __config__=ConfigDict(
+                json_schema_extra={
+                    'metadata': {
+                        "label": "Planning",
+                        "description": "Tools for managing multi-step execution plans with progress tracking. Uses PostgreSQL when configured, filesystem otherwise.",
+                        "icon_url": None,
+                        "max_length": PlanningToolkit._toolkit_max_length,
+                        "categories": ["planning", "internal_tool"],
+                        "extra_categories": ["task management", "todo", "progress tracking"]
+                    }
+                }
+            )
+        )
+
+    @classmethod
+    def get_toolkit(
+        cls,
+        toolkit_name: Optional[str] = None,
+        selected_tools: Optional[List[str]] = None,
+        pgvector_configuration: Optional[dict] = None,
+        storage_dir: Optional[str] = None,
+        plan_callback: Optional[Any] = None,
+        conversation_id: Optional[str] = None,
+        **kwargs
+    ):
+        """
+        Create a PlanningToolkit instance with configured tools.
+
+        Args:
+            toolkit_name: Optional name prefix for tools
+            selected_tools: List of tool names to include (default: all)
+            pgvector_configuration: PostgreSQL configuration dict with connection_string.
+                If not provided, uses filesystem storage.
+            storage_dir: Directory for filesystem storage (when no pgvector_configuration)
+            plan_callback: Optional callback function called when plan changes (for CLI UI)
+            conversation_id: Conversation ID for scoping plans.
+                For server: from elitea_core payload. For CLI: session_id.
+            **kwargs: Additional configuration options
+
+        Returns:
+            PlanningToolkit instance with configured tools
+        """
+        if selected_tools is None:
+            selected_tools = ['update_plan', 'complete_step', 'get_plan_status', 'delete_plan']
+
+        tools = []
+
+        # Extract connection string from pgvector configuration (if provided)
+        connection_string = None
+        if pgvector_configuration:
+            connection_string = pgvector_configuration.get('connection_string', '')
+            if hasattr(connection_string, 'get_secret_value'):
+                connection_string = connection_string.get_secret_value()
+
+        # Create wrapper - it will auto-select storage backend
+        wrapper = PlanningWrapper(
+            connection_string=connection_string if connection_string else None,
+            conversation_id=conversation_id,
+            storage_dir=storage_dir,
+            plan_callback=plan_callback,
+        )
+
+        # Use clean toolkit name for context (max 1000 chars in description)
+        toolkit_context = f" [Toolkit: {clean_string(toolkit_name, 0)}]" if toolkit_name else ''
+
+        # Create tools from wrapper
+        available_tools = wrapper.get_available_tools()
+        for tool in available_tools:
+            if tool["name"] not in selected_tools:
+                continue
+
+            # Add toolkit context to description with character limit
+            description = tool["description"]
+            if toolkit_context and len(description + toolkit_context) <= 1000:
+                description = description + toolkit_context
+
+            tools.append(BaseAction(
+                api_wrapper=wrapper,
+                name=tool["name"],
+                description=description,
+                args_schema=tool["args_schema"]
+            ))
+
+        return cls(tools=tools)
+
+    def get_tools(self) -> List[BaseTool]:
+        """Return the list of configured tools."""
+        return self.tools
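For orientation (an editorial sketch, not part of the released diff): a minimal use of the toolkit added above, with filesystem storage because no pgvector_configuration is passed. The storage directory, toolkit name, and conversation id are illustrative values.

from alita_sdk.runtime.toolkits.planning import PlanningToolkit

# No connection string -> the wrapper falls back to filesystem storage.
toolkit = PlanningToolkit.get_toolkit(
    toolkit_name="demo_planner",          # optional; appended to tool descriptions
    selected_tools=["update_plan", "complete_step", "get_plan_status"],
    pgvector_configuration=None,
    storage_dir="/tmp/alita_plans",       # illustrative path
    conversation_id="session-123",        # plans are scoped by this id
)
for tool in toolkit.get_tools():
    print(tool.name, "-", tool.description)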
alita_sdk/runtime/toolkits/subgraph.py (+11 -6):

@@ -1,8 +1,11 @@
 from typing import List, Any

+from langchain_core.tools import BaseTool
+from langgraph.checkpoint.memory import MemorySaver
 from langgraph.graph.state import CompiledStateGraph

 from ..langchain.langraph_agent import create_graph, SUBGRAPH_REGISTRY
+from ..tools.graph import GraphTool
 from ..utils.utils import clean_string


@@ -16,7 +19,7 @@ class SubgraphToolkit:
         llm,
         app_api_key: str,
         selected_tools: list[str] = []
-    ) -> List[
+    ) -> List[BaseTool]:
         from .tools import get_tools
         # from langgraph.checkpoint.memory import MemorySaver

@@ -36,18 +39,20 @@ class SubgraphToolkit:

         # For backward compatibility, still create a compiled graph stub
         # This is mainly used for identification in the parent graph's tools list
+        # For now the graph toolkit will have its own ephemeral in memory checkpoint memory.
         graph = create_graph(
             client=llm,
             tools=tools,
             yaml_schema=version_details['instructions'],
             debug=False,
             store=None,
-            memory=
-            for_subgraph=True,  # compile as raw subgraph
+            memory=MemorySaver(),
+            # for_subgraph=True,  # compile as raw subgraph
         )
-
+
+        cleaned_subgraph_name = clean_string(subgraph_name)
         # Tag the graph stub for parent lookup
-        graph.name =
+        graph.name = cleaned_subgraph_name

         # Return the compiled graph stub for backward compatibility
-        return [graph]
+        return [GraphTool(description=app_details['description'], name=subgraph_name, graph=graph)]
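For orientation (an editorial sketch, not part of the released diff): after this change SubgraphToolkit.get_toolkit wraps the compiled subgraph in a GraphTool instead of returning the raw CompiledStateGraph, so the parent agent receives an ordinary tool. The call below mirrors the call site in runtime/toolkits/tools.py; the client, model, ids, and API key are placeholders.

from alita_sdk.runtime.toolkits.subgraph import SubgraphToolkit
from alita_sdk.runtime.tools.graph import GraphTool

alita_client = ...   # authenticated Alita client (placeholder)
llm = ...            # chat model used to build the subgraph (placeholder)

subgraph_tools = SubgraphToolkit.get_toolkit(
    alita_client,
    application_id=1,                # illustrative pipeline application id
    application_version_id=1,
    app_api_key="<api key>",         # placeholder
    selected_tools=[],
    llm=llm,
)
assert isinstance(subgraph_tools[0], GraphTool)  # previously this was a CompiledStateGraph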
alita_sdk/runtime/toolkits/tools.py (+214 -60):

@@ -1,4 +1,5 @@
 import logging
+from typing import Optional

 from langchain_core.tools import ToolException
 from langgraph.store.base import BaseStore
@@ -8,13 +9,20 @@ from alita_sdk.tools import get_tools as alita_tools
 from .application import ApplicationToolkit
 from .artifact import ArtifactToolkit
 from .datasource import DatasourcesToolkit
+from .planning import PlanningToolkit
 from .prompt import PromptToolkit
 from .subgraph import SubgraphToolkit
 from .vectorstore import VectorStoreToolkit
+from .mcp import McpToolkit
 from ..tools.mcp_server_tool import McpServerTool
+from ..tools.sandbox import SandboxToolkit
+from ..tools.image_generation import ImageGenerationToolkit
 # Import community tools
 from ...community import get_toolkits as community_toolkits, get_tools as community_tools
 from ...tools.memory import MemoryToolkit
+from ..utils.mcp_oauth import canonical_resource, McpAuthorizationRequired
+from ...tools.utils import clean_string
+from alita_sdk.tools import _inject_toolkit_id

 logger = logging.getLogger(__name__)

@@ -23,66 +31,190 @@ def get_toolkits():
     core_toolkits = [
         ArtifactToolkit.toolkit_config_schema(),
         MemoryToolkit.toolkit_config_schema(),
-
+        PlanningToolkit.toolkit_config_schema(),
+        VectorStoreToolkit.toolkit_config_schema(),
+        SandboxToolkit.toolkit_config_schema(),
+        ImageGenerationToolkit.toolkit_config_schema(),
+        McpToolkit.toolkit_config_schema()
     ]

     return core_toolkits + community_toolkits() + alita_toolkits()


-def get_tools(tools_list: list, alita_client, llm, memory_store: BaseStore = None) -> list:
+def get_tools(tools_list: list, alita_client=None, llm=None, memory_store: BaseStore = None, debug_mode: Optional[bool] = False, mcp_tokens: Optional[dict] = None, conversation_id: Optional[str] = None) -> list:
     prompts = []
     tools = []

     for tool in tools_list:
-        [old lines 37-85 removed; their content is not shown in this diff view]
+        try:
+            if tool['type'] == 'datasource':
+                tools.extend(DatasourcesToolkit.get_toolkit(
+                    alita_client,
+                    datasource_ids=[int(tool['settings']['datasource_id'])],
+                    selected_tools=tool['settings']['selected_tools'],
+                    toolkit_name=tool.get('toolkit_name', '') or tool.get('name', '')
+                ).get_tools())
+            elif tool['type'] == 'application':
+                tools.extend(ApplicationToolkit.get_toolkit(
+                    alita_client,
+                    application_id=int(tool['settings']['application_id']),
+                    application_version_id=int(tool['settings']['application_version_id']),
+                    selected_tools=[]
+                ).get_tools())
+                # backward compatibility for pipeline application type as subgraph node
+                if tool.get('agent_type', '') == 'pipeline':
+                    # static get_toolkit returns a list of CompiledStateGraph stubs
+                    tools.extend(SubgraphToolkit.get_toolkit(
+                        alita_client,
+                        application_id=int(tool['settings']['application_id']),
+                        application_version_id=int(tool['settings']['application_version_id']),
+                        app_api_key=alita_client.auth_token,
+                        selected_tools=[],
+                        llm=llm
+                    ))
+            elif tool['type'] == 'memory':
+                tools += MemoryToolkit.get_toolkit(
+                    namespace=tool['settings'].get('namespace', str(tool['id'])),
+                    pgvector_configuration=tool['settings'].get('pgvector_configuration', {}),
+                    store=memory_store,
+                ).get_tools()
+            # TODO: update configuration of internal tools
+            elif tool['type'] == 'internal_tool':
+                if tool['name'] == 'pyodide':
+                    tools += SandboxToolkit.get_toolkit(
+                        stateful=False,
+                        allow_net=True,
+                        alita_client=alita_client,
+                    ).get_tools()
+                elif tool['name'] == 'image_generation':
+                    if alita_client and alita_client.model_image_generation:
+                        tools += ImageGenerationToolkit.get_toolkit(
+                            client=alita_client,
+                        ).get_tools()
+                    else:
+                        logger.warning("Image generation internal tool requested "
+                                       "but no image generation model configured")
+                elif tool['name'] == 'planner':
+                    tools += PlanningToolkit.get_toolkit(
+                        pgvector_configuration=tool.get('settings', {}).get('pgvector_configuration'),
+                        conversation_id=conversation_id,
+                    ).get_tools()
+            elif tool['type'] == 'artifact':
+                toolkit_tools = ArtifactToolkit.get_toolkit(
+                    client=alita_client,
+                    bucket=tool['settings']['bucket'],
+                    toolkit_name=tool.get('toolkit_name', ''),
+                    selected_tools=tool['settings'].get('selected_tools', []),
+                    llm=llm,
+                    # indexer settings
+                    pgvector_configuration=tool['settings'].get('pgvector_configuration', {}),
+                    embedding_model=tool['settings'].get('embedding_model'),
+                    collection_name=f"{tool.get('toolkit_name')}",
+                    collection_schema=str(tool['settings'].get('id', tool.get('id', ''))),
+                ).get_tools()
+                # Inject toolkit_id for artifact tools as well
+                # Pass settings as the tool config since that's where the id field is
+                _inject_toolkit_id(tool['settings'], toolkit_tools)
+                tools.extend(toolkit_tools)
+
+            elif tool['type'] == 'vectorstore':
+                tools.extend(VectorStoreToolkit.get_toolkit(
+                    llm=llm,
+                    toolkit_name=tool.get('toolkit_name', ''),
+                    **tool['settings']).get_tools())
+            elif tool['type'] == 'planning':
+                # Planning toolkit for multi-step task tracking
+                settings = tool.get('settings', {})
+
+                # Check if local mode is enabled (uses filesystem storage, ignores pgvector)
+                use_local = settings.get('local', False)
+
+                if use_local:
+                    # Local mode - use filesystem storage
+                    logger.info("Planning toolkit using local filesystem storage (local=true)")
+                    pgvector_config = {}
+                else:
+                    # Check if explicit connection_string is provided in pgvector_configuration
+                    explicit_pgvector_config = settings.get('pgvector_configuration', {})
+                    explicit_connstr = explicit_pgvector_config.get('connection_string') if explicit_pgvector_config else None
+
+                    if explicit_connstr:
+                        # Use explicitly provided connection string (overrides project secrets)
+                        logger.info("Using explicit connection_string for planning toolkit")
+                        pgvector_config = explicit_pgvector_config
+                    else:
+                        # Try to fetch pgvector_project_connstr from project secrets
+                        pgvector_connstr = None
+                        if alita_client:
+                            try:
+                                pgvector_connstr = alita_client.unsecret('pgvector_project_connstr')
+                                if pgvector_connstr:
+                                    logger.info("Using pgvector_project_connstr for planning toolkit")
+                            except Exception as e:
+                                logger.debug(f"pgvector_project_connstr not available: {e}")
+
+                        pgvector_config = {'connection_string': pgvector_connstr} if pgvector_connstr else {}
+
+                tools.extend(PlanningToolkit.get_toolkit(
+                    toolkit_name=tool.get('toolkit_name', ''),
+                    selected_tools=settings.get('selected_tools', []),
+                    pgvector_configuration=pgvector_config,
+                    conversation_id=conversation_id or settings.get('conversation_id'),
+                ).get_tools())
+            elif tool['type'] == 'mcp':
+                # remote mcp tool initialization with token injection
+                settings = dict(tool['settings'])
+                url = settings.get('url')
+                headers = settings.get('headers')
+                token_data = None
+                session_id = None
+                if mcp_tokens and url:
+                    canonical_url = canonical_resource(url)
+                    logger.info(f"[MCP Auth] Looking for token for URL: {url}")
+                    logger.info(f"[MCP Auth] Canonical URL: {canonical_url}")
+                    logger.info(f"[MCP Auth] Available tokens: {list(mcp_tokens.keys())}")
+                    token_data = mcp_tokens.get(canonical_url)
+                    if token_data:
+                        logger.info(f"[MCP Auth] Found token data for {canonical_url}")
+                        # Handle both old format (string) and new format (dict with access_token and session_id)
+                        if isinstance(token_data, dict):
+                            access_token = token_data.get('access_token')
+                            session_id = token_data.get('session_id')
+                            logger.info(f"[MCP Auth] Token data: access_token={'present' if access_token else 'missing'}, session_id={session_id or 'none'}")
+                        else:
+                            # Backward compatibility: treat as plain token string
+                            access_token = token_data
+                            logger.info(f"[MCP Auth] Using legacy token format (string)")
+                    else:
+                        access_token = None
+                        logger.warning(f"[MCP Auth] No token found for {canonical_url}")
+                else:
+                    access_token = None
+
+                if access_token:
+                    merged_headers = dict(headers) if headers else {}
+                    merged_headers.setdefault('Authorization', f'Bearer {access_token}')
+                    settings['headers'] = merged_headers
+                    logger.info(f"[MCP Auth] Added Authorization header for {url}")
+
+                # Pass session_id to MCP toolkit if available
+                if session_id:
+                    settings['session_id'] = session_id
+                    logger.info(f"[MCP Auth] Passing session_id to toolkit: {session_id}")
+                tools.extend(McpToolkit.get_toolkit(
+                    toolkit_name=tool.get('toolkit_name', ''),
+                    client=alita_client,
+                    **settings).get_tools())
+        except McpAuthorizationRequired:
+            # Re-raise auth required exceptions directly
+            raise
+        except Exception as e:
+            logger.error(f"Error initializing toolkit for tool '{tool.get('name', 'unknown')}': {e}", exc_info=True)
+            if debug_mode:
+                logger.info("Skipping tool initialization error due to debug mode.")
+                continue
+            else:
+                raise ToolException(f"Error initializing toolkit for tool '{tool.get('name', 'unknown')}': {e}")

     if len(prompts) > 0:
         tools += PromptToolkit.get_toolkit(alita_client, prompts).get_tools()
@@ -91,7 +223,8 @@ def get_tools(tools_list: list, alita_client, llm, memory_store: BaseStore = Non
     tools += community_tools(tools_list, alita_client, llm)
     # Add alita tools
     tools += alita_tools(tools_list, alita_client, llm, memory_store)
-    # Add MCP tools
+    # Add MCP tools registered via alita-mcp CLI (static registry)
+    # Note: Tools with type='mcp' are already handled in main loop above
     tools += _mcp_tools(tools_list, alita_client)

     # Sanitize tool names to meet OpenAI's function naming requirements
@@ -146,17 +279,26 @@ def _sanitize_tool_names(tools: list) -> list:


 def _mcp_tools(tools_list, alita):
+    """
+    Handle MCP tools registered via alita-mcp CLI (static registry).
+    Skips tools with type='mcp' as those are handled by dynamic discovery.
+    """
     try:
         all_available_toolkits = alita.get_mcp_toolkits()
         toolkit_lookup = {tk["name"]: tk for tk in all_available_toolkits}
         tools = []
         #
         for selected_toolkit in tools_list:
-
-
+            server_toolkit_name = selected_toolkit['type']
+
+            # Skip tools with type='mcp' - they're handled by dynamic discovery
+            if server_toolkit_name == 'mcp':
+                continue
+
+            toolkit_conf = toolkit_lookup.get(server_toolkit_name)
             #
             if not toolkit_conf:
-                logger.debug(f"Toolkit '{
+                logger.debug(f"Toolkit '{server_toolkit_name}' not found in available MCP toolkits. Skipping...")
                 continue
             #
             available_tools = toolkit_conf.get("tools", [])
@@ -164,7 +306,11 @@ def _mcp_tools(tools_list, alita):
             for available_tool in available_tools:
                 tool_name = available_tool.get("name", "").lower()
                 if not selected_tools or tool_name in selected_tools:
-                    if server_tool := _init_single_mcp_tool(
+                    if server_tool := _init_single_mcp_tool(server_toolkit_name,
+                                                            # selected_toolkit["name"] is None for toolkit_test
+                                                            selected_toolkit["toolkit_name"] if selected_toolkit.get("toolkit_name")
+                                                            else server_toolkit_name,
+                                                            available_tool, alita, selected_toolkit['settings']):
                         tools.append(server_tool)
         return tools
     except Exception:
@@ -172,19 +318,27 @@ def _mcp_tools(tools_list, alita):
         return []


-def _init_single_mcp_tool(toolkit_name, available_tool, alita, toolkit_settings):
+def _init_single_mcp_tool(server_toolkit_name, toolkit_name, available_tool, alita, toolkit_settings):
     try:
+        # Use clean tool name without prefix
         tool_name = available_tool["name"]
+        # Add toolkit context to description (max 1000 chars)
+        toolkit_context = f" [Toolkit: {clean_string(toolkit_name)}]" if toolkit_name else ''
+        base_description = f"MCP for a tool '{tool_name}': {available_tool.get('description', '')}"
+        description = base_description
+        if toolkit_context and len(base_description + toolkit_context) <= 1000:
+            description = base_description + toolkit_context
+
         return McpServerTool(
             name=tool_name,
-            description=
+            description=description,
             args_schema=McpServerTool.create_pydantic_model_from_schema(
                 available_tool.get("inputSchema", {})
            ),
            client=alita,
-            server=
+            server=server_toolkit_name,
            tool_timeout_sec=toolkit_settings.get("timeout", 90)
        )
    except Exception as e:
-        logger.error(f"Failed to create McpServerTool for '{toolkit_name}.{tool_name}': {e}")
+        logger.error(f"Failed to create McpServerTool ('{server_toolkit_name}') for '{toolkit_name}.{tool_name}': {e}")
        return None
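For orientation (an editorial sketch, not part of the released diff): a hedged call into the reworked get_tools() entry point above. The tools_list entries, URL, and token values are illustrative; mcp_tokens is keyed by the canonical resource URL and may hold either a legacy token string or a dict carrying access_token and session_id.

from alita_sdk.runtime.toolkits.tools import get_tools
from alita_sdk.runtime.utils.mcp_oauth import canonical_resource

client = ...  # an authenticated Alita client (placeholder, assumed)
llm = ...     # any LangChain-compatible chat model (placeholder, assumed)

mcp_url = "https://mcp.example.com/sse"   # illustrative remote MCP server
tools = get_tools(
    tools_list=[
        # 'planner' internal tool: with no pgvector configuration it falls back to filesystem storage
        {"type": "internal_tool", "name": "planner", "settings": {}},
        # remote MCP toolkit: a matching entry in mcp_tokens is injected as an Authorization header
        {"type": "mcp", "name": "remote_mcp", "toolkit_name": "remote_mcp",
         "settings": {"url": mcp_url, "timeout": 90}},
    ],
    alita_client=client,
    llm=llm,
    debug_mode=True,                      # log and skip toolkits that fail to initialize
    mcp_tokens={canonical_resource(mcp_url): {"access_token": "TOKEN", "session_id": "abc"}},
    conversation_id="session-123",
)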
alita_sdk/runtime/toolkits/vectorstore.py (+9 -4):

@@ -1,7 +1,7 @@
 from logging import getLogger
 from typing import Any, List, Literal, Optional

-from alita_sdk.tools.utils import clean_string
+from alita_sdk.tools.utils import clean_string
 from pydantic import BaseModel, create_model, Field, ConfigDict
 from langchain_core.tools import BaseToolkit, BaseTool
 from alita_sdk.tools.base.tool import BaseAction
@@ -31,7 +31,8 @@ class VectorStoreToolkit(BaseToolkit):
                     toolkit_name: Optional[str] = None,
                     selected_tools: list[str] = []):
        logger.info("Selected tools: %s", selected_tools)
-
+        # Use clean toolkit name for context (max 1000 chars in description)
+        toolkit_context = f" [Toolkit: {clean_string(toolkit_name)}]" if toolkit_name else ''
        if selected_tools is None:
            selected_tools = []
        tools = []
@@ -46,10 +47,14 @@ class VectorStoreToolkit(BaseToolkit):
            # if selected_tools:
            # if tool["name"] not in selected_tools:
            # continue
+            # Add toolkit context to description with character limit
+            description = tool["description"]
+            if toolkit_context and len(description + toolkit_context) <= 1000:
+                description = description + toolkit_context
            tools.append(BaseAction(
                api_wrapper=vectorstore_wrapper,
-                name=
-                description=
+                name=tool["name"],
+                description=description,
                args_schema=tool["args_schema"]
            ))
        return cls(tools=tools)
alita_sdk/runtime/tools/__init__.py (new file, +22 lines):

@@ -0,0 +1,22 @@
+"""
+Runtime tools module for Alita SDK.
+This module provides various tools that can be used within LangGraph agents.
+"""
+
+from .sandbox import PyodideSandboxTool, StatefulPyodideSandboxTool, create_sandbox_tool
+from .echo import EchoTool
+from .image_generation import (
+    ImageGenerationTool,
+    create_image_generation_tool,
+    ImageGenerationToolkit
+)
+
+__all__ = [
+    "PyodideSandboxTool",
+    "StatefulPyodideSandboxTool",
+    "create_sandbox_tool",
+    "EchoTool",
+    "ImageGenerationTool",
+    "ImageGenerationToolkit",
+    "create_image_generation_tool"
+]
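For orientation (an editorial sketch, not part of the released diff): a quick check of the public surface exported by the new package __init__ above; it assumes the optional sandbox and image-generation dependencies are installed.

import alita_sdk.runtime.tools as runtime_tools

# Prints the names re-exported by the new package __init__:
# PyodideSandboxTool, StatefulPyodideSandboxTool, create_sandbox_tool,
# EchoTool, ImageGenerationTool, ImageGenerationToolkit, create_image_generation_tool
print(runtime_tools.__all__)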