alita-sdk 0.3.462__py3-none-any.whl → 0.3.627__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- alita_sdk/cli/agent/__init__.py +5 -0
- alita_sdk/cli/agent/default.py +258 -0
- alita_sdk/cli/agent_executor.py +15 -3
- alita_sdk/cli/agent_loader.py +56 -8
- alita_sdk/cli/agent_ui.py +93 -31
- alita_sdk/cli/agents.py +2274 -230
- alita_sdk/cli/callbacks.py +96 -25
- alita_sdk/cli/cli.py +10 -1
- alita_sdk/cli/config.py +162 -9
- alita_sdk/cli/context/__init__.py +30 -0
- alita_sdk/cli/context/cleanup.py +198 -0
- alita_sdk/cli/context/manager.py +731 -0
- alita_sdk/cli/context/message.py +285 -0
- alita_sdk/cli/context/strategies.py +289 -0
- alita_sdk/cli/context/token_estimation.py +127 -0
- alita_sdk/cli/input_handler.py +419 -0
- alita_sdk/cli/inventory.py +1073 -0
- alita_sdk/cli/testcases/__init__.py +94 -0
- alita_sdk/cli/testcases/data_generation.py +119 -0
- alita_sdk/cli/testcases/discovery.py +96 -0
- alita_sdk/cli/testcases/executor.py +84 -0
- alita_sdk/cli/testcases/logger.py +85 -0
- alita_sdk/cli/testcases/parser.py +172 -0
- alita_sdk/cli/testcases/prompts.py +91 -0
- alita_sdk/cli/testcases/reporting.py +125 -0
- alita_sdk/cli/testcases/setup.py +108 -0
- alita_sdk/cli/testcases/test_runner.py +282 -0
- alita_sdk/cli/testcases/utils.py +39 -0
- alita_sdk/cli/testcases/validation.py +90 -0
- alita_sdk/cli/testcases/workflow.py +196 -0
- alita_sdk/cli/toolkit.py +14 -17
- alita_sdk/cli/toolkit_loader.py +35 -5
- alita_sdk/cli/tools/__init__.py +36 -2
- alita_sdk/cli/tools/approval.py +224 -0
- alita_sdk/cli/tools/filesystem.py +910 -64
- alita_sdk/cli/tools/planning.py +389 -0
- alita_sdk/cli/tools/terminal.py +414 -0
- alita_sdk/community/__init__.py +72 -12
- alita_sdk/community/inventory/__init__.py +236 -0
- alita_sdk/community/inventory/config.py +257 -0
- alita_sdk/community/inventory/enrichment.py +2137 -0
- alita_sdk/community/inventory/extractors.py +1469 -0
- alita_sdk/community/inventory/ingestion.py +3172 -0
- alita_sdk/community/inventory/knowledge_graph.py +1457 -0
- alita_sdk/community/inventory/parsers/__init__.py +218 -0
- alita_sdk/community/inventory/parsers/base.py +295 -0
- alita_sdk/community/inventory/parsers/csharp_parser.py +907 -0
- alita_sdk/community/inventory/parsers/go_parser.py +851 -0
- alita_sdk/community/inventory/parsers/html_parser.py +389 -0
- alita_sdk/community/inventory/parsers/java_parser.py +593 -0
- alita_sdk/community/inventory/parsers/javascript_parser.py +629 -0
- alita_sdk/community/inventory/parsers/kotlin_parser.py +768 -0
- alita_sdk/community/inventory/parsers/markdown_parser.py +362 -0
- alita_sdk/community/inventory/parsers/python_parser.py +604 -0
- alita_sdk/community/inventory/parsers/rust_parser.py +858 -0
- alita_sdk/community/inventory/parsers/swift_parser.py +832 -0
- alita_sdk/community/inventory/parsers/text_parser.py +322 -0
- alita_sdk/community/inventory/parsers/yaml_parser.py +370 -0
- alita_sdk/community/inventory/patterns/__init__.py +61 -0
- alita_sdk/community/inventory/patterns/ast_adapter.py +380 -0
- alita_sdk/community/inventory/patterns/loader.py +348 -0
- alita_sdk/community/inventory/patterns/registry.py +198 -0
- alita_sdk/community/inventory/presets.py +535 -0
- alita_sdk/community/inventory/retrieval.py +1403 -0
- alita_sdk/community/inventory/toolkit.py +173 -0
- alita_sdk/community/inventory/toolkit_utils.py +176 -0
- alita_sdk/community/inventory/visualize.py +1370 -0
- alita_sdk/configurations/__init__.py +1 -1
- alita_sdk/configurations/ado.py +141 -20
- alita_sdk/configurations/bitbucket.py +0 -3
- alita_sdk/configurations/confluence.py +76 -42
- alita_sdk/configurations/figma.py +76 -0
- alita_sdk/configurations/gitlab.py +17 -5
- alita_sdk/configurations/openapi.py +329 -0
- alita_sdk/configurations/qtest.py +72 -1
- alita_sdk/configurations/report_portal.py +96 -0
- alita_sdk/configurations/sharepoint.py +148 -0
- alita_sdk/configurations/testio.py +83 -0
- alita_sdk/runtime/clients/artifact.py +3 -3
- alita_sdk/runtime/clients/client.py +353 -48
- alita_sdk/runtime/clients/sandbox_client.py +0 -21
- alita_sdk/runtime/langchain/_constants_bkup.py +1318 -0
- alita_sdk/runtime/langchain/assistant.py +123 -26
- alita_sdk/runtime/langchain/constants.py +642 -1
- alita_sdk/runtime/langchain/document_loaders/AlitaExcelLoader.py +103 -60
- alita_sdk/runtime/langchain/document_loaders/AlitaJSONLinesLoader.py +77 -0
- alita_sdk/runtime/langchain/document_loaders/AlitaJSONLoader.py +6 -3
- alita_sdk/runtime/langchain/document_loaders/AlitaPowerPointLoader.py +226 -7
- alita_sdk/runtime/langchain/document_loaders/AlitaTextLoader.py +5 -2
- alita_sdk/runtime/langchain/document_loaders/constants.py +12 -7
- alita_sdk/runtime/langchain/langraph_agent.py +279 -73
- alita_sdk/runtime/langchain/utils.py +82 -15
- alita_sdk/runtime/llms/preloaded.py +2 -6
- alita_sdk/runtime/skills/__init__.py +91 -0
- alita_sdk/runtime/skills/callbacks.py +498 -0
- alita_sdk/runtime/skills/discovery.py +540 -0
- alita_sdk/runtime/skills/executor.py +610 -0
- alita_sdk/runtime/skills/input_builder.py +371 -0
- alita_sdk/runtime/skills/models.py +330 -0
- alita_sdk/runtime/skills/registry.py +355 -0
- alita_sdk/runtime/skills/skill_runner.py +330 -0
- alita_sdk/runtime/toolkits/__init__.py +7 -0
- alita_sdk/runtime/toolkits/application.py +21 -9
- alita_sdk/runtime/toolkits/artifact.py +15 -5
- alita_sdk/runtime/toolkits/datasource.py +13 -6
- alita_sdk/runtime/toolkits/mcp.py +139 -251
- alita_sdk/runtime/toolkits/mcp_config.py +1048 -0
- alita_sdk/runtime/toolkits/planning.py +178 -0
- alita_sdk/runtime/toolkits/skill_router.py +238 -0
- alita_sdk/runtime/toolkits/subgraph.py +251 -6
- alita_sdk/runtime/toolkits/tools.py +238 -32
- alita_sdk/runtime/toolkits/vectorstore.py +11 -5
- alita_sdk/runtime/tools/__init__.py +3 -1
- alita_sdk/runtime/tools/application.py +20 -6
- alita_sdk/runtime/tools/artifact.py +511 -28
- alita_sdk/runtime/tools/data_analysis.py +183 -0
- alita_sdk/runtime/tools/function.py +43 -15
- alita_sdk/runtime/tools/image_generation.py +50 -44
- alita_sdk/runtime/tools/llm.py +852 -67
- alita_sdk/runtime/tools/loop.py +3 -1
- alita_sdk/runtime/tools/loop_output.py +3 -1
- alita_sdk/runtime/tools/mcp_remote_tool.py +25 -10
- alita_sdk/runtime/tools/mcp_server_tool.py +7 -6
- alita_sdk/runtime/tools/planning/__init__.py +36 -0
- alita_sdk/runtime/tools/planning/models.py +246 -0
- alita_sdk/runtime/tools/planning/wrapper.py +607 -0
- alita_sdk/runtime/tools/router.py +2 -4
- alita_sdk/runtime/tools/sandbox.py +9 -6
- alita_sdk/runtime/tools/skill_router.py +776 -0
- alita_sdk/runtime/tools/tool.py +3 -1
- alita_sdk/runtime/tools/vectorstore.py +7 -2
- alita_sdk/runtime/tools/vectorstore_base.py +51 -11
- alita_sdk/runtime/utils/AlitaCallback.py +137 -21
- alita_sdk/runtime/utils/constants.py +5 -1
- alita_sdk/runtime/utils/mcp_client.py +492 -0
- alita_sdk/runtime/utils/mcp_oauth.py +202 -5
- alita_sdk/runtime/utils/mcp_sse_client.py +36 -7
- alita_sdk/runtime/utils/mcp_tools_discovery.py +124 -0
- alita_sdk/runtime/utils/serialization.py +155 -0
- alita_sdk/runtime/utils/streamlit.py +6 -10
- alita_sdk/runtime/utils/toolkit_utils.py +16 -5
- alita_sdk/runtime/utils/utils.py +36 -0
- alita_sdk/tools/__init__.py +113 -29
- alita_sdk/tools/ado/repos/__init__.py +51 -33
- alita_sdk/tools/ado/repos/repos_wrapper.py +148 -89
- alita_sdk/tools/ado/test_plan/__init__.py +25 -9
- alita_sdk/tools/ado/test_plan/test_plan_wrapper.py +23 -1
- alita_sdk/tools/ado/utils.py +1 -18
- alita_sdk/tools/ado/wiki/__init__.py +25 -8
- alita_sdk/tools/ado/wiki/ado_wrapper.py +291 -22
- alita_sdk/tools/ado/work_item/__init__.py +26 -9
- alita_sdk/tools/ado/work_item/ado_wrapper.py +56 -3
- alita_sdk/tools/advanced_jira_mining/__init__.py +11 -8
- alita_sdk/tools/aws/delta_lake/__init__.py +13 -9
- alita_sdk/tools/aws/delta_lake/tool.py +5 -1
- alita_sdk/tools/azure_ai/search/__init__.py +11 -8
- alita_sdk/tools/azure_ai/search/api_wrapper.py +1 -1
- alita_sdk/tools/base/tool.py +5 -1
- alita_sdk/tools/base_indexer_toolkit.py +170 -45
- alita_sdk/tools/bitbucket/__init__.py +17 -12
- alita_sdk/tools/bitbucket/api_wrapper.py +59 -11
- alita_sdk/tools/bitbucket/cloud_api_wrapper.py +49 -35
- alita_sdk/tools/browser/__init__.py +5 -4
- alita_sdk/tools/carrier/__init__.py +5 -6
- alita_sdk/tools/carrier/backend_reports_tool.py +6 -6
- alita_sdk/tools/carrier/run_ui_test_tool.py +6 -6
- alita_sdk/tools/carrier/ui_reports_tool.py +5 -5
- alita_sdk/tools/chunkers/__init__.py +3 -1
- alita_sdk/tools/chunkers/code/treesitter/treesitter.py +37 -13
- alita_sdk/tools/chunkers/sematic/json_chunker.py +1 -0
- alita_sdk/tools/chunkers/sematic/markdown_chunker.py +97 -6
- alita_sdk/tools/chunkers/universal_chunker.py +270 -0
- alita_sdk/tools/cloud/aws/__init__.py +10 -7
- alita_sdk/tools/cloud/azure/__init__.py +10 -7
- alita_sdk/tools/cloud/gcp/__init__.py +10 -7
- alita_sdk/tools/cloud/k8s/__init__.py +10 -7
- alita_sdk/tools/code/linter/__init__.py +10 -8
- alita_sdk/tools/code/loaders/codesearcher.py +3 -2
- alita_sdk/tools/code/sonar/__init__.py +10 -7
- alita_sdk/tools/code_indexer_toolkit.py +73 -23
- alita_sdk/tools/confluence/__init__.py +21 -15
- alita_sdk/tools/confluence/api_wrapper.py +78 -23
- alita_sdk/tools/confluence/loader.py +4 -2
- alita_sdk/tools/custom_open_api/__init__.py +12 -5
- alita_sdk/tools/elastic/__init__.py +11 -8
- alita_sdk/tools/elitea_base.py +493 -30
- alita_sdk/tools/figma/__init__.py +58 -11
- alita_sdk/tools/figma/api_wrapper.py +1235 -143
- alita_sdk/tools/figma/figma_client.py +73 -0
- alita_sdk/tools/figma/toon_tools.py +2748 -0
- alita_sdk/tools/github/__init__.py +13 -14
- alita_sdk/tools/github/github_client.py +224 -100
- alita_sdk/tools/github/graphql_client_wrapper.py +119 -33
- alita_sdk/tools/github/schemas.py +14 -5
- alita_sdk/tools/github/tool.py +5 -1
- alita_sdk/tools/github/tool_prompts.py +9 -22
- alita_sdk/tools/gitlab/__init__.py +15 -11
- alita_sdk/tools/gitlab/api_wrapper.py +207 -41
- alita_sdk/tools/gitlab_org/__init__.py +10 -8
- alita_sdk/tools/gitlab_org/api_wrapper.py +63 -64
- alita_sdk/tools/google/bigquery/__init__.py +13 -12
- alita_sdk/tools/google/bigquery/tool.py +5 -1
- alita_sdk/tools/google_places/__init__.py +10 -8
- alita_sdk/tools/google_places/api_wrapper.py +1 -1
- alita_sdk/tools/jira/__init__.py +17 -11
- alita_sdk/tools/jira/api_wrapper.py +91 -40
- alita_sdk/tools/keycloak/__init__.py +11 -8
- alita_sdk/tools/localgit/__init__.py +9 -3
- alita_sdk/tools/localgit/local_git.py +62 -54
- alita_sdk/tools/localgit/tool.py +5 -1
- alita_sdk/tools/memory/__init__.py +11 -3
- alita_sdk/tools/non_code_indexer_toolkit.py +1 -0
- alita_sdk/tools/ocr/__init__.py +11 -8
- alita_sdk/tools/openapi/__init__.py +490 -114
- alita_sdk/tools/openapi/api_wrapper.py +1368 -0
- alita_sdk/tools/openapi/tool.py +20 -0
- alita_sdk/tools/pandas/__init__.py +20 -12
- alita_sdk/tools/pandas/api_wrapper.py +38 -25
- alita_sdk/tools/pandas/dataframe/generator/base.py +3 -1
- alita_sdk/tools/postman/__init__.py +11 -11
- alita_sdk/tools/pptx/__init__.py +10 -9
- alita_sdk/tools/pptx/pptx_wrapper.py +1 -1
- alita_sdk/tools/qtest/__init__.py +30 -10
- alita_sdk/tools/qtest/api_wrapper.py +430 -13
- alita_sdk/tools/rally/__init__.py +10 -8
- alita_sdk/tools/rally/api_wrapper.py +1 -1
- alita_sdk/tools/report_portal/__init__.py +12 -9
- alita_sdk/tools/salesforce/__init__.py +10 -9
- alita_sdk/tools/servicenow/__init__.py +17 -14
- alita_sdk/tools/servicenow/api_wrapper.py +1 -1
- alita_sdk/tools/sharepoint/__init__.py +10 -8
- alita_sdk/tools/sharepoint/api_wrapper.py +4 -4
- alita_sdk/tools/slack/__init__.py +10 -8
- alita_sdk/tools/slack/api_wrapper.py +2 -2
- alita_sdk/tools/sql/__init__.py +11 -9
- alita_sdk/tools/testio/__init__.py +10 -8
- alita_sdk/tools/testrail/__init__.py +11 -8
- alita_sdk/tools/testrail/api_wrapper.py +1 -1
- alita_sdk/tools/utils/__init__.py +9 -4
- alita_sdk/tools/utils/content_parser.py +77 -3
- alita_sdk/tools/utils/text_operations.py +410 -0
- alita_sdk/tools/utils/tool_prompts.py +79 -0
- alita_sdk/tools/vector_adapters/VectorStoreAdapter.py +17 -13
- alita_sdk/tools/xray/__init__.py +12 -9
- alita_sdk/tools/yagmail/__init__.py +9 -3
- alita_sdk/tools/zephyr/__init__.py +9 -7
- alita_sdk/tools/zephyr_enterprise/__init__.py +11 -8
- alita_sdk/tools/zephyr_essential/__init__.py +10 -8
- alita_sdk/tools/zephyr_essential/api_wrapper.py +30 -13
- alita_sdk/tools/zephyr_essential/client.py +2 -2
- alita_sdk/tools/zephyr_scale/__init__.py +11 -9
- alita_sdk/tools/zephyr_scale/api_wrapper.py +2 -2
- alita_sdk/tools/zephyr_squad/__init__.py +10 -8
- {alita_sdk-0.3.462.dist-info → alita_sdk-0.3.627.dist-info}/METADATA +147 -7
- alita_sdk-0.3.627.dist-info/RECORD +468 -0
- alita_sdk-0.3.627.dist-info/entry_points.txt +2 -0
- alita_sdk-0.3.462.dist-info/RECORD +0 -384
- alita_sdk-0.3.462.dist-info/entry_points.txt +0 -2
- {alita_sdk-0.3.462.dist-info → alita_sdk-0.3.627.dist-info}/WHEEL +0 -0
- {alita_sdk-0.3.462.dist-info → alita_sdk-0.3.627.dist-info}/licenses/LICENSE +0 -0
- {alita_sdk-0.3.462.dist-info → alita_sdk-0.3.627.dist-info}/top_level.txt +0 -0
--- /dev/null
+++ b/alita_sdk/runtime/skills/registry.py
@@ -0,0 +1,355 @@
+"""
+Skills Registry service for managing and querying skills.
+
+This module provides a thread-safe registry service that uses the discovery
+service to find skills and provides a clean API for skill management.
+"""
+
+import logging
+import threading
+from pathlib import Path
+from typing import Dict, List, Optional
+
+from .discovery import SkillDiscovery
+from .models import SkillMetadata, SkillType
+
+logger = logging.getLogger(__name__)
+
+
+class SkillsRegistry:
+    """
+    Thread-safe registry service for managing skills.
+
+    The registry uses a discovery service to find skills and provides
+    a clean API for querying, filtering, and managing skills.
+    """
+
+    def __init__(self, search_paths: Optional[List[str]] = None):
+        """
+        Initialize the skills registry.
+
+        Args:
+            search_paths: Custom search paths for skills. If None, uses defaults.
+        """
+        self.discovery = SkillDiscovery(search_paths)
+        self._lock = threading.RLock()
+        self._initialized = False
+
+        logger.info("Skills registry initialized")
+
+    def discover(self, refresh: bool = False) -> Dict[str, SkillMetadata]:
+        """
+        Discover skills from configured search paths.
+
+        This is the primary method to populate the registry with skills.
+        It's thread-safe and can be called multiple times.
+
+        Args:
+            refresh: If True, clear cache and rescan all directories.
+
+        Returns:
+            Dictionary mapping skill names to SkillMetadata objects.
+        """
+        with self._lock:
+            skills = self.discovery.discover(refresh=refresh)
+            self._initialized = True
+            logger.info(f"Registry discovered {len(skills)} skills")
+            return skills
+
+    def list(self) -> List[SkillMetadata]:
+        """
+        Get list of all discovered skills.
+
+        Returns:
+            List of SkillMetadata objects.
+        """
+        with self._lock:
+            if not self._initialized:
+                self.discover()
+            return list(self.discovery.cache.values())
+
+    def get(self, name: str) -> Optional[SkillMetadata]:
+        """
+        Get a skill by name.
+
+        Args:
+            name: Name of the skill to retrieve.
+
+        Returns:
+            SkillMetadata if found, None otherwise.
+        """
+        with self._lock:
+            if not self._initialized:
+                self.discover()
+            return self.discovery.get_skill_by_name(name)
+
+    def find_by_capability(self, capability: str) -> List[SkillMetadata]:
+        """
+        Find skills that provide a specific capability.
+
+        Args:
+            capability: Capability to search for.
+
+        Returns:
+            List of matching SkillMetadata objects.
+        """
+        with self._lock:
+            if not self._initialized:
+                self.discover()
+            return self.discovery.find_skills_by_capability(capability)
+
+    def find_by_tag(self, tag: str) -> List[SkillMetadata]:
+        """
+        Find skills with a specific tag.
+
+        Args:
+            tag: Tag to search for.
+
+        Returns:
+            List of matching SkillMetadata objects.
+        """
+        with self._lock:
+            if not self._initialized:
+                self.discover()
+            return self.discovery.find_skills_by_tag(tag)
+
+    def find_by_type(self, skill_type: SkillType) -> List[SkillMetadata]:
+        """
+        Find skills of a specific type (graph or agent).
+
+        Args:
+            skill_type: SkillType to filter by.
+
+        Returns:
+            List of matching SkillMetadata objects.
+        """
+        with self._lock:
+            if not self._initialized:
+                self.discover()
+            return self.discovery.find_skills_by_type(skill_type)
+
+    def reload(self, name: str) -> Optional[SkillMetadata]:
+        """
+        Reload a specific skill from disk.
+
+        This is useful when you know a specific skill has been updated
+        and you want to reload just that skill without refreshing all.
+
+        Args:
+            name: Name of the skill to reload.
+
+        Returns:
+            Updated SkillMetadata if found and reloaded, None otherwise.
+        """
+        with self._lock:
+            # Get current skill to know where to look
+            current_skill = self.get(name)
+            if not current_skill:
+                logger.warning(f"Cannot reload skill '{name}': not found in registry")
+                return None
+
+            skill_path = Path(current_skill.path)
+            agent_file = skill_path / "agent.md"
+
+            try:
+                # Parse the skill file directly
+                updated_skill = self.discovery._parse_skill_file(agent_file)
+                if updated_skill and updated_skill.name == name:
+                    # Update cache with reloaded skill
+                    self.discovery.cache[name] = updated_skill
+                    logger.info(f"Successfully reloaded skill: {name}")
+                    return updated_skill
+                else:
+                    logger.error(f"Reloaded skill name mismatch: expected '{name}', got '{updated_skill.name if updated_skill else None}'")
+                    return None
+
+            except Exception as e:
+                logger.error(f"Failed to reload skill '{name}': {e}")
+                return None
+
+    def clear(self) -> None:
+        """
+        Clear the registry cache.
+
+        This removes all cached skills. The next query will trigger
+        a fresh discovery.
+        """
+        with self._lock:
+            self.discovery.cache.clear()
+            self._initialized = False
+            logger.info("Registry cache cleared")
+
+    def is_skill_valid(self, name: str) -> bool:
+        """
+        Check if a skill exists and is valid.
+
+        Args:
+            name: Name of the skill to check.
+
+        Returns:
+            True if skill exists and is valid, False otherwise.
+        """
+        skill = self.get(name)
+        return skill is not None
+
+    def validate_skill_at_path(self, skill_path: str) -> tuple[bool, Optional[str]]:
+        """
+        Validate a skill definition at a specific path.
+
+        This can be used to validate a skill before adding it to
+        the registry or to check if a skill definition is valid.
+
+        Args:
+            skill_path: Path to skill directory containing agent.md.
+
+        Returns:
+            Tuple of (is_valid, error_message).
+        """
+        return self.discovery.validate_skill_definition(Path(skill_path))
+
+    def get_registry_stats(self) -> Dict[str, int]:
+        """
+        Get statistics about the registry contents.
+
+        Returns:
+            Dictionary with registry statistics.
+        """
+        with self._lock:
+            if not self._initialized:
+                self.discover()
+
+            stats = {
+                "total_skills": len(self.discovery.cache),
+                "graph_skills": len(self.find_by_type(SkillType.GRAPH)),
+                "agent_skills": len(self.find_by_type(SkillType.AGENT))
+            }
+
+            # Count unique capabilities and tags
+            all_capabilities = set()
+            all_tags = set()
+            for skill in self.discovery.cache.values():
+                all_capabilities.update(skill.capabilities)
+                all_tags.update(skill.tags)
+
+            stats["unique_capabilities"] = len(all_capabilities)
+            stats["unique_tags"] = len(all_tags)
+
+            return stats
+
+    def get_skills_by_search_path(self) -> Dict[str, List[SkillMetadata]]:
+        """
+        Get skills grouped by their search path.
+
+        This is useful for understanding which skills come from which
+        directories and for debugging discovery issues.
+
+        Returns:
+            Dictionary mapping search paths to lists of skills.
+        """
+        with self._lock:
+            if not self._initialized:
+                self.discover()
+
+            path_groups = {}
+            for skill in self.discovery.cache.values():
+                skill_path = Path(skill.path)
+
+                # Find which search path this skill belongs to
+                for search_path in self.discovery.search_paths:
+                    search_path_obj = Path(search_path).expanduser().resolve()
+                    try:
+                        if skill_path.is_relative_to(search_path_obj):
+                            if search_path not in path_groups:
+                                path_groups[search_path] = []
+                            path_groups[search_path].append(skill)
+                            break
+                    except (ValueError, OSError):
+                        # skill_path is not relative to this search path
+                        continue
+
+            return path_groups
+
+    def refresh_if_stale(self, max_age_seconds: int = 300) -> bool:
+        """
+        Refresh the registry if it's considered stale.
+
+        Args:
+            max_age_seconds: Maximum age in seconds before refresh.
+
+        Returns:
+            True if refresh was performed, False if cache is still fresh.
+        """
+        # This is a simple implementation - in production you might want
+        # to track file modification times or use a more sophisticated
+        # staleness check
+        with self._lock:
+            if not self._initialized:
+                self.discover()
+                return True
+
+            # For now, always consider the cache fresh since we don't
+            # track timestamps. This method provides the interface
+            # for future enhancement.
+            # TODO: Implement actual staleness check using max_age_seconds
+            _ = max_age_seconds  # Suppress unused parameter warning
+            return False
+
+    def __len__(self) -> int:
+        """Return the number of skills in the registry."""
+        return len(self.list())
+
+    def __contains__(self, name: str) -> bool:
+        """Check if a skill name exists in the registry."""
+        return self.is_skill_valid(name)
+
+    def __repr__(self) -> str:
+        """String representation of the registry."""
+        stats = self.get_registry_stats()
+        return (
+            f"SkillsRegistry("
+            f"skills={stats['total_skills']}, "
+            f"graphs={stats['graph_skills']}, "
+            f"agents={stats['agent_skills']})"
+        )
+
+
+# Global registry instance for convenience
+_default_registry: Optional[SkillsRegistry] = None
+_registry_lock = threading.Lock()
+
+
+def get_default_registry() -> SkillsRegistry:
+    """
+    Get the default global skills registry instance.
+
+    This provides a convenient way to access a shared registry
+    without having to pass it around. The registry is created
+    lazily on first access.
+
+    Returns:
+        Default SkillsRegistry instance.
+    """
+    global _default_registry
+
+    with _registry_lock:
+        if _default_registry is None:
+            _default_registry = SkillsRegistry()
+            logger.info("Created default skills registry")
+
+        return _default_registry
+
+
+def reset_default_registry() -> None:
+    """
+    Reset the default registry instance.
+
+    This is mainly useful for testing or when you want to
+    reinitialize the registry with different settings.
+    """
+    global _default_registry
+
+    with _registry_lock:
+        if _default_registry is not None:
+            _default_registry.clear()
+            _default_registry = None
+            logger.info("Reset default skills registry")
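
For orientation, here is a minimal usage sketch of the registry API added above. The import path follows the file list entry alita_sdk/runtime/skills/registry.py; the skill name and capability string are hypothetical.

    from alita_sdk.runtime.skills.registry import get_default_registry

    registry = get_default_registry()        # lazily created, process-wide singleton
    registry.discover(refresh=True)          # rescan all configured search paths

    # Enumerate everything that was found
    for skill in registry.list():
        print(skill.name, skill.capabilities, skill.tags)

    # Query helpers return SkillMetadata objects ("code-review" is a made-up capability)
    reviewers = registry.find_by_capability("code-review")

    # __contains__ delegates to is_skill_valid(); "summarizer" is a made-up skill name
    if "summarizer" in registry:
        print(registry.get("summarizer").path)

    print(registry.get_registry_stats())     # totals plus unique capability/tag counts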
--- /dev/null
+++ b/alita_sdk/runtime/skills/skill_runner.py
@@ -0,0 +1,330 @@
+"""
+Skill runner subprocess entry point.
+
+This module is executed as a subprocess by the skill executor to run
+individual skills in isolation. It handles both graph and agent skills
+using the existing alita-sdk infrastructure.
+"""
+
+import argparse
+import json
+import logging
+import os
+import sys
+import traceback
+from pathlib import Path
+from typing import Any, Dict, Optional
+
+# Configure logging for subprocess
+logging.basicConfig(
+    level=logging.INFO,
+    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
+    stream=sys.stderr
+)
+
+logger = logging.getLogger(__name__)
+
+# Import alita-sdk components
+try:
+    from ..langchain.assistant import Assistant
+    from ..langchain.langraph_agent import create_graph
+    from ..clients.client import AlitaClient
+    from .models import SkillMetadata, SkillType, SkillStatus
+except ImportError as e:
+    logger.error(f"Failed to import alita-sdk components: {e}")
+    sys.exit(1)
+
+
+class SkillRunner:
+    """
+    Subprocess skill runner that executes individual skills.
+    """
+
+    def __init__(self):
+        self.work_dir = None
+        self.skill_metadata = None
+        self.skill_input = None
+        self.execution_id = None
+
+    def run(self, input_file: Path, work_dir: Path) -> None:
+        """
+        Run a skill based on input file configuration.
+
+        Args:
+            input_file: Path to JSON input file with skill configuration.
+            work_dir: Working directory for execution.
+        """
+        self.work_dir = work_dir
+        result_file = work_dir / "skill_result.json"
+
+        try:
+            # Load input configuration
+            self._load_input(input_file)
+
+            # Execute skill based on type
+            if self.skill_metadata.skill_type == SkillType.AGENT:
+                result = self._run_agent_skill()
+            else:  # SkillType.GRAPH
+                result = self._run_graph_skill()
+
+            # Write successful result
+            self._write_result(result_file, {
+                'status': 'success',
+                'output_text': result,
+                'execution_id': self.execution_id
+            })
+
+            logger.info(f"Skill '{self.skill_metadata.name}' completed successfully")
+
+        except Exception as e:
+            logger.error(f"Skill execution failed: {e}")
+            logger.error(traceback.format_exc())
+
+            # Write error result
+            self._write_result(result_file, {
+                'status': 'error',
+                'output_text': f"Skill execution failed: {str(e)}",
+                'error_details': str(e),
+                'execution_id': self.execution_id
+            })
+
+            # Exit with error code
+            sys.exit(1)
+
+    def _load_input(self, input_file: Path) -> None:
+        """
+        Load skill configuration and input from JSON file.
+
+        Args:
+            input_file: Path to input JSON file.
+        """
+        try:
+            with open(input_file, 'r', encoding='utf-8') as f:
+                data = json.load(f)
+
+            # Recreate SkillMetadata from dict
+            self.skill_metadata = SkillMetadata(**data['skill_metadata'])
+            self.skill_input = data['skill_input']
+            self.execution_id = data['execution_id']
+
+            logger.info(f"Loaded skill '{self.skill_metadata.name}' ({self.skill_metadata.skill_type.value})")
+
+        except Exception as e:
+            raise RuntimeError(f"Failed to load input file {input_file}: {e}")
+
+    def _run_agent_skill(self) -> str:
+        """
+        Run an agent-type skill using the Assistant framework.
+
+        Returns:
+            Skill execution result as text.
+        """
+        logger.info("Executing agent skill")
+
+        # Create mock AlitaClient (in production this would be passed or configured)
+        # For subprocess execution, we need to handle this differently
+        alita_client = self._create_mock_client()
+
+        # Create LLM client
+        llm = self._create_llm_client()
+
+        # Build agent data structure compatible with Assistant
+        agent_data = self._build_agent_data_structure()
+
+        # Create Assistant instance
+        assistant = Assistant(
+            alita=alita_client,
+            data=agent_data,
+            client=llm,
+            chat_history=self.skill_input.get('chat_history', []),
+            app_type=self.skill_metadata.agent_type or 'react',
+            tools=[]  # TODO: Load tools based on toolkit configuration
+        )
+
+        # Execute agent
+        agent_executor = assistant.runnable()
+        result = agent_executor.invoke({
+            'input': self.skill_input['input'],
+            'chat_history': self.skill_input.get('chat_history', []),
+            **self.skill_input.get('variables', {})
+        })
+
+        # Extract output text
+        if isinstance(result, dict):
+            output_text = result.get('output', str(result))
+        else:
+            output_text = str(result)
+
+        return output_text
+
+    def _run_graph_skill(self) -> str:
+        """
+        Run a graph-type skill using the LangGraph framework.
+
+        Returns:
+            Skill execution result as text.
+        """
+        logger.info("Executing graph skill")
+
+        # Create LLM client
+        llm = self._create_llm_client()
+
+        # Create graph from skill's YAML definition
+        if not self.skill_metadata.graph_yaml:
+            raise ValueError("Graph skill missing YAML definition")
+
+        graph = create_graph(
+            client=llm,
+            yaml_schema=self.skill_metadata.graph_yaml,
+            tools=[],  # TODO: Load tools based on configuration
+            memory=None,  # TODO: Configure memory if needed
+            store=None  # TODO: Configure store if needed
+        )
+
+        # Execute graph with state input
+        result = graph.invoke(self.skill_input)
+
+        # Extract output from graph result
+        if isinstance(result, dict):
+            # Try to get 'output' field first, then look for other relevant fields
+            output_text = (result.get('output') or
+                           result.get('messages', [{}])[-1].get('content', '') or
+                           str(result))
+        else:
+            output_text = str(result)
+
+        return output_text
+
+    def _build_agent_data_structure(self) -> Dict[str, Any]:
+        """
+        Build agent data structure compatible with Assistant.
+
+        Returns:
+            Agent configuration dictionary.
+        """
+        # Build LLM settings
+        llm_settings = {
+            'model_name': self.skill_metadata.model or 'gpt-4o',
+            'temperature': self.skill_metadata.temperature or 0.7,
+            'max_tokens': self.skill_metadata.max_tokens or 2000,
+            'top_p': 1.0,
+            'top_k': -1
+        }
+
+        # Build tools configuration from toolkits
+        tools = []
+        if self.skill_metadata.toolkits:
+            for toolkit_config in self.skill_metadata.toolkits:
+                # Convert toolkit config to tool format expected by Assistant
+                tools.append({
+                    'type': toolkit_config.get('type', 'unknown'),
+                    'name': toolkit_config.get('name', toolkit_config.get('type')),
+                    'toolkit_name': toolkit_config.get('type'),
+                    'config_ref': toolkit_config.get('config_ref')
+                })
+
+        return {
+            'name': self.skill_metadata.name,
+            'description': self.skill_metadata.description,
+            'llm_settings': llm_settings,
+            'agent_type': self.skill_metadata.agent_type or 'react',
+            'system_prompt': self.skill_metadata.system_prompt,
+            'tools': tools,
+            'meta': {
+                'step_limit': 25  # Default step limit
+            }
+        }
+
+    def _create_mock_client(self):
+        """
+        Create a mock AlitaClient for subprocess execution.
+
+        In a real implementation, this would be properly configured
+        or passed from the parent process.
+        """
+        # TODO: Implement proper client creation based on configuration
+        # For now, return None - the Assistant should handle this gracefully
+        return None
+
+    def _create_llm_client(self):
+        """
+        Create LLM client based on skill metadata.
+
+        Returns:
+            Configured LLM client.
+        """
+        # TODO: Implement proper LLM client creation
+        # This would typically involve:
+        # 1. Reading API keys from environment
+        # 2. Creating appropriate LLM instance based on model
+        # 3. Configuring with skill-specific settings
+
+        # For now, return a placeholder that indicates LLM is needed
+        class MockLLM:
+            def __init__(self):
+                self.model_name = self.skill_metadata.model or 'gpt-4o'
+                self.temperature = self.skill_metadata.temperature or 0.7
+                self.max_tokens = self.skill_metadata.max_tokens or 2000
+
+            def invoke(self, messages, **kwargs):
+                # This is a placeholder - in real implementation this would
+                # call the actual LLM API
+                return f"Mock LLM response for: {messages[-1] if messages else 'empty input'}"
+
+        return MockLLM()
+
+    def _write_result(self, result_file: Path, result_data: Dict[str, Any]) -> None:
+        """
+        Write execution result to file for parent process.
+
+        Args:
+            result_file: Path to result file.
+            result_data: Result data to write.
+        """
+        try:
+            with open(result_file, 'w', encoding='utf-8') as f:
+                json.dump(result_data, f, indent=2, default=str)
+        except Exception as e:
+            logger.error(f"Failed to write result file: {e}")
+            raise
+
+
+def main():
+    """
+    Main entry point for skill runner subprocess.
+    """
+    parser = argparse.ArgumentParser(description="Alita Skills Runner")
+    parser.add_argument(
+        '--input-file',
+        type=Path,
+        required=True,
+        help="Path to JSON input file with skill configuration"
+    )
+    parser.add_argument(
+        '--work-dir',
+        type=Path,
+        required=True,
+        help="Working directory for skill execution"
+    )
+
+    args = parser.parse_args()
+
+    # Validate arguments
+    if not args.input_file.exists():
+        logger.error(f"Input file does not exist: {args.input_file}")
+        sys.exit(1)
+
+    if not args.work_dir.exists():
+        logger.error(f"Working directory does not exist: {args.work_dir}")
+        sys.exit(1)
+
+    # Change to working directory
+    os.chdir(args.work_dir)
+
+    # Run skill
+    runner = SkillRunner()
+    runner.run(args.input_file, args.work_dir)
+
+
+if __name__ == '__main__':
+    main()
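
The runner consumes a single JSON document and writes skill_result.json into the working directory. Below is a rough sketch of how a parent process might drive it; the exact SkillMetadata fields live in models.py (not shown in this diff), so the metadata dict is illustrative, and launching the module via "python -m" is an assumption about how the executor spawns the subprocess.

    import json
    import subprocess
    import sys
    from pathlib import Path

    work_dir = Path("/tmp/skill-run")                  # assumed scratch directory
    work_dir.mkdir(parents=True, exist_ok=True)

    input_file = work_dir / "skill_input.json"
    input_file.write_text(json.dumps({
        "skill_metadata": {                            # illustrative; real schema is in models.py
            "name": "summarizer",
            "skill_type": "agent",
            "description": "Summarizes text",
            "path": str(work_dir),
        },
        "skill_input": {
            "input": "Summarize the latest release notes",
            "chat_history": [],
            "variables": {},
        },
        "execution_id": "run-001",
    }))

    # The runner exits non-zero on failure and reports 'status', 'output_text'
    # and 'execution_id' in skill_result.json.
    subprocess.run(
        [sys.executable, "-m", "alita_sdk.runtime.skills.skill_runner",
         "--input-file", str(input_file), "--work-dir", str(work_dir)],
        check=True,
    )
    print(json.loads((work_dir / "skill_result.json").read_text())["output_text"])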
--- a/alita_sdk/runtime/toolkits/__init__.py
+++ b/alita_sdk/runtime/toolkits/__init__.py
@@ -6,19 +6,26 @@ This module provides various toolkit implementations for LangGraph agents.
 from .application import ApplicationToolkit
 from .artifact import ArtifactToolkit
 from .datasource import DatasourcesToolkit
+from .planning import PlanningToolkit
 from .prompt import PromptToolkit
 from .subgraph import SubgraphToolkit
 from .vectorstore import VectorStoreToolkit
 from .mcp import McpToolkit
+from .mcp_config import McpConfigToolkit, get_session_manager as get_mcp_session_manager
+from .skill_router import SkillRouterToolkit
 from ...tools.memory import MemoryToolkit
 
 __all__ = [
     "ApplicationToolkit",
     "ArtifactToolkit",
     "DatasourcesToolkit",
+    "PlanningToolkit",
     "PromptToolkit",
     "SubgraphToolkit",
     "VectorStoreToolkit",
     "McpToolkit",
+    "McpConfigToolkit",
+    "get_mcp_session_manager",
+    "SkillRouterToolkit",
     "MemoryToolkit"
 ]
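
After this change the new toolkits are importable directly from the package, e.g. (a sketch; constructor signatures are not part of this hunk):

    from alita_sdk.runtime.toolkits import (
        PlanningToolkit,
        SkillRouterToolkit,
        McpConfigToolkit,
        get_mcp_session_manager,
    )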