alita-sdk 0.3.528__py3-none-any.whl → 0.3.554__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of alita-sdk might be problematic. Click here for more details.
- alita_sdk/community/__init__.py +8 -4
- alita_sdk/configurations/__init__.py +1 -0
- alita_sdk/configurations/openapi.py +111 -0
- alita_sdk/runtime/clients/client.py +185 -10
- alita_sdk/runtime/langchain/langraph_agent.py +2 -2
- alita_sdk/runtime/langchain/utils.py +46 -0
- alita_sdk/runtime/skills/__init__.py +91 -0
- alita_sdk/runtime/skills/callbacks.py +498 -0
- alita_sdk/runtime/skills/discovery.py +540 -0
- alita_sdk/runtime/skills/executor.py +610 -0
- alita_sdk/runtime/skills/input_builder.py +371 -0
- alita_sdk/runtime/skills/models.py +330 -0
- alita_sdk/runtime/skills/registry.py +355 -0
- alita_sdk/runtime/skills/skill_runner.py +330 -0
- alita_sdk/runtime/toolkits/__init__.py +2 -0
- alita_sdk/runtime/toolkits/skill_router.py +238 -0
- alita_sdk/runtime/toolkits/tools.py +76 -9
- alita_sdk/runtime/tools/__init__.py +3 -1
- alita_sdk/runtime/tools/artifact.py +70 -21
- alita_sdk/runtime/tools/image_generation.py +50 -44
- alita_sdk/runtime/tools/llm.py +363 -44
- alita_sdk/runtime/tools/loop.py +3 -1
- alita_sdk/runtime/tools/loop_output.py +3 -1
- alita_sdk/runtime/tools/skill_router.py +776 -0
- alita_sdk/runtime/tools/tool.py +3 -1
- alita_sdk/runtime/tools/vectorstore.py +7 -2
- alita_sdk/runtime/tools/vectorstore_base.py +7 -2
- alita_sdk/runtime/utils/AlitaCallback.py +2 -1
- alita_sdk/runtime/utils/utils.py +34 -0
- alita_sdk/tools/__init__.py +41 -1
- alita_sdk/tools/ado/work_item/ado_wrapper.py +33 -2
- alita_sdk/tools/base_indexer_toolkit.py +36 -24
- alita_sdk/tools/confluence/api_wrapper.py +5 -6
- alita_sdk/tools/confluence/loader.py +4 -2
- alita_sdk/tools/openapi/__init__.py +280 -120
- alita_sdk/tools/openapi/api_wrapper.py +883 -0
- alita_sdk/tools/openapi/tool.py +20 -0
- alita_sdk/tools/pandas/dataframe/generator/base.py +3 -1
- alita_sdk/tools/servicenow/__init__.py +9 -9
- alita_sdk/tools/servicenow/api_wrapper.py +1 -1
- {alita_sdk-0.3.528.dist-info → alita_sdk-0.3.554.dist-info}/METADATA +2 -2
- {alita_sdk-0.3.528.dist-info → alita_sdk-0.3.554.dist-info}/RECORD +46 -33
- {alita_sdk-0.3.528.dist-info → alita_sdk-0.3.554.dist-info}/WHEEL +0 -0
- {alita_sdk-0.3.528.dist-info → alita_sdk-0.3.554.dist-info}/entry_points.txt +0 -0
- {alita_sdk-0.3.528.dist-info → alita_sdk-0.3.554.dist-info}/licenses/LICENSE +0 -0
- {alita_sdk-0.3.528.dist-info → alita_sdk-0.3.554.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,540 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Skill discovery service for finding and validating skills from filesystem.
|
|
3
|
+
|
|
4
|
+
This module handles the discovery of skill definitions from configurable
|
|
5
|
+
directories, parsing agent.md files, and creating validated SkillMetadata objects.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import logging
|
|
9
|
+
import os
|
|
10
|
+
import re
|
|
11
|
+
import yaml
|
|
12
|
+
from pathlib import Path
|
|
13
|
+
from typing import Dict, List, Optional, Tuple
|
|
14
|
+
|
|
15
|
+
from .models import (
|
|
16
|
+
SkillMetadata, SkillType, ExecutionConfig, ResultsConfig,
|
|
17
|
+
SkillInputSchema, SkillOutputSchema, SkillValidationError
|
|
18
|
+
)
|
|
19
|
+
|
|
20
|
+
logger = logging.getLogger(__name__)
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
class SkillDiscovery:
    """Service for discovering and validating skills from filesystem.

    Skills are directories containing an ``agent.md`` file whose YAML
    frontmatter declares ``agent_type: skill``. Discovered skills are parsed
    into :class:`SkillMetadata` objects and cached in memory.
    """

    def __init__(self, search_paths: Optional[List[str]] = None):
        """
        Initialize skill discovery service.

        Args:
            search_paths: Custom search paths. If None, uses default paths.
        """
        self.search_paths = search_paths or self._get_default_search_paths()
        # name -> metadata; populated lazily by discover()
        self.cache: Dict[str, SkillMetadata] = {}
        logger.info(f"Initialized skill discovery with paths: {self.search_paths}")

    @staticmethod
    def _get_default_search_paths() -> List[str]:
        """Get default search paths for skills."""
        return [
            ".alita/agents/skills",
            "./skills",
            os.path.expanduser("~/.alita/skills")
        ]

    def discover(self, refresh: bool = False) -> Dict[str, SkillMetadata]:
        """
        Discover skills from configured search paths.

        Args:
            refresh: If True, clear cache and rescan all directories.

        Returns:
            Dictionary mapping skill names to SkillMetadata objects.

        Raises:
            Exception: If discovery fails critically.
        """
        if not refresh and self.cache:
            logger.debug(f"Returning cached skills: {len(self.cache)} found")
            return self.cache

        if refresh:
            logger.info("Refreshing skill discovery cache")
            self.cache.clear()

        discovered_skills = {}

        for search_path in self.search_paths:
            try:
                path_skills = self._discover_in_path(search_path)
                logger.info(f"Found {len(path_skills)} skills in {search_path}")

                # Handle name collisions: later search paths win.
                for name, skill in path_skills.items():
                    if name in discovered_skills:
                        logger.warning(
                            f"Skill name collision: '{name}' found in both "
                            f"{discovered_skills[name].path} and {skill.path}. "
                            f"Using the latter."
                        )
                    discovered_skills[name] = skill

            except Exception as e:
                logger.error(f"Error discovering skills in {search_path}: {e}")
                # Continue with other paths rather than failing completely

        logger.info(f"Discovery complete: {len(discovered_skills)} skills found")
        self.cache = discovered_skills
        return discovered_skills

    def _discover_in_path(self, search_path: str) -> Dict[str, SkillMetadata]:
        """
        Discover skills in a specific path.

        Args:
            search_path: Path to search for skills.

        Returns:
            Dictionary of discovered skills.
        """
        path = Path(search_path).expanduser().resolve()

        if not path.exists():
            logger.debug(f"Search path does not exist: {path}")
            return {}

        if not path.is_dir():
            logger.warning(f"Search path is not a directory: {path}")
            return {}

        skills = {}

        # Recursively find all agent.md files, including through symlinks
        for agent_file in self._find_agent_files(path):
            try:
                skill_metadata = self._parse_skill_file(agent_file)
                if skill_metadata:
                    skills[skill_metadata.name] = skill_metadata
                    logger.debug(f"Discovered skill: {skill_metadata.name} at {agent_file.parent}")
            except Exception as e:
                logger.error(f"Error parsing skill file {agent_file}: {e}")
                continue

        return skills

    def _find_agent_files(self, path: Path) -> list[Path]:
        """
        Find all agent.md files, including through symlinks.

        Python's rglob doesn't follow symlinks by default, so we need
        to handle them manually for skills discovery.

        Args:
            path: Root path to search from.

        Returns:
            List of agent.md file paths found.
        """
        agent_files: list[Path] = []
        # Single shared set of resolved directories. This both breaks
        # circular-symlink loops and ensures a directory reachable through
        # several symlink paths is scanned only once (a per-branch copy
        # would allow duplicate agent.md entries).
        visited: set = set()

        def _walk_directory(current_path: Path) -> None:
            resolved_path = current_path.resolve()
            if resolved_path in visited:
                return
            visited.add(resolved_path)

            try:
                if not current_path.exists() or not current_path.is_dir():
                    return

                for item in current_path.iterdir():
                    if item.name == "agent.md" and item.is_file():
                        agent_files.append(item)
                    elif item.is_dir():
                        # Recurse into subdirectories (including symlinked ones)
                        _walk_directory(item)

            except (PermissionError, OSError) as e:
                logger.debug(f"Cannot access {current_path}: {e}")

        _walk_directory(path)
        logger.debug(f"Found {len(agent_files)} agent.md files in {path}")
        return agent_files

    def _parse_skill_file(self, agent_file: Path) -> Optional[SkillMetadata]:
        """
        Parse an agent.md file and create SkillMetadata.

        Args:
            agent_file: Path to agent.md file.

        Returns:
            SkillMetadata object if valid, None otherwise (not a skill, or
            no frontmatter present).

        Raises:
            SkillValidationError: If skill definition is invalid.
        """
        logger.debug(f"Parsing skill file: {agent_file}")

        try:
            content = agent_file.read_text(encoding='utf-8')
            logger.debug(f"Read {len(content)} characters from {agent_file}")
        except Exception as e:
            logger.error(f"Cannot read file {agent_file}: {e}")
            raise SkillValidationError(f"Cannot read file {agent_file}: {e}")

        # Extract YAML frontmatter
        frontmatter, body = self._extract_frontmatter(content)

        if not frontmatter:
            logger.debug(f"No frontmatter found in {agent_file}")
            return None

        logger.debug(f"Extracted frontmatter from {agent_file}: {list(frontmatter.keys())}")

        # Only files explicitly tagged as skills are considered; anything
        # else (e.g. plain agent definitions) is silently skipped.
        agent_type = frontmatter.get('agent_type')
        if agent_type != 'skill':
            logger.debug(f"File {agent_file} is not a skill (agent_type: {agent_type})")
            return None

        logger.debug(f"Agent type validation passed for {agent_file}")

        # Validate and create metadata
        try:
            metadata = self._create_skill_metadata(frontmatter, body, agent_file.parent)
            logger.debug(f"Successfully created metadata for skill: {metadata.name}")
            return metadata
        except Exception as e:
            logger.error(f"Invalid skill definition in {agent_file}: {e}")
            raise SkillValidationError(f"Invalid skill definition in {agent_file}: {e}")

    def _extract_frontmatter(self, content: str) -> Tuple[Optional[Dict], str]:
        """
        Extract YAML frontmatter from markdown content.

        Args:
            content: Full file content.

        Returns:
            Tuple of (frontmatter_dict, remaining_body). The first element is
            None when no valid mapping-style frontmatter is present; in that
            case the full content is returned as the body.
        """
        # Match YAML frontmatter pattern: "---\n...\n---\n" at file start.
        pattern = r'^---\s*\n(.*?)\n---\s*\n(.*)'
        match = re.match(pattern, content, re.DOTALL)

        if not match:
            return None, content

        try:
            frontmatter = yaml.safe_load(match.group(1))
            # safe_load may return a scalar or list (e.g. "--- hello ---");
            # downstream code calls .get() on the result, so only accept a
            # mapping and treat anything else as "no frontmatter".
            if not isinstance(frontmatter, dict):
                logger.debug("Frontmatter is not a mapping; ignoring")
                return None, content
            body = match.group(2).strip()
            return frontmatter, body
        except yaml.YAMLError as e:
            logger.error(f"Invalid YAML frontmatter: {e}")
            return None, content

    def _create_skill_metadata(
        self,
        frontmatter: Dict,
        body: str,
        skill_path: Path
    ) -> SkillMetadata:
        """
        Create SkillMetadata from parsed frontmatter.

        Args:
            frontmatter: Parsed YAML frontmatter.
            body: Markdown body content.
            skill_path: Path to skill directory.

        Returns:
            Validated SkillMetadata object.

        Raises:
            SkillValidationError: If validation fails.
        """
        logger.debug(f"Creating skill metadata from frontmatter keys: {list(frontmatter.keys())}")

        # Validate required fields
        name = frontmatter.get('name')
        if not name:
            raise SkillValidationError("Missing required field: name")

        logger.debug(f"Skill name: {name}")

        description = frontmatter.get('description', '')
        skill_type_str = frontmatter.get('skill_type', 'agent')

        logger.debug(f"Skill type string: {skill_type_str}")

        try:
            skill_type = SkillType(skill_type_str)
            logger.debug(f"Parsed skill type: {skill_type}")
        except ValueError:
            raise SkillValidationError(f"Invalid skill_type: {skill_type_str}. Must be 'graph' or 'agent'")

        # Parse execution configuration
        logger.debug(f"Parsing execution config: {frontmatter.get('execution', {})}")
        try:
            execution_config = self._parse_execution_config(frontmatter.get('execution', {}))
            logger.debug(f"Created execution config: {execution_config}")
        except Exception as e:
            logger.error(f"Failed to parse execution config: {e}")
            raise SkillValidationError(f"Invalid execution configuration: {e}")

        # Parse results configuration
        logger.debug(f"Parsing results config: {frontmatter.get('results', {})}")
        try:
            results_config = self._parse_results_config(frontmatter.get('results', {}))
            logger.debug(f"Created results config: {results_config}")
        except Exception as e:
            logger.error(f"Failed to parse results config: {e}")
            raise SkillValidationError(f"Invalid results configuration: {e}")

        # Parse input/output schemas
        logger.debug(f"Parsing input schema: {frontmatter.get('inputs', {})}")
        try:
            inputs = self._parse_input_schema(frontmatter.get('inputs', {}), skill_type)
            logger.debug(f"Created input schema: {inputs}")
        except Exception as e:
            logger.error(f"Failed to parse input schema: {e}")
            raise SkillValidationError(f"Invalid input schema: {e}")

        logger.debug(f"Parsing output schema: {frontmatter.get('outputs', {})}")
        try:
            outputs = self._parse_output_schema(frontmatter.get('outputs', {}))
            logger.debug(f"Created output schema: {outputs}")
        except Exception as e:
            logger.error(f"Failed to parse output schema: {e}")
            raise SkillValidationError(f"Invalid output schema: {e}")

        # Create base metadata
        logger.debug("Creating SkillMetadata object...")
        try:
            # Imported here to avoid widening the module-level import surface.
            from .models import SkillSource
            metadata = SkillMetadata(
                name=name,
                skill_type=skill_type,
                source=SkillSource.FILESYSTEM,
                path=str(skill_path),
                description=description,
                capabilities=frontmatter.get('capabilities', []),
                tags=frontmatter.get('tags', []),
                version=frontmatter.get('version', '1.0.0'),
                execution=execution_config,
                results=results_config,
                inputs=inputs,
                outputs=outputs,
                model=frontmatter.get('model'),
                temperature=frontmatter.get('temperature'),
                max_tokens=frontmatter.get('max_tokens')
            )
            logger.debug("Base metadata created successfully")
        except Exception as e:
            logger.error(f"Failed to create base metadata: {e}")
            raise SkillValidationError(f"Failed to create skill metadata: {e}")

        # Add type-specific fields
        try:
            if skill_type == SkillType.GRAPH:
                metadata.state_schema = frontmatter.get('state', {})
                metadata.nodes = frontmatter.get('nodes', [])
                # Store complete YAML for graph reconstruction
                metadata.graph_yaml = self._build_graph_yaml(frontmatter)
                logger.debug("Added graph-specific fields")
            else:  # AGENT
                # Markdown body (if any) takes precedence over an explicit
                # system_prompt field in the frontmatter.
                metadata.system_prompt = body if body else frontmatter.get('system_prompt')
                metadata.agent_type = frontmatter.get('agent_subtype', 'react')  # Use different field name
                metadata.toolkits = frontmatter.get('toolkits', [])
                logger.debug("Added agent-specific fields")

            # Validate type-specific requirements
            self._validate_skill_type_requirements(metadata)

            logger.debug(f"Successfully created skill metadata: {name} ({skill_type.value})")
            return metadata

        except Exception as e:
            logger.error(f"Failed to add type-specific fields: {e}")
            raise SkillValidationError(f"Failed to complete skill metadata: {e}")

    def _parse_execution_config(self, execution_data: Dict) -> ExecutionConfig:
        """Parse execution configuration from frontmatter."""
        return ExecutionConfig(
            mode=execution_data.get('mode', 'subprocess'),
            timeout=execution_data.get('timeout', 300),
            working_directory=execution_data.get('working_directory'),
            environment=execution_data.get('environment', {}),
            max_retries=execution_data.get('max_retries', 0)
        )

    def _parse_results_config(self, results_data: Dict) -> ResultsConfig:
        """Parse results configuration from frontmatter."""
        return ResultsConfig(
            format=results_data.get('format', 'text_with_links'),
            output_files=results_data.get('output_files', []),
            cleanup_policy=results_data.get('cleanup_policy', 'preserve')
        )

    def _parse_input_schema(self, inputs_data: Dict, skill_type: SkillType) -> SkillInputSchema:
        """Parse input schema based on skill type."""
        schema = SkillInputSchema()

        if skill_type == SkillType.AGENT:
            schema.variables = inputs_data.get('variables', {})
            schema.chat_history = inputs_data.get('chat_history')
            schema.user_input = inputs_data.get('user_input')
        else:  # GRAPH
            schema.state_variables = inputs_data.get('state_variables', {})

        return schema

    def _parse_output_schema(self, outputs_data: Dict) -> SkillOutputSchema:
        """Parse output schema from frontmatter."""
        return SkillOutputSchema(
            primary_output=outputs_data.get('primary_output', {
                "type": "text",
                "description": "Main result text"
            }),
            generated_files=outputs_data.get('generated_files', {
                "type": "list[file_reference]",
                "description": "Created files"
            }),
            additional_outputs=outputs_data.get('additional_outputs')
        )

    def _build_graph_yaml(self, frontmatter: Dict) -> str:
        """Build complete YAML for graph skills."""
        graph_definition = {
            'name': frontmatter.get('name'),
            'description': frontmatter.get('description'),
            'state': frontmatter.get('state', {}),
            'nodes': frontmatter.get('nodes', []),
            'entry_point': frontmatter.get('entry_point'),
            'interrupt_before': frontmatter.get('interrupt_before', []),
            'interrupt_after': frontmatter.get('interrupt_after', [])
        }

        # Remove None values
        graph_definition = {k: v for k, v in graph_definition.items() if v is not None}

        return yaml.dump(graph_definition, default_flow_style=False)

    def _validate_skill_type_requirements(self, metadata: SkillMetadata) -> None:
        """
        Validate type-specific requirements for skills.

        Args:
            metadata: SkillMetadata to validate.

        Raises:
            SkillValidationError: If validation fails.
        """
        if metadata.skill_type == SkillType.GRAPH:
            if not metadata.state_schema:
                logger.warning(f"Graph skill {metadata.name} has no state schema")
            if not metadata.nodes:
                raise SkillValidationError(f"Graph skill {metadata.name} must have nodes defined")

        elif metadata.skill_type == SkillType.AGENT:
            if not metadata.system_prompt:
                logger.warning(f"Agent skill {metadata.name} has no system prompt")
            if not metadata.toolkits:
                logger.warning(f"Agent skill {metadata.name} has no toolkits defined")

    def get_skill_by_name(self, name: str) -> Optional[SkillMetadata]:
        """
        Get a specific skill by name.

        Args:
            name: Name of the skill to retrieve.

        Returns:
            SkillMetadata if found, None otherwise.
        """
        if not self.cache:
            self.discover()

        return self.cache.get(name)

    def find_skills_by_capability(self, capability: str) -> List[SkillMetadata]:
        """
        Find skills that provide a specific capability.

        Args:
            capability: Capability to search for.

        Returns:
            List of matching SkillMetadata objects.
        """
        if not self.cache:
            self.discover()

        return [
            skill for skill in self.cache.values()
            if capability in skill.capabilities
        ]

    def find_skills_by_tag(self, tag: str) -> List[SkillMetadata]:
        """
        Find skills with a specific tag.

        Args:
            tag: Tag to search for.

        Returns:
            List of matching SkillMetadata objects.
        """
        if not self.cache:
            self.discover()

        return [
            skill for skill in self.cache.values()
            if tag in skill.tags
        ]

    def find_skills_by_type(self, skill_type: SkillType) -> List[SkillMetadata]:
        """
        Find skills of a specific type.

        Args:
            skill_type: SkillType to filter by.

        Returns:
            List of matching SkillMetadata objects.
        """
        if not self.cache:
            self.discover()

        return [
            skill for skill in self.cache.values()
            if skill.skill_type == skill_type
        ]

    def validate_skill_definition(self, skill_path: Path) -> Tuple[bool, Optional[str]]:
        """
        Validate a skill definition without adding it to cache.

        Args:
            skill_path: Path to skill directory containing agent.md.

        Returns:
            Tuple of (is_valid, error_message).
        """
        agent_file = skill_path / "agent.md"

        if not agent_file.exists():
            return False, f"agent.md not found in {skill_path}"

        try:
            self._parse_skill_file(agent_file)
            return True, None
        except Exception as e:
            return False, str(e)