skilllite 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- skilllite/__init__.py +159 -0
- skilllite/analyzer.py +391 -0
- skilllite/builtin_tools.py +240 -0
- skilllite/cli.py +217 -0
- skilllite/core/__init__.py +65 -0
- skilllite/core/executor.py +182 -0
- skilllite/core/handler.py +332 -0
- skilllite/core/loops.py +770 -0
- skilllite/core/manager.py +507 -0
- skilllite/core/metadata.py +338 -0
- skilllite/core/prompt_builder.py +321 -0
- skilllite/core/registry.py +185 -0
- skilllite/core/skill_info.py +181 -0
- skilllite/core/tool_builder.py +338 -0
- skilllite/core/tools.py +253 -0
- skilllite/mcp/__init__.py +45 -0
- skilllite/mcp/server.py +734 -0
- skilllite/quick.py +420 -0
- skilllite/sandbox/__init__.py +36 -0
- skilllite/sandbox/base.py +93 -0
- skilllite/sandbox/config.py +229 -0
- skilllite/sandbox/skillbox/__init__.py +44 -0
- skilllite/sandbox/skillbox/binary.py +421 -0
- skilllite/sandbox/skillbox/executor.py +608 -0
- skilllite/sandbox/utils.py +77 -0
- skilllite/validation.py +137 -0
- skilllite-0.1.0.dist-info/METADATA +293 -0
- skilllite-0.1.0.dist-info/RECORD +32 -0
- skilllite-0.1.0.dist-info/WHEEL +5 -0
- skilllite-0.1.0.dist-info/entry_points.txt +3 -0
- skilllite-0.1.0.dist-info/licenses/LICENSE +21 -0
- skilllite-0.1.0.dist-info/top_level.txt +1 -0
skilllite/core/metadata.py
@@ -0,0 +1,338 @@
+"""
+Skill metadata parsing from SKILL.md files.
+
+This is a CORE module - do not modify without explicit permission.
+
+Follows the official Claude Agent Skills specification:
+https://docs.anthropic.com/en/docs/agents-and-tools/agent-skills/specification
+
+Network access and language are derived from the 'compatibility' field.
+Entry point is auto-detected from scripts/ directory.
+"""
+
+import re
+from dataclasses import dataclass, field
+from pathlib import Path
+from typing import Any, Dict, List, Optional
+
+import yaml
+
+@dataclass
+class NetworkPolicy:
+    """Network access policy for a skill (derived from compatibility field)."""
+    enabled: bool = False
+    outbound: List[str] = field(default_factory=list)
+
+@dataclass
+class SkillMetadata:
+    """Skill metadata parsed from SKILL.md YAML front matter."""
+    name: str
+    entry_point: str
+    language: Optional[str] = None
+    description: Optional[str] = None
+    version: Optional[str] = None
+    compatibility: Optional[str] = None
+    network: NetworkPolicy = field(default_factory=NetworkPolicy)
+    input_schema: Optional[Dict[str, Any]] = None
+    output_schema: Optional[Dict[str, Any]] = None
+
+    @classmethod
+    def from_dict(cls, data: Dict[str, Any], skill_dir: Optional[Path] = None) -> "SkillMetadata":
+        """Create SkillMetadata from parsed YAML front matter."""
+        version = data.get("version")
+        if not version and "metadata" in data:
+            version = data["metadata"].get("version")
+
+        compatibility = data.get("compatibility")
+
+        # Parse network policy from compatibility field
+        network = parse_compatibility_for_network(compatibility)
+
+        # Auto-detect entry point
+        entry_point = ""
+        if skill_dir:
+            detected = detect_entry_point(skill_dir)
+            if detected:
+                entry_point = detected
+
+        # Detect language from compatibility or entry point
+        language = parse_compatibility_for_language(compatibility)
+        if not language and entry_point:
+            language = detect_language_from_entry_point(entry_point)
+
+        return cls(
+            name=data.get("name", ""),
+            entry_point=entry_point,
+            language=language,
+            description=data.get("description"),
+            version=version,
+            compatibility=compatibility,
+            network=network,
+            input_schema=data.get("input_schema"),
+            output_schema=data.get("output_schema")
+        )
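The front-matter dict handed to `from_dict` maps directly onto the dataclass fields above. A minimal usage sketch, not part of the package: the skill name and values are hypothetical, and the import path `skilllite.core.metadata` is assumed from the file list.

```python
from skilllite.core.metadata import SkillMetadata

front_matter = {
    "name": "web-fetcher",                    # hypothetical skill
    "description": "Fetch a URL and return its text",
    "version": "0.1.0",
    "compatibility": "Requires Python 3.x, internet",
}

meta = SkillMetadata.from_dict(front_matter)  # no skill_dir: entry_point stays ""
print(meta.language)          # "python"  (derived from the compatibility string)
print(meta.network.enabled)   # True      ("internet" keyword)
print(meta.network.outbound)  # ["*:80", "*:443"]
```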
+
+
+def parse_compatibility_for_network(compatibility: Optional[str]) -> NetworkPolicy:
+    """
+    Parse compatibility string to extract network policy.
+
+    Examples:
+    - "Requires network access" -> enabled=True
+    - "Requires Python 3.x, internet" -> enabled=True
+    - "Requires git, docker" -> enabled=False
+    """
+    if not compatibility:
+        return NetworkPolicy()
+
+    compat_lower = compatibility.lower()
+
+    # Check for network/internet keywords
+    needs_network = any(keyword in compat_lower for keyword in [
+        "network", "internet", "http", "api", "web"
+    ])
+
+    if needs_network:
+        return NetworkPolicy(
+            enabled=True,
+            outbound=["*:80", "*:443"]  # Allow all HTTP/HTTPS by default
+        )
+
+    return NetworkPolicy()
+
+
+def parse_compatibility_for_language(compatibility: Optional[str]) -> Optional[str]:
+    """
+    Parse compatibility string to detect language.
+
+    Examples:
+    - "Requires Python 3.x" -> "python"
+    - "Requires Node.js" -> "node"
+    - "Requires bash" -> "bash"
+    """
+    if not compatibility:
+        return None
+
+    compat_lower = compatibility.lower()
+
+    if "python" in compat_lower:
+        return "python"
+    elif "node" in compat_lower or "javascript" in compat_lower or "typescript" in compat_lower:
+        return "node"
+    elif "bash" in compat_lower or "shell" in compat_lower:
+        return "bash"
+
+    return None
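Both helpers are pure string heuristics, so their behaviour is easy to exercise directly. A small sketch against the keyword lists above (import path assumed from the file list):

```python
from skilllite.core.metadata import (
    parse_compatibility_for_language,
    parse_compatibility_for_network,
)

for compat in ["Requires Python 3.x, internet", "Requires git, docker", None]:
    policy = parse_compatibility_for_network(compat)
    lang = parse_compatibility_for_language(compat)
    print(f"{compat!r}: network={policy.enabled} outbound={policy.outbound} language={lang}")

# Expected, per the keyword lists above:
#   'Requires Python 3.x, internet': network=True outbound=['*:80', '*:443'] language=python
#   'Requires git, docker':          network=False outbound=[] language=None
#   None:                            network=False outbound=[] language=None
```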
+
+
+def detect_language_from_entry_point(entry_point: str) -> Optional[str]:
+    """Detect language from entry point file extension."""
+    if entry_point.endswith(".py"):
+        return "python"
+    elif entry_point.endswith(".js") or entry_point.endswith(".ts"):
+        return "node"
+    elif entry_point.endswith(".sh"):
+        return "bash"
+    return None
+
+def detect_entry_point(skill_dir: Path) -> Optional[str]:
+    """
+    Auto-detect entry point from skill directory.
+
+    Detection strategy (in order of priority):
+    1. Look for main.* files (main.py, main.js, main.ts, main.sh)
+    2. Look for index.* files (common in Node.js projects)
+    3. Look for run.* or entry.* files
+    4. If only one script file exists, use it as entry point
+    5. If multiple scripts exist, return None (requires explicit config or LLM inference)
+
+    Returns:
+        Relative path to entry point (e.g., "scripts/main.py"), or None if not detected
+    """
+    scripts_dir = skill_dir / "scripts"
+    if not scripts_dir.exists():
+        return None
+
+    supported_extensions = [".py", ".js", ".ts", ".sh"]
+
+    # Priority 1: Look for main.* files
+    for ext in supported_extensions:
+        main_file = scripts_dir / f"main{ext}"
+        if main_file.exists():
+            return f"scripts/main{ext}"
+
+    # Priority 2: Look for index.* files (common in Node.js)
+    for ext in supported_extensions:
+        index_file = scripts_dir / f"index{ext}"
+        if index_file.exists():
+            return f"scripts/index{ext}"
+
+    # Priority 3: Look for run.* or entry.* files
+    for prefix in ["run", "entry", "app", "cli"]:
+        for ext in supported_extensions:
+            candidate = scripts_dir / f"{prefix}{ext}"
+            if candidate.exists():
+                return f"scripts/{prefix}{ext}"
+
+    # Priority 4: If only one script file exists, use it
+    script_files = []
+    for ext in supported_extensions:
+        script_files.extend(scripts_dir.glob(f"*{ext}"))
+
+    # Filter out test files and __init__.py
+    script_files = [
+        f for f in script_files
+        if not f.name.startswith("test_")
+        and not f.name.endswith("_test.py")
+        and f.name != "__init__.py"
+        and not f.name.startswith(".")
+    ]
+
+    if len(script_files) == 1:
+        return f"scripts/{script_files[0].name}"
+
+    # Multiple scripts or no scripts found - return None
+    # This will be handled by LLM inference or explicit configuration
+    return None
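The priority order above is easiest to see against a throwaway directory. A sketch using a temporary skill layout (the file names are illustrative, import path assumed):

```python
import tempfile
from pathlib import Path

from skilllite.core.metadata import detect_entry_point

with tempfile.TemporaryDirectory() as tmp:
    skill_dir = Path(tmp)
    scripts = skill_dir / "scripts"
    scripts.mkdir()

    (scripts / "helper.py").write_text("print('helper')\n")
    print(detect_entry_point(skill_dir))  # scripts/helper.py (only script, priority 4)

    (scripts / "main.py").write_text("print('main')\n")
    print(detect_entry_point(skill_dir))  # scripts/main.py (priority 1 wins)
```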
+
+def detect_all_scripts(skill_dir: Path) -> List[Dict[str, str]]:
+    """
+    Detect all executable scripts in a skill directory.
+
+    This is useful for skills with multiple entry points (like skill-creator
+    which has init_skill.py, package_skill.py, etc.)
+
+    Returns:
+        List of dicts with 'name', 'path', and 'language' for each script
+    """
+    scripts_dir = skill_dir / "scripts"
+    if not scripts_dir.exists():
+        return []
+
+    extension_to_language = {
+        ".py": "python",
+        ".js": "node",
+        ".ts": "node",
+        ".sh": "bash",
+    }
+
+    scripts = []
+    for ext, lang in extension_to_language.items():
+        for script_file in scripts_dir.glob(f"*{ext}"):
+            # Skip test files and __init__.py
+            if (script_file.name.startswith("test_")
+                    or script_file.name.endswith("_test.py")
+                    or script_file.name == "__init__.py"
+                    or script_file.name.startswith(".")):
+                continue
+
+            # Generate a tool name from the script filename
+            tool_name = script_file.stem.replace("_", "-")
+
+            scripts.append({
+                "name": tool_name,
+                "path": f"scripts/{script_file.name}",
+                "language": lang,
+                "filename": script_file.name,
+            })
+
+    return scripts
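For multi-entry skills, the returned records are what downstream tool registration consumes. A sketch of the shape (hypothetical script names, import path assumed):

```python
import tempfile
from pathlib import Path

from skilllite.core.metadata import detect_all_scripts

with tempfile.TemporaryDirectory() as tmp:
    scripts = Path(tmp) / "scripts"
    scripts.mkdir(parents=True)
    (scripts / "init_skill.py").write_text("")
    (scripts / "package_skill.py").write_text("")
    (scripts / "test_helpers.py").write_text("")  # skipped: test_ prefix

    for record in detect_all_scripts(Path(tmp)):
        print(record)
    # e.g. {'name': 'init-skill', 'path': 'scripts/init_skill.py',
    #       'language': 'python', 'filename': 'init_skill.py'}
```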
+
+def detect_language(skill_dir: Path, metadata: Optional[SkillMetadata] = None) -> str:
+    """
+    Detect the programming language of a skill.
+
+    Args:
+        skill_dir: Path to the skill directory
+        metadata: Optional metadata object (may contain language info)
+
+    Returns:
+        Language string (e.g., "python", "node", "bash")
+    """
+    # First check metadata
+    if metadata and metadata.language:
+        return metadata.language
+
+    # Check entry point extension
+    if metadata and metadata.entry_point:
+        entry_ext = Path(metadata.entry_point).suffix
+        ext_map = {".py": "python", ".js": "node", ".ts": "node", ".sh": "bash"}
+        if entry_ext in ext_map:
+            return ext_map[entry_ext]
+
+    # Scan scripts directory
+    scripts_dir = skill_dir / "scripts"
+    if scripts_dir.exists():
+        for ext, lang in [(".py", "python"), (".js", "node"), (".ts", "node"), (".sh", "bash")]:
+            if any(scripts_dir.glob(f"*{ext}")):
+                return lang
+
+    return "unknown"
+
+def extract_yaml_front_matter(content: str, skill_dir: Optional[Path] = None) -> SkillMetadata:
+    """
+    Extract YAML front matter from markdown content.
+
+    Args:
+        content: Full markdown content
+        skill_dir: Optional skill directory path for auto-detection
+
+    Returns:
+        SkillMetadata object
+    """
+    # Check for YAML front matter (between --- markers)
+    front_matter_match = re.match(r'^---\n(.*?)\n---', content, re.DOTALL)
+
+    data = {}
+    if front_matter_match:
+        try:
+            data = yaml.safe_load(front_matter_match.group(1))
+            if not isinstance(data, dict):
+                data = {}
+        except yaml.YAMLError:
+            data = {}
+
+    return SkillMetadata.from_dict(data, skill_dir)
+
+def parse_skill_metadata(skill_dir: Path) -> SkillMetadata:
+    """Parse SKILL.md file and extract metadata from YAML front matter."""
+    skill_md_path = skill_dir / "SKILL.md"
+    if not skill_md_path.exists():
+        raise FileNotFoundError(f"SKILL.md not found in directory: {skill_dir}")
+    content = skill_md_path.read_text(encoding="utf-8")
+    metadata = extract_yaml_front_matter(content, skill_dir)
+
+    return metadata
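Putting the pieces together, `parse_skill_metadata` is the on-disk entry point. An end-to-end sketch with an illustrative SKILL.md (all file contents are invented for the example, import path assumed):

```python
import tempfile
from pathlib import Path

from skilllite.core.metadata import parse_skill_metadata

SKILL_MD = """---
name: csv-report
description: Summarise a CSV file
compatibility: Requires Python 3.x
---

# csv-report

Reads a CSV file and prints a short summary.
"""

with tempfile.TemporaryDirectory() as tmp:
    skill_dir = Path(tmp)
    (skill_dir / "SKILL.md").write_text(SKILL_MD, encoding="utf-8")
    (skill_dir / "scripts").mkdir()
    (skill_dir / "scripts" / "main.py").write_text("print('report')\n")

    meta = parse_skill_metadata(skill_dir)
    print(meta.name, meta.entry_point, meta.language, meta.network.enabled)
    # csv-report scripts/main.py python False
```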
+
+def get_skill_summary(content: str, max_length: int = 200) -> str:
+    """
+    Extract a concise summary from SKILL.md content.
+
+    Removes YAML front matter, code blocks, and headers to extract
+    the main descriptive text.
+
+    Args:
+        content: Full SKILL.md content
+        max_length: Maximum length of the summary
+
+    Returns:
+        Extracted summary string
+    """
+    # Remove YAML front matter
+    content_clean = re.sub(r"^---\s*\n.*?\n---\s*\n", "", content, flags=re.DOTALL)
+    # Remove code blocks
+    content_clean = re.sub(r"```[\s\S]*?```", "", content_clean)
+    # Remove headers
+    content_clean = re.sub(r"^#+\s*", "", content_clean, flags=re.MULTILINE)
+
+    lines = [line.strip() for line in content_clean.split("\n") if line.strip()]
+    summary_lines = []
+    current_length = 0
+
+    for line in lines:
+        if current_length + len(line) > max_length:
+            break
+        summary_lines.append(line)
+        current_length += len(line) + 1
+
+    return " ".join(summary_lines)[:max_length]
skilllite/core/prompt_builder.py
@@ -0,0 +1,321 @@
+"""
+Prompt Builder - System prompt context generation.
+
+This module handles:
+- Generating system prompt context for LLM
+- Formatting skill information for different modes
+- Skills status reporting and logging
+"""
+
+import json
+from typing import Any, Dict, List, Optional, TYPE_CHECKING
+
+from .skill_info import SkillInfo
+
+if TYPE_CHECKING:
+    from .registry import SkillRegistry
+
+
+class PromptBuilder:
+    """
+    Builder for generating system prompt context from skills.
+
+    Supports multiple disclosure modes:
+    - summary: Brief overview of skills
+    - standard: Input schema and usage summary
+    - progressive: Summary with "more details available" hint
+    - full: Complete instructions and references
+    """
+
+    def __init__(self, registry: "SkillRegistry"):
+        """
+        Initialize the prompt builder.
+
+        Args:
+            registry: Skill registry for accessing skill info
+        """
+        self._registry = registry
+
+    def get_system_prompt_context(
+        self,
+        include_full_instructions: bool = True,
+        include_references: bool = False,
+        include_assets: bool = False,
+        skills: Optional[List[str]] = None,
+        mode: str = "full",
+        max_tokens_per_skill: Optional[int] = None
+    ) -> str:
+        """
+        Generate system prompt context containing skill information.
+
+        Args:
+            include_full_instructions: Include full instructions (affects mode)
+            include_references: Include reference documents
+            include_assets: Include asset files
+            skills: Specific skills to include (None = all)
+            mode: Disclosure mode (summary, standard, progressive, full)
+            max_tokens_per_skill: Max tokens per skill content
+
+        Returns:
+            Formatted system prompt context string
+        """
+        if not include_full_instructions and mode == "full":
+            mode = "standard"
+
+        lines = ["# Available Skills\n"]
+        target_skills = self._registry.list_skills()
+        if skills:
+            target_skills = [
+                info for info in self._registry.list_skills()
+                if info.name in skills
+            ]
+
+        if mode == "progressive":
+            lines.append("\n> **Note**: Skill details are shown in summary mode.\n\n")
+
+        for info in target_skills:
+            skill_lines = self._format_skill_context(
+                info,
+                mode=mode,
+                include_references=include_references,
+                include_assets=include_assets,
+                max_tokens=max_tokens_per_skill
+            )
+            lines.extend(skill_lines)
+            lines.append("\n---\n")
+
+        return "\n".join(lines)
+
+    def _format_skill_context(
+        self,
+        info: SkillInfo,
+        mode: str = "full",
+        include_references: bool = False,
+        include_assets: bool = False,
+        max_tokens: Optional[int] = None
+    ) -> List[str]:
+        """Format a single skill's context based on the disclosure mode."""
+        from ..parsing import get_skill_summary
+
+        lines = [f"## {info.name}\n"]
+        if info.description:
+            lines.append(f"**Description:** {info.description}\n")
+
+        if mode == "summary":
+            full_content = info.get_full_content()
+            if full_content:
+                summary = get_skill_summary(full_content, max_length=150)
+                if summary:
+                    lines.append(f"\n**Summary:** {summary}\n")
+
+        elif mode == "standard":
+            if info.metadata.input_schema:
+                lines.append("\n**Input Schema:**\n")
+                schema_str = json.dumps(info.metadata.input_schema, indent=2, ensure_ascii=False)
+                lines.append(f"```json\n{schema_str}\n```\n")
+            full_content = info.get_full_content()
+            if full_content:
+                summary = get_skill_summary(full_content, max_length=200)
+                if summary:
+                    lines.append(f"\n**Usage:** {summary}\n")
+
+        elif mode == "progressive":
+            if info.metadata.input_schema:
+                lines.append("\n**Input Schema:**\n")
+                schema_str = json.dumps(info.metadata.input_schema, indent=2, ensure_ascii=False)
+                lines.append(f"```json\n{schema_str}\n```\n")
+            full_content = info.get_full_content()
+            if full_content:
+                summary = get_skill_summary(full_content, max_length=150)
+                if summary:
+                    lines.append(f"\n**Summary:** {summary}\n")
+            has_more = bool(
+                info.get_references() or
+                info.get_assets() or
+                (full_content and len(full_content) > 300)
+            )
+            if has_more:
+                lines.append(f"\n> đĄ *More details available.*\n")
+
+        else:  # full mode
+            full_content = info.get_full_content()
+            if full_content:
+                if max_tokens and len(full_content) > max_tokens * 4:
+                    full_content = full_content[:max_tokens * 4] + "\n\n... (truncated)"
+                lines.append("\n### Instructions\n")
+                lines.append(full_content)
+                lines.append("\n")
+
+        # Include references for full/standard modes
+        if include_references and mode in ["full", "standard"]:
+            refs = info.get_references()
+            if refs:
+                lines.append("\n### Reference Documents\n")
+                for filename, content in refs.items():
+                    lines.append(f"\n#### {filename}\n")
+                    if max_tokens and len(content) > max_tokens * 2:
+                        content = content[:max_tokens * 2] + "\n... (truncated)"
+                    lines.append(content)
+                    lines.append("\n")
+
+        # Include assets for full/standard modes
+        if include_assets and mode in ["full", "standard"]:
+            assets = info.get_assets()
+            if assets:
+                lines.append("\n### Assets\n")
+                for filename, content in assets.items():
+                    lines.append(f"\n#### {filename}\n")
+                    if isinstance(content, dict):
+                        content_str = json.dumps(content, indent=2, ensure_ascii=False)
+                        if max_tokens and len(content_str) > max_tokens * 2:
+                            content_str = content_str[:max_tokens * 2] + "\n... (truncated)"
+                        lines.append(f"```json\n{content_str}\n```\n")
+                    else:
+                        if max_tokens and len(str(content)) > max_tokens * 2:
+                            content = str(content)[:max_tokens * 2] + "\n... (truncated)"
+                        lines.append(f"```\n{content}\n```\n")
+
+        return lines
+
+    def get_skill_details(self, skill_name: str) -> Optional[str]:
+        """Get full details for a specific skill."""
+        info = self._registry.get_skill(skill_name)
+        if not info:
+            return None
+        lines = self._format_skill_context(
+            info,
+            mode="full",
+            include_references=True,
+            include_assets=True
+        )
+        return "\n".join(lines)
+
+    def get_skills_summary(self) -> str:
+        """Get a compact summary of all available skills."""
+        return self.get_system_prompt_context(mode="summary")
+
+    def estimate_context_tokens(
+        self,
+        mode: str = "full",
+        include_references: bool = False,
+        include_assets: bool = False
+    ) -> int:
+        """Estimate the number of tokens the system prompt context will use."""
+        context = self.get_system_prompt_context(
+            mode=mode,
+            include_references=include_references,
+            include_assets=include_assets
+        )
+        return len(context) // 4
+
+    def get_skill_context(self, skill_name: str) -> Optional[Dict[str, Any]]:
+        """Get complete context for a specific skill."""
+        info = self._registry.get_skill(skill_name)
+        if not info:
+            return None
+        return info.get_context(include_references=True, include_assets=True)
+
+    def get_all_skill_contexts(self) -> Dict[str, Dict[str, Any]]:
+        """Get complete context for all skills."""
+        return {
+            info.name: info.get_context(include_references=True, include_assets=True)
+            for info in self._registry.list_skills()
+        }
+
+    # ==================== Skills Status & Logging ====================
+
+    def get_skills_status(self) -> Dict[str, Any]:
+        """
+        Get structured status information about all loaded skills.
+
+        Returns a dict with:
+        - all_skills: list of all skill names
+        - executable_tools: list of executable skill/tool names
+        - multi_script_tools: list of multi-script tool names
+        - prompt_only_guides: list of prompt-only skill names
+        - details: dict mapping skill name to its details
+        """
+        executable = self._registry.list_executable_skills()
+        prompt_only = self._registry.list_prompt_only_skills()
+        multi_script_tools = self._registry.list_multi_script_tools()
+
+        # Build list of executable tool names
+        executable_tool_names = []
+        for info in executable:
+            if info.metadata.entry_point:
+                executable_tool_names.append(info.name)
+        executable_tool_names.extend(multi_script_tools)
+
+        details = {}
+        for info in self._registry.list_skills():
+            refs = info.get_references()
+            assets = info.get_assets()
+            scripts = info.get_all_scripts()
+
+            is_multi_script = info.name in [
+                t["skill_name"] for t in self._registry.multi_script_tools.values()
+            ]
+
+            details[info.name] = {
+                "description": info.description,
+                "is_executable": bool(info.metadata.entry_point) or is_multi_script,
+                "is_multi_script": is_multi_script,
+                "has_references": bool(refs),
+                "has_assets": bool(assets),
+                "references": list(refs.keys()) if refs else [],
+                "assets": list(assets.keys()) if assets else [],
+                "scripts": [s["name"] for s in scripts] if scripts else [],
+            }
+
+        return {
+            "all_skills": self._registry.skill_names(),
+            "executable_tools": executable_tool_names,
+            "multi_script_tools": multi_script_tools,
+            "prompt_only_guides": [s.name for s in prompt_only],
+            "details": details,
+        }
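Because `get_skills_status` only touches the registry surface shown above, its output shape can be sketched with duck-typed stand-ins. Every class and value below is invented for illustration; the real `SkillRegistry` and skill objects live in `skilllite/core/registry.py` and `skilllite/core/skill_info.py`.

```python
from dataclasses import dataclass, field
from typing import Any, Dict, List

from skilllite.core.prompt_builder import PromptBuilder


@dataclass
class FakeMetadata:
    entry_point: str = ""


@dataclass
class FakeSkill:
    name: str
    description: str = ""
    metadata: FakeMetadata = field(default_factory=FakeMetadata)

    def get_references(self) -> Dict[str, str]:
        return {}

    def get_assets(self) -> Dict[str, Any]:
        return {}

    def get_all_scripts(self) -> List[Dict[str, str]]:
        return []


class FakeRegistry:
    """Stand-in exposing only the methods get_skills_status calls."""

    def __init__(self, skills: List[FakeSkill]):
        self._skills = skills
        self.multi_script_tools: Dict[str, Dict[str, str]] = {}

    def list_skills(self) -> List[FakeSkill]:
        return self._skills

    def list_executable_skills(self) -> List[FakeSkill]:
        return [s for s in self._skills if s.metadata.entry_point]

    def list_prompt_only_skills(self) -> List[FakeSkill]:
        return [s for s in self._skills if not s.metadata.entry_point]

    def list_multi_script_tools(self) -> List[str]:
        return list(self.multi_script_tools.keys())

    def skill_names(self) -> List[str]:
        return [s.name for s in self._skills]


registry = FakeRegistry([
    FakeSkill("web-fetcher", "Fetch a URL", FakeMetadata("scripts/main.py")),
    FakeSkill("style-guide", "Writing conventions"),
])

status = PromptBuilder(registry).get_skills_status()
print(status["executable_tools"])    # ['web-fetcher']
print(status["prompt_only_guides"])  # ['style-guide']
```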
+
+    def print_skills_status(self, verbose: bool = False) -> None:
+        """Print a formatted status of all loaded skills."""
+        status = self.get_skills_status()
+
+        print(f"đŠ Loaded skills: {status['all_skills']}")
+
+        if status["executable_tools"]:
+            print(f"   đ§ Callable tools: {status['executable_tools']}")
+
+        if status["multi_script_tools"]:
+            print(f"   đš Multi-script tools: {status['multi_script_tools']}")
+
+        if status["prompt_only_guides"]:
+            print(f"   đ Prompt-only guides: {status['prompt_only_guides']}")
+
+        if verbose:
+            for name, detail in status["details"].items():
+                extras = []
+                if detail["has_references"]:
+                    extras.append(f"refs={detail['references']}")
+                if detail["has_assets"]:
+                    extras.append(f"assets={detail['assets']}")
+                if extras:
+                    print(f"      đ {name}: {', '.join(extras)}")
+                if detail["description"]:
+                    skill_type = "Tool" if detail["is_executable"] else "Guide"
+                    print(f"      ââ [{skill_type}] {detail['description']}")
+
+    def get_prompt_only_status(self) -> List[Dict[str, str]]:
+        """Get status info for prompt-only skills."""
+        return [
+            {"name": s.name, "description": s.description or ""}
+            for s in self._registry.list_prompt_only_skills()
+        ]
+
+    def print_prompt_only_status(self) -> None:
+        """Print status of prompt-only skills."""
+        prompt_only = self.get_prompt_only_status()
+        if prompt_only:
+            names = [s["name"] for s in prompt_only]
+            print(f"đ Prompt-only skills injected: {names}")
+            for s in prompt_only:
+                if s["description"]:
+                    print(f"   ââ {s['name']}: {s['description']}")