a4e-0.1.5-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- a4e/__init__.py +0 -0
- a4e/cli.py +47 -0
- a4e/cli_commands/__init__.py +5 -0
- a4e/cli_commands/add.py +376 -0
- a4e/cli_commands/deploy.py +149 -0
- a4e/cli_commands/dev.py +162 -0
- a4e/cli_commands/info.py +206 -0
- a4e/cli_commands/init.py +211 -0
- a4e/cli_commands/list.py +227 -0
- a4e/cli_commands/mcp.py +504 -0
- a4e/cli_commands/remove.py +197 -0
- a4e/cli_commands/update.py +285 -0
- a4e/cli_commands/validate.py +117 -0
- a4e/core.py +109 -0
- a4e/dev_runner.py +425 -0
- a4e/server.py +86 -0
- a4e/templates/agent.md.j2 +168 -0
- a4e/templates/agent.py.j2 +15 -0
- a4e/templates/agents.md.j2 +99 -0
- a4e/templates/metadata.json.j2 +20 -0
- a4e/templates/prompt.md.j2 +20 -0
- a4e/templates/prompts/agent.md.j2 +206 -0
- a4e/templates/skills/agents.md.j2 +110 -0
- a4e/templates/skills/skill.md.j2 +120 -0
- a4e/templates/support_module.py.j2 +84 -0
- a4e/templates/tool.py.j2 +60 -0
- a4e/templates/tools/agent.md.j2 +192 -0
- a4e/templates/view.tsx.j2 +21 -0
- a4e/templates/views/agent.md.j2 +219 -0
- a4e/tools/__init__.py +70 -0
- a4e/tools/agent_tools/__init__.py +12 -0
- a4e/tools/agent_tools/add_support_module.py +95 -0
- a4e/tools/agent_tools/add_tool.py +115 -0
- a4e/tools/agent_tools/list_tools.py +28 -0
- a4e/tools/agent_tools/remove_tool.py +69 -0
- a4e/tools/agent_tools/update_tool.py +123 -0
- a4e/tools/deploy/__init__.py +8 -0
- a4e/tools/deploy/deploy.py +59 -0
- a4e/tools/dev/__init__.py +10 -0
- a4e/tools/dev/check_environment.py +79 -0
- a4e/tools/dev/dev_start.py +30 -0
- a4e/tools/dev/dev_stop.py +26 -0
- a4e/tools/project/__init__.py +10 -0
- a4e/tools/project/get_agent_info.py +66 -0
- a4e/tools/project/get_instructions.py +216 -0
- a4e/tools/project/initialize_project.py +231 -0
- a4e/tools/schemas/__init__.py +8 -0
- a4e/tools/schemas/generate_schemas.py +278 -0
- a4e/tools/skills/__init__.py +12 -0
- a4e/tools/skills/add_skill.py +105 -0
- a4e/tools/skills/helpers.py +137 -0
- a4e/tools/skills/list_skills.py +54 -0
- a4e/tools/skills/remove_skill.py +74 -0
- a4e/tools/skills/update_skill.py +150 -0
- a4e/tools/validation/__init__.py +8 -0
- a4e/tools/validation/validate.py +389 -0
- a4e/tools/views/__init__.py +12 -0
- a4e/tools/views/add_view.py +40 -0
- a4e/tools/views/helpers.py +91 -0
- a4e/tools/views/list_views.py +27 -0
- a4e/tools/views/remove_view.py +73 -0
- a4e/tools/views/update_view.py +124 -0
- a4e/utils/dev_manager.py +253 -0
- a4e/utils/schema_generator.py +255 -0
- a4e-0.1.5.dist-info/METADATA +427 -0
- a4e-0.1.5.dist-info/RECORD +70 -0
- a4e-0.1.5.dist-info/WHEEL +5 -0
- a4e-0.1.5.dist-info/entry_points.txt +2 -0
- a4e-0.1.5.dist-info/licenses/LICENSE +21 -0
- a4e-0.1.5.dist-info/top_level.txt +1 -0

a4e/tools/schemas/generate_schemas.py
@@ -0,0 +1,278 @@
"""
Schema generation tool.

NOTE: This tool may be called by the MCP server which uses stdio.
All logging MUST go to stderr to avoid breaking the MCP protocol.
"""

from pathlib import Path
from typing import Optional
import json
import inspect
import re
import sys
import importlib.util
from types import ModuleType

from ...core import mcp, get_project_dir


def _log(message: str) -> None:
    """Log to stderr (stdout is reserved for MCP protocol)."""
    print(message, file=sys.stderr)


@mcp.tool()
def generate_schemas(force: bool = False, agent_name: Optional[str] = None) -> dict:
    """
    Auto-generate all schemas from code

    Args:
        force: If True, overwrite existing schema files. If False, skip generation if schemas exist.
        agent_name: Optional agent ID if not in agent directory

    Generates:
    - tools/schemas.json (from @tool functions)
    - views/*/view.schema.json (from TypeScript props)
    - views/schemas.json (aggregated summary for backend)
    """
    # Import schema_generator dynamically from utils directory
    schema_gen_path = Path(__file__).parent.parent.parent / "utils" / "schema_generator.py"
    spec = importlib.util.spec_from_file_location("schema_generator", schema_gen_path)
    if not spec or not spec.loader:
        return {"success": False, "error": "Failed to load schema_generator module"}
    schema_gen_module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(schema_gen_module)
    generate_schema = schema_gen_module.generate_schema

    project_dir = get_project_dir(agent_name)
    tools_dir = project_dir / "tools"
    views_dir = project_dir / "views"

    results = {
        "tools": {"count": 0, "status": "skipped", "errors": []},
        "views": {"count": 0, "status": "skipped", "errors": []},
    }

    # Check for existing schemas if force is False
    tools_schema_file = tools_dir / "schemas.json" if tools_dir.exists() else None
    views_schema_file = views_dir / "schemas.json" if views_dir.exists() else None

    if not force:
        # Check if tool schemas exist
        if tools_schema_file and tools_schema_file.exists():
            _log(
                f"Skipping tool schema generation - {tools_schema_file} exists (use force=True to overwrite)"
            )
            results["tools"]["status"] = "skipped"

        # Check if view schemas exist
        if views_schema_file and views_schema_file.exists():
            _log(
                f"Skipping view schema generation - {views_schema_file} exists (use force=True to overwrite)"
            )
            results["views"]["status"] = "skipped"

        # If both are skipped, return early
        if (
            tools_schema_file
            and tools_schema_file.exists()
            and views_schema_file
            and views_schema_file.exists()
        ):
            return results

    # Generate Tool Schemas
    if tools_dir.exists() and (
        force or not (tools_schema_file and tools_schema_file.exists())
    ):
        tool_schemas = []
        has_errors = False

        # Add project dir to sys.path to allow imports
        if str(project_dir) not in sys.path:
            sys.path.insert(0, str(project_dir))

        # Mock a4e SDK to avoid backend dependency hell (psycopg2, etc.)
        # We only need the @tool decorator to mark functions
        if "a4e" not in sys.modules:
            a4e_module = ModuleType("a4e")
            a4e_sdk_module = ModuleType("a4e.sdk")

            def mock_tool(func):
                func._is_tool = True
                return func

            a4e_sdk_module.tool = mock_tool
            a4e_module.sdk = a4e_sdk_module

            sys.modules["a4e"] = a4e_module
            sys.modules["a4e.sdk"] = a4e_sdk_module

        for tool_file in tools_dir.glob("*.py"):
            if tool_file.name == "__init__.py":
                continue

            try:
                # Dynamic import
                spec = importlib.util.spec_from_file_location(tool_file.stem, tool_file)
                if spec and spec.loader:
                    module = importlib.util.module_from_spec(spec)
                    spec.loader.exec_module(module)

                    # Find @tool decorated functions or functions matching filename convention
                    for name, obj in inspect.getmembers(module):
                        if not inspect.isfunction(obj):
                            continue

                        is_decorated_tool = getattr(obj, "_is_tool", False)
                        matches_filename = name == tool_file.stem

                        if is_decorated_tool or matches_filename:
                            schema = generate_schema(obj)
                            tool_schemas.append(schema)
                            results["tools"]["count"] += 1
                            if is_decorated_tool:
                                break
            except Exception as e:
                error_msg = f"Error processing {tool_file}: {e}"
                _log(error_msg)
                results["tools"]["errors"].append(error_msg)
                has_errors = True

        try:
            schema_file = tools_dir / "schemas.json"
            if schema_file.exists() and force:
                _log(f"Overwriting {schema_file}")

            # Convert list to dictionary format with tool names as keys
            # This is the format expected by the A4E main application
            schemas_dict = {}
            for schema in tool_schemas:
                tool_name = schema.get("function", {}).get("name")
                if not tool_name:
                    # Fallback for legacy format
                    tool_name = schema.get("name", "unknown")
                    # Convert legacy format to new format
                    schema = {
                        "function": {
                            "name": tool_name,
                            "description": schema.get("description", ""),
                            "parameters": schema.get("inputSchema", schema.get("parameters", {}))
                        },
                        "returns": schema.get("returns", {
                            "type": "object",
                            "properties": {"status": {"type": "string"}}
                        })
                    }
                schemas_dict[tool_name] = schema

            schema_file.write_text(json.dumps(schemas_dict, indent=2))
            results["tools"]["status"] = "error" if has_errors else "success"
        except Exception as e:
            error_msg = f"Error writing schemas.json: {e}"
            _log(error_msg)
            results["tools"]["errors"].append(error_msg)
            results["tools"]["status"] = "error"

    # Generate View Schemas
    if views_dir.exists() and (
        force or not (views_schema_file and views_schema_file.exists())
    ):
        has_errors = False
        aggregated_views = {}

        for view_dir in views_dir.iterdir():
            if not view_dir.is_dir():
                continue

            view_file = view_dir / "view.tsx"
            if not view_file.exists():
                continue

            try:
                content = view_file.read_text()

                # Simple regex to find interface Props
                props_match = re.search(r"interface\s+(\w+Props)\s*{([^}]+)}", content)

                properties = {}
                required = []

                if props_match:
                    props_body = props_match.group(2)
                    for line in props_body.split("\n"):
                        line = line.strip()
                        if not line or line.startswith("//"):
                            continue

                        prop_match = re.match(r"(\w+)(\?)?:\s*([^;]+);", line)
                        if prop_match:
                            name = prop_match.group(1)
                            optional = prop_match.group(2) == "?"
                            ts_type = prop_match.group(3).strip()

                            json_type = "string"
                            if "number" in ts_type:
                                json_type = "number"
                            elif "boolean" in ts_type:
                                json_type = "boolean"
                            elif "Array" in ts_type or "[]" in ts_type:
                                json_type = "array"

                            properties[name] = {
                                "type": json_type,
                                "description": f"From {ts_type}",
                            }

                            if not optional:
                                required.append(name)

                schema = {
                    "name": view_dir.name,
                    "description": f"View for {view_dir.name}",
                    "props": {
                        "type": "object",
                        "properties": properties,
                        "required": required,
                    },
                }

                # Write individual schema
                view_schema_file = view_dir / "view.schema.json"
                if view_schema_file.exists() and force:
                    _log(f"Overwriting {view_schema_file}")
                view_schema_file.write_text(json.dumps(schema, indent=2))

                # Add to aggregated dict
                # Backend expects: { "view_id": { "id": "...", "description": "...", "params": {...} } }
                aggregated_views[view_dir.name] = {
                    "id": view_dir.name,
                    "description": f"View for {view_dir.name}",
                    "params": properties,
                }

                results["views"]["count"] += 1

            except Exception as e:
                error_msg = f"Error processing view {view_dir}: {e}"
                _log(error_msg)
                results["views"]["errors"].append(error_msg)
                has_errors = True

        # Write aggregated schemas.json
        try:
            aggregated_schema_file = views_dir / "schemas.json"
            if aggregated_schema_file.exists() and force:
                _log(f"Overwriting {aggregated_schema_file}")
            aggregated_schema_file.write_text(json.dumps(aggregated_views, indent=2))
        except Exception as e:
            error_msg = f"Error writing views/schemas.json: {e}"
            _log(error_msg)
            results["views"]["errors"].append(error_msg)
            has_errors = True

        results["views"]["status"] = "error" if has_errors else "success"

    return results
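
Note: generate_schemas picks up a function either because it carries the _is_tool marker set by the (mocked) a4e.sdk tool decorator, or because its name matches the file name. A minimal sketch of a tool file it would discover follows; the file name get_weather.py and its contents are illustrative, not part of this package.

# tools/get_weather.py -- hypothetical example, not included in the wheel
from a4e.sdk import tool  # generate_schemas injects a stub for this import


@tool
def get_weather(city: str, units: str = "metric") -> dict:
    """Return current weather for a city."""
    # A real implementation would call a weather API; schema generation only
    # inspects the signature and docstring via schema_generator.generate_schema().
    return {"status": "ok", "city": city, "units": units}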

a4e/tools/skills/__init__.py
@@ -0,0 +1,12 @@
"""
Skills management tools.
"""

from .add_skill import add_skill
from .list_skills import list_skills
from .remove_skill import remove_skill
from .update_skill import update_skill
from .helpers import create_skill

__all__ = ["add_skill", "list_skills", "remove_skill", "update_skill", "create_skill"]

a4e/tools/skills/add_skill.py
@@ -0,0 +1,105 @@
"""
Add skill tool.
"""

from typing import Optional, List
import json

from ...core import mcp, get_project_dir
from .helpers import create_skill


@mcp.tool()
def add_skill(
    skill_id: str,
    name: str,
    description: str,
    intent_triggers: List[str],
    output_view: str,
    internal_tools: Optional[List[str]] = None,
    requires_auth: bool = False,
    agent_name: Optional[str] = None,
) -> dict:
    """
    Add a new skill to the agent

    Args:
        skill_id: ID of the skill (snake_case, e.g., "show_welcome")
        name: Human-readable name (e.g., "Show Welcome")
        description: What the skill does and when to use it
        intent_triggers: List of phrases that trigger this skill
        output_view: View ID to render (e.g., "welcome") or "NONE" for no view
        internal_tools: List of tool names this skill uses (e.g., ["get_agents"])
        requires_auth: Whether this skill requires user authentication
        agent_name: Optional agent ID if not in agent directory
    """
    # Validate skill ID
    if not skill_id.replace("_", "").isalnum():
        return {
            "success": False,
            "error": "Skill ID must be alphanumeric with underscores",
        }

    if not intent_triggers or len(intent_triggers) == 0:
        return {
            "success": False,
            "error": "At least one intent trigger is required",
        }

    project_dir = get_project_dir(agent_name)
    warnings = []

    # Validate output_view exists
    view_props = {}
    if output_view and output_view != "NONE":
        view_dir = project_dir / "views" / output_view
        view_schema_file = view_dir / "view.schema.json"

        if not view_dir.exists():
            return {
                "success": False,
                "error": f"View '{output_view}' not found. Create it first with add_view()",
            }

        if view_schema_file.exists():
            try:
                schema = json.loads(view_schema_file.read_text())
                view_props = schema.get("props", {}).get("properties", {})
            except Exception:
                warnings.append(f"Could not parse view schema for '{output_view}'")

    # Validate internal_tools exist
    if internal_tools:
        tools_schema_file = project_dir / "tools" / "schemas.json"
        if tools_schema_file.exists():
            try:
                tools_schemas = json.loads(tools_schema_file.read_text())
                # Support both formats: dict (keys are tool names) or list (objects with 'name' field)
                if isinstance(tools_schemas, dict):
                    existing_tools = set(tools_schemas.keys())
                else:
                    existing_tools = {t.get("name") for t in tools_schemas if isinstance(t, dict)}

                for tool in internal_tools:
                    if tool not in existing_tools:
                        warnings.append(f"Tool '{tool}' not found in tools/schemas.json. Make sure it exists.")
            except Exception:
                warnings.append("Could not validate internal_tools against tools/schemas.json")

    result = create_skill(
        skill_id=skill_id,
        name=name,
        description=description,
        intent_triggers=intent_triggers,
        output_view=output_view,
        internal_tools=internal_tools,
        requires_auth=requires_auth,
        view_props=view_props,
        project_dir=project_dir,
    )

    # Add warnings to result if any
    if warnings and result.get("success"):
        result["warnings"] = warnings

    return result
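
As a rough usage sketch (all skill, view, and tool names below are made up), these are the arguments an MCP client might send when calling the add_skill tool; the tool validates the view and tools first, then delegates to create_skill in helpers.py.

# Hypothetical arguments for a tools/call of "add_skill" via the MCP server
arguments = {
    "skill_id": "show_weather",
    "name": "Show Weather",
    "description": "Display current weather for a requested city",
    "intent_triggers": ["what's the weather", "show me the forecast"],
    "output_view": "weather_card",      # must already exist under views/
    "internal_tools": ["get_weather"],  # checked against tools/schemas.json
    "requires_auth": False,
}
# On success the tool returns {"success": True, "message": "Created skill 'show_weather'", "path": ...},
# with an optional "warnings" list when referenced tools could not be validated.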

a4e/tools/skills/helpers.py
@@ -0,0 +1,137 @@
"""
Helper functions for skill management.
"""

from pathlib import Path
import json
from typing import List, Optional

from ...core import jinja_env


def create_skill(
    skill_id: str,
    name: str,
    description: str,
    intent_triggers: List[str],
    output_view: str,
    internal_tools: Optional[List[str]] = None,
    requires_auth: bool = False,
    view_props: Optional[dict] = None,
    project_dir: Path = None,
) -> dict:
    """
    Helper to create a skill directory with SKILL.md file.

    Args:
        skill_id: ID of the skill (snake_case)
        name: Human-readable name
        description: What the skill does
        intent_triggers: Phrases that trigger this skill
        output_view: View ID to render (or "NONE" for no view)
        internal_tools: List of tool names this skill uses
        requires_auth: Whether auth is required
        view_props: Expected props for the output view
        project_dir: Path to the agent project directory

    Returns:
        Result dictionary with success status
    """
    if project_dir is None:
        return {"success": False, "error": "project_dir is required"}

    skills_dir = project_dir / "skills"

    if not skills_dir.exists():
        return {
            "success": False,
            "error": f"skills/ directory not found at {skills_dir}",
        }

    skill_dir = skills_dir / skill_id
    if skill_dir.exists():
        return {"success": False, "error": f"Skill '{skill_id}' already exists"}

    try:
        skill_dir.mkdir()

        # Convert snake_case to Title Case for display
        skill_name = name or " ".join(word.title() for word in skill_id.split("_"))

        # Generate SKILL.md
        template = jinja_env.get_template("skills/skill.md.j2")
        skill_md = template.render(
            skill_name=skill_name,
            description=description,
            intent_triggers=intent_triggers,
            internal_tools=internal_tools or [],
            output_view=output_view,
            requires_auth=requires_auth,
            view_props=view_props or {},
        )
        (skill_dir / "SKILL.md").write_text(skill_md)

        # Update skills/schemas.json
        _update_skills_schema(
            skills_dir=skills_dir,
            skill_id=skill_id,
            name=skill_name,
            description=description,
            intent_triggers=intent_triggers,
            output_view=output_view,
            internal_tools=internal_tools,
            requires_auth=requires_auth,
        )

        return {
            "success": True,
            "message": f"Created skill '{skill_id}'",
            "path": str(skill_dir),
        }
    except Exception as e:
        return {"success": False, "error": str(e)}


def _update_skills_schema(
    skills_dir: Path,
    skill_id: str,
    name: str,
    description: str,
    intent_triggers: List[str],
    output_view: str,
    internal_tools: Optional[List[str]] = None,
    requires_auth: bool = False,
) -> None:
    """
    Update the skills/schemas.json file with the new skill.
    """
    schema_file = skills_dir / "schemas.json"

    # Load existing schemas or create empty dict
    if schema_file.exists():
        try:
            schemas = json.loads(schema_file.read_text())
        except json.JSONDecodeError:
            schemas = {}
    else:
        schemas = {}

    # Build output object
    output = {"view": output_view}
    if output_view == "NONE":
        output["view"] = "NONE"

    # Add new skill schema
    schemas[skill_id] = {
        "id": skill_id,
        "name": name,
        "description": description,
        "intent_triggers": intent_triggers,
        "requires_auth": requires_auth,
        "internal_tools": internal_tools or [],
        "output": output,
    }

    # Write updated schemas
    schema_file.write_text(json.dumps(schemas, indent=2))
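
For reference, _update_skills_schema keys each entry by skill_id, so the hypothetical skill from the earlier add_skill sketch would be recorded in skills/schemas.json roughly as follows (shown as the Python dict that gets serialized; values are illustrative, not package output).

# Approximate shape of skills/schemas.json after adding the hypothetical skill
skills_schemas = {
    "show_weather": {
        "id": "show_weather",
        "name": "Show Weather",
        "description": "Display current weather for a requested city",
        "intent_triggers": ["what's the weather", "show me the forecast"],
        "requires_auth": False,
        "internal_tools": ["get_weather"],
        "output": {"view": "weather_card"},
    }
}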

a4e/tools/skills/list_skills.py
@@ -0,0 +1,54 @@
"""
List skills tool.
"""

from typing import Optional
import json

from ...core import mcp, get_project_dir


@mcp.tool()
def list_skills(agent_name: Optional[str] = None) -> dict:
    """
    List all skills available in the current agent project
    """
    project_dir = get_project_dir(agent_name)
    skills_dir = project_dir / "skills"

    if not skills_dir.exists():
        return {"skills": [], "count": 0}

    skills = []

    # Read from schemas.json if exists
    schema_file = skills_dir / "schemas.json"
    if schema_file.exists():
        try:
            schemas = json.loads(schema_file.read_text())
            for skill_id, skill_data in schemas.items():
                skills.append({
                    "id": skill_id,
                    "name": skill_data.get("name", skill_id),
                    "description": skill_data.get("description", ""),
                    "output_view": skill_data.get("output", {}).get("view", "NONE"),
                    "internal_tools": skill_data.get("internal_tools", []),
                    "intent_triggers": skill_data.get("intent_triggers", []),
                })
        except json.JSONDecodeError:
            pass

    # Fallback: list directories with SKILL.md
    if not skills:
        for skill_dir in skills_dir.iterdir():
            if skill_dir.is_dir() and (skill_dir / "SKILL.md").exists():
                skills.append({
                    "id": skill_dir.name,
                    "name": skill_dir.name.replace("_", " ").title(),
                    "description": "",
                    "output_view": "unknown",
                    "internal_tools": [],
                })

    return {"skills": sorted(skills, key=lambda x: x["id"]), "count": len(skills)}

a4e/tools/skills/remove_skill.py
@@ -0,0 +1,74 @@
"""
Remove skill tool.
"""

from pathlib import Path
from typing import Optional
import json
import shutil

from ...core import mcp, get_project_dir


@mcp.tool()
def remove_skill(
    skill_id: str,
    agent_name: Optional[str] = None,
) -> dict:
    """
    Remove a skill from the agent

    Args:
        skill_id: ID of the skill to remove (the folder name)
        agent_name: Optional agent ID if not in agent directory

    Returns:
        Result with success status and removed folder path
    """
    project_dir = get_project_dir(agent_name)
    skills_dir = project_dir / "skills"

    if not skills_dir.exists():
        return {
            "success": False,
            "error": f"skills/ directory not found at {skills_dir}. Are you in an agent project?",
        }

    skill_folder = skills_dir / skill_id

    if not skill_folder.exists():
        return {
            "success": False,
            "error": f"Skill '{skill_id}' not found at {skill_folder}",
        }

    # Prevent removing the mandatory show_welcome skill
    if skill_id == "show_welcome":
        return {
            "success": False,
            "error": "Cannot remove the 'show_welcome' skill - it is required for all agents",
        }

    try:
        # Remove the skill folder and all its contents
        shutil.rmtree(skill_folder)

        # Update schemas.json if it exists
        schemas_file = skills_dir / "schemas.json"
        if schemas_file.exists():
            try:
                schemas = json.loads(schemas_file.read_text())
                # Skills schemas is a dict with skill_id as keys
                if skill_id in schemas:
                    del schemas[skill_id]
                    schemas_file.write_text(json.dumps(schemas, indent=2))
            except (json.JSONDecodeError, KeyError):
                pass  # Ignore schema update errors

        return {
            "success": True,
            "message": f"Removed skill '{skill_id}'",
            "removed_folder": str(skill_folder),
        }
    except Exception as e:
        return {"success": False, "error": str(e)}