aes-cli 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aes/__init__.py +5 -0
- aes/__main__.py +37 -0
- aes/analyzer.py +487 -0
- aes/commands/__init__.py +0 -0
- aes/commands/init.py +727 -0
- aes/commands/inspect.py +204 -0
- aes/commands/install.py +379 -0
- aes/commands/publish.py +432 -0
- aes/commands/search.py +65 -0
- aes/commands/status.py +153 -0
- aes/commands/sync.py +413 -0
- aes/commands/validate.py +77 -0
- aes/config.py +43 -0
- aes/domains.py +1382 -0
- aes/frameworks.py +522 -0
- aes/mcp_server.py +213 -0
- aes/registry.py +294 -0
- aes/scaffold/agent.yaml.jinja +135 -0
- aes/scaffold/agentignore.jinja +61 -0
- aes/scaffold/instructions.md.jinja +311 -0
- aes/scaffold/local.example.yaml.jinja +35 -0
- aes/scaffold/local.yaml.jinja +29 -0
- aes/scaffold/operations.md.jinja +33 -0
- aes/scaffold/orchestrator.md.jinja +95 -0
- aes/scaffold/permissions.yaml.jinja +151 -0
- aes/scaffold/setup.md.jinja +244 -0
- aes/scaffold/skill.md.jinja +27 -0
- aes/scaffold/skill.yaml.jinja +175 -0
- aes/scaffold/workflow.yaml.jinja +44 -0
- aes/scaffold/workflow_command.md.jinja +48 -0
- aes/schemas/agent.schema.json +188 -0
- aes/schemas/permissions.schema.json +100 -0
- aes/schemas/registry.schema.json +72 -0
- aes/schemas/skill.schema.json +209 -0
- aes/schemas/workflow.schema.json +92 -0
- aes/targets/__init__.py +29 -0
- aes/targets/_base.py +77 -0
- aes/targets/_composer.py +338 -0
- aes/targets/claude.py +153 -0
- aes/targets/copilot.py +48 -0
- aes/targets/cursor.py +46 -0
- aes/targets/windsurf.py +46 -0
- aes/validator.py +394 -0
- aes_cli-0.2.0.dist-info/METADATA +110 -0
- aes_cli-0.2.0.dist-info/RECORD +48 -0
- aes_cli-0.2.0.dist-info/WHEEL +5 -0
- aes_cli-0.2.0.dist-info/entry_points.txt +3 -0
- aes_cli-0.2.0.dist-info/top_level.txt +1 -0
aes/targets/_base.py
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
1
|
+
"""Base types for sync target adapters."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from abc import ABC, abstractmethod
|
|
6
|
+
from dataclasses import dataclass, field
|
|
7
|
+
from pathlib import Path
|
|
8
|
+
from typing import Any, Dict, List, Optional
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
# Sentinels embedded in generated files so `aes sync` can recognize its own
# output on later runs and overwrite it without prompting.
AES_SENTINEL_MD = "<!-- Generated by `aes sync` \u2014 DO NOT EDIT MANUALLY -->"
AES_SENTINEL_JSON_KEY = "_aes_sync"


@dataclass
class AgentContext:
    """Parsed .agent/ contents, passed to every adapter."""

    project_root: Path  # root of the user's project
    agent_dir: Path  # the .agent/ directory itself
    manifest: dict  # project manifest; targets read at least "name"
    instructions: Optional[str]  # instructions.md content, if present
    orchestrator: Optional[str]  # ORCHESTRATOR.md content, if present
    skill_runbooks: Dict[str, str]  # skill id -> runbook markdown
    permissions: Optional[dict]  # parsed permissions config, if present
    commands: List[dict]  # user-defined command definitions ("id", "content")
    memory_project: Optional[str]  # memory/project.md content, if present
    skill_metadata: Dict[str, Dict[str, Any]] = field(default_factory=dict)
    local_config: Optional[dict] = None  # local overrides, if present


@dataclass
class GeneratedFile:
    """A single file to be generated by sync."""

    relative_path: str  # path relative to the project root
    content: str  # full file content to write
    description: str  # human-readable summary shown in sync output
    action: str = "create"  # create | update | conflict


@dataclass
class SyncPlan:
    """What a target adapter wants to generate."""

    target_name: str  # adapter name, e.g. "claude"
    files: List[GeneratedFile] = field(default_factory=list)
    warnings: List[str] = field(default_factory=list)


class SyncTarget(ABC):
    """Abstract base for a tool target adapter."""

    @property
    @abstractmethod
    def name(self) -> str:
        """Tool name, e.g. 'claude'."""

    @abstractmethod
    def plan(self, ctx: AgentContext, force: bool) -> SyncPlan:
        """Generate a plan of files to create/update."""

    def _check_conflict(self, project_root: Path, rel_path: str, force: bool) -> str:
        """Determine the action for a file path.

        Returns 'create', 'update', or 'conflict'.

        An existing file is only safe to overwrite when it carries one of
        the aes sentinels (meaning we generated it) or when the caller
        passed ``force``.
        """
        full_path = project_root / rel_path
        if not full_path.exists():
            return "create"
        # Generated files are written as UTF-8. Decode explicitly (the
        # platform default may differ, e.g. on Windows) and replace
        # undecodable bytes so a stray binary file yields 'conflict'
        # instead of crashing sync with UnicodeDecodeError.
        content = full_path.read_text(encoding="utf-8", errors="replace")
        if AES_SENTINEL_MD in content or f'"{AES_SENTINEL_JSON_KEY}"' in content:
            return "update"
        if force:
            return "update"
        return "conflict"
|
aes/targets/_composer.py
ADDED
|
@@ -0,0 +1,338 @@
|
|
|
1
|
+
"""Shared composition logic for sync targets."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from typing import Any, Dict, List, Optional
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
def compose_instructions(
    project_name: str,
    instructions: Optional[str],
    orchestrator: Optional[str],
    skill_runbooks: Dict[str, str],
    memory_project: Optional[str],
    header: str,
) -> str:
    """Compose a single instructions document from .agent/ contents.

    Sections appear in a fixed order, separated by ``---`` rules:
    the header, instructions.md, ORCHESTRATOR.md, the skill runbooks
    (under a "Skills Reference" heading), then memory/project.md.
    Absent pieces are simply skipped.
    """
    parts: List[str] = [header]

    if instructions:
        parts.append(instructions)

    if orchestrator:
        parts.extend(["---\n", orchestrator])

    if skill_runbooks:
        parts.extend(["---\n", "# Skills Reference\n"])
        # Runbooks are emitted in insertion order of the mapping.
        parts.extend(skill_runbooks.values())

    if memory_project:
        parts.extend(["---\n", memory_project])

    return "\n\n".join(parts) + "\n"
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
def compose_instructions_with_skill_index(
    project_name: str,
    instructions: Optional[str],
    orchestrator: Optional[str],
    skill_metadata: Dict[str, Dict[str, Any]],
    memory_project: Optional[str],
    header: str,
    skill_runbooks: Optional[Dict[str, str]] = None,
) -> str:
    """Compose instructions with a skill index instead of inlined runbooks.

    Skills are synced as separate command files; this document only lists
    them so the agent knows they exist and can invoke them as slash
    commands. Skills whose metadata declares ``activation: auto`` are
    described inline under an "Auto-Activated Skills" heading so they can
    be matched without a slash command; ``hybrid`` skills appear in both
    that section and the explicit "Available Skills" index.

    NOTE: ``skill_runbooks`` is accepted for interface compatibility but
    is not currently used in the composed output.
    """
    parts: List[str] = [header]

    if instructions:
        parts.append(instructions)

    if orchestrator:
        parts.extend(["---\n", orchestrator])

    if skill_metadata:
        # Split skills by activation mode; hybrid lands in both buckets.
        auto: Dict[str, Dict[str, Any]] = {}
        explicit: Dict[str, Dict[str, Any]] = {}
        for sid, meta in skill_metadata.items():
            activation = meta.get("activation", "explicit")
            if activation in ("auto", "hybrid"):
                auto[sid] = meta
            if activation in ("explicit", "hybrid"):
                explicit[sid] = meta

        if auto:
            parts.append("---\n")
            block: List[str] = [
                "# Auto-Activated Skills\n",
                "The following skills activate automatically based on context:\n",
            ]
            for sid, meta in auto.items():
                block.append(f"### {meta.get('name', sid)} (`/skills/{sid}`)\n")
                description = meta.get("description", "")
                if description:
                    block.append(description)
                triggers = meta.get("negative_triggers", [])
                if triggers:
                    block.append("")
                    block.extend(f"- {t}" for t in triggers)
                permissions_note = format_skill_permissions(meta.get("allowed_tools"))
                if permissions_note:
                    block.append("")
                    block.append(permissions_note)
                block.append("")
            parts.append("\n".join(block))

        if explicit:
            parts.append("---\n")
            index: List[str] = [
                "# Available Skills\n",
                "The following skills are available as slash commands:\n",
            ]
            for sid, meta in explicit.items():
                entry = f"- **/skills/{sid}** \u2014 {meta.get('name', sid)}"
                description = meta.get("description", "")
                if description:
                    entry = f"{entry}: {description}"
                triggers = meta.get("negative_triggers", [])
                if triggers:
                    entry += " " + " ".join(f"[{t}]" for t in triggers)
                index.append(entry)
            parts.append("\n".join(index))

    if memory_project:
        parts.extend(["---\n", memory_project])

    return "\n\n".join(parts) + "\n"
|
|
134
|
+
|
|
135
|
+
|
|
136
|
+
def format_skill_permissions(allowed_tools: Optional[Dict[str, Any]]) -> str:
    """Format per-skill allowed_tools as a markdown permissions note.

    Returns empty string if no permissions are specified.
    """
    if not allowed_tools:
        return ""

    def _verdict(flag: object) -> str:
        # Truthy -> allowed, falsy -> denied.
        return "allowed" if flag else "denied"

    lines: List[str] = []

    shell = allowed_tools.get("shell")
    if shell is not None:
        lines.append(f"- Shell: {_verdict(shell)}")

    files = allowed_tools.get("files")
    if isinstance(files, dict):
        # read/write may each be a bool (blanket) or a list of patterns.
        for label in ("read", "write"):
            value = files.get(label)
            if value is None:
                continue
            if isinstance(value, bool):
                lines.append(f"- File {label}: {_verdict(value)}")
            elif isinstance(value, list):
                patterns = ", ".join(f"`{p}`" for p in value)
                lines.append(f"- File {label}: {patterns}")

    network = allowed_tools.get("network")
    if network is not None:
        lines.append(f"- Network: {_verdict(network)}")

    mcp = allowed_tools.get("mcp_servers")
    if mcp:
        lines.append(f"- MCP servers: {', '.join(mcp)}")

    if not lines:
        return ""
    return "\n".join(["**Permissions:**", *lines])
|
|
177
|
+
|
|
178
|
+
|
|
179
|
+
def translate_permissions_to_claude(permissions: dict) -> dict:
|
|
180
|
+
"""Translate AES permissions.yaml into Claude settings.local.json format.
|
|
181
|
+
|
|
182
|
+
Claude permissions use patterns like:
|
|
183
|
+
- Bash(git status*) for shell commands
|
|
184
|
+
- Read(*) for file reading
|
|
185
|
+
- Write(src/**) / Edit(src/**) for file writing
|
|
186
|
+
"""
|
|
187
|
+
allowed: List[str] = []
|
|
188
|
+
denied: List[str] = []
|
|
189
|
+
|
|
190
|
+
# --- allow section ---
|
|
191
|
+
allow = permissions.get("allow", {})
|
|
192
|
+
|
|
193
|
+
allow_shell = allow.get("shell", {})
|
|
194
|
+
if isinstance(allow_shell, dict):
|
|
195
|
+
for category in ("read", "execute", "remote"):
|
|
196
|
+
for pattern in _normalize_patterns(allow_shell.get(category)):
|
|
197
|
+
allowed.append(f"Bash({pattern})")
|
|
198
|
+
elif isinstance(allow_shell, list):
|
|
199
|
+
for pattern in allow_shell:
|
|
200
|
+
allowed.append(f"Bash({pattern})")
|
|
201
|
+
|
|
202
|
+
allow_files = allow.get("files", {})
|
|
203
|
+
for p in _normalize_patterns(allow_files.get("read")):
|
|
204
|
+
allowed.append(f"Read({p})")
|
|
205
|
+
for p in _normalize_patterns(allow_files.get("write")):
|
|
206
|
+
allowed.append(f"Write({p})")
|
|
207
|
+
allowed.append(f"Edit({p})")
|
|
208
|
+
for p in _normalize_patterns(allow_files.get("create")):
|
|
209
|
+
allowed.append(f"Write({p})")
|
|
210
|
+
|
|
211
|
+
# --- deny section ---
|
|
212
|
+
deny = permissions.get("deny", {})
|
|
213
|
+
|
|
214
|
+
deny_shell = deny.get("shell", {})
|
|
215
|
+
if isinstance(deny_shell, list):
|
|
216
|
+
for pattern in deny_shell:
|
|
217
|
+
denied.append(f"Bash({pattern})")
|
|
218
|
+
elif isinstance(deny_shell, dict):
|
|
219
|
+
for category in ("read", "execute", "remote"):
|
|
220
|
+
for pattern in _normalize_patterns(deny_shell.get(category)):
|
|
221
|
+
denied.append(f"Bash({pattern})")
|
|
222
|
+
|
|
223
|
+
deny_files = deny.get("files", {})
|
|
224
|
+
for p in _normalize_patterns(deny_files.get("write")):
|
|
225
|
+
denied.append(f"Write({p})")
|
|
226
|
+
denied.append(f"Edit({p})")
|
|
227
|
+
for p in _normalize_patterns(deny_files.get("delete")):
|
|
228
|
+
denied.append(f"Write({p})")
|
|
229
|
+
|
|
230
|
+
# --- overrides escape hatch ---
|
|
231
|
+
overrides = permissions.get("overrides", {}).get("claude", {})
|
|
232
|
+
override_perms = overrides.get("permissions", {})
|
|
233
|
+
if override_perms.get("allow"):
|
|
234
|
+
allowed.extend(override_perms["allow"])
|
|
235
|
+
if override_perms.get("deny"):
|
|
236
|
+
denied.extend(override_perms["deny"])
|
|
237
|
+
|
|
238
|
+
result: dict = {"permissions": {}}
|
|
239
|
+
if allowed:
|
|
240
|
+
result["permissions"]["allow"] = sorted(set(allowed))
|
|
241
|
+
if denied:
|
|
242
|
+
result["permissions"]["deny"] = sorted(set(denied))
|
|
243
|
+
|
|
244
|
+
return result
|
|
245
|
+
|
|
246
|
+
|
|
247
|
+
def translate_permissions_to_markdown(permissions: dict) -> str:
|
|
248
|
+
"""Translate AES permissions.yaml into a markdown restrictions section.
|
|
249
|
+
|
|
250
|
+
Used by Cursor, Copilot, and Windsurf targets which don't have structured
|
|
251
|
+
permission formats — they rely on markdown instructions instead.
|
|
252
|
+
"""
|
|
253
|
+
sections: List[str] = []
|
|
254
|
+
|
|
255
|
+
# --- Allow: file scope ---
|
|
256
|
+
allow = permissions.get("allow", {})
|
|
257
|
+
allow_files = allow.get("files", {})
|
|
258
|
+
write_patterns = _normalize_patterns(allow_files.get("write"))
|
|
259
|
+
if write_patterns:
|
|
260
|
+
sections.append("### File Scope\n")
|
|
261
|
+
sections.append("Focus edits on these directories/patterns:\n")
|
|
262
|
+
for p in write_patterns:
|
|
263
|
+
sections.append(f"- `{p}`")
|
|
264
|
+
sections.append("")
|
|
265
|
+
|
|
266
|
+
# --- Allow: shell commands ---
|
|
267
|
+
allow_shell = allow.get("shell", {})
|
|
268
|
+
allowed_cmds: List[str] = []
|
|
269
|
+
if isinstance(allow_shell, dict):
|
|
270
|
+
for category in ("read", "execute", "remote"):
|
|
271
|
+
allowed_cmds.extend(_normalize_patterns(allow_shell.get(category)))
|
|
272
|
+
elif isinstance(allow_shell, list):
|
|
273
|
+
allowed_cmds.extend(allow_shell)
|
|
274
|
+
if allowed_cmds:
|
|
275
|
+
sections.append("### Allowed Commands\n")
|
|
276
|
+
for cmd in allowed_cmds:
|
|
277
|
+
sections.append(f"- `{cmd}`")
|
|
278
|
+
sections.append("")
|
|
279
|
+
|
|
280
|
+
# --- Deny ---
|
|
281
|
+
deny = permissions.get("deny", {})
|
|
282
|
+
deny_shell = deny.get("shell", [])
|
|
283
|
+
deny_cmds: List[str] = []
|
|
284
|
+
if isinstance(deny_shell, list):
|
|
285
|
+
deny_cmds.extend(deny_shell)
|
|
286
|
+
elif isinstance(deny_shell, dict):
|
|
287
|
+
for category in ("read", "execute", "remote"):
|
|
288
|
+
deny_cmds.extend(_normalize_patterns(deny_shell.get(category)))
|
|
289
|
+
|
|
290
|
+
deny_files = deny.get("files", {})
|
|
291
|
+
deny_write = _normalize_patterns(deny_files.get("write"))
|
|
292
|
+
deny_delete = _normalize_patterns(deny_files.get("delete"))
|
|
293
|
+
|
|
294
|
+
if deny_cmds or deny_write or deny_delete:
|
|
295
|
+
sections.append("### Never Do These\n")
|
|
296
|
+
for cmd in deny_cmds:
|
|
297
|
+
sections.append(f"- Never run: `{cmd}`")
|
|
298
|
+
for p in deny_write:
|
|
299
|
+
sections.append(f"- Never write to: `{p}`")
|
|
300
|
+
for p in deny_delete:
|
|
301
|
+
sections.append(f"- Never delete: `{p}`")
|
|
302
|
+
sections.append("")
|
|
303
|
+
|
|
304
|
+
# --- Confirm ---
|
|
305
|
+
confirm = permissions.get("confirm", {})
|
|
306
|
+
confirm_shell = confirm.get("shell", [])
|
|
307
|
+
confirm_actions = confirm.get("actions", [])
|
|
308
|
+
if confirm_shell or confirm_actions:
|
|
309
|
+
sections.append("### Ask Before Running\n")
|
|
310
|
+
for cmd in (confirm_shell if isinstance(confirm_shell, list) else []):
|
|
311
|
+
sections.append(f"- `{cmd}`")
|
|
312
|
+
for action in (confirm_actions if isinstance(confirm_actions, list) else []):
|
|
313
|
+
sections.append(f"- Action: {action}")
|
|
314
|
+
sections.append("")
|
|
315
|
+
|
|
316
|
+
# --- Resource limits ---
|
|
317
|
+
resource_limits = permissions.get("resource_limits", {})
|
|
318
|
+
if resource_limits:
|
|
319
|
+
sections.append("### Resource Limits\n")
|
|
320
|
+
for key, val in resource_limits.items():
|
|
321
|
+
sections.append(f"- {key}: {val}")
|
|
322
|
+
sections.append("")
|
|
323
|
+
|
|
324
|
+
if not sections:
|
|
325
|
+
return ""
|
|
326
|
+
|
|
327
|
+
return "\n## Permissions\n\n" + "\n".join(sections) + "\n"
|
|
328
|
+
|
|
329
|
+
|
|
330
|
+
def _normalize_patterns(value: object) -> List[str]:
|
|
331
|
+
"""Normalize a single pattern or list of patterns to a list."""
|
|
332
|
+
if value is None:
|
|
333
|
+
return []
|
|
334
|
+
if isinstance(value, str):
|
|
335
|
+
return [value]
|
|
336
|
+
if isinstance(value, list):
|
|
337
|
+
return value
|
|
338
|
+
return []
|
aes/targets/claude.py
ADDED
|
@@ -0,0 +1,153 @@
|
|
|
1
|
+
"""Claude Code sync target."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import json
|
|
6
|
+
from typing import List
|
|
7
|
+
|
|
8
|
+
from aes.targets._base import (
|
|
9
|
+
AES_SENTINEL_JSON_KEY,
|
|
10
|
+
AES_SENTINEL_MD,
|
|
11
|
+
AgentContext,
|
|
12
|
+
GeneratedFile,
|
|
13
|
+
SyncPlan,
|
|
14
|
+
SyncTarget,
|
|
15
|
+
)
|
|
16
|
+
from aes.targets._composer import (
|
|
17
|
+
compose_instructions_with_skill_index,
|
|
18
|
+
format_skill_permissions,
|
|
19
|
+
translate_permissions_to_claude,
|
|
20
|
+
)
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
class ClaudeTarget(SyncTarget):
    """Sync adapter for Claude Code.

    Emits CLAUDE.md (instructions + skill index), .claude/settings.local.json
    (translated permissions), and one slash-command markdown file per
    user-defined command and per skill runbook.
    """

    @property
    def name(self) -> str:
        return "claude"

    def plan(self, ctx: AgentContext, force: bool) -> SyncPlan:
        plan = SyncPlan(target_name=self.name)
        # File order matters for sync output: instructions first, then
        # settings, then commands, then skill commands.
        self._plan_instructions(ctx, force, plan)
        self._plan_settings(ctx, force, plan)
        self._plan_commands(ctx, force, plan)
        self._plan_skill_commands(ctx, force, plan)
        return plan

    def _plan_instructions(self, ctx: AgentContext, force: bool, plan: SyncPlan) -> None:
        # CLAUDE.md lists skills as an index; the runbooks themselves are
        # synced separately as slash commands (see _plan_skill_commands).
        project = ctx.manifest.get("name", "Project")
        header = AES_SENTINEL_MD + "\n# " + project + " \u2014 Agent Instructions"
        body = compose_instructions_with_skill_index(
            project_name=project,
            instructions=ctx.instructions,
            orchestrator=ctx.orchestrator,
            skill_metadata=ctx.skill_metadata,
            memory_project=ctx.memory_project,
            header=header,
            skill_runbooks=ctx.skill_runbooks,
        )
        if ctx.permissions:
            confirm_md = _build_confirm_section(ctx.permissions)
            if confirm_md:
                body += "\n" + confirm_md
        plan.files.append(GeneratedFile(
            relative_path="CLAUDE.md",
            content=body,
            description="Agent instructions for Claude Code",
            action=self._check_conflict(ctx.project_root, "CLAUDE.md", force),
        ))

    def _plan_settings(self, ctx: AgentContext, force: bool, plan: SyncPlan) -> None:
        # Only emitted when a permissions config exists.
        if not ctx.permissions:
            return
        settings = translate_permissions_to_claude(ctx.permissions)
        # Embed the JSON sentinel so _check_conflict recognizes our output.
        settings[AES_SENTINEL_JSON_KEY] = True
        rel_path = ".claude/settings.local.json"
        plan.files.append(GeneratedFile(
            relative_path=rel_path,
            content=json.dumps(settings, indent=2) + "\n",
            description="Claude Code permissions",
            action=self._check_conflict(ctx.project_root, rel_path, force),
        ))

    def _plan_commands(self, ctx: AgentContext, force: bool, plan: SyncPlan) -> None:
        # Each user-defined command becomes a /<id> slash command file.
        for cmd in ctx.commands:
            rel_path = f".claude/commands/{cmd['id']}.md"
            plan.files.append(GeneratedFile(
                relative_path=rel_path,
                content=AES_SENTINEL_MD + "\n" + cmd.get("content", ""),
                description=f"Claude slash command: /{cmd['id']}",
                action=self._check_conflict(ctx.project_root, rel_path, force),
            ))

    def _plan_skill_commands(self, ctx: AgentContext, force: bool, plan: SyncPlan) -> None:
        # Each skill runbook becomes /skills/<id>, with negative triggers
        # and per-skill permissions appended when present in the metadata.
        for skill_id, runbook in ctx.skill_runbooks.items():
            rel_path = f".claude/commands/skills/{skill_id}.md"
            body = AES_SENTINEL_MD + "\n" + runbook

            meta = ctx.skill_metadata.get(skill_id, {})
            extras: List[str] = []
            triggers = meta.get("negative_triggers", [])
            if triggers:
                extras.append("\n## Do NOT Use When\n")
                extras.extend(f"- {t}" for t in triggers)
            perms_note = format_skill_permissions(meta.get("allowed_tools"))
            if perms_note:
                extras.append(f"\n## Skill Permissions\n\n{perms_note}")
            if extras:
                body += "\n" + "\n".join(extras) + "\n"

            plan.files.append(GeneratedFile(
                relative_path=rel_path,
                content=body,
                description=f"Skill slash command: /skills/{skill_id}",
                action=self._check_conflict(ctx.project_root, rel_path, force),
            ))
|
|
120
|
+
|
|
121
|
+
|
|
122
|
+
def _build_confirm_section(permissions: dict) -> str:
|
|
123
|
+
"""Build a markdown section listing actions that require confirmation."""
|
|
124
|
+
confirm = permissions.get("confirm", {})
|
|
125
|
+
if not confirm:
|
|
126
|
+
return ""
|
|
127
|
+
|
|
128
|
+
lines: List[str] = [
|
|
129
|
+
"\n## Actions Requiring Confirmation\n",
|
|
130
|
+
"Always ask for explicit user approval before:\n",
|
|
131
|
+
]
|
|
132
|
+
|
|
133
|
+
for cmd in confirm.get("shell", []):
|
|
134
|
+
lines.append(f"- Running: `{cmd}`")
|
|
135
|
+
|
|
136
|
+
for action in confirm.get("actions", []):
|
|
137
|
+
lines.append(f"- Action: {action}")
|
|
138
|
+
|
|
139
|
+
confirm_files = confirm.get("files", {})
|
|
140
|
+
for p in _as_list(confirm_files.get("delete")):
|
|
141
|
+
lines.append(f"- Deleting files matching: `{p}`")
|
|
142
|
+
|
|
143
|
+
return "\n".join(lines) + "\n"
|
|
144
|
+
|
|
145
|
+
|
|
146
|
+
def _as_list(value: object) -> list:
|
|
147
|
+
if value is None:
|
|
148
|
+
return []
|
|
149
|
+
if isinstance(value, str):
|
|
150
|
+
return [value]
|
|
151
|
+
if isinstance(value, list):
|
|
152
|
+
return value
|
|
153
|
+
return []
|
aes/targets/copilot.py
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
1
|
+
"""GitHub Copilot sync target."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from aes.targets._base import AES_SENTINEL_MD, AgentContext, GeneratedFile, SyncPlan, SyncTarget
|
|
6
|
+
from aes.targets._composer import compose_instructions, translate_permissions_to_markdown
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class CopilotTarget(SyncTarget):
    """Sync adapter that writes .github/copilot-instructions.md."""

    @property
    def name(self) -> str:
        return "copilot"

    def plan(self, ctx: AgentContext, force: bool) -> SyncPlan:
        plan = SyncPlan(target_name=self.name)

        project = ctx.manifest.get("name", "Project")
        # Copilot reads one flat instructions file, so skill runbooks are
        # inlined rather than indexed.
        body = compose_instructions(
            project_name=project,
            instructions=ctx.instructions,
            orchestrator=ctx.orchestrator,
            skill_runbooks=ctx.skill_runbooks,
            memory_project=ctx.memory_project,
            header=AES_SENTINEL_MD + "\n# " + project + " \u2014 Copilot Instructions",
        )

        if ctx.permissions:
            # Copilot has no structured permission format; render the
            # rules as a markdown restrictions section instead.
            restrictions = translate_permissions_to_markdown(ctx.permissions)
            if restrictions:
                body += "\n" + restrictions

        rel_path = ".github/copilot-instructions.md"
        plan.files.append(GeneratedFile(
            relative_path=rel_path,
            content=body,
            description="GitHub Copilot instructions",
            action=self._check_conflict(ctx.project_root, rel_path, force),
        ))

        return plan
|
aes/targets/cursor.py
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
"""Cursor sync target."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from aes.targets._base import AES_SENTINEL_MD, AgentContext, GeneratedFile, SyncPlan, SyncTarget
|
|
6
|
+
from aes.targets._composer import compose_instructions, translate_permissions_to_markdown
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class CursorTarget(SyncTarget):
    """Sync adapter that writes a .cursorrules file."""

    @property
    def name(self) -> str:
        return "cursor"

    def plan(self, ctx: AgentContext, force: bool) -> SyncPlan:
        plan = SyncPlan(target_name=self.name)

        project = ctx.manifest.get("name", "Project")
        # Cursor reads a single rules file, so skill runbooks are inlined
        # rather than indexed.
        body = compose_instructions(
            project_name=project,
            instructions=ctx.instructions,
            orchestrator=ctx.orchestrator,
            skill_runbooks=ctx.skill_runbooks,
            memory_project=ctx.memory_project,
            header=AES_SENTINEL_MD + "\n# " + project + " \u2014 Cursor Rules",
        )

        if ctx.permissions:
            # Cursor has no structured permission format; render the rules
            # as a markdown restrictions section instead.
            restrictions = translate_permissions_to_markdown(ctx.permissions)
            if restrictions:
                body += "\n" + restrictions

        plan.files.append(GeneratedFile(
            relative_path=".cursorrules",
            content=body,
            description="Cursor rules file",
            action=self._check_conflict(ctx.project_root, ".cursorrules", force),
        ))

        return plan
|