arkaos 2.16.0 → 2.17.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/VERSION +1 -1
- package/config/agent-allowlists/_base.yaml +7 -0
- package/config/agent-allowlists/laravel.yaml +9 -0
- package/config/agent-allowlists/node.yaml +7 -0
- package/config/agent-allowlists/nuxt.yaml +7 -0
- package/config/agent-allowlists/python.yaml +7 -0
- package/config/hooks/agent-provision.sh +135 -0
- package/config/mcp-policy.yaml +36 -0
- package/config/settings-template.json +12 -0
- package/config/standards/claude-md-overlays/laravel.md +8 -0
- package/config/standards/claude-md-overlays/node.md +7 -0
- package/config/standards/claude-md-overlays/nuxt.md +8 -0
- package/config/standards/claude-md-overlays/python.md +8 -0
- package/core/sync/__pycache__/agent_provisioner.cpython-313.pyc +0 -0
- package/core/sync/__pycache__/ai_mcp_decider.cpython-313.pyc +0 -0
- package/core/sync/__pycache__/content_merger.cpython-313.pyc +0 -0
- package/core/sync/__pycache__/content_syncer.cpython-313.pyc +0 -0
- package/core/sync/__pycache__/descriptor_syncer.cpython-313.pyc +0 -0
- package/core/sync/__pycache__/engine.cpython-313.pyc +0 -0
- package/core/sync/__pycache__/mcp_optimizer.cpython-313.pyc +0 -0
- package/core/sync/__pycache__/policy_loader.cpython-313.pyc +0 -0
- package/core/sync/__pycache__/reporter.cpython-313.pyc +0 -0
- package/core/sync/__pycache__/schema.cpython-313.pyc +0 -0
- package/core/sync/__pycache__/self_healing.cpython-313.pyc +0 -0
- package/core/sync/agent_provisioner.py +150 -0
- package/core/sync/ai_mcp_decider.py +86 -0
- package/core/sync/content_merger.py +100 -0
- package/core/sync/content_syncer.py +167 -0
- package/core/sync/descriptor_syncer.py +36 -6
- package/core/sync/engine.py +20 -0
- package/core/sync/mcp_optimizer.py +187 -0
- package/core/sync/policy_loader.py +94 -0
- package/core/sync/reporter.py +49 -1
- package/core/sync/schema.py +37 -0
- package/core/sync/self_healing.py +47 -0
- package/package.json +1 -1
- package/pyproject.toml +1 -1
|
@@ -0,0 +1,167 @@
|
|
|
1
|
+
"""Content syncer for the ArkaOS Sync Engine.
|
|
2
|
+
|
|
3
|
+
Syncs CLAUDE.md (with intelligent merge), rules, hooks, and a generated
|
|
4
|
+
constitution excerpt into each project's .claude/ directory.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
import os
|
|
10
|
+
import shutil
|
|
11
|
+
from pathlib import Path
|
|
12
|
+
|
|
13
|
+
import yaml
|
|
14
|
+
|
|
15
|
+
from core.sync.content_merger import merge_managed_content
|
|
16
|
+
from core.sync.schema import ContentSyncResult, Project
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
def _core_root() -> Path:
|
|
20
|
+
# Honors ARKAOS_CORE_ROOT env var for tests; falls back to repo root.
|
|
21
|
+
env = os.environ.get("ARKAOS_CORE_ROOT")
|
|
22
|
+
if env:
|
|
23
|
+
return Path(env)
|
|
24
|
+
return Path(__file__).resolve().parents[2]
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
def sync_project_content(project: Project) -> ContentSyncResult:
    """Sync CLAUDE.md, rules, hooks, and constitution excerpt for a project.

    Never raises: any failure is captured as an error-status result so one
    broken project cannot abort the whole sync run.
    """
    try:
        return _do_sync(project)
    except Exception as exc:  # noqa: BLE001 -- boundary: report, don't crash
        return ContentSyncResult(path=project.path, status="error", error=str(exc))
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
def _do_sync(project: Project) -> ContentSyncResult:
    """Run every content-sync phase for one project and aggregate the result."""
    core = _core_root()
    version = (core / "VERSION").read_text().strip()
    claude_dir = Path(project.path) / ".claude"
    claude_dir.mkdir(parents=True, exist_ok=True)

    updated: list[str] = []
    unchanged: list[str] = []
    errored: list[str] = []

    _sync_claude_md(core, project, claude_dir, version, updated, unchanged, errored)
    _sync_rules(core, claude_dir, updated, unchanged, errored)
    _sync_hooks(core, claude_dir, updated, unchanged, errored)
    _sync_constitution(core, claude_dir, updated, unchanged, errored)

    # Overall status: any artefact error wins, then "updated", else untouched.
    if errored:
        status = "error"
    elif updated:
        status = "updated"
    else:
        status = "unchanged"

    return ContentSyncResult(
        path=project.path,
        status=status,
        artefacts_updated=updated,
        artefacts_unchanged=unchanged,
        artefacts_errored=errored,
    )
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
def _sync_claude_md(
    core: Path,
    project: Project,
    project_claude: Path,
    version: str,
    updated: list[str],
    unchanged: list[str],
    errored: list[str],
) -> None:
    """Merge the managed CLAUDE.md (base + per-stack overlays) into the project.

    On a merge error the existing file is left untouched and the fresh managed
    content is parked in a ``.arkaos-new`` sidecar for manual inspection.
    """
    base = (core / "config" / "user-claude.md").read_text()
    overlays_dir = core / "config" / "standards" / "claude-md-overlays"
    overlay_texts = [
        path.read_text()
        for path in (overlays_dir / f"{stack}.md" for stack in project.stack)
        if path.exists()
    ]

    managed_content = "\n\n".join([base, *overlay_texts]).strip()
    target_file = project_claude / "CLAUDE.md"
    existing = target_file.read_text() if target_file.exists() else ""

    result = merge_managed_content(existing, managed_content, version)
    if result.status == "error":
        errored.append(f"CLAUDE.md: {result.error}")
        # Preserve the user's file; write the new managed content next to it.
        target_file.with_suffix(".md.arkaos-new").write_text(managed_content)
    elif result.status == "unchanged":
        unchanged.append("CLAUDE.md")
    else:
        target_file.write_text(result.new_text)
        updated.append("CLAUDE.md")
|
|
99
|
+
|
|
100
|
+
|
|
101
|
+
def _sync_rules(
|
|
102
|
+
core: Path,
|
|
103
|
+
project_claude: Path,
|
|
104
|
+
updated: list[str],
|
|
105
|
+
unchanged: list[str],
|
|
106
|
+
errored: list[str],
|
|
107
|
+
) -> None:
|
|
108
|
+
# Copies/updates rules from core standards; does not delete orphan files.
|
|
109
|
+
src = core / "config" / "standards"
|
|
110
|
+
dst = project_claude / "rules"
|
|
111
|
+
dst.mkdir(parents=True, exist_ok=True)
|
|
112
|
+
for rule in src.glob("*.md"):
|
|
113
|
+
target = dst / rule.name
|
|
114
|
+
src_text = rule.read_text()
|
|
115
|
+
if target.exists() and target.read_text() == src_text:
|
|
116
|
+
unchanged.append(f"rules/{rule.name}")
|
|
117
|
+
continue
|
|
118
|
+
target.write_text(src_text)
|
|
119
|
+
updated.append(f"rules/{rule.name}")
|
|
120
|
+
|
|
121
|
+
|
|
122
|
+
def _sync_hooks(
|
|
123
|
+
core: Path,
|
|
124
|
+
project_claude: Path,
|
|
125
|
+
updated: list[str],
|
|
126
|
+
unchanged: list[str],
|
|
127
|
+
errored: list[str],
|
|
128
|
+
) -> None:
|
|
129
|
+
src = core / "config" / "hooks"
|
|
130
|
+
dst = project_claude / "hooks"
|
|
131
|
+
dst.mkdir(parents=True, exist_ok=True)
|
|
132
|
+
for hook in src.glob("*.sh"):
|
|
133
|
+
target = dst / hook.name
|
|
134
|
+
src_text = hook.read_text()
|
|
135
|
+
if target.exists() and target.read_text() == src_text:
|
|
136
|
+
unchanged.append(f"hooks/{hook.name}")
|
|
137
|
+
continue
|
|
138
|
+
shutil.copy2(hook, target)
|
|
139
|
+
target.chmod(0o755)
|
|
140
|
+
updated.append(f"hooks/{hook.name}")
|
|
141
|
+
|
|
142
|
+
|
|
143
|
+
def _sync_constitution(
    core: Path,
    project_claude: Path,
    updated: list[str],
    unchanged: list[str],
    errored: list[str],
) -> None:
    """Render a markdown excerpt of the constitution rules into the project."""
    source = core / "config" / "constitution.yaml"
    target = project_claude / "constitution-applicable.md"
    data = yaml.safe_load(source.read_text()) or {}

    excerpt = ["# ArkaOS Constitution — Applicable Rules", ""]
    for rule in data.get("rules", []):
        excerpt.append(f"- **{rule.get('name', '?')}** — {rule.get('level', '?')}")
    body = "\n".join(excerpt) + "\n"

    if target.exists() and target.read_text() == body:
        unchanged.append("constitution-applicable.md")
    else:
        target.write_text(body)
        updated.append("constitution-applicable.md")
|
|
163
|
+
|
|
164
|
+
|
|
165
|
+
def sync_all_content(projects: list[Project]) -> list[ContentSyncResult]:
    """Sync content artefacts for every discovered project."""
    return list(map(sync_project_content, projects))
|
|
@@ -101,21 +101,51 @@ def _split_frontmatter(text: str) -> tuple[dict, str]:
|
|
|
101
101
|
|
|
102
102
|
|
|
103
103
|
def _normalize_stack_item(item: str) -> str:
|
|
104
|
-
"""Normalize a stack item to lowercase first word for comparison.
|
|
105
|
-
|
|
104
|
+
"""Normalize a stack item to lowercase first word for comparison.
|
|
105
|
+
|
|
106
|
+
Returns an empty string for empty or whitespace-only input so that
|
|
107
|
+
iteration over malformed stacks (e.g., a scalar YAML string) does not
|
|
108
|
+
crash with IndexError.
|
|
109
|
+
"""
|
|
110
|
+
parts = item.strip().lower().split()
|
|
111
|
+
return parts[0] if parts else ""
|
|
106
112
|
|
|
107
113
|
|
|
108
114
|
def _check_stack(
    frontmatter: dict, detected_stack: list[str], changes: list[str]
) -> None:
    """Compare frontmatter stack with detected stack and update if different.

    Tolerates malformed frontmatter where ``stack`` is a scalar string or
    ``None`` by coercing it to a list first. A scalar value is always
    rewritten as a list even when the normalized tokens match. Empty or
    whitespace-only items are dropped during normalization.
    """
    raw = frontmatter.get("stack")
    was_list = isinstance(raw, list)

    if isinstance(raw, str):
        fm_stack = [raw]
    elif was_list:
        fm_stack = [entry for entry in raw if isinstance(entry, str)]
    else:  # None or any other malformed value
        fm_stack = []

    def _tokens(items: list[str]) -> set[str]:
        # Items that normalize to "" (empty/whitespace-only) are dropped.
        return {tok for tok in map(_normalize_stack_item, items) if tok}

    # Nothing detected means nothing to write — leave frontmatter alone.
    if not detected_stack:
        return

    if _tokens(fm_stack) != _tokens(detected_stack):
        frontmatter["stack"] = detected_stack
        changes.append(f"stack updated: {fm_stack} -> {detected_stack}")
    elif not was_list:
        frontmatter["stack"] = detected_stack
        changes.append(f"stack coerced to list: {raw!r} -> {detected_stack}")
|
|
119
149
|
|
|
120
150
|
|
|
121
151
|
def _check_activity(
|
package/core/sync/engine.py
CHANGED
|
@@ -13,9 +13,12 @@ from pathlib import Path
|
|
|
13
13
|
|
|
14
14
|
from core.sync.manifest import build_manifest
|
|
15
15
|
from core.sync.discovery import discover_all_projects
|
|
16
|
+
from core.sync.mcp_optimizer import optimize_all_mcps
|
|
16
17
|
from core.sync.mcp_syncer import sync_all_mcps
|
|
17
18
|
from core.sync.settings_syncer import sync_all_settings
|
|
18
19
|
from core.sync.descriptor_syncer import sync_all_descriptors
|
|
20
|
+
from core.sync.agent_provisioner import sync_all_agents
|
|
21
|
+
from core.sync.content_syncer import sync_all_content
|
|
19
22
|
from core.sync.reporter import build_report, format_report, write_sync_state
|
|
20
23
|
from core.sync.schema import SyncReport
|
|
21
24
|
|
|
@@ -37,8 +40,23 @@ def run_sync(arkaos_home: Path, skills_dir: Path, home_path: str) -> SyncReport:
|
|
|
37
40
|
|
|
38
41
|
registry_path = skills_dir / "arka" / "mcps" / "registry.json"
|
|
39
42
|
mcp_results = sync_all_mcps(projects, registry_path, home_path)
|
|
43
|
+
|
|
44
|
+
policy_path = Path(__file__).resolve().parents[2] / "config" / "mcp-policy.yaml"
|
|
45
|
+
vault_path = Path.home() / ".arkaos" / "secrets.json"
|
|
46
|
+
cache_path = Path.home() / ".arkaos" / "mcp-decisions.cache.json"
|
|
47
|
+
if policy_path.exists():
|
|
48
|
+
mcp_results = optimize_all_mcps(
|
|
49
|
+
projects,
|
|
50
|
+
mcp_results,
|
|
51
|
+
policy_path,
|
|
52
|
+
vault_path if vault_path.exists() else None,
|
|
53
|
+
cache_path,
|
|
54
|
+
)
|
|
55
|
+
|
|
40
56
|
settings_results = sync_all_settings(mcp_results)
|
|
41
57
|
descriptor_results = sync_all_descriptors(projects)
|
|
58
|
+
content_results = sync_all_content(projects)
|
|
59
|
+
agent_results = sync_all_agents(projects)
|
|
42
60
|
|
|
43
61
|
report = build_report(
|
|
44
62
|
previous_version,
|
|
@@ -47,6 +65,8 @@ def run_sync(arkaos_home: Path, skills_dir: Path, home_path: str) -> SyncReport:
|
|
|
47
65
|
settings_results,
|
|
48
66
|
descriptor_results,
|
|
49
67
|
[],
|
|
68
|
+
content_results=content_results,
|
|
69
|
+
agent_results=agent_results,
|
|
50
70
|
new_features=manifest.new_features,
|
|
51
71
|
deprecated_features=manifest.deprecated_features,
|
|
52
72
|
)
|
|
@@ -0,0 +1,187 @@
|
|
|
1
|
+
"""MCP Optimizer — narrows the active MCP list per project via policy,
|
|
2
|
+
AI fallback, and per-project override; injects env secrets from a vault;
|
|
3
|
+
generates .env.arkaos.example for missing values.
|
|
4
|
+
|
|
5
|
+
Runs between mcp_syncer (produces full .mcp.json) and settings_syncer
|
|
6
|
+
(writes enabledMcpjsonServers). Deferred MCPs are simply absent from the
|
|
7
|
+
returned ``final_mcp_list`` — their definitions remain in ``.mcp.json``
|
|
8
|
+
so user can opt-in later.
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
from __future__ import annotations
|
|
12
|
+
|
|
13
|
+
import json
|
|
14
|
+
import os
|
|
15
|
+
import stat
|
|
16
|
+
from pathlib import Path
|
|
17
|
+
|
|
18
|
+
import yaml
|
|
19
|
+
|
|
20
|
+
from core.sync.ai_mcp_decider import decide_ambiguous
|
|
21
|
+
from core.sync.policy_loader import decide, load_policy
|
|
22
|
+
from core.sync.schema import McpSyncResult, Project
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
def optimize_project_mcps(
    project: Project,
    mcp_result: McpSyncResult,
    policy_path: Path,
    vault_path: Path | None,
    cache_path: Path,
    call_ai=None,
) -> McpSyncResult:
    """Return a new McpSyncResult with deferred MCPs removed from final_mcp_list.

    Never raises: an optimizer failure is downgraded to a warning so the
    original (unoptimized) MCP list remains usable.
    """
    try:
        return _do_optimize(project, mcp_result, policy_path, vault_path, cache_path, call_ai)
    except Exception as exc:  # noqa: BLE001 -- boundary: degrade, don't crash
        warnings = [*mcp_result.optimizer_warnings, f"optimization failed: {exc}"]
        return mcp_result.model_copy(update={"optimizer_warnings": warnings})
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
def _do_optimize(
    project: Project,
    mcp_result: McpSyncResult,
    policy_path: Path,
    vault_path: Path | None,
    cache_path: Path,
    call_ai=None,
) -> McpSyncResult:
    """Classify MCPs via policy, AI fallback, and override; inject secrets."""
    # Nothing to optimize for failed syncs or empty MCP lists.
    if mcp_result.status == "error" or not mcp_result.final_mcp_list:
        return mcp_result

    warnings = list(mcp_result.optimizer_warnings)

    policy = load_policy(policy_path)
    decision = decide(policy, mcp_result.final_mcp_list, project.stack, project.ecosystem)
    ai_decisions = decide_ambiguous(
        decision.ambiguous, project.stack, project.ecosystem, cache_path, call_ai
    )

    active = set(decision.active)
    deferred = set(decision.deferred)
    for name, verdict in ai_decisions.items():
        if verdict == "active":
            active.add(name)
        else:
            deferred.add(name)

    override, override_warnings = _load_override(Path(project.path))
    warnings.extend(override_warnings)

    # Collision handling: force_active wins over force_deferred.
    force_active = set(override.get("force_active", []))
    raw_force_deferred = set(override.get("force_deferred", []))
    for name in sorted(raw_force_deferred & force_active):
        warnings.append(f"override collision for MCP '{name}': force_active takes precedence")
    force_deferred = raw_force_deferred - force_active

    active = (active | force_active) - force_deferred
    deferred = (deferred - force_active) | force_deferred

    warnings.extend(_inject_env_vars(Path(project.path), vault_path, project.name))

    return mcp_result.model_copy(update={
        "final_mcp_list": sorted(active),
        "mcps_deferred": sorted(deferred),
        "optimizer_warnings": warnings,
    })
|
|
87
|
+
|
|
88
|
+
|
|
89
|
+
def _load_override(project_path: Path) -> tuple[dict, list[str]]:
    """Load the per-project MCP override file.

    Reads ``<project>/.arkaos/mcp-override.yaml`` and returns ``(data,
    warnings)``. A missing file yields ``({}, [])``. An unreadable or
    unparsable file — or a file whose top level is not a mapping — yields an
    empty override plus a human-readable warning instead of raising.
    """
    override = project_path / ".arkaos" / "mcp-override.yaml"
    if not override.exists():
        return {}, []
    try:
        data = yaml.safe_load(override.read_text()) or {}
    except (yaml.YAMLError, OSError) as exc:
        return {}, [f"override YAML parse error: {exc}"]
    # safe_load can return a scalar or list; callers do data.get(...), which
    # would raise AttributeError — surface that as a warning instead.
    if not isinstance(data, dict):
        return {}, [f"override YAML must be a mapping, got {type(data).__name__}"]
    return data, []
|
|
98
|
+
|
|
99
|
+
|
|
100
|
+
def _merge_env(servers: dict, merged_env: dict) -> tuple[bool, dict[str, str]]:
|
|
101
|
+
"""Inject known env vars into server configs. Returns (changed, missing)."""
|
|
102
|
+
missing: dict[str, str] = {}
|
|
103
|
+
changed = False
|
|
104
|
+
for server_name, config in servers.items():
|
|
105
|
+
env = config.get("env") or {}
|
|
106
|
+
for var_name, current in env.items():
|
|
107
|
+
if current:
|
|
108
|
+
continue
|
|
109
|
+
if var_name in merged_env:
|
|
110
|
+
env[var_name] = merged_env[var_name]
|
|
111
|
+
changed = True
|
|
112
|
+
else:
|
|
113
|
+
missing[var_name] = server_name
|
|
114
|
+
if env:
|
|
115
|
+
config["env"] = env
|
|
116
|
+
return changed, missing
|
|
117
|
+
|
|
118
|
+
|
|
119
|
+
def _inject_env_vars(project_path: Path, vault_path: Path | None, project_name: str) -> list[str]:
    """Inject vault secrets into the project's ``.mcp.json``. Returns warnings."""
    mcp_file = project_path / ".mcp.json"
    if not mcp_file.exists():
        return []

    try:
        data = json.loads(mcp_file.read_text())
    except (json.JSONDecodeError, OSError):
        return [".mcp.json malformed; skipped env injection"]

    if vault_path:
        vault, vault_warnings = _load_vault(vault_path)
    else:
        vault, vault_warnings = {}, []

    # Per-project secrets shadow global ones.
    merged_env = {
        **vault.get("global", {}),
        **vault.get("per_project", {}).get(project_name, {}),
    }

    changed, missing = _merge_env(data.get("mcpServers", {}), merged_env)

    if changed:
        mcp_file.write_text(json.dumps(data, indent=2) + "\n")
    if missing:
        _write_env_example(project_path, missing)

    return vault_warnings
|
|
146
|
+
|
|
147
|
+
|
|
148
|
+
def _load_vault(path: Path) -> tuple[dict, list[str]]:
|
|
149
|
+
"""Load the secrets vault JSON. Returns (data, warnings)."""
|
|
150
|
+
if not path.exists():
|
|
151
|
+
return {}, []
|
|
152
|
+
try:
|
|
153
|
+
st = path.stat()
|
|
154
|
+
except OSError:
|
|
155
|
+
return {}, []
|
|
156
|
+
# Refuse world- or group-readable files
|
|
157
|
+
if st.st_mode & (stat.S_IRWXG | stat.S_IRWXO):
|
|
158
|
+
return {}, ["vault permissions too permissive (group/world readable); secrets not injected"]
|
|
159
|
+
try:
|
|
160
|
+
return json.loads(path.read_text()), []
|
|
161
|
+
except (json.JSONDecodeError, OSError):
|
|
162
|
+
return {}, ["vault JSON parse error; secrets not injected"]
|
|
163
|
+
|
|
164
|
+
|
|
165
|
+
def _write_env_example(project_path: Path, missing: dict[str, str]) -> None:
|
|
166
|
+
lines = ["# Auto-generated by ArkaOS MCP Optimizer", ""]
|
|
167
|
+
for var, server in sorted(missing.items()):
|
|
168
|
+
lines.append(f"# required by {server}")
|
|
169
|
+
lines.append(f"{var}=")
|
|
170
|
+
(project_path / ".env.arkaos.example").write_text("\n".join(lines) + "\n")
|
|
171
|
+
|
|
172
|
+
|
|
173
|
+
def optimize_all_mcps(
    projects: list[Project],
    mcp_results: list[McpSyncResult],
    policy_path: Path,
    vault_path: Path | None,
    cache_path: Path,
) -> list[McpSyncResult]:
    """Optimize MCP lists for every project that has a matching sync result.

    Projects with no corresponding result are silently skipped.
    """
    results_by_path = {result.path: result for result in mcp_results}
    optimized: list[McpSyncResult] = []
    for project in projects:
        result = results_by_path.get(project.path)
        if result is not None:
            optimized.append(
                optimize_project_mcps(project, result, policy_path, vault_path, cache_path)
            )
    return optimized
|
|
@@ -0,0 +1,94 @@
|
|
|
1
|
+
"""MCP policy loader and matcher for the ArkaOS Sync Engine."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from dataclasses import dataclass, field
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
|
|
8
|
+
import yaml
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
@dataclass
class PolicyRule:
    """One policy entry: a match condition plus explicit MCP classifications."""

    # Match condition; keys seen by the matcher: "default", "stack_includes",
    # "ecosystem".
    match: dict
    # MCP names this rule activates.
    active: list[str] = field(default_factory=list)
    # MCP names this rule defers (left defined but not enabled).
    deferred: list[str] = field(default_factory=list)
    # MCP names this rule leaves for the AI fallback to decide.
    ambiguous: list[str] = field(default_factory=list)
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
@dataclass
class Policy:
    """Parsed mcp-policy.yaml: an ordered rule list (first match wins)."""

    # Rules are evaluated in order by the matcher.
    rules: list[PolicyRule]
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
@dataclass
class PolicyDecision:
    """Classification of a project's MCPs after applying a policy."""

    # MCPs to keep enabled.
    active: list[str]
    # MCPs left defined but disabled.
    deferred: list[str]
    # MCPs the policy could not classify (handed to the AI fallback).
    ambiguous: list[str]
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def load_policy(path: Path) -> Policy:
    """Load and parse an mcp-policy.yaml file."""
    data = yaml.safe_load(path.read_text()) or {}
    rules: list[PolicyRule] = []
    for entry in data.get("policies", []):
        rules.append(
            PolicyRule(
                match=entry.get("match", {}),
                active=list(entry.get("active", [])),
                deferred=list(entry.get("deferred", [])),
                ambiguous=list(entry.get("ambiguous", [])),
            )
        )
    return Policy(rules=rules)
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
def decide(
    policy: Policy,
    mcps: list[str],
    stack: list[str],
    ecosystem: str | None,
) -> PolicyDecision:
    """Apply the first matching rule and classify each MCP.

    MCPs explicitly listed in the matched rule go to active/deferred/ambiguous
    as declared. Any MCP not explicitly classified falls through to ambiguous
    (the AI fallback decides).
    """
    if not mcps:
        return PolicyDecision(active=[], deferred=[], ambiguous=[])

    rule = _first_match(policy, stack, ecosystem)
    if rule is None:
        # No rule applies: everything is ambiguous.
        return PolicyDecision(active=[], deferred=[], ambiguous=list(mcps))

    buckets: dict[str, list[str]] = {"active": [], "deferred": [], "ambiguous": []}
    for mcp in mcps:
        if mcp in rule.active:
            buckets["active"].append(mcp)
        elif mcp in rule.deferred:
            buckets["deferred"].append(mcp)
        else:
            buckets["ambiguous"].append(mcp)
    return PolicyDecision(**buckets)
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
def _first_match(
|
|
81
|
+
policy: Policy, stack: list[str], ecosystem: str | None
|
|
82
|
+
) -> PolicyRule | None:
|
|
83
|
+
stack_set = {s.lower() for s in stack}
|
|
84
|
+
for rule in policy.rules:
|
|
85
|
+
match = rule.match
|
|
86
|
+
if match.get("default"):
|
|
87
|
+
return rule
|
|
88
|
+
stack_inc = match.get("stack_includes")
|
|
89
|
+
if stack_inc and any(s.lower() in stack_set for s in stack_inc):
|
|
90
|
+
return rule
|
|
91
|
+
eco = match.get("ecosystem")
|
|
92
|
+
if eco and ecosystem and eco == ecosystem:
|
|
93
|
+
return rule
|
|
94
|
+
return None
|
package/core/sync/reporter.py
CHANGED
|
@@ -10,6 +10,8 @@ from datetime import datetime, timezone
|
|
|
10
10
|
from pathlib import Path
|
|
11
11
|
|
|
12
12
|
from core.sync.schema import (
|
|
13
|
+
AgentProvisionResult,
|
|
14
|
+
ContentSyncResult,
|
|
13
15
|
DescriptorSyncResult,
|
|
14
16
|
McpSyncResult,
|
|
15
17
|
SettingsSyncResult,
|
|
@@ -34,9 +36,18 @@ def build_report(
|
|
|
34
36
|
skill_results: list[SkillSyncResult],
|
|
35
37
|
new_features: list[str] | None = None,
|
|
36
38
|
deprecated_features: list[str] | None = None,
|
|
39
|
+
content_results: list[ContentSyncResult] | None = None,
|
|
40
|
+
agent_results: list[AgentProvisionResult] | None = None,
|
|
37
41
|
) -> SyncReport:
|
|
38
42
|
"""Aggregate all sync results into a SyncReport."""
|
|
39
|
-
errors = _collect_errors(
|
|
43
|
+
errors = _collect_errors(
|
|
44
|
+
mcp_results,
|
|
45
|
+
settings_results,
|
|
46
|
+
descriptor_results,
|
|
47
|
+
skill_results,
|
|
48
|
+
content_results=content_results,
|
|
49
|
+
agent_results=agent_results,
|
|
50
|
+
)
|
|
40
51
|
return SyncReport(
|
|
41
52
|
previous_version=previous_version,
|
|
42
53
|
current_version=current_version,
|
|
@@ -46,6 +57,8 @@ def build_report(
|
|
|
46
57
|
settings_results=settings_results,
|
|
47
58
|
descriptor_results=descriptor_results,
|
|
48
59
|
skill_results=skill_results,
|
|
60
|
+
content_results=content_results or [],
|
|
61
|
+
agent_results=agent_results or [],
|
|
49
62
|
errors=errors,
|
|
50
63
|
)
|
|
51
64
|
|
|
@@ -75,6 +88,8 @@ def format_report(report: SyncReport) -> str:
|
|
|
75
88
|
_format_phase_line("Settings", report.settings_results),
|
|
76
89
|
_format_phase_line("Descriptors", report.descriptor_results),
|
|
77
90
|
_format_skill_line(report.skill_results),
|
|
91
|
+
_format_content_line(report.content_results),
|
|
92
|
+
_format_agents_line(report.agent_results),
|
|
78
93
|
]
|
|
79
94
|
|
|
80
95
|
key_changes = _format_key_changes(report)
|
|
@@ -82,6 +97,11 @@ def format_report(report: SyncReport) -> str:
|
|
|
82
97
|
lines += ["", " Key changes:"]
|
|
83
98
|
lines += [f" - {c}" for c in key_changes]
|
|
84
99
|
|
|
100
|
+
total_deferred = sum(len(r.mcps_deferred) for r in report.mcp_results)
|
|
101
|
+
projects_with_deferred = sum(1 for r in report.mcp_results if r.mcps_deferred)
|
|
102
|
+
if total_deferred > 0:
|
|
103
|
+
lines += ["", f" Deferred MCPs: {total_deferred} across {projects_with_deferred} projects."]
|
|
104
|
+
|
|
85
105
|
lines += [
|
|
86
106
|
"",
|
|
87
107
|
f" Errors: {len(report.errors)}",
|
|
@@ -100,11 +120,15 @@ def _collect_errors(
|
|
|
100
120
|
settings: list[SettingsSyncResult],
|
|
101
121
|
desc: list[DescriptorSyncResult],
|
|
102
122
|
skills: list[SkillSyncResult],
|
|
123
|
+
content_results: list[ContentSyncResult] | None = None,
|
|
124
|
+
agent_results: list[AgentProvisionResult] | None = None,
|
|
103
125
|
) -> list[str]:
|
|
104
126
|
errors: list[str] = []
|
|
105
127
|
for r in mcp:
|
|
106
128
|
if r.error:
|
|
107
129
|
errors.append(f"MCP({r.path}): {r.error}")
|
|
130
|
+
for w in r.optimizer_warnings:
|
|
131
|
+
errors.append(f"MCP Optimizer({r.path}): {w}")
|
|
108
132
|
for r in settings:
|
|
109
133
|
if r.error:
|
|
110
134
|
errors.append(f"Settings({r.path}): {r.error}")
|
|
@@ -114,6 +138,16 @@ def _collect_errors(
|
|
|
114
138
|
for r in skills:
|
|
115
139
|
if r.error:
|
|
116
140
|
errors.append(f"Skill({r.skill_name}): {r.error}")
|
|
141
|
+
for r in content_results or []:
|
|
142
|
+
if r.error:
|
|
143
|
+
errors.append(f"Content({r.path}): {r.error}")
|
|
144
|
+
for artefact_error in r.artefacts_errored:
|
|
145
|
+
errors.append(f"Content({r.path}): {artefact_error}")
|
|
146
|
+
for r in agent_results or []:
|
|
147
|
+
if r.error:
|
|
148
|
+
errors.append(f"Agents({r.path}): {r.error}")
|
|
149
|
+
for a in r.agents_errored:
|
|
150
|
+
errors.append(f"Agents({r.path}): missing core file for {a}")
|
|
117
151
|
return errors
|
|
118
152
|
|
|
119
153
|
|
|
@@ -176,3 +210,17 @@ def _add_skill_changes(results: list[SkillSyncResult], changes: list[str]) -> No
|
|
|
176
210
|
for r in results:
|
|
177
211
|
for feature in r.features_added:
|
|
178
212
|
changes.append(f"'{feature}' added to: {r.skill_name}")
|
|
213
|
+
|
|
214
|
+
|
|
215
|
+
def _format_content_line(results: list[ContentSyncResult]) -> str:
    """Return the one-line Content phase summary for the sync report."""
    total = len(results)
    updated = _count_updated(results)
    unchanged = _count_unchanged(results)
    return f" {'Content:':<14}{total} synced ({updated} updated, {unchanged} unchanged)"
|
|
220
|
+
|
|
221
|
+
|
|
222
|
+
def _format_agents_line(results: list[AgentProvisionResult]) -> str:
    """Return the one-line Agents phase summary for the sync report."""
    total = len(results)
    updated = _count_updated(results)
    unchanged = _count_unchanged(results)
    return f" {'Agents:':<14}{total} synced ({updated} updated, {unchanged} unchanged)"