@techwavedev/agi-agent-kit 1.1.7 → 1.2.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of @techwavedev/agi-agent-kit has been flagged as potentially problematic — see the advisory details for more information.
- package/CHANGELOG.md +82 -1
- package/README.md +190 -12
- package/bin/init.js +30 -2
- package/package.json +6 -3
- package/templates/base/AGENTS.md +54 -23
- package/templates/base/README.md +325 -0
- package/templates/base/directives/memory_integration.md +95 -0
- package/templates/base/execution/memory_manager.py +309 -0
- package/templates/base/execution/session_boot.py +218 -0
- package/templates/base/execution/session_init.py +320 -0
- package/templates/base/skill-creator/SKILL_skillcreator.md +23 -36
- package/templates/base/skill-creator/scripts/init_skill.py +18 -135
- package/templates/skills/ec/README.md +31 -0
- package/templates/skills/ec/aws/SKILL.md +1020 -0
- package/templates/skills/ec/aws/defaults.yaml +13 -0
- package/templates/skills/ec/aws/references/common_patterns.md +80 -0
- package/templates/skills/ec/aws/references/mcp_servers.md +98 -0
- package/templates/skills/ec/aws-terraform/SKILL.md +349 -0
- package/templates/skills/ec/aws-terraform/references/best_practices.md +394 -0
- package/templates/skills/ec/aws-terraform/references/checkov_reference.md +337 -0
- package/templates/skills/ec/aws-terraform/scripts/configure_mcp.py +150 -0
- package/templates/skills/ec/confluent-kafka/SKILL.md +655 -0
- package/templates/skills/ec/confluent-kafka/references/ansible_playbooks.md +792 -0
- package/templates/skills/ec/confluent-kafka/references/ec_deployment.md +579 -0
- package/templates/skills/ec/confluent-kafka/references/kraft_migration.md +490 -0
- package/templates/skills/ec/confluent-kafka/references/troubleshooting.md +778 -0
- package/templates/skills/ec/confluent-kafka/references/upgrade_7x_to_8x.md +488 -0
- package/templates/skills/ec/confluent-kafka/scripts/kafka_health_check.py +435 -0
- package/templates/skills/ec/confluent-kafka/scripts/upgrade_preflight.py +568 -0
- package/templates/skills/ec/confluent-kafka/scripts/validate_config.py +455 -0
- package/templates/skills/ec/consul/SKILL.md +427 -0
- package/templates/skills/ec/consul/references/acl_setup.md +168 -0
- package/templates/skills/ec/consul/references/ha_config.md +196 -0
- package/templates/skills/ec/consul/references/troubleshooting.md +267 -0
- package/templates/skills/ec/consul/references/upgrades.md +213 -0
- package/templates/skills/ec/consul/scripts/consul_health_report.py +530 -0
- package/templates/skills/ec/consul/scripts/consul_status.py +264 -0
- package/templates/skills/ec/consul/scripts/generate_values.py +170 -0
- package/templates/skills/ec/documentation/SKILL.md +351 -0
- package/templates/skills/ec/documentation/references/best_practices.md +201 -0
- package/templates/skills/ec/documentation/scripts/analyze_code.py +307 -0
- package/templates/skills/ec/documentation/scripts/detect_changes.py +460 -0
- package/templates/skills/ec/documentation/scripts/generate_changelog.py +312 -0
- package/templates/skills/ec/documentation/scripts/sync_docs.py +272 -0
- package/templates/skills/ec/documentation/scripts/update_skill_docs.py +366 -0
- package/templates/skills/ec/gitlab/SKILL.md +529 -0
- package/templates/skills/ec/gitlab/references/agent_installation.md +416 -0
- package/templates/skills/ec/gitlab/references/api_reference.md +508 -0
- package/templates/skills/ec/gitlab/references/gitops_flux.md +465 -0
- package/templates/skills/ec/gitlab/references/troubleshooting.md +518 -0
- package/templates/skills/ec/gitlab/scripts/generate_agent_values.py +329 -0
- package/templates/skills/ec/gitlab/scripts/gitlab_agent_status.py +414 -0
- package/templates/skills/ec/jira/SKILL.md +484 -0
- package/templates/skills/ec/jira/references/jql_reference.md +148 -0
- package/templates/skills/ec/jira/scripts/add_comment.py +91 -0
- package/templates/skills/ec/jira/scripts/bulk_log_work.py +124 -0
- package/templates/skills/ec/jira/scripts/create_ticket.py +162 -0
- package/templates/skills/ec/jira/scripts/get_ticket.py +191 -0
- package/templates/skills/ec/jira/scripts/jira_client.py +383 -0
- package/templates/skills/ec/jira/scripts/log_work.py +154 -0
- package/templates/skills/ec/jira/scripts/search_tickets.py +104 -0
- package/templates/skills/ec/jira/scripts/update_comment.py +67 -0
- package/templates/skills/ec/jira/scripts/update_ticket.py +161 -0
- package/templates/skills/ec/karpenter/SKILL.md +301 -0
- package/templates/skills/ec/karpenter/references/ec2nodeclasses.md +421 -0
- package/templates/skills/ec/karpenter/references/migration.md +396 -0
- package/templates/skills/ec/karpenter/references/nodepools.md +400 -0
- package/templates/skills/ec/karpenter/references/troubleshooting.md +359 -0
- package/templates/skills/ec/karpenter/scripts/generate_ec2nodeclass.py +187 -0
- package/templates/skills/ec/karpenter/scripts/generate_nodepool.py +245 -0
- package/templates/skills/ec/karpenter/scripts/karpenter_status.py +359 -0
- package/templates/skills/ec/opensearch/SKILL.md +720 -0
- package/templates/skills/ec/opensearch/references/ml_neural_search.md +576 -0
- package/templates/skills/ec/opensearch/references/operator.md +532 -0
- package/templates/skills/ec/opensearch/references/query_dsl.md +532 -0
- package/templates/skills/ec/opensearch/scripts/configure_mcp.py +148 -0
- package/templates/skills/ec/victoriametrics/SKILL.md +598 -0
- package/templates/skills/ec/victoriametrics/references/kubernetes.md +531 -0
- package/templates/skills/ec/victoriametrics/references/prometheus_migration.md +333 -0
- package/templates/skills/ec/victoriametrics/references/troubleshooting.md +442 -0
- package/templates/skills/knowledge/SKILLS_CATALOG.md +274 -4
- package/templates/skills/knowledge/intelligent-routing/SKILL.md +237 -164
- package/templates/skills/knowledge/parallel-agents/SKILL.md +345 -73
- package/templates/skills/knowledge/plugin-discovery/SKILL.md +582 -0
- package/templates/skills/knowledge/plugin-discovery/scripts/platform_setup.py +1083 -0
- package/templates/skills/knowledge/design-md/README.md +0 -34
- package/templates/skills/knowledge/design-md/SKILL.md +0 -193
- package/templates/skills/knowledge/design-md/examples/DESIGN.md +0 -154
- package/templates/skills/knowledge/notebooklm-mcp/SKILL.md +0 -71
- package/templates/skills/knowledge/notebooklm-mcp/assets/example_asset.txt +0 -24
- package/templates/skills/knowledge/notebooklm-mcp/references/api_reference.md +0 -34
- package/templates/skills/knowledge/notebooklm-mcp/scripts/example.py +0 -19
- package/templates/skills/knowledge/react-components/README.md +0 -36
- package/templates/skills/knowledge/react-components/SKILL.md +0 -53
- package/templates/skills/knowledge/react-components/examples/gold-standard-card.tsx +0 -80
- package/templates/skills/knowledge/react-components/package-lock.json +0 -231
- package/templates/skills/knowledge/react-components/package.json +0 -16
- package/templates/skills/knowledge/react-components/resources/architecture-checklist.md +0 -15
- package/templates/skills/knowledge/react-components/resources/component-template.tsx +0 -37
- package/templates/skills/knowledge/react-components/resources/stitch-api-reference.md +0 -14
- package/templates/skills/knowledge/react-components/resources/style-guide.json +0 -27
- package/templates/skills/knowledge/react-components/scripts/fetch-stitch.sh +0 -30
- package/templates/skills/knowledge/react-components/scripts/validate.js +0 -68
- package/templates/skills/knowledge/self-update/SKILL.md +0 -60
- package/templates/skills/knowledge/self-update/scripts/update_kit.py +0 -103
- package/templates/skills/knowledge/stitch-loop/README.md +0 -54
- package/templates/skills/knowledge/stitch-loop/SKILL.md +0 -235
- package/templates/skills/knowledge/stitch-loop/examples/SITE.md +0 -73
- package/templates/skills/knowledge/stitch-loop/examples/next-prompt.md +0 -25
- package/templates/skills/knowledge/stitch-loop/resources/baton-schema.md +0 -61
- package/templates/skills/knowledge/stitch-loop/resources/site-template.md +0 -104
|
@@ -0,0 +1,1083 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Script: platform_setup.py
|
|
4
|
+
Purpose: Auto-detect the AI coding platform and configure the environment
|
|
5
|
+
for optimal use. One-step setup wizard.
|
|
6
|
+
|
|
7
|
+
Usage:
|
|
8
|
+
python3 platform_setup.py [--project-dir <path>] [--auto] [--json]
|
|
9
|
+
|
|
10
|
+
Arguments:
|
|
11
|
+
--project-dir Path to the project root (default: current directory)
|
|
12
|
+
--auto Auto-apply all recommended settings without prompting
|
|
13
|
+
--json Output results as JSON (for agent consumption)
|
|
14
|
+
--dry-run Show what would be configured without making changes
|
|
15
|
+
|
|
16
|
+
Exit Codes:
|
|
17
|
+
0 - Success
|
|
18
|
+
1 - Invalid arguments
|
|
19
|
+
2 - Project directory not found
|
|
20
|
+
3 - Configuration error
|
|
21
|
+
"""
|
|
22
|
+
|
|
23
|
+
import argparse
|
|
24
|
+
import json
|
|
25
|
+
import os
|
|
26
|
+
import sys
|
|
27
|
+
import glob
|
|
28
|
+
import shutil
|
|
29
|
+
import subprocess
|
|
30
|
+
from pathlib import Path
|
|
31
|
+
from urllib.request import Request, urlopen
|
|
32
|
+
from urllib.error import URLError, HTTPError
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
# ── Platform Detection ──────────────────────────────────────────────
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
def detect_platform(project_dir: Path) -> dict:
    """Identify the active AI coding platform and summarize its capabilities.

    Looks for platform-specific marker files/directories in priority order
    (Claude Code > Kiro > Gemini > Opencode), then augments the result with
    the project's tech stack, memory-system status, and recommendations.

    Args:
        project_dir: Root directory of the project to inspect.

    Returns:
        dict with keys ``platform``, ``features``, ``project_stack``,
        ``memory``, and ``recommendations``.
    """
    detection = {
        "platform": "unknown",
        "features": {},
        "project_stack": {},
        "recommendations": [],
    }

    home = Path.home()

    # Claude Code markers
    claude_dir = project_dir / ".claude"
    claude_user_dir = home / ".claude"
    claude_md = project_dir / "CLAUDE.md"
    claude_present = (claude_dir / "settings.json").exists() or claude_user_dir.exists()

    # Kiro markers (dirs or any POWER.md bundle in the tree)
    kiro_dir = project_dir / ".kiro"
    kiro_user_dir = home / ".kiro"
    kiro_present = (
        kiro_dir.exists()
        or kiro_user_dir.exists()
        or bool(list(project_dir.glob("**/POWER.md")))
    )

    # Gemini / Opencode markers
    gemini_md = project_dir / "GEMINI.md"
    opencode_md = project_dir / "OPENCODE.md"
    opencode_config = home / ".config" / "opencode" / "config.json"

    # Priority-ordered platform selection
    if claude_present:
        detection["platform"] = "claude-code"
        detection["features"] = detect_claude_features(project_dir, claude_dir, claude_user_dir)
    elif kiro_present:
        detection["platform"] = "kiro"
        detection["features"] = detect_kiro_features(project_dir, kiro_dir, kiro_user_dir)
    elif gemini_md.exists():
        detection["platform"] = "gemini"
        detection["features"] = detect_gemini_features(project_dir)
    elif opencode_md.exists() or opencode_config.exists():
        detection["platform"] = "opencode"
        detection["features"] = detect_opencode_features(project_dir)
    elif (project_dir / "AGENTS.md").exists() or gemini_md.exists() or claude_md.exists():
        # Fallback: agent files exist (installed via agi-agent-kit);
        # default to Gemini when AGENTS.md is present.
        detection["platform"] = "gemini"
        detection["features"] = detect_gemini_features(project_dir)

    # Cross-platform enrichment
    detection["project_stack"] = detect_project_stack(project_dir)
    detection["memory"] = detect_memory_system(project_dir)
    detection["recommendations"] = generate_recommendations(detection)

    return detection
def _agent_teams_enabled(settings_file: Path) -> bool:
    """Return True when *settings_file* sets CLAUDE_CODE_EXPERIMENTAL_AGENT_TEAMS to "1".

    Shared by the project-level and user-level settings checks so the
    JSON parsing logic lives in one place instead of being duplicated.
    """
    if not settings_file.exists():
        return False
    try:
        settings = json.loads(settings_file.read_text())
        return settings.get("env", {}).get("CLAUDE_CODE_EXPERIMENTAL_AGENT_TEAMS") == "1"
    except (json.JSONDecodeError, KeyError):
        return False


def detect_claude_features(project_dir: Path, claude_dir: Path, claude_user_dir: Path) -> dict:
    """Detect Claude Code features and their current state.

    Args:
        project_dir: Project root being inspected.
        claude_dir: Project-level ``.claude`` directory.
        claude_user_dir: User-level ``~/.claude`` directory.

    Returns:
        dict describing agent-teams status, plugins, subagents, skills,
        configured MCP servers, and hooks.
    """
    features = {
        "agent_teams": {"enabled": False, "configurable": True},
        "plugins": {"installed": [], "marketplace_added": False},
        "subagents": {"project": [], "user": []},
        "skills": {"project": [], "user": []},
        "mcp_servers": {"configured": []},
        "hooks": {"configured": False},
    }

    # Agent Teams can be enabled at either the project or user level.
    if _agent_teams_enabled(claude_dir / "settings.json") or _agent_teams_enabled(
        claude_user_dir / "settings.json"
    ):
        features["agent_teams"]["enabled"] = True

    # Project-level subagents (.claude/agents/*.md)
    agents_dir = claude_dir / "agents"
    if agents_dir.exists():
        features["subagents"]["project"] = [f.stem for f in agents_dir.glob("*.md")]

    # User-level subagents (~/.claude/agents/*.md)
    user_agents = claude_user_dir / "agents"
    if user_agents.exists():
        features["subagents"]["user"] = [f.stem for f in user_agents.glob("*.md")]

    # Project-level skills: a directory counts only if it has a SKILL.md.
    skills_dir = claude_dir / "skills"
    if skills_dir.exists():
        for skill_dir in skills_dir.iterdir():
            if skill_dir.is_dir() and (skill_dir / "SKILL.md").exists():
                features["skills"]["project"].append(skill_dir.name)

    # Also scan the Antigravity-style top-level skills/ directory.
    agi_skills = project_dir / "skills"
    if agi_skills.exists():
        for skill_dir in agi_skills.iterdir():
            if skill_dir.is_dir() and (skill_dir / "SKILL.md").exists():
                features["skills"]["project"].append(skill_dir.name)

    # MCP servers: prefer .claude/mcp.json, fall back to settings.json.
    mcp_config = claude_dir / "mcp.json"
    if not mcp_config.exists():
        mcp_config = claude_dir / "settings.json"
    if mcp_config.exists():
        try:
            config = json.loads(mcp_config.read_text())
            features["mcp_servers"]["configured"] = list(config.get("mcpServers", {}).keys())
        except (json.JSONDecodeError, KeyError):
            pass

    return features
def detect_kiro_features(project_dir: Path, kiro_dir: Path, kiro_user_dir: Path) -> dict:
    """Detect Kiro features and their current state.

    Inspects the user-level MCP settings for installed "powers" (servers
    namespaced as ``power-<name>-<server>``), project hooks, and any
    POWER.md bundles inside the project tree.
    """
    features = {
        "powers": {"installed": []},
        "autonomous_agent": {"available": True},
        "hooks": {"configured": []},
        "mcp_servers": {"configured": []},
    }

    # User-level MCP settings: powers are namespaced "power-*".
    kiro_mcp = kiro_user_dir / "settings" / "mcp.json"
    if kiro_mcp.exists():
        try:
            server_names = json.loads(kiro_mcp.read_text()).get("mcpServers", {})
        except (json.JSONDecodeError, KeyError):
            server_names = {}
        power_names = [n for n in server_names if n.startswith("power-")]
        features["powers"]["installed"].extend(power_names)
        features["mcp_servers"]["configured"].extend(power_names)

    # Project hooks (*.kiro.hook files under .kiro/hooks)
    hooks_dir = kiro_dir / "hooks"
    if hooks_dir.exists():
        features["hooks"]["configured"] = [f.stem for f in hooks_dir.glob("*.kiro.hook")]

    # POWER.md bundles anywhere in the project: record the parent dir name.
    for power_file in project_dir.glob("**/POWER.md"):
        features["powers"]["installed"].append(power_file.parent.name)

    return features
def detect_gemini_features(project_dir: Path) -> dict:
    """Detect Gemini/Antigravity features.

    Scans the project for skills, execution scripts, agent definitions,
    and an MCP server configuration (project file first, then the
    user-level Claude desktop config).
    """
    features = {
        "skills": {"installed": []},
        "mcp_servers": {"configured": []},
        "execution_scripts": [],
        "agents": [],
    }

    # Skills: each subdirectory of skills/ containing a SKILL.md.
    skills_dir = project_dir / "skills"
    if skills_dir.exists():
        features["skills"]["installed"] = [
            d.name
            for d in skills_dir.iterdir()
            if d.is_dir() and (d / "SKILL.md").exists()
        ]

    # Execution scripts: top-level *.py under execution/.
    exec_dir = project_dir / "execution"
    if exec_dir.exists():
        features["execution_scripts"] = [f.name for f in exec_dir.glob("*.py")]

    # Agents: prefer .agent/agents/, fall back to .agent/ itself.
    agents_dir = project_dir / ".agent" / "agents"
    if not agents_dir.exists():
        agents_dir = project_dir / ".agent"
    if agents_dir.exists():
        features["agents"] = [
            f.stem for f in agents_dir.glob("*.md") if f.name != "AGENTS.md"
        ]

    # MCP config: first candidate that exists and parses wins.
    candidates = (
        project_dir / "mcp_config.json",
        Path.home() / ".config" / "claude" / "claude_desktop_config.json",
    )
    for config_path in candidates:
        if not config_path.exists():
            continue
        try:
            config = json.loads(config_path.read_text())
        except (json.JSONDecodeError, KeyError):
            continue
        features["mcp_servers"]["configured"] = list(config.get("mcpServers", {}).keys())
        break

    return features
def detect_opencode_features(project_dir: Path) -> dict:
    """Detect Opencode features.

    Looks for project skills (same layout as Gemini) and reads the
    user-level Opencode config for providers and MCP servers.
    """
    features = {
        "skills": {"installed": []},
        "mcp_servers": {"configured": []},
        "providers": [],
    }

    # Project skills: subdirectories of skills/ that contain SKILL.md.
    skills_dir = project_dir / "skills"
    if skills_dir.exists():
        features["skills"]["installed"] = [
            d.name
            for d in skills_dir.iterdir()
            if d.is_dir() and (d / "SKILL.md").exists()
        ]

    # User-level Opencode config (~/.config/opencode/config.json).
    config_path = Path.home() / ".config" / "opencode" / "config.json"
    if config_path.exists():
        try:
            config = json.loads(config_path.read_text())
        except (json.JSONDecodeError, KeyError):
            config = None
        if config is not None:
            features["providers"] = list(config.get("providers", {}))
            features["mcp_servers"]["configured"] = list(config.get("mcpServers", {}))

    return features
def detect_memory_system(project_dir: Path) -> dict:
    """Detect Qdrant + Ollama memory system status.

    Probes the Qdrant REST API for collections (and the point counts of
    the two collections the kit uses), probes Ollama for available
    models, and checks that the local memory scripts are installed.
    Service URLs can be overridden via the QDRANT_URL / OLLAMA_URL
    environment variables.

    Args:
        project_dir: Project root; used to locate execution/ scripts.

    Returns:
        dict with per-service status, collection info, script presence,
        and an overall ``ready`` flag.
    """
    memory = {
        "qdrant": {"status": "unknown", "url": "http://localhost:6333"},
        "ollama": {"status": "unknown", "url": "http://localhost:11434"},
        "collections": {},
        "ready": False,
    }

    qdrant_url = os.environ.get("QDRANT_URL", "http://localhost:6333")
    ollama_url = os.environ.get("OLLAMA_URL", "http://localhost:11434")
    memory["qdrant"]["url"] = qdrant_url
    memory["ollama"]["url"] = ollama_url

    # ── Qdrant ──────────────────────────────────────────────────────
    # Any failure (connection refused, timeout, bad JSON) means the
    # service is effectively unavailable, so one broad catch suffices
    # (the previous tuple (URLError, HTTPError, Exception) was redundant:
    # Exception already covers both).
    try:
        req = Request(f"{qdrant_url}/collections", method="GET")
        with urlopen(req, timeout=5) as response:
            data = json.loads(response.read().decode())
        collections = [
            c["name"] for c in data.get("result", {}).get("collections", [])
        ]
        memory["qdrant"]["status"] = "ok"
        memory["qdrant"]["collections"] = collections

        # Point counts for the two collections the memory system uses.
        for col_name in ["agent_memory", "semantic_cache"]:
            if col_name not in collections:
                memory["collections"][col_name] = {"exists": False, "points": 0}
                continue
            try:
                col_req = Request(f"{qdrant_url}/collections/{col_name}", method="GET")
                with urlopen(col_req, timeout=5) as col_resp:
                    col_data = json.loads(col_resp.read().decode())
                points = col_data.get("result", {}).get("points_count", 0)
                memory["collections"][col_name] = {"exists": True, "points": points}
            except Exception:
                # Collection is listed but its stats were unreadable.
                memory["collections"][col_name] = {"exists": True, "points": -1}
    except Exception:
        memory["qdrant"]["status"] = "not_running"

    # ── Ollama ──────────────────────────────────────────────────────
    try:
        req = Request(f"{ollama_url}/api/tags", method="GET")
        with urlopen(req, timeout=5) as response:
            data = json.loads(response.read().decode())
        models = [m["name"] for m in data.get("models", [])]
        memory["ollama"]["status"] = "ok"
        memory["ollama"]["models"] = models
        # nomic-embed-text is the embedding model the kit expects.
        memory["ollama"]["has_embedding_model"] = any(
            "nomic-embed-text" in m for m in models
        )
    except Exception:
        memory["ollama"]["status"] = "not_running"

    # Local memory scripts installed by the kit.
    memory["has_session_init"] = (project_dir / "execution" / "session_init.py").exists()
    memory["has_memory_manager"] = (project_dir / "execution" / "memory_manager.py").exists()

    # Ready only when both services are up, the embedding model is
    # pulled, and both collections exist.
    memory["ready"] = (
        memory["qdrant"]["status"] == "ok"
        and memory["ollama"]["status"] == "ok"
        and memory["ollama"].get("has_embedding_model", False)
        and memory["collections"].get("agent_memory", {}).get("exists", False)
        and memory["collections"].get("semantic_cache", {}).get("exists", False)
    )

    return memory
def detect_project_stack(project_dir: Path) -> dict:
    """Detect the project's technology stack.

    Infers languages, frameworks, services, and the package manager from
    well-known manifest and config files at the project root.
    """
    stack = {
        "languages": [],
        "frameworks": [],
        "services": [],
        "package_manager": None,
    }

    # Node ecosystem: package.json drives language/framework/service hints.
    pkg_json = project_dir / "package.json"
    if pkg_json.exists():
        stack["package_manager"] = "npm"
        stack["languages"].append("javascript")
        try:
            pkg = json.loads(pkg_json.read_text())
            deps = {**pkg.get("dependencies", {}), **pkg.get("devDependencies", {})}

            if "typescript" in deps:
                stack["languages"].append("typescript")

            # dependency name -> framework label
            framework_map = {
                "next": "nextjs",
                "react": "react",
                "vue": "vue",
                "express": "express",
                "fastify": "fastify",
                "nestjs": "nestjs",
                "@angular/core": "angular",
                "svelte": "svelte",
                "astro": "astro",
                "react-native": "react-native",
                "expo": "expo",
            }
            stack["frameworks"].extend(
                fw for dep, fw in framework_map.items() if dep in deps
            )

            # dependency name -> hosted service label
            service_map = {
                "@supabase/supabase-js": "supabase",
                "stripe": "stripe",
                "firebase": "firebase",
                "@prisma/client": "prisma",
                "mongoose": "mongodb",
            }
            stack["services"].extend(
                svc for dep, svc in service_map.items() if dep in deps
            )
        except (json.JSONDecodeError, KeyError):
            pass

    # Other language manifests
    if (project_dir / "requirements.txt").exists() or (project_dir / "pyproject.toml").exists():
        stack["languages"].append("python")
        stack["package_manager"] = stack["package_manager"] or "pip"
    if (project_dir / "go.mod").exists():
        stack["languages"].append("go")
    if (project_dir / "Cargo.toml").exists():
        stack["languages"].append("rust")
    if (project_dir / "Gemfile").exists():
        stack["languages"].append("ruby")

    # Services inferred from config files at the project root
    service_markers = [
        (".github", "github"),
        (".gitlab-ci.yml", "gitlab"),
        ("docker-compose.yml", "docker"),
        ("docker-compose.yaml", "docker"),
        ("Dockerfile", "docker"),
        ("vercel.json", "vercel"),
        ("netlify.toml", "netlify"),
    ]
    for marker, svc in service_markers:
        if (project_dir / marker).exists():
            stack["services"].append(svc)
    if (project_dir / "terraform").exists() or list(project_dir.glob("*.tf")):
        stack["services"].append("terraform")

    # Deduplicate while preserving first-seen order
    for key in ("languages", "frameworks", "services"):
        stack[key] = list(dict.fromkeys(stack[key]))

    return stack
def generate_recommendations(detection: dict) -> list:
    """Generate platform-specific recommendations based on detection results.

    Dispatches to the per-platform generator (if any), then appends the
    memory-system recommendations that apply to every platform.
    """
    platform = detection["platform"]
    features = detection["features"]
    stack = detection["project_stack"]

    # Per-platform recommendation generators.
    generators = {
        "claude-code": generate_claude_recommendations,
        "kiro": generate_kiro_recommendations,
        "gemini": generate_gemini_recommendations,
        "opencode": generate_opencode_recommendations,
    }

    recs = []
    generator = generators.get(platform)
    if generator is not None:
        recs.extend(generator(features, stack))

    # Memory system recommendations (all platforms)
    recs.extend(generate_memory_recommendations(detection.get("memory", {})))

    return recs
def generate_memory_recommendations(memory: dict) -> list:
    """Generate memory system recommendations.

    Qdrant and Ollama are independent services, so both are checked on
    every run (previously a down Qdrant short-circuited the function and
    hid the Ollama recommendation, forcing a second setup pass).
    Collection checks still require both services to be reachable.

    Args:
        memory: The ``memory`` sub-dict produced by detect_memory_system.

    Returns:
        list of recommendation dicts (id, priority, category, title,
        description, action, and optionally command).
    """
    recs = []

    qdrant = memory.get("qdrant", {})
    ollama = memory.get("ollama", {})
    collections = memory.get("collections", {})

    qdrant_down = qdrant.get("status") == "not_running"
    ollama_down = ollama.get("status") == "not_running"

    if qdrant_down:
        recs.append({
            "id": "memory_qdrant",
            "priority": "medium",
            "category": "memory",
            "title": "Start Qdrant vector database",
            "description": "Required for semantic memory and cache",
            "action": "run_command",
            "command": "docker run -d -p 6333:6333 -p 6334:6334 -v qdrant_storage:/qdrant/storage qdrant/qdrant",
        })

    if ollama_down:
        recs.append({
            "id": "memory_ollama",
            "priority": "medium",
            "category": "memory",
            "title": "Start Ollama for local embeddings",
            "description": "Required for semantic memory (zero-cost embeddings)",
            "action": "run_command",
            "command": "brew install ollama && ollama serve",
        })
    elif not ollama.get("has_embedding_model", False):
        # Ollama is reachable but the embedding model is missing.
        recs.append({
            "id": "memory_embed_model",
            "priority": "high",
            "category": "memory",
            "title": "Pull embedding model",
            "description": "nomic-embed-text (274MB) for semantic search",
            "action": "run_command",
            "command": "ollama pull nomic-embed-text",
        })

    # Collection state can only be assessed when both services are up.
    if qdrant_down or ollama_down:
        return recs

    agent_mem = collections.get("agent_memory", {})
    sem_cache = collections.get("semantic_cache", {})

    if not agent_mem.get("exists") or not sem_cache.get("exists"):
        recs.append({
            "id": "memory_init_collections",
            "priority": "high",
            "category": "memory",
            "title": "Initialize memory collections",
            "description": "Create agent_memory and semantic_cache with correct dimensions",
            "action": "init_memory",
            "command": "python3 execution/session_init.py",
        })
    elif agent_mem.get("points", 0) == 0 and sem_cache.get("points", 0) == 0:
        recs.append({
            "id": "memory_empty",
            "priority": "low",
            "category": "memory",
            "title": "Memory collections are empty",
            "description": "Agent should store decisions and cache responses during sessions",
            "action": "info",
        })

    return recs
def generate_claude_recommendations(features: dict, stack: dict) -> list:
    """Build setup recommendations for a Claude Code project.

    Covers four areas: the Agent Teams experimental flag, language-server
    plugins matched to the detected languages, service-integration plugins
    matched to the detected services, and the ``.claude/skills`` directory.

    Args:
        features: Detected Claude Code features (e.g. ``agent_teams``).
        stack: Detected project stack (``languages``, ``services``, ...).

    Returns:
        A list of recommendation dicts (id/priority/category/title/...).
    """
    suggestions = []

    # Agent Teams unlocks parallel multi-agent orchestration.
    if not features.get("agent_teams", {}).get("enabled", False):
        suggestions.append({
            "id": "claude_agent_teams",
            "priority": "high",
            "category": "orchestration",
            "title": "Enable Agent Teams",
            "description": "True parallel multi-agent orchestration",
            "action": "add_settings_json",
            "config": {"env": {"CLAUDE_CODE_EXPERIMENTAL_AGENT_TEAMS": "1"}},
            "target": ".claude/settings.json",
        })

    # Language servers keyed by detected language.
    lsp_by_language = {
        "typescript": ("typescript-lsp", "TypeScript type checking & diagnostics"),
        "javascript": ("typescript-lsp", "JavaScript/TypeScript diagnostics"),
        "python": ("pyright-lsp", "Python type checking & diagnostics"),
        "rust": ("rust-analyzer-lsp", "Rust diagnostics & navigation"),
        "go": ("gopls-lsp", "Go diagnostics & navigation"),
    }
    for language in stack.get("languages", []):
        entry = lsp_by_language.get(language)
        if entry is None:
            continue
        plugin, blurb = entry
        suggestions.append({
            "id": f"claude_lsp_{language}",
            "priority": "high",
            "category": "plugins",
            "title": f"Install {plugin}",
            "description": blurb,
            "action": "install_plugin",
            "command": f"/plugin install {plugin}@claude-plugins-official",
        })

    # Service integrations keyed by detected service.
    integrations = {
        "github": ("github", "GitHub issues, PRs, repos"),
        "gitlab": ("gitlab", "GitLab integration"),
        "vercel": ("vercel", "Deployment integration"),
        "firebase": ("firebase", "Firebase services"),
    }
    for service in stack.get("services", []):
        entry = integrations.get(service)
        if entry is None:
            continue
        plugin, blurb = entry
        suggestions.append({
            "id": f"claude_svc_{service}",
            "priority": "medium",
            "category": "plugins",
            "title": f"Install {plugin} plugin",
            "description": blurb,
            "action": "install_plugin",
            "command": f"/plugin install {plugin}@claude-plugins-official",
        })

    # Skills directory so Claude Code can discover project skills.
    # NOTE: intentionally resolved relative to the current working directory,
    # matching the original behavior.
    if not Path(".claude/skills").exists():
        suggestions.append({
            "id": "claude_skills_dir",
            "priority": "medium",
            "category": "skills",
            "title": "Set up .claude/skills/ directory",
            "description": "Enable Claude Code to discover project skills",
            "action": "create_dir",
            "target": ".claude/skills",
        })

    return suggestions
|
|
644
|
+
|
|
645
|
+
|
|
646
|
+
def generate_kiro_recommendations(features: dict, stack: dict) -> list:
    """Build setup recommendations for a Kiro IDE project.

    Suggests Powers matching the detected services and frameworks, and a
    quality-gate hooks directory when no hooks are configured.

    Args:
        features: Detected Kiro features (``powers``, ``hooks``, ...).
        stack: Detected project stack (``services``, ``frameworks``, ...).

    Returns:
        A list of recommendation dicts (id/priority/category/title/...).
    """
    recs = []

    # Powers that actually exist for these services.  vercel and docker were
    # previously listed with the description "Not available as Power yet";
    # emitting an "Install X Power" recommendation for a Power that cannot be
    # installed is not actionable, so those entries are excluded.
    power_map = {
        "supabase": ("Supabase", "Database, auth, storage, realtime"),
        "stripe": ("Stripe", "Payment integration"),
        "firebase": ("Firebase", "Backend services"),
        "netlify": ("Netlify", "Web app deployment"),
        "terraform": ("Terraform", "Infrastructure as Code"),
    }
    installed = [p.lower() for p in features.get("powers", {}).get("installed", [])]
    for svc in stack.get("services", []):
        if svc in power_map and svc not in installed:
            name, desc = power_map[svc]
            recs.append({
                "id": f"kiro_power_{svc}",
                "priority": "high",
                "category": "powers",
                "title": f"Install {name} Power",
                "description": desc,
                "action": "install_power",
                "instruction": f"Open Powers panel → Install {name}",
            })

    # Framework-specific powers.  Both react and nextjs map to Figma, so
    # deduplicate by recommendation id: a project using both frameworks
    # previously got the same "kiro_power_figma" recommendation twice.
    framework_powers = {
        "react": ("Figma", "Design to code with Figma"),
        "nextjs": ("Figma", "Design to code with Figma"),
    }
    seen_ids = {r["id"] for r in recs}
    for fw in stack.get("frameworks", []):
        if fw in framework_powers:
            name, desc = framework_powers[fw]
            rec_id = f"kiro_power_{name.lower()}"
            if name.lower() not in installed and rec_id not in seen_ids:
                seen_ids.add(rec_id)
                recs.append({
                    "id": rec_id,
                    "priority": "medium",
                    "category": "powers",
                    "title": f"Install {name} Power",
                    "description": desc,
                    "action": "install_power",
                    "instruction": f"Open Powers panel → Install {name}",
                })

    # Hooks: recommend quality-gate hooks when none are configured yet.
    if not features.get("hooks", {}).get("configured", []):
        recs.append({
            "id": "kiro_hooks",
            "priority": "medium",
            "category": "hooks",
            "title": "Set up quality gate hooks",
            "description": "Automated checks in .kiro/hooks/",
            "action": "create_hooks",
            "target": ".kiro/hooks/",
        })

    return recs
|
|
705
|
+
|
|
706
|
+
|
|
707
|
+
def generate_gemini_recommendations(features: dict, stack: dict) -> list:
    """Build setup recommendations for a Gemini / Antigravity project.

    The only check currently performed is whether any skills are installed;
    when none are, recommend running the kit's init command.

    Args:
        features: Detected features (only ``skills`` is consulted).
        stack: Detected project stack (currently unused, kept for API parity).

    Returns:
        A list with at most one recommendation dict.
    """
    if features.get("skills", {}).get("installed", []):
        return []

    return [
        {
            "id": "gemini_skills",
            "priority": "high",
            "category": "skills",
            "title": "Initialize skills",
            "description": "Run npx @techwavedev/agi-agent-kit init to install skills",
            "action": "run_command",
            "command": "npx -y @techwavedev/agi-agent-kit init",
        }
    ]
|
|
724
|
+
|
|
725
|
+
|
|
726
|
+
def generate_opencode_recommendations(features: dict, stack: dict) -> list:
    """Build setup recommendations for an Opencode project.

    Mirrors the Gemini generator: when no skills are installed, recommend
    running the kit's init command; otherwise nothing to suggest.

    Args:
        features: Detected features (only ``skills`` is consulted).
        stack: Detected project stack (currently unused, kept for API parity).

    Returns:
        A list with at most one recommendation dict.
    """
    if features.get("skills", {}).get("installed", []):
        return []

    return [
        {
            "id": "opencode_skills",
            "priority": "high",
            "category": "skills",
            "title": "Initialize skills",
            "description": "Run npx @techwavedev/agi-agent-kit init to install skills",
            "action": "run_command",
            "command": "npx -y @techwavedev/agi-agent-kit init",
        }
    ]
|
|
742
|
+
|
|
743
|
+
|
|
744
|
+
# ── Configuration Applier ───────────────────────────────────────────
|
|
745
|
+
|
|
746
|
+
|
|
747
|
+
def apply_recommendation(rec: dict, project_dir: Path, dry_run: bool = False) -> dict:
    """Apply a single recommendation.

    Args:
        rec: Recommendation dict with at least ``id`` and ``action``; the
            remaining keys (``target``, ``config``, ``command``, ...) depend
            on the action.
        project_dir: Project root against which relative targets resolve.
        dry_run: When True, report what would happen without touching disk
            or running any command.

    Returns:
        A dict with ``id``, ``status`` (one of "applied", "manual",
        "dry_run", "error", "info", "skipped"), and a human-readable
        ``detail``.
    """
    import shlex  # local import: only needed to parse the init_memory command

    result = {"id": rec["id"], "status": "skipped", "detail": ""}

    action = rec.get("action", "")

    if action == "add_settings_json":
        target = project_dir / rec["target"]
        config_to_add = rec["config"]

        if dry_run:
            result["status"] = "dry_run"
            result["detail"] = f"Would update {target} with {json.dumps(config_to_add)}"
            return result

        # Read existing settings (tolerating a corrupt file) or start fresh.
        target.parent.mkdir(parents=True, exist_ok=True)
        existing = {}
        if target.exists():
            try:
                existing = json.loads(target.read_text())
            except json.JSONDecodeError:
                existing = {}

        # One-level-deep merge: nested dicts (e.g. "env") keep their other
        # keys instead of being wholesale replaced.
        for key, value in config_to_add.items():
            if isinstance(value, dict) and key in existing and isinstance(existing[key], dict):
                existing[key].update(value)
            else:
                existing[key] = value

        target.write_text(json.dumps(existing, indent=2) + "\n")
        result["status"] = "applied"
        result["detail"] = f"Updated {target}"

    elif action == "create_dir":
        target = project_dir / rec["target"]
        if dry_run:
            result["status"] = "dry_run"
            result["detail"] = f"Would create {target}"
        else:
            target.mkdir(parents=True, exist_ok=True)
            result["status"] = "applied"
            result["detail"] = f"Created {target}"

    elif action == "install_plugin":
        # Plugins require Claude Code CLI — output instruction
        result["status"] = "manual"
        result["detail"] = f"Run in Claude Code: {rec['command']}"

    elif action == "install_power":
        # Powers require Kiro IDE — output instruction
        result["status"] = "manual"
        result["detail"] = rec.get("instruction", "Open Powers panel to install")

    elif action == "run_command":
        # Deliberately never executed here: shell commands (which may contain
        # "&&" chains) are surfaced to the user instead of being run.
        if dry_run:
            result["status"] = "dry_run"
            result["detail"] = f"Would run: {rec['command']}"
        else:
            result["status"] = "manual"
            result["detail"] = f"Run: {rec['command']}"

    elif action == "create_hooks":
        target = project_dir / rec["target"]
        if dry_run:
            result["status"] = "dry_run"
            result["detail"] = f"Would create {target}"
        else:
            target.mkdir(parents=True, exist_ok=True)
            result["status"] = "applied"
            result["detail"] = f"Created {target}"

    elif action == "init_memory":
        cmd = rec.get("command", "python3 execution/session_init.py")
        if dry_run:
            result["status"] = "dry_run"
            result["detail"] = f"Would run: {cmd}"
        else:
            try:
                # shlex.split (instead of str.split) keeps quoted arguments —
                # e.g. paths with spaces — as single argv entries.
                proc = subprocess.run(
                    shlex.split(cmd),
                    cwd=str(project_dir),
                    capture_output=True,
                    text=True,
                    timeout=30,
                )
                if proc.returncode == 0:
                    result["status"] = "applied"
                    result["detail"] = f"Memory initialized: {cmd}"
                else:
                    result["status"] = "error"
                    result["detail"] = f"Failed: {proc.stderr.strip() or proc.stdout.strip()}"
            except Exception as e:
                # Best-effort: any failure (missing interpreter, timeout, bad
                # quoting) is reported in the result rather than raised.
                result["status"] = "error"
                result["detail"] = f"Error running {cmd}: {e}"

    elif action == "info":
        result["status"] = "info"
        result["detail"] = rec.get("description", "")

    return result
|
|
849
|
+
|
|
850
|
+
|
|
851
|
+
# ── Output Formatting ───────────────────────────────────────────────
|
|
852
|
+
|
|
853
|
+
|
|
854
|
+
# Display names for each detected platform key.
PLATFORM_NAMES = {
    "claude-code": "Claude Code",
    "kiro": "Kiro IDE",
    "gemini": "Gemini / Antigravity",
    "opencode": "Opencode",
    "unknown": "Unknown",
}

# Emoji shown in the report header for each platform key.
PLATFORM_EMOJI = {
    "claude-code": "🤖",
    "kiro": "⚡",
    "gemini": "♊",
    "opencode": "💻",
    "unknown": "❓",
}


def print_report(detection: dict, results: list = None):
    """Print a human-readable setup report.

    Args:
        detection: Detection payload with "platform", "features",
            "project_stack", "recommendations", and optionally "memory".
        results: Optional list of apply_recommendation() result dicts to
            render in an "Applied Changes" section.
    """
    platform = detection["platform"]
    emoji = PLATFORM_EMOJI.get(platform, "❓")
    name = PLATFORM_NAMES.get(platform, "Unknown")
    features = detection["features"]
    stack = detection["project_stack"]
    recs = detection["recommendations"]

    print(f"\n{'='*60}")
    print(f" {emoji} Platform Setup Report — {name}")
    print(f"{'='*60}\n")

    # Tech Stack.  Use .get() for every key: a stack dict that has
    # "frameworks" but no "languages" key must not raise KeyError
    # (the previous direct indexing did).
    if stack.get("languages") or stack.get("frameworks"):
        print("📦 Project Stack:")
        if stack.get("languages"):
            print(f"   Languages: {', '.join(stack['languages'])}")
        if stack.get("frameworks"):
            print(f"   Frameworks: {', '.join(stack['frameworks'])}")
        if stack.get("services"):
            print(f"   Services: {', '.join(stack['services'])}")
        print()

    # Current Features — sections differ per platform.
    print("🔍 Detected Features:")
    if platform == "claude-code":
        at = features.get("agent_teams", {})
        print(f"   Agent Teams: {'✅ Enabled' if at.get('enabled') else '❌ Not enabled'}")
        agents = features.get("subagents", {})
        total_agents = len(agents.get("project", [])) + len(agents.get("user", []))
        print(f"   Subagents: {total_agents} configured")
        skills = features.get("skills", {})
        total_skills = len(set(skills.get("project", [])))
        print(f"   Skills: {total_skills} discovered")
        mcp = features.get("mcp_servers", {})
        print(f"   MCP Servers: {len(mcp.get('configured', []))} configured")

    elif platform == "kiro":
        powers = features.get("powers", {})
        print(f"   Powers: {len(powers.get('installed', []))} installed")
        hooks = features.get("hooks", {})
        print(f"   Hooks: {len(hooks.get('configured', []))} configured")
        mcp = features.get("mcp_servers", {})
        print(f"   MCP Servers: {len(mcp.get('configured', []))} configured")
        print(f"   Autonomous: ✅ Available")

    elif platform in ("gemini", "opencode"):
        skills = features.get("skills", {})
        print(f"   Skills: {len(skills.get('installed', []))} installed")
        agents = features.get("agents", [])
        if agents:
            print(f"   Agents: {len(agents)} configured")
        mcp = features.get("mcp_servers", {})
        print(f"   MCP Servers: {len(mcp.get('configured', []))} configured")

    # Memory System (all platforms).
    memory = detection.get("memory", {})
    if memory:
        qdrant = memory.get("qdrant", {})
        ollama = memory.get("ollama", {})
        collections = memory.get("collections", {})
        print()
        print("🧠 Memory System:")
        print(f"   Qdrant: {'✅ Running' if qdrant.get('status') == 'ok' else '❌ Not running'}")
        print(f"   Ollama: {'✅ Running' if ollama.get('status') == 'ok' else '❌ Not running'}")
        if ollama.get('status') == 'ok':
            print(f"   Embed Model: {'✅ nomic-embed-text' if ollama.get('has_embedding_model') else '❌ Missing'}")
        agent_mem = collections.get('agent_memory', {})
        sem_cache = collections.get('semantic_cache', {})
        if agent_mem.get('exists') or sem_cache.get('exists'):
            print(f"   agent_memory: {agent_mem.get('points', 0)} points")
            print(f"   semantic_cache: {sem_cache.get('points', 0)} points")
        elif qdrant.get('status') == 'ok':
            print(f"   Collections: ❌ Not initialized")

    print()

    # Recommendations, ranked by the priority icon.
    if recs:
        print(f"💡 Recommendations ({len(recs)}):")
        for i, rec in enumerate(recs, 1):
            priority_icon = {"high": "🔴", "medium": "🟡", "low": "🟢"}.get(rec["priority"], "⚪")
            print(f"   {i}. {priority_icon} [{rec['category']}] {rec['title']}")
            print(f"      {rec['description']}")
        print()
    else:
        print("✅ No recommendations — everything looks configured!\n")

    # Results of applied changes, if any were produced.
    if results:
        print("📋 Applied Changes:")
        for r in results:
            status_icon = {
                "applied": "✅",
                "manual": "👉",
                "skipped": "⏭️",
                "dry_run": "🔍",
                "error": "❌",
                "info": "ℹ️",
            }.get(r["status"], "❓")
            print(f"   {status_icon} {r['detail']}")
        print()

    print(f"{'='*60}\n")
|
|
976
|
+
|
|
977
|
+
|
|
978
|
+
# ── Main ────────────────────────────────────────────────────────────
|
|
979
|
+
|
|
980
|
+
|
|
981
|
+
def main():
    """CLI entry point: detect the platform, report, and apply settings.

    Exit codes: 0 on success (including nothing-to-do), 2 when
    ``--project-dir`` does not exist.
    """
    parser = argparse.ArgumentParser(
        description="Auto-detect AI platform and configure environment"
    )
    parser.add_argument(
        "--project-dir",
        default=".",
        help="Path to the project root (default: current directory)",
    )
    parser.add_argument(
        "--auto",
        action="store_true",
        help="Auto-apply all recommended settings without prompting",
    )
    parser.add_argument(
        "--json",
        action="store_true",
        dest="json_output",
        help="Output results as JSON",
    )
    parser.add_argument(
        "--dry-run",
        action="store_true",
        help="Show what would be configured without making changes",
    )
    args = parser.parse_args()

    project_dir = Path(args.project_dir).resolve()
    if not project_dir.exists():
        print(json.dumps({"status": "error", "message": f"Directory not found: {project_dir}"}))
        sys.exit(2)

    # Detect
    detection = detect_platform(project_dir)

    # JSON output mode (for agent consumption)
    if args.json_output:
        print(json.dumps(detection, indent=2))
        sys.exit(0)

    # Interactive / auto mode
    print_report(detection)

    recs = detection["recommendations"]
    if not recs:
        sys.exit(0)

    # Single source of truth for which actions can be applied without the
    # user — previously this tuple was duplicated in two comprehensions and
    # could silently drift out of sync.
    auto_actions = ("add_settings_json", "create_dir", "create_hooks", "init_memory")
    auto_applicable = [r for r in recs if r["action"] in auto_actions]
    manual_only = [r for r in recs if r["action"] not in auto_actions and r["action"] != "info"]
    info_only = [r for r in recs if r["action"] == "info"]

    def _apply_batch(batch):
        # Apply each recommendation in order and collect its result dict.
        return [apply_recommendation(rec, project_dir, dry_run=args.dry_run) for rec in batch]

    if auto_applicable:
        if args.auto or args.dry_run:
            confirm = True
        else:
            print(f"🔧 {len(auto_applicable)} setting(s) can be auto-configured.")
            if manual_only:
                print(f"👉 {len(manual_only)} require manual action (plugin/power installs).")
            try:
                answer = input("\nApply recommended settings? [Y/n] ").strip().lower()
                confirm = answer in ("", "y", "yes")
            except (EOFError, KeyboardInterrupt):
                confirm = False

        results = []
        if confirm:
            results.extend(_apply_batch(auto_applicable))

        # Always show manual actions and info items, even when declined.
        results.extend(_apply_batch(manual_only))
        results.extend(_apply_batch(info_only))

        print_report(detection, results)
    else:
        # Only manual or info recommendations
        results = _apply_batch(manual_only) + _apply_batch(info_only)
        if results:
            print("👉 Manual actions required:")
            for r in results:
                status_icon = {"manual": "👉", "info": "ℹ️"}.get(r["status"], "•")
                print(f"  {status_icon} {r['detail']}")
            print()

    sys.exit(0)
|
|
1080
|
+
|
|
1081
|
+
|
|
1082
|
+
# Script entry point: run the detector/configurator when executed directly.
if __name__ == "__main__":
    main()
|