@aslomon/effectum 0.3.4 → 0.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/install.js +41 -4
- package/bin/lib/cli-tools.js +288 -0
- package/bin/lib/specializations.js +11 -1
- package/bin/lib/template.js +14 -0
- package/bin/lib/ui.js +109 -0
- package/package.json +1 -1
- package/system/agents/data-engineer.md +268 -0
- package/system/agents/mobile-developer.md +257 -0
- package/system/templates/CLAUDE.md.tmpl +6 -0
- package/system/templates/settings.json.tmpl +11 -0
package/bin/install.js
CHANGED
|
@@ -53,6 +53,7 @@ const {
|
|
|
53
53
|
askLanguage,
|
|
54
54
|
askAutonomy,
|
|
55
55
|
showRecommendation,
|
|
56
|
+
showCliToolCheck,
|
|
56
57
|
askSetupMode,
|
|
57
58
|
askCustomize,
|
|
58
59
|
askManual,
|
|
@@ -61,6 +62,7 @@ const {
|
|
|
61
62
|
showSummary,
|
|
62
63
|
showOutro,
|
|
63
64
|
} = require("./lib/ui");
|
|
65
|
+
const { checkAllTools, formatToolStatus } = require("./lib/cli-tools");
|
|
64
66
|
|
|
65
67
|
// ─── File helpers ─────────────────────────────────────────────────────────────
|
|
66
68
|
|
|
@@ -343,7 +345,11 @@ function installRecommendedAgents(targetDir, repoRoot, recommendedAgents) {
|
|
|
343
345
|
const agentsDest = path.join(targetDir, ".claude", "agents");
|
|
344
346
|
const steps = [];
|
|
345
347
|
|
|
346
|
-
if (
|
|
348
|
+
if (
|
|
349
|
+
!fs.existsSync(agentsSrc) ||
|
|
350
|
+
!recommendedAgents ||
|
|
351
|
+
recommendedAgents.length === 0
|
|
352
|
+
) {
|
|
347
353
|
return steps;
|
|
348
354
|
}
|
|
349
355
|
|
|
@@ -592,6 +598,22 @@ Options:
|
|
|
592
598
|
process.exit(0);
|
|
593
599
|
}
|
|
594
600
|
|
|
601
|
+
// CLI tool check (report only, no install in non-interactive mode)
|
|
602
|
+
const toolCheck = checkAllTools(config.stack);
|
|
603
|
+
if (toolCheck.missing.length > 0) {
|
|
604
|
+
console.log("\n CLI Tool Check:");
|
|
605
|
+
console.log(formatToolStatus(toolCheck.tools));
|
|
606
|
+
console.log(
|
|
607
|
+
`\n ${toolCheck.missing.length} tool(s) not installed. Run installer interactively to install.`,
|
|
608
|
+
);
|
|
609
|
+
}
|
|
610
|
+
|
|
611
|
+
// Store tool status in config
|
|
612
|
+
config.detectedTools = toolCheck.tools.map((t) => ({
|
|
613
|
+
key: t.key,
|
|
614
|
+
installed: t.installed,
|
|
615
|
+
}));
|
|
616
|
+
|
|
595
617
|
// Install base files
|
|
596
618
|
installBaseFiles(targetDir, repoRoot, isGlobal);
|
|
597
619
|
|
|
@@ -609,9 +631,13 @@ Options:
|
|
|
609
631
|
}
|
|
610
632
|
|
|
611
633
|
// MCP servers — always install recommended MCPs (or explicit --with-mcp)
|
|
612
|
-
const mcpKeys =
|
|
634
|
+
const mcpKeys =
|
|
635
|
+
config.mcpServers ||
|
|
636
|
+
(config.recommended ? config.recommended.mcps : []) ||
|
|
637
|
+
[];
|
|
613
638
|
if (mcpKeys.length > 0 || args.withMcp) {
|
|
614
|
-
const keysToInstall =
|
|
639
|
+
const keysToInstall =
|
|
640
|
+
mcpKeys.length > 0 ? mcpKeys : MCP_SERVERS.map((s) => s.key);
|
|
615
641
|
const mcpResults = installMcpServers(keysToInstall);
|
|
616
642
|
const settingsPath = isGlobal
|
|
617
643
|
? path.join(homeClaudeDir, "settings.json")
|
|
@@ -700,6 +726,9 @@ Options:
|
|
|
700
726
|
|
|
701
727
|
showRecommendation(rec);
|
|
702
728
|
|
|
729
|
+
// ── CLI Tool Check ────────────────────────────────────────────────────────
|
|
730
|
+
const cliToolResult = await showCliToolCheck(stack);
|
|
731
|
+
|
|
703
732
|
// ── Step 8: Decision ──────────────────────────────────────────────────────
|
|
704
733
|
const setupMode = await askSetupMode();
|
|
705
734
|
|
|
@@ -733,6 +762,10 @@ Options:
|
|
|
733
762
|
packageManager: detected.packageManager,
|
|
734
763
|
formatter: formatterDef.name,
|
|
735
764
|
mcpServers: finalSetup.mcps,
|
|
765
|
+
detectedTools: cliToolResult.tools.map((t) => ({
|
|
766
|
+
key: t.key,
|
|
767
|
+
installed: t.installed,
|
|
768
|
+
})),
|
|
736
769
|
playwrightBrowsers: wantPlaywright,
|
|
737
770
|
installScope: "local",
|
|
738
771
|
recommended: {
|
|
@@ -804,7 +837,11 @@ Options:
|
|
|
804
837
|
if (recAgents.length > 0) {
|
|
805
838
|
const sAgents = p.spinner();
|
|
806
839
|
sAgents.start("Installing agent specializations...");
|
|
807
|
-
const agentSteps = installRecommendedAgents(
|
|
840
|
+
const agentSteps = installRecommendedAgents(
|
|
841
|
+
installTargetDir,
|
|
842
|
+
repoRoot,
|
|
843
|
+
recAgents,
|
|
844
|
+
);
|
|
808
845
|
const agentCount = agentSteps.filter((s) => s.status === "created").length;
|
|
809
846
|
sAgents.stop(`${agentCount} agent specializations installed`);
|
|
810
847
|
configSteps.push(...agentSteps);
|
|
@@ -0,0 +1,288 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* CLI tool definitions, detection, installation, and auth checking.
|
|
3
|
+
*
|
|
4
|
+
* Each tool specifies:
|
|
5
|
+
* - key/bin: identifier and binary name
|
|
6
|
+
* - install: platform-specific install commands (darwin/linux/all)
|
|
7
|
+
* - auth/authSetup: commands to check and configure authentication
|
|
8
|
+
* - why: human-readable reason for the tool
|
|
9
|
+
* - foundation: true if always recommended regardless of stack
|
|
10
|
+
* - stacks: array of stack keys where this tool is relevant
|
|
11
|
+
*/
|
|
12
|
+
"use strict";
|
|
13
|
+
|
|
14
|
+
const { spawnSync } = require("child_process");
|
|
15
|
+
const os = require("os");
|
|
16
|
+
|
|
17
|
+
// ─── Tool definitions ────────────────────────────────────────────────────────
|
|
18
|
+
|
|
19
|
+
// Catalog of CLI tools the installer can detect, install, and auth-check.
// Entry schema (consumed by getToolsForStack / checkAllTools / installTool / checkAuth):
//   key / bin   — identifier and the binary name looked up on PATH
//   install     — map of platform key ("darwin"/"linux") → shell command;
//                 "all" is a platform-independent fallback
//   auth        — shell command that exits 0 when authenticated (null = no auth step)
//   authSetup   — interactive command the user runs to authenticate
//   why         — human-readable rationale shown in status output
//   foundation  — true: always recommended regardless of stack
//   stacks      — stack keys for which the tool is relevant
const CLI_TOOLS = [
  // Foundation (always recommended)
  {
    key: "git",
    bin: "git",
    install: {
      darwin: "xcode-select --install",
      linux: "sudo apt install -y git",
    },
    auth: "git config user.name && git config user.email",
    authSetup:
      'git config --global user.name "Your Name" && git config --global user.email "you@example.com"',
    why: "Version control — required for all projects",
    foundation: true,
  },
  {
    key: "gh",
    bin: "gh",
    install: { darwin: "brew install gh", linux: "sudo apt install -y gh" },
    auth: "gh auth status",
    authSetup: "gh auth login",
    why: "GitHub: Issues, PRs, Code Search, CI status",
    foundation: true,
  },
  // Stack-specific
  {
    key: "supabase",
    bin: "supabase",
    install: {
      darwin: "brew install supabase/tap/supabase",
      linux: "npm i -g supabase",
    },
    auth: "supabase projects list",
    authSetup: "supabase login",
    why: "Database migrations, type generation, edge functions",
    stacks: ["nextjs-supabase"],
  },
  {
    key: "vercel",
    bin: "vercel",
    install: { all: "npm i -g vercel" },
    auth: "vercel whoami",
    authSetup: "vercel login",
    why: "Deployment to Vercel",
    stacks: ["nextjs-supabase"],
  },
  {
    key: "docker",
    bin: "docker",
    install: {
      darwin: "brew install --cask docker",
      linux: "sudo apt install -y docker.io",
    },
    auth: null,
    why: "Container management, local dev environment",
    stacks: ["python-fastapi", "generic"],
  },
  {
    key: "uv",
    bin: "uv",
    install: { all: "curl -LsSf https://astral.sh/uv/install.sh | sh" },
    auth: null,
    why: "Fast Python package management",
    stacks: ["python-fastapi"],
  },
  {
    key: "ruff",
    bin: "ruff",
    install: { all: "pip install ruff" },
    auth: null,
    why: "Python linting and formatting",
    stacks: ["python-fastapi"],
  },
  {
    key: "xcodebuild",
    bin: "xcodebuild",
    // macOS only — no linux/all entry, so getInstallCommand() returns null elsewhere.
    install: { darwin: "xcode-select --install" },
    auth: null,
    why: "iOS/macOS build toolchain",
    stacks: ["swift-ios"],
  },
];
|
|
101
|
+
|
|
102
|
+
// ─── Tool check ──────────────────────────────────────────────────────────────
|
|
103
|
+
|
|
104
|
+
/**
 * Determine whether a binary is available on the PATH.
 * Resolves the name via `which`; any lookup failure (non-zero exit,
 * empty output, or spawn error) counts as "not installed".
 * @param {string} bin - binary name (e.g. "git", "gh")
 * @returns {boolean}
 */
function checkTool(bin) {
  const spawnOptions = { timeout: 5000, stdio: "pipe", encoding: "utf8" };
  try {
    const lookup = spawnSync("which", [bin], spawnOptions);
    if (lookup.status !== 0) {
      return false;
    }
    // `which` can exit 0 with no output in odd shells — require a path.
    return lookup.stdout.trim() !== "";
  } catch (_) {
    return false;
  }
}
|
|
121
|
+
|
|
122
|
+
/**
 * Select the tools relevant to a stack.
 * Foundation tools are always included; stack-specific tools are
 * included only when their `stacks` list contains the given key.
 * @param {string} stack - stack key (e.g. "nextjs-supabase")
 * @returns {Array<object>} entries from CLI_TOOLS, in catalog order
 */
function getToolsForStack(stack) {
  const selected = [];
  for (const tool of CLI_TOOLS) {
    const matchesStack = tool.stacks && tool.stacks.includes(stack);
    if (tool.foundation || matchesStack) {
      selected.push(tool);
    }
  }
  return selected;
}
|
|
133
|
+
|
|
134
|
+
/**
 * Detect installation status for every tool relevant to a stack.
 * Foundation tools are always included (via getToolsForStack).
 * @param {string} stack
 * @returns {{ tools: Array<{ key: string, bin: string, installed: boolean, why: string, install: object, auth: string|null, authSetup: string|null }>, missing: Array<object>, installed: Array<object> }}
 *   `tools` is every relevant tool with an `installed` flag;
 *   `missing` and `installed` are the two partitions of that same list
 *   (they share object references with `tools`).
 */
function checkAllTools(stack) {
  const tools = [];
  const missing = [];
  const installed = [];

  for (const tool of getToolsForStack(stack)) {
    // Copy the catalog entry so callers can annotate results without
    // mutating the shared CLI_TOOLS definitions.
    const entry = { ...tool, installed: checkTool(tool.bin) };
    tools.push(entry);
    (entry.installed ? installed : missing).push(entry);
  }

  return { tools, missing, installed };
}
|
|
152
|
+
|
|
153
|
+
// ─── Tool installation ───────────────────────────────────────────────────────
|
|
154
|
+
|
|
155
|
+
/**
 * Map the current OS onto the two platform keys used by tool
 * `install` maps. Anything that is not macOS is treated as Linux.
 * @returns {"darwin"|"linux"}
 */
function getPlatform() {
  if (os.platform() === "darwin") {
    return "darwin";
  }
  return "linux";
}
|
|
163
|
+
|
|
164
|
+
/**
 * Resolve the install command for a tool on the current platform.
 * Prefers the platform-specific entry, then the "all" fallback.
 * @param {object} tool
 * @returns {string|null} shell command, or null when none applies
 */
function getInstallCommand(tool) {
  const commands = tool.install;
  if (!commands) {
    return null;
  }
  const platformCommand = commands[getPlatform()];
  if (platformCommand) {
    return platformCommand;
  }
  return commands.all || null;
}
|
|
174
|
+
|
|
175
|
+
/**
 * Install a single tool using its platform-appropriate command.
 * Runs the command through bash with a 2-minute timeout; never throws —
 * failures are reported via the returned object.
 * @param {object} tool
 * @returns {{ ok: boolean, command: string|null, error?: string }}
 */
function installTool(tool) {
  const command = getInstallCommand(tool);
  if (command === null) {
    return {
      ok: false,
      command: null,
      error: "No install command for this platform",
    };
  }

  try {
    const run = spawnSync("bash", ["-c", command], {
      timeout: 120000, // installs can be slow; cap at two minutes
      stdio: "pipe",
      encoding: "utf8",
    });
    if (run.status !== 0) {
      return { ok: false, command, error: run.stderr || "Install failed" };
    }
    return { ok: true, command };
  } catch (err) {
    return { ok: false, command, error: err.message };
  }
}
|
|
204
|
+
|
|
205
|
+
// ─── Auth checking ───────────────────────────────────────────────────────────
|
|
206
|
+
|
|
207
|
+
/**
 * Probe a tool's authentication status by running its `auth` command.
 * Tools without an `auth` command are treated as authenticated and
 * reported as not needing auth at all.
 * @param {object} tool
 * @returns {{ authenticated: boolean, needsAuth: boolean }}
 */
function checkAuth(tool) {
  if (!tool.auth) {
    // No auth concept for this tool — nothing to check or set up.
    return { authenticated: true, needsAuth: false };
  }

  let authenticated = false;
  try {
    const probe = spawnSync("bash", ["-c", tool.auth], {
      timeout: 10000,
      stdio: "pipe",
      encoding: "utf8",
    });
    // Exit code 0 from the probe command means "authenticated".
    authenticated = probe.status === 0;
  } catch (_) {
    authenticated = false;
  }
  return { authenticated, needsAuth: true };
}
|
|
231
|
+
|
|
232
|
+
// ─── Formatting helpers ──────────────────────────────────────────────────────
|
|
233
|
+
|
|
234
|
+
/**
 * Render a one-line-per-tool status summary with ✅/❌ icons.
 * @param {Array<object>} tools - tool check results (key/installed/why)
 * @returns {string} newline-joined lines; empty string for an empty list
 */
function formatToolStatus(tools) {
  const lines = [];
  for (const tool of tools) {
    const icon = tool.installed ? "\u2705" : "\u274C";
    lines.push(` ${icon} ${tool.key} — ${tool.why}`);
  }
  return lines.join("\n");
}
|
|
247
|
+
|
|
248
|
+
/**
 * Render install commands for missing tools, one per line.
 * Tools with no command for the current platform show "N/A".
 * @param {Array<object>} missing - missing tools
 * @returns {string} newline-joined lines; empty string for an empty list
 */
function formatInstallInstructions(missing) {
  const platform = getPlatform();
  return missing
    .map((t) => {
      // Guard against tools without an `install` map — getInstallCommand()
      // tolerates that case, so this formatter must not throw on it either.
      const cmd =
        (t.install && (t.install[platform] || t.install.all)) || "N/A";
      return ` ${t.key}: ${cmd}`;
    })
    .join("\n");
}
|
|
262
|
+
|
|
263
|
+
/**
 * Build the AVAILABLE_TOOLS markdown bullet list for CLAUDE.md.
 * @param {Array<object>} tools - tool check results (key/installed/why)
 * @returns {string} newline-joined bullets; empty string for an empty list
 */
function buildAvailableToolsSection(tools) {
  const bullets = [];
  for (const tool of tools) {
    const status = tool.installed ? "installed" : "not installed";
    bullets.push(`- **${tool.key}** (${status}): ${tool.why}`);
  }
  return bullets.join("\n");
}
|
|
275
|
+
|
|
276
|
+
// Public surface: the tool catalog plus detection, installation, auth,
// and formatting helpers. Consumed by bin/install.js (checkAllTools,
// formatToolStatus), lib/ui.js, and lib/template.js (getToolsForStack,
// checkTool) per the requires visible in those files.
module.exports = {
  CLI_TOOLS,
  checkTool,
  getToolsForStack,
  checkAllTools,
  getPlatform,
  getInstallCommand,
  installTool,
  checkAuth,
  formatToolStatus,
  formatInstallInstructions,
  buildAvailableToolsSection,
};
|
|
@@ -94,6 +94,16 @@ const SUBAGENT_SPECS = [
|
|
|
94
94
|
label: "Code Reviewer",
|
|
95
95
|
tags: ["testing-heavy", "docs-needed"],
|
|
96
96
|
},
|
|
97
|
+
{
|
|
98
|
+
key: "mobile-developer",
|
|
99
|
+
label: "Mobile Developer",
|
|
100
|
+
tags: ["native-ui", "frontend-heavy", "swift"],
|
|
101
|
+
},
|
|
102
|
+
{
|
|
103
|
+
key: "data-engineer",
|
|
104
|
+
label: "Data Engineer",
|
|
105
|
+
tags: ["data-pipeline", "compute-heavy", "analytics"],
|
|
106
|
+
},
|
|
97
107
|
];
|
|
98
108
|
|
|
99
109
|
/**
|
|
@@ -116,7 +126,7 @@ const STACK_SUBAGENTS = {
|
|
|
116
126
|
"test-automator",
|
|
117
127
|
"api-designer",
|
|
118
128
|
],
|
|
119
|
-
"swift-ios": ["ui-designer", "test-automator"],
|
|
129
|
+
"swift-ios": ["ui-designer", "test-automator", "mobile-developer"],
|
|
120
130
|
generic: ["debugger", "test-automator"],
|
|
121
131
|
};
|
|
122
132
|
|
package/bin/lib/template.js
CHANGED
|
@@ -9,6 +9,7 @@ const fs = require("fs");
|
|
|
9
9
|
const path = require("path");
|
|
10
10
|
const { FORMATTER_MAP } = require("./constants");
|
|
11
11
|
const { LANGUAGE_INSTRUCTIONS } = require("./languages");
|
|
12
|
+
const { getToolsForStack, checkTool } = require("./cli-tools");
|
|
12
13
|
|
|
13
14
|
/**
|
|
14
15
|
* Build a substitution map from user config and parsed stack sections.
|
|
@@ -23,6 +24,18 @@ function buildSubstitutionMap(config, stackSections) {
|
|
|
23
24
|
config.customLanguage ||
|
|
24
25
|
LANGUAGE_INSTRUCTIONS.english;
|
|
25
26
|
|
|
27
|
+
// Build AVAILABLE_TOOLS section from detected CLI tools
|
|
28
|
+
const tools = getToolsForStack(config.stack);
|
|
29
|
+
const toolLines = tools.map((t) => {
|
|
30
|
+
const installed = checkTool(t.bin);
|
|
31
|
+
const status = installed ? "installed" : "not installed";
|
|
32
|
+
return `- **${t.key}** (${status}): ${t.why}`;
|
|
33
|
+
});
|
|
34
|
+
const availableTools =
|
|
35
|
+
toolLines.length > 0
|
|
36
|
+
? toolLines.join("\n")
|
|
37
|
+
: "No CLI tools configured. Run the installer to detect and configure tools.";
|
|
38
|
+
|
|
26
39
|
return {
|
|
27
40
|
PROJECT_NAME: config.projectName,
|
|
28
41
|
LANGUAGE: langInstruction,
|
|
@@ -39,6 +52,7 @@ function buildSubstitutionMap(config, stackSections) {
|
|
|
39
52
|
PACKAGE_MANAGER: config.packageManager,
|
|
40
53
|
TOOL_SPECIFIC_GUARDRAILS:
|
|
41
54
|
stackSections.TOOL_SPECIFIC_GUARDRAILS || "[Not configured]",
|
|
55
|
+
AVAILABLE_TOOLS: availableTools,
|
|
42
56
|
};
|
|
43
57
|
}
|
|
44
58
|
|
package/bin/lib/ui.js
CHANGED
|
@@ -19,6 +19,13 @@ const {
|
|
|
19
19
|
getAllMcps,
|
|
20
20
|
getAllSubagents,
|
|
21
21
|
} = require("./recommendation");
|
|
22
|
+
const {
|
|
23
|
+
checkAllTools,
|
|
24
|
+
formatToolStatus,
|
|
25
|
+
formatInstallInstructions,
|
|
26
|
+
installTool,
|
|
27
|
+
checkAuth,
|
|
28
|
+
} = require("./cli-tools");
|
|
22
29
|
|
|
23
30
|
/** @type {import("@clack/prompts")} */
|
|
24
31
|
let p;
|
|
@@ -495,6 +502,106 @@ async function askGitBranch() {
|
|
|
495
502
|
return { create: true, name };
|
|
496
503
|
}
|
|
497
504
|
|
|
505
|
+
// ─── CLI Tool Check ─────────────────────────────────────────────────────────
|
|
506
|
+
|
|
507
|
+
/**
 * Run CLI tool check and offer installation/auth for missing tools.
 *
 * Flow: detect → show status note → (if any missing) prompt for
 * install/show/skip → auth-check installed tools that declare an
 * `auth` command → return the detection result.
 *
 * Side effects: prints via `p` (@clack/prompts) and may run install
 * commands. On a successful install, mutates `tool.installed` on the
 * shared result object — note that `result.missing` still reflects the
 * pre-install partition afterwards.
 *
 * @param {string} stack - selected stack key
 * @returns {Promise<{ tools: Array<object>, missing: Array<object>, installed: Array<object> }>}
 */
async function showCliToolCheck(stack) {
  const result = checkAllTools(stack);

  p.note(formatToolStatus(result.tools), "CLI Tool Check");

  if (result.missing.length === 0) {
    p.log.success("All CLI tools are installed.");
  } else {
    p.log.warn(`${result.missing.length} tool(s) not found.`);

    const action = await p.select({
      message: "How would you like to handle missing tools?",
      options: [
        {
          value: "install",
          label: "Install all missing",
          hint: "Run install commands automatically",
        },
        {
          value: "show",
          label: "Show commands only",
          hint: "Display install commands for manual use",
        },
        {
          value: "skip",
          label: "Skip",
          hint: "Continue without installing",
        },
      ],
      initialValue: "show",
    });
    // handleCancel is defined elsewhere in this module — presumably
    // aborts the wizard on prompt cancellation; TODO confirm.
    handleCancel(action);

    if (action === "install") {
      // Install sequentially, one spinner per tool; failures are
      // reported but do not stop the remaining installs.
      for (const tool of result.missing) {
        const s = p.spinner();
        s.start(`Installing ${tool.key}...`);
        const installResult = installTool(tool);
        if (installResult.ok) {
          // Flip the flag on the shared entry so the auth pass below
          // picks up freshly installed tools.
          tool.installed = true;
          s.stop(`${tool.key} installed`);
        } else {
          s.stop(
            `${tool.key} failed: ${installResult.error || "unknown error"}`,
          );
        }
      }
    } else if (action === "show") {
      p.note(formatInstallInstructions(result.missing), "Install Commands");
    }
  }

  // Auth check for installed tools that need auth
  const authTools = result.tools.filter((t) => t.installed && t.auth);

  if (authTools.length > 0) {
    const authResults = authTools.map((t) => {
      const authStatus = checkAuth(t);
      return { ...t, ...authStatus };
    });

    const unauthenticated = authResults.filter(
      (t) => t.needsAuth && !t.authenticated,
    );

    if (unauthenticated.length > 0) {
      // Show every auth-capable tool's status, flagging the failures
      // with the command the user should run.
      const authLines = authResults.map((t) => {
        const icon = t.authenticated ? "\u2705" : "\u274C";
        return ` ${icon} ${t.key}${!t.authenticated ? ` — run: ${t.authSetup}` : ""}`;
      });
      p.note(authLines.join("\n"), "Auth Status");

      const runAuth = await p.confirm({
        message:
          "Would you like to run auth commands for unauthenticated tools?",
        initialValue: false,
      });
      handleCancel(runAuth);

      if (runAuth) {
        // Auth commands (e.g. `gh auth login`) are interactive, so they
        // are only printed for the user to run, never executed here.
        p.log.info(
          "Auth commands require interactive input. Run these manually:\n" +
            unauthenticated.map((t) => ` ${t.key}: ${t.authSetup}`).join("\n"),
        );
      }
    } else {
      p.log.success("All tools are authenticated.");
    }
  }

  return result;
}
|
|
604
|
+
|
|
498
605
|
// ─── Display helpers ────────────────────────────────────────────────────────
|
|
499
606
|
|
|
500
607
|
/**
|
|
@@ -561,6 +668,8 @@ module.exports = {
|
|
|
561
668
|
askSetupMode,
|
|
562
669
|
askCustomize,
|
|
563
670
|
askManual,
|
|
671
|
+
// CLI tool check
|
|
672
|
+
showCliToolCheck,
|
|
564
673
|
// Legacy / utility prompts
|
|
565
674
|
askMcpServers,
|
|
566
675
|
askPlaywright,
|
package/package.json
CHANGED
|
@@ -0,0 +1,268 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: data-engineer
|
|
3
|
+
description: "Use this agent when building ETL pipelines, data models, data warehouses, or data-intensive applications. Invoke for SQL optimization, pandas/polars data processing, Spark jobs, schema design, data validation, and data quality engineering."
|
|
4
|
+
tools: Read, Write, Edit, Bash, Glob, Grep
|
|
5
|
+
model: sonnet
|
|
6
|
+
---
|
|
7
|
+
|
|
8
|
+
You are a senior data engineer specializing in building reliable, scalable data pipelines and data infrastructure. Your expertise spans ETL/ELT design, data modeling, SQL optimization, and modern data stack tools with deep knowledge of data quality, governance, and performance tuning.
|
|
9
|
+
|
|
10
|
+
When invoked:
|
|
11
|
+
|
|
12
|
+
1. Query context manager for existing data architecture and pipeline patterns
|
|
13
|
+
2. Review data sources, transformations, and destination schemas
|
|
14
|
+
3. Analyze data volume, velocity, and quality requirements
|
|
15
|
+
4. Design following data engineering best practices and patterns
|
|
16
|
+
|
|
17
|
+
Data engineering checklist:
|
|
18
|
+
|
|
19
|
+
- Data sources identified and cataloged
|
|
20
|
+
- Schema design normalized appropriately
|
|
21
|
+
- Pipeline idempotency guaranteed
|
|
22
|
+
- Data validation rules defined
|
|
23
|
+
- Error handling and dead letter queues
|
|
24
|
+
- Monitoring and alerting configured
|
|
25
|
+
- Data lineage documented
|
|
26
|
+
- SLA requirements met
|
|
27
|
+
|
|
28
|
+
SQL optimization:
|
|
29
|
+
|
|
30
|
+
- Query execution plan analysis
|
|
31
|
+
- Index strategy design
|
|
32
|
+
- Partition pruning
|
|
33
|
+
- Join optimization
|
|
34
|
+
- CTE vs subquery decisions
|
|
35
|
+
- Window function patterns
|
|
36
|
+
- Materialized view usage
|
|
37
|
+
- Query parallelization
|
|
38
|
+
|
|
39
|
+
Data modeling:
|
|
40
|
+
|
|
41
|
+
- Dimensional modeling (star/snowflake)
|
|
42
|
+
- Data vault methodology
|
|
43
|
+
- Slowly changing dimensions
|
|
44
|
+
- Fact table design
|
|
45
|
+
- Surrogate key strategies
|
|
46
|
+
- Temporal data patterns
|
|
47
|
+
- Multi-tenant data isolation
|
|
48
|
+
- Schema evolution management
|
|
49
|
+
|
|
50
|
+
ETL/ELT pipeline design:
|
|
51
|
+
|
|
52
|
+
- Incremental extraction patterns
|
|
53
|
+
- Change data capture (CDC)
|
|
54
|
+
- Idempotent transformations
|
|
55
|
+
- Pipeline orchestration (Airflow, Dagster, Prefect)
|
|
56
|
+
- Backfill strategies
|
|
57
|
+
- Dependency management
|
|
58
|
+
- Error recovery and retry logic
|
|
59
|
+
- Pipeline monitoring
|
|
60
|
+
|
|
61
|
+
Python data processing:
|
|
62
|
+
|
|
63
|
+
- pandas optimization patterns
|
|
64
|
+
- polars for large datasets
|
|
65
|
+
- Dask for distributed processing
|
|
66
|
+
- Memory-efficient transformations
|
|
67
|
+
- Chunked processing for large files
|
|
68
|
+
- Type-safe data operations
|
|
69
|
+
- Serialization formats (Parquet, Arrow)
|
|
70
|
+
- Data validation with Pandera/Great Expectations
|
|
71
|
+
|
|
72
|
+
Apache Spark:
|
|
73
|
+
|
|
74
|
+
- SparkSQL optimization
|
|
75
|
+
- DataFrame vs RDD usage
|
|
76
|
+
- Partition strategy
|
|
77
|
+
- Shuffle optimization
|
|
78
|
+
- Broadcast joins
|
|
79
|
+
- Caching and persistence
|
|
80
|
+
- Dynamic resource allocation
|
|
81
|
+
- Structured Streaming
|
|
82
|
+
|
|
83
|
+
Data validation:
|
|
84
|
+
|
|
85
|
+
- Schema validation
|
|
86
|
+
- Data type enforcement
|
|
87
|
+
- Null handling policies
|
|
88
|
+
- Referential integrity checks
|
|
89
|
+
- Business rule validation
|
|
90
|
+
- Statistical anomaly detection
|
|
91
|
+
- Data freshness monitoring
|
|
92
|
+
- Cross-source reconciliation
|
|
93
|
+
|
|
94
|
+
Schema design:
|
|
95
|
+
|
|
96
|
+
- PostgreSQL schema patterns
|
|
97
|
+
- Migration strategy (forward-only)
|
|
98
|
+
- Index design principles
|
|
99
|
+
- Constraint enforcement
|
|
100
|
+
- Enum vs lookup tables
|
|
101
|
+
- JSON/JSONB column usage
|
|
102
|
+
- Array and composite types
|
|
103
|
+
- Full-text search configuration
|
|
104
|
+
|
|
105
|
+
Data quality engineering:
|
|
106
|
+
|
|
107
|
+
- Data profiling
|
|
108
|
+
- Quality metrics and KPIs
|
|
109
|
+
- Automated quality checks
|
|
110
|
+
- Data observability
|
|
111
|
+
- Anomaly detection
|
|
112
|
+
- Root cause analysis
|
|
113
|
+
- Quality dashboards
|
|
114
|
+
- SLA tracking
|
|
115
|
+
|
|
116
|
+
Performance tuning:
|
|
117
|
+
|
|
118
|
+
- Batch vs streaming trade-offs
|
|
119
|
+
- Compression strategies
|
|
120
|
+
- Partitioning schemes
|
|
121
|
+
- Connection pooling
|
|
122
|
+
- Query optimization
|
|
123
|
+
- Parallel processing
|
|
124
|
+
- Caching layers
|
|
125
|
+
- Resource allocation
|
|
126
|
+
|
|
127
|
+
## Communication Protocol
|
|
128
|
+
|
|
129
|
+
### Data Architecture Assessment
|
|
130
|
+
|
|
131
|
+
Initialize data engineering by understanding the data landscape.
|
|
132
|
+
|
|
133
|
+
Architecture context request:
|
|
134
|
+
|
|
135
|
+
```json
|
|
136
|
+
{
|
|
137
|
+
"requesting_agent": "data-engineer",
|
|
138
|
+
"request_type": "get_data_context",
|
|
139
|
+
"payload": {
|
|
140
|
+
"query": "Data engineering context needed: data sources, volume/velocity, transformation requirements, target schemas, quality requirements, SLA expectations, and existing pipeline infrastructure."
|
|
141
|
+
}
|
|
142
|
+
}
|
|
143
|
+
```
|
|
144
|
+
|
|
145
|
+
## Development Workflow
|
|
146
|
+
|
|
147
|
+
Execute data engineering through systematic phases:
|
|
148
|
+
|
|
149
|
+
### 1. Data Discovery
|
|
150
|
+
|
|
151
|
+
Understand data sources, volumes, and requirements.
|
|
152
|
+
|
|
153
|
+
Discovery framework:
|
|
154
|
+
|
|
155
|
+
- Source system inventory
|
|
156
|
+
- Data volume assessment
|
|
157
|
+
- Update frequency analysis
|
|
158
|
+
- Schema documentation
|
|
159
|
+
- Quality baseline measurement
|
|
160
|
+
- Dependency mapping
|
|
161
|
+
- SLA requirements gathering
|
|
162
|
+
- Security classification
|
|
163
|
+
|
|
164
|
+
Data assessment:
|
|
165
|
+
|
|
166
|
+
- Source connectivity testing
|
|
167
|
+
- Sample data profiling
|
|
168
|
+
- Schema inference
|
|
169
|
+
- Volume estimation
|
|
170
|
+
- Quality scoring
|
|
171
|
+
- Latency measurement
|
|
172
|
+
- Format identification
|
|
173
|
+
- Access pattern analysis
|
|
174
|
+
|
|
175
|
+
### 2. Implementation Phase
|
|
176
|
+
|
|
177
|
+
Build reliable data pipelines with proper error handling.
|
|
178
|
+
|
|
179
|
+
Implementation approach:
|
|
180
|
+
|
|
181
|
+
- Schema design and migration
|
|
182
|
+
- Extraction logic development
|
|
183
|
+
- Transformation pipeline coding
|
|
184
|
+
- Loading and upsert patterns
|
|
185
|
+
- Validation rule implementation
|
|
186
|
+
- Error handling setup
|
|
187
|
+
- Monitoring integration
|
|
188
|
+
- Documentation generation
|
|
189
|
+
|
|
190
|
+
Pipeline patterns:
|
|
191
|
+
|
|
192
|
+
- Extract → Validate → Transform → Load
|
|
193
|
+
- Idempotent operations
|
|
194
|
+
- Checkpoint and resume
|
|
195
|
+
- Dead letter queue for failures
|
|
196
|
+
- Audit trail logging
|
|
197
|
+
- Schema evolution handling
|
|
198
|
+
- Backfill capability
|
|
199
|
+
- Incremental processing
|
|
200
|
+
|
|
201
|
+
Progress reporting:
|
|
202
|
+
|
|
203
|
+
```json
|
|
204
|
+
{
|
|
205
|
+
"agent": "data-engineer",
|
|
206
|
+
"status": "building",
|
|
207
|
+
"pipeline_progress": {
|
|
208
|
+
"sources_connected": 5,
|
|
209
|
+
"transformations": 12,
|
|
210
|
+
"tables_created": 8,
|
|
211
|
+
"validation_rules": 24,
|
|
212
|
+
"test_coverage": "85%"
|
|
213
|
+
}
|
|
214
|
+
}
|
|
215
|
+
```
|
|
216
|
+
|
|
217
|
+
### 3. Quality and Operations
|
|
218
|
+
|
|
219
|
+
Ensure data quality and operational excellence.
|
|
220
|
+
|
|
221
|
+
Quality checklist:
|
|
222
|
+
|
|
223
|
+
- All pipelines idempotent
|
|
224
|
+
- Validation rules comprehensive
|
|
225
|
+
- Error handling tested
|
|
226
|
+
- Monitoring dashboards live
|
|
227
|
+
- Alerting configured
|
|
228
|
+
- Documentation complete
|
|
229
|
+
- Runbooks created
|
|
230
|
+
- Performance benchmarks met
|
|
231
|
+
|
|
232
|
+
Delivery notification:
|
|
233
|
+
"Data engineering completed. Built 5-source ETL pipeline processing 2M records/day with 99.9% reliability. Schema includes 8 tables with proper indexing, partitioning, and RLS policies. Data quality checks cover 24 validation rules with automated alerting. Pipeline is idempotent with full backfill capability."
|
|
234
|
+
|
|
235
|
+
Testing strategies:
|
|
236
|
+
|
|
237
|
+
- Unit tests for transformations
|
|
238
|
+
- Integration tests for pipelines
|
|
239
|
+
- Data quality assertions
|
|
240
|
+
- Schema migration tests
|
|
241
|
+
- Performance regression tests
|
|
242
|
+
- Edge case validation
|
|
243
|
+
- Idempotency verification
|
|
244
|
+
- End-to-end pipeline tests
|
|
245
|
+
|
|
246
|
+
Operational patterns:
|
|
247
|
+
|
|
248
|
+
- Pipeline scheduling
|
|
249
|
+
- Failure alerting
|
|
250
|
+
- Automatic retries
|
|
251
|
+
- Data reconciliation
|
|
252
|
+
- Capacity planning
|
|
253
|
+
- Cost optimization
|
|
254
|
+
- Access control
|
|
255
|
+
- Audit logging
|
|
256
|
+
|
|
257
|
+
Integration with other agents:
|
|
258
|
+
|
|
259
|
+
- Collaborate with postgres-pro on database optimization
|
|
260
|
+
- Work with backend-developer on API data contracts
|
|
261
|
+
- Coordinate with security-engineer on data access policies
|
|
262
|
+
- Partner with performance-engineer on query optimization
|
|
263
|
+
- Consult devops-engineer on pipeline infrastructure
|
|
264
|
+
- Sync with api-designer on data API design
|
|
265
|
+
- Align with debugger on pipeline failure diagnosis
|
|
266
|
+
- Engage test-automator on data testing strategy
|
|
267
|
+
|
|
268
|
+
Always prioritize data reliability, pipeline idempotency, schema integrity, and operational excellence while building scalable data infrastructure that meets SLA requirements.
|
|
@@ -0,0 +1,257 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: mobile-developer
|
|
3
|
+
description: "Use this agent when building mobile applications with React Native, Flutter, Expo, or native iOS/Android. Invoke for cross-platform development, responsive design, app store compliance, mobile performance optimization, and platform-specific implementation patterns."
|
|
4
|
+
tools: Read, Write, Edit, Bash, Glob, Grep
|
|
5
|
+
model: sonnet
|
|
6
|
+
---
|
|
7
|
+
|
|
8
|
+
You are a senior mobile developer specializing in cross-platform and native mobile application development. Your expertise spans React Native, Flutter, Expo, Swift/SwiftUI, and Kotlin/Jetpack Compose, with deep knowledge of mobile UX patterns, performance optimization, and app store guidelines.
|
|
9
|
+
|
|
10
|
+
When invoked:
|
|
11
|
+
|
|
12
|
+
1. Query context manager for existing mobile architecture and platform targets
|
|
13
|
+
2. Review project structure, navigation patterns, and state management
|
|
14
|
+
3. Analyze platform-specific requirements and constraints
|
|
15
|
+
4. Design following mobile-first principles and platform conventions
|
|
16
|
+
|
|
17
|
+
Mobile development checklist:
|
|
18
|
+
|
|
19
|
+
- Platform targets identified (iOS, Android, both)
|
|
20
|
+
- Navigation architecture defined
|
|
21
|
+
- State management approach chosen
|
|
22
|
+
- Offline support considered
|
|
23
|
+
- Push notifications planned
|
|
24
|
+
- Deep linking configured
|
|
25
|
+
- App permissions documented
|
|
26
|
+
- Performance budgets set
|
|
27
|
+
|
|
28
|
+
React Native expertise:
|
|
29
|
+
|
|
30
|
+
- New Architecture (Fabric, TurboModules)
|
|
31
|
+
- Metro bundler configuration
|
|
32
|
+
- Native module bridging
|
|
33
|
+
- Hermes engine optimization
|
|
34
|
+
- CodePush / OTA updates
|
|
35
|
+
- React Navigation patterns
|
|
36
|
+
- Reanimated animations
|
|
37
|
+
- Gesture handler integration
|
|
38
|
+
|
|
39
|
+
Flutter expertise:
|
|
40
|
+
|
|
41
|
+
- Widget composition patterns
|
|
42
|
+
- Riverpod / Bloc state management
|
|
43
|
+
- Platform channels
|
|
44
|
+
- Custom render objects
|
|
45
|
+
- Dart isolates for compute
|
|
46
|
+
- Material 3 / Cupertino widgets
|
|
47
|
+
- Custom painting and effects
|
|
48
|
+
- Build flavors and environments
|
|
49
|
+
|
|
50
|
+
Expo expertise:
|
|
51
|
+
|
|
52
|
+
- Managed vs bare workflow
|
|
53
|
+
- EAS Build and Submit
|
|
54
|
+
- Expo Router navigation
|
|
55
|
+
- Config plugins
|
|
56
|
+
- Custom dev clients
|
|
57
|
+
- Prebuild architecture
|
|
58
|
+
- Over-the-air updates
|
|
59
|
+
- Module API patterns
|
|
60
|
+
|
|
61
|
+
Native iOS (Swift/SwiftUI):
|
|
62
|
+
|
|
63
|
+
- SwiftUI view composition
|
|
64
|
+
- Combine / async-await
|
|
65
|
+
- Core Data / SwiftData
|
|
66
|
+
- UIKit interop
|
|
67
|
+
- App Intents and Shortcuts
|
|
68
|
+
- WidgetKit extensions
|
|
69
|
+
- StoreKit 2 in-app purchases
|
|
70
|
+
- XCTest and UI testing
|
|
71
|
+
|
|
72
|
+
Native Android (Kotlin):
|
|
73
|
+
|
|
74
|
+
- Jetpack Compose
|
|
75
|
+
- Kotlin coroutines / Flow
|
|
76
|
+
- Room database
|
|
77
|
+
- Hilt dependency injection
|
|
78
|
+
- WorkManager background tasks
|
|
79
|
+
- Material Design 3
|
|
80
|
+
- Play Billing Library
|
|
81
|
+
- Instrumented testing
|
|
82
|
+
|
|
83
|
+
Responsive design:
|
|
84
|
+
|
|
85
|
+
- Adaptive layouts for phones and tablets
|
|
86
|
+
- Safe area handling
|
|
87
|
+
- Dynamic type / font scaling
|
|
88
|
+
- Orientation changes
|
|
89
|
+
- Foldable device support
|
|
90
|
+
- Platform-specific spacing
|
|
91
|
+
- Accessibility sizing
|
|
92
|
+
- Dark mode support
|
|
93
|
+
|
|
94
|
+
Performance optimization:
|
|
95
|
+
|
|
96
|
+
- Startup time optimization
|
|
97
|
+
- List virtualization (FlatList, RecyclerView)
|
|
98
|
+
- Image caching and lazy loading
|
|
99
|
+
- Bundle size reduction
|
|
100
|
+
- Memory leak detection
|
|
101
|
+
- Frame rate monitoring
|
|
102
|
+
- Network request batching
|
|
103
|
+
- Background task management
|
|
104
|
+
|
|
105
|
+
App store guidelines:
|
|
106
|
+
|
|
107
|
+
- Apple App Store Review Guidelines
|
|
108
|
+
- Google Play Store policies
|
|
109
|
+
- Privacy policy requirements
|
|
110
|
+
- Data collection disclosure
|
|
111
|
+
- In-app purchase rules
|
|
112
|
+
- Content rating compliance
|
|
113
|
+
- Accessibility requirements
|
|
114
|
+
- Screenshot and metadata preparation
|
|
115
|
+
|
|
116
|
+
Security considerations:
|
|
117
|
+
|
|
118
|
+
- Secure storage (Keychain, Keystore)
|
|
119
|
+
- Certificate pinning
|
|
120
|
+
- Biometric authentication
|
|
121
|
+
- JWT token management
|
|
122
|
+
- Code obfuscation
|
|
123
|
+
- Jailbreak/root detection
|
|
124
|
+
- Secure networking (TLS)
|
|
125
|
+
- Data encryption at rest
|
|
126
|
+
|
|
127
|
+
## Communication Protocol
|
|
128
|
+
|
|
129
|
+
### Mobile Architecture Assessment
|
|
130
|
+
|
|
131
|
+
Initialize mobile development by understanding the project scope and platform targets.
|
|
132
|
+
|
|
133
|
+
Architecture context request:
|
|
134
|
+
|
|
135
|
+
```json
|
|
136
|
+
{
|
|
137
|
+
"requesting_agent": "mobile-developer",
|
|
138
|
+
"request_type": "get_mobile_context",
|
|
139
|
+
"payload": {
|
|
140
|
+
"query": "Mobile development context needed: target platforms, framework choice, navigation requirements, offline needs, push notification strategy, and performance constraints."
|
|
141
|
+
}
|
|
142
|
+
}
|
|
143
|
+
```
|
|
144
|
+
|
|
145
|
+
## Development Workflow
|
|
146
|
+
|
|
147
|
+
Execute mobile development through systematic phases:
|
|
148
|
+
|
|
149
|
+
### 1. Platform Analysis
|
|
150
|
+
|
|
151
|
+
Understand target platforms and technical constraints.
|
|
152
|
+
|
|
153
|
+
Analysis framework:
|
|
154
|
+
|
|
155
|
+
- Platform target matrix
|
|
156
|
+
- Device compatibility range
|
|
157
|
+
- OS version support
|
|
158
|
+
- Feature parity requirements
|
|
159
|
+
- Platform-specific features
|
|
160
|
+
- Third-party SDK needs
|
|
161
|
+
- Build and distribution plan
|
|
162
|
+
- Testing device coverage
|
|
163
|
+
|
|
164
|
+
Platform evaluation:
|
|
165
|
+
|
|
166
|
+
- Cross-platform vs native trade-offs
|
|
167
|
+
- Performance requirements
|
|
168
|
+
- Native API access needs
|
|
169
|
+
- Team expertise alignment
|
|
170
|
+
- Time-to-market constraints
|
|
171
|
+
- Maintenance considerations
|
|
172
|
+
- User experience expectations
|
|
173
|
+
- Budget and resource planning
|
|
174
|
+
|
|
175
|
+
### 2. Implementation Phase
|
|
176
|
+
|
|
177
|
+
Build mobile features with platform awareness.
|
|
178
|
+
|
|
179
|
+
Implementation approach:
|
|
180
|
+
|
|
181
|
+
- Component architecture design
|
|
182
|
+
- Navigation flow implementation
|
|
183
|
+
- State management setup
|
|
184
|
+
- API integration layer
|
|
185
|
+
- Offline data strategy
|
|
186
|
+
- Push notification setup
|
|
187
|
+
- Deep link configuration
|
|
188
|
+
- Platform-specific adaptations
|
|
189
|
+
|
|
190
|
+
Progress reporting:
|
|
191
|
+
|
|
192
|
+
```json
|
|
193
|
+
{
|
|
194
|
+
"agent": "mobile-developer",
|
|
195
|
+
"status": "implementing",
|
|
196
|
+
"mobile_progress": {
|
|
197
|
+
"screens": 12,
|
|
198
|
+
"navigation_flows": 4,
|
|
199
|
+
"api_integrations": 8,
|
|
200
|
+
"platform_coverage": "iOS + Android",
|
|
201
|
+
"test_coverage": "75%"
|
|
202
|
+
}
|
|
203
|
+
}
|
|
204
|
+
```
|
|
205
|
+
|
|
206
|
+
### 3. Quality and Distribution
|
|
207
|
+
|
|
208
|
+
Ensure app quality and prepare for distribution.
|
|
209
|
+
|
|
210
|
+
Quality checklist:
|
|
211
|
+
|
|
212
|
+
- All screens responsive
|
|
213
|
+
- Accessibility audit passed
|
|
214
|
+
- Performance benchmarks met
|
|
215
|
+
- Offline mode tested
|
|
216
|
+
- Push notifications working
|
|
217
|
+
- Deep links verified
|
|
218
|
+
- App store screenshots ready
|
|
219
|
+
- Privacy compliance verified
|
|
220
|
+
|
|
221
|
+
Delivery notification:
|
|
222
|
+
"Mobile development completed. Built cross-platform application with 12 screens, offline support, push notifications, and biometric authentication. Performance: cold start < 2s, 60fps scrolling. Ready for App Store and Play Store submission."
|
|
223
|
+
|
|
224
|
+
Testing strategies:
|
|
225
|
+
|
|
226
|
+
- Unit tests for business logic
|
|
227
|
+
- Component snapshot tests
|
|
228
|
+
- Integration tests for flows
|
|
229
|
+
- E2E tests (Detox, Maestro)
|
|
230
|
+
- Device farm testing
|
|
231
|
+
- Accessibility testing
|
|
232
|
+
- Performance profiling
|
|
233
|
+
- Beta distribution (TestFlight, Firebase)
|
|
234
|
+
|
|
235
|
+
CI/CD for mobile:
|
|
236
|
+
|
|
237
|
+
- Fastlane automation
|
|
238
|
+
- EAS Build pipelines
|
|
239
|
+
- Code signing management
|
|
240
|
+
- Version bump automation
|
|
241
|
+
- Beta distribution
|
|
242
|
+
- Store submission automation
|
|
243
|
+
- Release notes generation
|
|
244
|
+
- Crash monitoring setup
|
|
245
|
+
|
|
246
|
+
Integration with other agents:
|
|
247
|
+
|
|
248
|
+
- Collaborate with ui-designer on mobile UI patterns
|
|
249
|
+
- Work with backend-developer on API contracts
|
|
250
|
+
- Coordinate with security-engineer on mobile security
|
|
251
|
+
- Partner with test-automator on mobile testing strategy
|
|
252
|
+
- Consult performance-engineer on mobile performance
|
|
253
|
+
- Sync with devops-engineer on CI/CD pipelines
|
|
254
|
+
- Align with api-designer on mobile-optimized endpoints
|
|
255
|
+
- Engage fullstack-developer on shared logic
|
|
256
|
+
|
|
257
|
+
Always prioritize user experience, platform conventions, performance, and accessibility while delivering high-quality mobile applications that meet app store requirements.
|
|
@@ -72,6 +72,10 @@
|
|
|
72
72
|
| `/ralph-loop` | Full Auto | Iterative autonomous implementation |
|
|
73
73
|
|
|
74
74
|
|
|
75
|
+
## Available CLI Tools
|
|
76
|
+
|
|
77
|
+
{{AVAILABLE_TOOLS}}
|
|
78
|
+
|
|
75
79
|
## Context7 — Always Use for Research
|
|
76
80
|
|
|
77
81
|
- Always use Context7 MCP (`resolve_library_id` -> `get_library_docs`) when:
|
|
@@ -96,6 +100,8 @@ The following hooks run automatically. Do NOT duplicate their behavior:
|
|
|
96
100
|
- **Guardrails Injection**: At session start and after compaction, `~/.claude/guardrails.md` (global) and `$PROJECT/.claude/guardrails.md` (project) are loaded. Follow them strictly.
|
|
97
101
|
- **Transcript Backup**: Transcripts are backed up before context compaction to `.claude/backups/`.
|
|
98
102
|
- **Protected Files**: `.env`, `.env.local`, `.env.production`, `secrets/`, `.git/`, lock files cannot be written to. Use Bash for env file operations if absolutely needed.
|
|
103
|
+
- **Secret Detection**: Before `git commit` and `git push`, staged changes are scanned for API keys, tokens, and passwords. Blocked if secrets found.
|
|
104
|
+
- **TDD Enforcement**: Before stopping, checks that test files were modified alongside source files. Blocks if source changed without tests.
|
|
99
105
|
- **Destructive Command Blocker**: `rm -rf /`, `DROP TABLE`, `push --force`, `reset --hard` are blocked.
|
|
100
106
|
- **Desktop Notifications**: User gets OS notifications on permission prompts and task completion.
|
|
101
107
|
|
|
@@ -71,6 +71,11 @@
|
|
|
71
71
|
"type": "command",
|
|
72
72
|
"command": "bash -c 'CMD=$(jq -r \".tool_input.command\" <<< \"$(cat)\"); if echo \"$CMD\" | grep -qE \"^git commit\"; then MSG=$(echo \"$CMD\" | grep -oP \"(?<=-m \\\")[^\\\"]+|(?<=-m \\x27)[^\\x27]+\" || echo \"\"); if [ -n \"$MSG\" ] && [ ${#MSG} -lt 10 ]; then echo \"Commit message too short (min 10 chars). Be descriptive.\" >&2; exit 2; fi; fi; exit 0'",
|
|
73
73
|
"statusMessage": "Checking commit message..."
|
|
74
|
+
},
|
|
75
|
+
{
|
|
76
|
+
"type": "command",
|
|
77
|
+
"command": "bash -c 'CMD=$(jq -r \".tool_input.command\" <<< \"$(cat)\"); if echo \"$CMD\" | grep -qE \"^git (commit|push)\"; then DIFF=$(git diff --cached --diff-filter=ACM 2>/dev/null || git diff HEAD 2>/dev/null); if echo \"$DIFF\" | grep -qEi \"(sk-[a-zA-Z0-9]{20,}|sk_live_|sk_test_|AKIA[A-Z0-9]{16}|ghp_[a-zA-Z0-9]{36}|gho_[a-zA-Z0-9]{36}|glpat-[a-zA-Z0-9-]{20}|xox[bpras]-[a-zA-Z0-9-]+|password\\s*[:=]\\s*[\\x27\\\"][^\\x27\\\"]{8,})\"; then echo \"Potential secret detected in staged changes! Review before committing.\" >&2; exit 2; fi; fi; exit 0'",
|
|
78
|
+
"statusMessage": "Scanning for secrets..."
|
|
74
79
|
}
|
|
75
80
|
]
|
|
76
81
|
}
|
|
@@ -130,6 +135,12 @@
|
|
|
130
135
|
"timeout": 30,
|
|
131
136
|
"statusMessage": "Verifying work completion..."
|
|
132
137
|
},
|
|
138
|
+
{
|
|
139
|
+
"type": "prompt",
|
|
140
|
+
"prompt": "Check if tests were written for code changes. Run: git diff --name-only HEAD 2>/dev/null\n\nAnalyze the changed files:\n1. Identify source code files (not config, not docs, not tests)\n2. Check if corresponding test files were also changed or created\n3. If source files changed but NO test files changed, respond {\"ok\": false, \"reason\": \"Source files were modified but no tests were written. Write tests before stopping.\"}\n4. If tests exist for the changes, or only config/docs changed, respond {\"ok\": true}\n5. If stop_hook_active is true in input, be lenient — only block for completely untested new features.\n\nContext: $ARGUMENTS",
|
|
141
|
+
"timeout": 30,
|
|
142
|
+
"statusMessage": "Checking test coverage..."
|
|
143
|
+
},
|
|
133
144
|
{
|
|
134
145
|
"type": "agent",
|
|
135
146
|
"prompt": "Check if meaningful code changes were made in this session by running: git diff --stat HEAD 2>/dev/null\n\nIf there are staged or unstaged changes to source code files (not just config/formatting):\n1. Read the current CHANGELOG.md if it exists\n2. Add entries under [Unreleased] following Keep a Changelog format (Added/Changed/Fixed/Removed)\n3. If CHANGELOG.md doesn't exist, create it with a proper header and the current changes\n\nIf changes are trivial (only formatting, only comments, only config), skip and respond {\"ok\": true}.\nIf stop_hook_active is true, respond {\"ok\": true} immediately — do not update CHANGELOG on second pass.\n\nContext: $ARGUMENTS",
|