fenchurch 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +58 -0
- package/dist/ai-config.json +73 -0
- package/dist/index.d.ts +5 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +100 -0
- package/dist/index.js.map +1 -0
- package/dist/prompts/instructions.md +218 -0
- package/dist/prompts/memory-bank/atomic-task-planning.md +82 -0
- package/dist/prompts/memory-bank/test-driven-development.md +122 -0
- package/dist/sync.d.ts +14 -0
- package/dist/sync.d.ts.map +1 -0
- package/dist/sync.js +107 -0
- package/dist/sync.js.map +1 -0
- package/package.json +49 -0
package/README.md
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
1
|
+
# Fenchurch
|
|
2
|
+
|
|
3
|
+
A CLI tool to initialize projects with AI tooling and prompts. This is very much ME opinionated and always changing, but you are welcome to use it.
|
|
4
|
+
|
|
5
|
+
## Installation
|
|
6
|
+
|
|
7
|
+
```bash
|
|
8
|
+
npm install -g fenchurch
|
|
9
|
+
```
|
|
10
|
+
|
|
11
|
+
## Usage
|
|
12
|
+
|
|
13
|
+
Run fenchurch in any directory where you want to set up AI tooling:
|
|
14
|
+
|
|
15
|
+
```bash
|
|
16
|
+
fenchurch
|
|
17
|
+
```
|
|
18
|
+
|
|
19
|
+
The tool will:
|
|
20
|
+
|
|
21
|
+
1. Show a list of available AI tools
|
|
22
|
+
2. Let you select which ones to enable
|
|
23
|
+
3. Copy prompts and configuration files to your project
|
|
24
|
+
4. Set up the selected AI tools
|
|
25
|
+
|
|
26
|
+
## Supported AI Tools
|
|
27
|
+
|
|
28
|
+
- Antigravity
|
|
29
|
+
- Aider
|
|
30
|
+
- Claude Code
|
|
31
|
+
- Cline
|
|
32
|
+
- ChatGPT Codex
|
|
33
|
+
- Continue
|
|
34
|
+
- GitHub Copilot
|
|
35
|
+
- Cursor
|
|
36
|
+
- Kiro
|
|
37
|
+
- OpenCode
|
|
38
|
+
- Windsurf
|
|
39
|
+
|
|
40
|
+
## Re-running
|
|
41
|
+
|
|
42
|
+
If you run fenchurch again in a directory that already has an `ai-config.json` file, it will ask if you want to use the existing settings or start fresh.
|
|
43
|
+
|
|
44
|
+
## Development
|
|
45
|
+
|
|
46
|
+
```bash
|
|
47
|
+
# Install dependencies
|
|
48
|
+
npm install
|
|
49
|
+
|
|
50
|
+
# Build
|
|
51
|
+
npm run build
|
|
52
|
+
|
|
53
|
+
# Test
|
|
54
|
+
npm test
|
|
55
|
+
|
|
56
|
+
# Lint
|
|
57
|
+
npm run lint
|
|
58
|
+
```
|
|
@@ -0,0 +1,73 @@
|
|
|
1
|
+
[
|
|
2
|
+
{
|
|
3
|
+
"id": "antigravity",
|
|
4
|
+
"name": "Antigravity",
|
|
5
|
+
"dirPath": ".agent/rules",
|
|
6
|
+
"useSourceFilename": true
|
|
7
|
+
},
|
|
8
|
+
{
|
|
9
|
+
"id": "aider",
|
|
10
|
+
"name": "Aider",
|
|
11
|
+
"dirPath": ".aider",
|
|
12
|
+
"useSourceFilename": true
|
|
13
|
+
},
|
|
14
|
+
{
|
|
15
|
+
"id": "claude",
|
|
16
|
+
"name": "Claude Code",
|
|
17
|
+
"targetPath": "CLAUDE.md",
|
|
18
|
+
"dirPath": "",
|
|
19
|
+
"useSourceFilename": false
|
|
20
|
+
},
|
|
21
|
+
{
|
|
22
|
+
"id": "cline",
|
|
23
|
+
"name": "Cline",
|
|
24
|
+
"dirPath": ".clinerules",
|
|
25
|
+
"useSourceFilename": true
|
|
26
|
+
},
|
|
27
|
+
{
|
|
28
|
+
"id": "codex",
|
|
29
|
+
"name": "ChatGPT Codex",
|
|
30
|
+
"targetPath": "AGENTS.md",
|
|
31
|
+
"dirPath": "",
|
|
32
|
+
"useSourceFilename": false
|
|
33
|
+
},
|
|
34
|
+
{
|
|
35
|
+
"id": "continue",
|
|
36
|
+
"name": "Continue",
|
|
37
|
+
"dirPath": ".continue",
|
|
38
|
+
"useSourceFilename": true
|
|
39
|
+
},
|
|
40
|
+
{
|
|
41
|
+
"id": "copilot",
|
|
42
|
+
"name": "GitHub Copilot",
|
|
43
|
+
"targetPath": ".github/copilot-instructions.md",
|
|
44
|
+
"dirPath": ".github",
|
|
45
|
+
"useSourceFilename": false
|
|
46
|
+
},
|
|
47
|
+
{
|
|
48
|
+
"id": "cursor",
|
|
49
|
+
"name": "Cursor",
|
|
50
|
+
"dirPath": ".cursor/rules",
|
|
51
|
+
"useSourceFilename": true
|
|
52
|
+
},
|
|
53
|
+
{
|
|
54
|
+
"id": "kiro",
|
|
55
|
+
"name": "Kiro",
|
|
56
|
+
"targetPath": "AGENTS.md",
|
|
57
|
+
"dirPath": "",
|
|
58
|
+
"useSourceFilename": false
|
|
59
|
+
},
|
|
60
|
+
{
|
|
61
|
+
"id": "opencode",
|
|
62
|
+
"name": "OpenCode",
|
|
63
|
+
"targetPath": "AGENTS.md",
|
|
64
|
+
"dirPath": "",
|
|
65
|
+
"useSourceFilename": false
|
|
66
|
+
},
|
|
67
|
+
{
|
|
68
|
+
"id": "windsurf",
|
|
69
|
+
"name": "Windsurf",
|
|
70
|
+
"dirPath": ".windsurf/rules",
|
|
71
|
+
"useSourceFilename": true
|
|
72
|
+
}
|
|
73
|
+
]
|
package/dist/index.d.ts
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":";AAqBA,QAAA,MAAM,aAAa,GAAU,KAAK,MAAM,EAAE,MAAM,MAAM,kBAcrD,CAAC;AAEF,QAAA,MAAM,IAAI,qBAuFT,CAAC;AAEF,OAAO,EAAE,IAAI,EAAE,aAAa,EAAE,CAAC"}
|
package/dist/index.js
ADDED
|
@@ -0,0 +1,100 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
import fs from "node:fs/promises";
|
|
3
|
+
import path from "node:path";
|
|
4
|
+
import { fileURLToPath } from "node:url";
|
|
5
|
+
import { execSync } from "node:child_process";
|
|
6
|
+
import inquirer from "inquirer";
|
|
7
|
+
import ora from "ora";
|
|
8
|
+
const __filename = fileURLToPath(import.meta.url);
|
|
9
|
+
const __dirname = path.dirname(__filename);
|
|
10
|
+
// Recursively copy every file and subdirectory from sourceDir into targetDir.
// Creates targetDir (and any missing parents) first; entries are copied
// sequentially, directories via recursion and files via fs.copyFile.
const copyDirectory = async (sourceDir, targetDir) => {
    await fs.mkdir(targetDir, { recursive: true });
    for (const entry of await fs.readdir(sourceDir, { withFileTypes: true })) {
        const from = path.join(sourceDir, entry.name);
        const to = path.join(targetDir, entry.name);
        await (entry.isDirectory() ? copyDirectory(from, to) : fs.copyFile(from, to));
    }
};
|
|
24
|
+
// CLI entry point: loads the bundled AI tool definitions, asks the user which
// tools to enable (or reuses a previous selection), copies the bundled prompts
// into the current directory, writes ai-config.json, then runs sync.js to
// materialize the per-tool instruction files.
const main = async () => {
    try {
        // Load the AI tool definitions that ship with this package (next to this file).
        const configPath = path.join(__dirname, "ai-config.json");
        const aiConfigs = JSON.parse(await fs.readFile(configPath, "utf-8"));
        // If the target directory was initialized before, offer to reuse its selection.
        const existingConfigPath = path.join(process.cwd(), "ai-config.json");
        let selectedAIs = [];
        try {
            await fs.access(existingConfigPath);
            const { useExisting } = await inquirer.prompt([
                {
                    type: "confirm",
                    name: "useExisting",
                    message: "Found existing ai-config.json. Use existing settings?",
                    default: true,
                },
            ]);
            if (useExisting) {
                const existingConfig = JSON.parse(await fs.readFile(existingConfigPath, "utf-8"));
                selectedAIs = existingConfig
                    .filter((config) => config.selected)
                    .map((config) => config.id);
            }
        }
        catch {
            // No existing config (fs.access rejected); fall through to a fresh prompt.
        }
        // No reusable selection: ask the user which tools to enable.
        if (selectedAIs.length === 0) {
            const result = await inquirer.prompt([
                {
                    type: "checkbox",
                    name: "selectedAIs",
                    message: "Select AI tools to enable:",
                    choices: aiConfigs.map((config) => ({
                        name: config.name,
                        value: config.id,
                    })),
                },
            ]);
            selectedAIs = result.selectedAIs;
        }
        if (selectedAIs.length === 0) {
            console.log("No AI tools selected. Exiting.");
            return;
        }
        const spinner = ora("Setting up project...").start();
        // Persist the selection on every entry so sync.js can read it later.
        const updatedConfigs = aiConfigs.map((config) => ({
            ...config,
            selected: selectedAIs.includes(config.id),
        }));
        // Copy the bundled prompts directory into the target project.
        const sourcePromptsPath = path.join(__dirname, "prompts");
        const destPromptsPath = path.join(process.cwd(), "prompts");
        await copyDirectory(sourcePromptsPath, destPromptsPath);
        // Write the updated ai-config.json to the target project.
        const destConfigPath = path.join(process.cwd(), "ai-config.json");
        await fs.writeFile(destConfigPath, JSON.stringify(updatedConfigs, null, 2));
        spinner.text = "Running sync...";
        // FIX: sync.js lives in the same directory as this file (__dirname is
        // already .../dist), so resolve it directly instead of via "../dist",
        // which only worked because the output directory happens to be "dist".
        // Also quote the path so installs under directories containing spaces
        // don't break the shell command.
        const syncPath = path.join(__dirname, "sync.js");
        execSync(`node "${syncPath}"`, {
            cwd: process.cwd(),
            stdio: "inherit",
        });
        spinner.succeed("Project initialized successfully!");
    }
    catch (error) {
        console.error("Error:", error.message);
        process.exit(1);
    }
};
|
|
98
|
+
export { main, copyDirectory };
|
|
99
|
+
main();
|
|
100
|
+
//# sourceMappingURL=index.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"index.js","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":";AAEA,OAAO,EAAE,MAAM,kBAAkB,CAAC;AAClC,OAAO,IAAI,MAAM,WAAW,CAAC;AAC7B,OAAO,EAAE,aAAa,EAAE,MAAM,UAAU,CAAC;AACzC,OAAO,EAAE,QAAQ,EAAE,MAAM,oBAAoB,CAAC;AAC9C,OAAO,QAAQ,MAAM,UAAU,CAAC;AAChC,OAAO,GAAG,MAAM,KAAK,CAAC;AAEtB,MAAM,UAAU,GAAG,aAAa,CAAC,MAAM,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC;AAClD,MAAM,SAAS,GAAG,IAAI,CAAC,OAAO,CAAC,UAAU,CAAC,CAAC;AAW3C,MAAM,aAAa,GAAG,KAAK,EAAE,GAAW,EAAE,IAAY,EAAE,EAAE;IACzD,MAAM,EAAE,CAAC,KAAK,CAAC,IAAI,EAAE,EAAE,SAAS,EAAE,IAAI,EAAE,CAAC,CAAC;IAC1C,MAAM,OAAO,GAAG,MAAM,EAAE,CAAC,OAAO,CAAC,GAAG,EAAE,EAAE,aAAa,EAAE,IAAI,EAAE,CAAC,CAAC;IAE/D,KAAK,MAAM,KAAK,IAAI,OAAO,EAAE,CAAC;QAC7B,MAAM,OAAO,GAAG,IAAI,CAAC,IAAI,CAAC,GAAG,EAAE,KAAK,CAAC,IAAI,CAAC,CAAC;QAC3C,MAAM,QAAQ,GAAG,IAAI,CAAC,IAAI,CAAC,IAAI,EAAE,KAAK,CAAC,IAAI,CAAC,CAAC;QAE7C,IAAI,KAAK,CAAC,WAAW,EAAE,EAAE,CAAC;YACzB,MAAM,aAAa,CAAC,OAAO,EAAE,QAAQ,CAAC,CAAC;QACxC,CAAC;aAAM,CAAC;YACP,MAAM,EAAE,CAAC,QAAQ,CAAC,OAAO,EAAE,QAAQ,CAAC,CAAC;QACtC,CAAC;IACF,CAAC;AACF,CAAC,CAAC;AAEF,MAAM,IAAI,GAAG,KAAK,IAAI,EAAE;IACvB,IAAI,CAAC;QACJ,yBAAyB;QACzB,MAAM,UAAU,GAAG,IAAI,CAAC,IAAI,CAAC,SAAS,EAAE,gBAAgB,CAAC,CAAC;QAC1D,MAAM,SAAS,GAAe,IAAI,CAAC,KAAK,CACvC,MAAM,EAAE,CAAC,QAAQ,CAAC,UAAU,EAAE,OAAO,CAAC,CACtC,CAAC;QAEF,iDAAiD;QACjD,MAAM,kBAAkB,GAAG,IAAI,CAAC,IAAI,CAAC,OAAO,CAAC,GAAG,EAAE,EAAE,gBAAgB,CAAC,CAAC;QACtE,IAAI,WAAW,GAAa,EAAE,CAAC;QAE/B,IAAI,CAAC;YACJ,MAAM,EAAE,CAAC,MAAM,CAAC,kBAAkB,CAAC,CAAC;YACpC,MAAM,EAAE,WAAW,EAAE,GAAG,MAAM,QAAQ,CAAC,MAAM,CAAC;gBAC7C;oBACC,IAAI,EAAE,SAAS;oBACf,IAAI,EAAE,aAAa;oBACnB,OAAO,EAAE,uDAAuD;oBAChE,OAAO,EAAE,IAAI;iBACb;aACD,CAAC,CAAC;YAEH,IAAI,WAAW,EAAE,CAAC;gBACjB,MAAM,cAAc,GAAe,IAAI,CAAC,KAAK,CAC5C,MAAM,EAAE,CAAC,QAAQ,CAAC,kBAAkB,EAAE,OAAO,CAAC,CAC9C,CAAC;gBACF,WAAW,GAAG,cAAc;qBAC1B,MAAM,CAAC,CAAC,MAAM,EAAE,EAAE,CAAC,MAAM,CAAC,QAAQ,CAAC;qBACnC,GAAG,CAAC,CAAC,MAAM,EAAE,EAAE,CAAC,MAAM,CAAC,EAAE,CAAC,CAAC;YAC9B,CAAC;QACF,CAAC;QAAC,MAAM,CAAC;YACR,wCAAwC;QACzC,CAAC;QAED,2EAA2E;QAC3E,IAAI,WAA
W,CAAC,MAAM,KAAK,CAAC,EAAE,CAAC;YAC9B,MAAM,MAAM,GAAG,MAAM,QAAQ,CAAC,MAAM,CAAC;gBACpC;oBACC,IAAI,EAAE,UAAU;oBAChB,IAAI,EAAE,aAAa;oBACnB,OAAO,EAAE,4BAA4B;oBACrC,OAAO,EAAE,SAAS,CAAC,GAAG,CAAC,CAAC,MAAM,EAAE,EAAE,CAAC,CAAC;wBACnC,IAAI,EAAE,MAAM,CAAC,IAAI;wBACjB,KAAK,EAAE,MAAM,CAAC,EAAE;qBAChB,CAAC,CAAC;iBACH;aACD,CAAC,CAAC;YACH,WAAW,GAAG,MAAM,CAAC,WAAW,CAAC;QAClC,CAAC;QAED,IAAI,WAAW,CAAC,MAAM,KAAK,CAAC,EAAE,CAAC;YAC9B,OAAO,CAAC,GAAG,CAAC,gCAAgC,CAAC,CAAC;YAC9C,OAAO;QACR,CAAC;QAED,MAAM,OAAO,GAAG,GAAG,CAAC,uBAAuB,CAAC,CAAC,KAAK,EAAE,CAAC;QAErD,4CAA4C;QAC5C,MAAM,cAAc,GAAG,SAAS,CAAC,GAAG,CAAC,CAAC,MAAM,EAAE,EAAE,CAAC,CAAC;YACjD,GAAG,MAAM;YACT,QAAQ,EAAE,WAAW,CAAC,QAAQ,CAAC,MAAM,CAAC,EAAE,CAAC;SACzC,CAAC,CAAC,CAAC;QAEJ,8CAA8C;QAC9C,MAAM,iBAAiB,GAAG,IAAI,CAAC,IAAI,CAAC,SAAS,EAAE,SAAS,CAAC,CAAC;QAC1D,MAAM,eAAe,GAAG,IAAI,CAAC,IAAI,CAAC,OAAO,CAAC,GAAG,EAAE,EAAE,SAAS,CAAC,CAAC;QAC5D,MAAM,aAAa,CAAC,iBAAiB,EAAE,eAAe,CAAC,CAAC;QAExD,oDAAoD;QACpD,MAAM,cAAc,GAAG,IAAI,CAAC,IAAI,CAAC,OAAO,CAAC,GAAG,EAAE,EAAE,gBAAgB,CAAC,CAAC;QAClE,MAAM,EAAE,CAAC,SAAS,CAAC,cAAc,EAAE,IAAI,CAAC,SAAS,CAAC,cAAc,EAAE,IAAI,EAAE,CAAC,CAAC,CAAC,CAAC;QAE5E,OAAO,CAAC,IAAI,GAAG,iBAAiB,CAAC;QAEjC,yCAAyC;QACzC,MAAM,QAAQ,GAAG,IAAI,CAAC,IAAI,CAAC,SAAS,EAAE,iBAAiB,CAAC,CAAC;QACzD,QAAQ,CAAC,QAAQ,QAAQ,EAAE,EAAE;YAC5B,GAAG,EAAE,OAAO,CAAC,GAAG,EAAE;YAClB,KAAK,EAAE,SAAS;SAChB,CAAC,CAAC;QAEH,OAAO,CAAC,OAAO,CAAC,mCAAmC,CAAC,CAAC;IACtD,CAAC;IAAC,OAAO,KAAK,EAAE,CAAC;QAChB,OAAO,CAAC,KAAK,CAAC,QAAQ,EAAG,KAAe,CAAC,OAAO,CAAC,CAAC;QAClD,OAAO,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC;IACjB,CAAC;AACF,CAAC,CAAC;AAEF,OAAO,EAAE,IAAI,EAAE,aAAa,EAAE,CAAC;AAE/B,IAAI,EAAE,CAAC"}
|
|
@@ -0,0 +1,218 @@
|
|
|
1
|
+
# Agent's Memory Bank
|
|
2
|
+
|
|
3
|
+
I am Agent, an expert software engineer with a unique characteristic: my memory resets completely between sessions. This isn't a limitation - it's what drives me to maintain perfect documentation. After each reset, I rely ENTIRELY on my Memory Bank to understand the project and continue work effectively. I MUST read ALL memory bank files at the start of EVERY task - this is not optional.
|
|
4
|
+
|
|
5
|
+
## MANDATORY QUALITY GATES
|
|
6
|
+
|
|
7
|
+
### Code Quality Requirements (NON-NEGOTIABLE)
|
|
8
|
+
|
|
9
|
+
**EVERY code change MUST pass ALL quality gates:**
|
|
10
|
+
|
|
11
|
+
1. **Testing Gate**: ALL tests must pass
|
|
12
|
+
2. **Linting Gate**: ZERO warnings/errors allowed
|
|
13
|
+
3. **Build Gate**: Must compile successfully
|
|
14
|
+
4. **Functionality Gate**: All existing features must continue working
|
|
15
|
+
|
|
16
|
+
**Failure Protocol**: If ANY gate fails, STOP immediately and fix before proceeding.
|
|
17
|
+
|
|
18
|
+
### Task Complexity Management
|
|
19
|
+
|
|
20
|
+
**REJECT tasks that are:**
|
|
21
|
+
|
|
22
|
+
- Large refactoring (>200 lines changed)
|
|
23
|
+
- Multi-file architectural changes
|
|
24
|
+
- Complex feature additions spanning multiple components
|
|
25
|
+
- Tasks requiring >30 minutes of work
|
|
26
|
+
|
|
27
|
+
**ACCEPT only:**
|
|
28
|
+
|
|
29
|
+
- Single file modifications
|
|
30
|
+
- Bug fixes with clear scope
|
|
31
|
+
- Small feature additions
|
|
32
|
+
- Documentation updates
|
|
33
|
+
- Configuration changes
|
|
34
|
+
|
|
35
|
+
**When rejecting**: Explain why the task is too complex and suggest breaking it into smaller atomic tasks.
|
|
36
|
+
|
|
37
|
+
## Memory Bank Structure
|
|
38
|
+
|
|
39
|
+
The Memory Bank consists of required core files and optional context files, all in Markdown format. Files build upon each other in a clear hierarchy:
|
|
40
|
+
|
|
41
|
+
```mermaid
|
|
42
|
+
flowchart TD
|
|
43
|
+
PB[projectbrief.md] --> PC[productContext.md]
|
|
44
|
+
PB --> SP[systemPatterns.md]
|
|
45
|
+
PB --> TC[techContext.md]
|
|
46
|
+
|
|
47
|
+
PC --> AC[activeContext.md]
|
|
48
|
+
SP --> AC
|
|
49
|
+
TC --> AC
|
|
50
|
+
|
|
51
|
+
AC --> P[progress.md]
|
|
52
|
+
```
|
|
53
|
+
|
|
54
|
+
### Core Files (Required)
|
|
55
|
+
|
|
56
|
+
1. `projectbrief.md`
|
|
57
|
+
- Foundation document that shapes all other files
|
|
58
|
+
- Created at project start if it doesn't exist
|
|
59
|
+
- Defines core requirements and goals
|
|
60
|
+
- Source of truth for project scope
|
|
61
|
+
|
|
62
|
+
2. `productContext.md`
|
|
63
|
+
- Why this project exists
|
|
64
|
+
- Problems it solves
|
|
65
|
+
- How it should work
|
|
66
|
+
- User experience goals
|
|
67
|
+
|
|
68
|
+
3. `activeContext.md`
|
|
69
|
+
- Current work focus
|
|
70
|
+
- Recent changes
|
|
71
|
+
- Next steps
|
|
72
|
+
- Active decisions and considerations
|
|
73
|
+
|
|
74
|
+
4. `systemPatterns.md`
|
|
75
|
+
- System architecture
|
|
76
|
+
- Key technical decisions
|
|
77
|
+
- Design patterns in use
|
|
78
|
+
- Component relationships
|
|
79
|
+
|
|
80
|
+
5. `techContext.md`
|
|
81
|
+
- Technologies used
|
|
82
|
+
- Development setup
|
|
83
|
+
- Technical constraints
|
|
84
|
+
- Dependencies
|
|
85
|
+
|
|
86
|
+
6. `progress.md`
|
|
87
|
+
- What works
|
|
88
|
+
- What's left to build
|
|
89
|
+
- Current status
|
|
90
|
+
- Known issues
|
|
91
|
+
|
|
92
|
+
### Additional Context
|
|
93
|
+
|
|
94
|
+
Create additional files/folders within /prompts/memory-bank/ when they help organize:
|
|
95
|
+
|
|
96
|
+
- Complex feature documentation
|
|
97
|
+
- Integration specifications
|
|
98
|
+
- API documentation
|
|
99
|
+
- Testing strategies
|
|
100
|
+
- Deployment procedures
|
|
101
|
+
|
|
102
|
+
## Core Workflows
|
|
103
|
+
|
|
104
|
+
### Plan Mode
|
|
105
|
+
|
|
106
|
+
```mermaid
|
|
107
|
+
flowchart TD
|
|
108
|
+
Start[Start] --> ReadFiles[Read Memory Bank]
|
|
109
|
+
ReadFiles --> CheckFiles{Files Complete?}
|
|
110
|
+
|
|
111
|
+
CheckFiles -->|No| Plan[Create Plan]
|
|
112
|
+
Plan --> Document[Document in Chat]
|
|
113
|
+
|
|
114
|
+
CheckFiles -->|Yes| Verify[Verify Context]
|
|
115
|
+
Verify --> Strategy[Develop Strategy]
|
|
116
|
+
Strategy --> Present[Present Approach]
|
|
117
|
+
```
|
|
118
|
+
|
|
119
|
+
### Act Mode
|
|
120
|
+
|
|
121
|
+
```mermaid
|
|
122
|
+
flowchart TD
|
|
123
|
+
Start[Start] --> Context[Check Memory Bank]
|
|
124
|
+
Context --> Complexity{Task Too Complex?}
|
|
125
|
+
|
|
126
|
+
Complexity -->|Yes| Reject[Reject & Suggest Breakdown]
|
|
127
|
+
Complexity -->|No| Update[Update Documentation]
|
|
128
|
+
|
|
129
|
+
Update --> Rules[Update or Create new rules in /prompts/memory-bank if needed]
|
|
130
|
+
Rules --> Execute[Execute Task]
|
|
131
|
+
Execute --> TestGate[Run Tests]
|
|
132
|
+
|
|
133
|
+
TestGate -->|Fail| Fix[Fix Issues]
|
|
134
|
+
TestGate -->|Pass| LintGate[Run Linting]
|
|
135
|
+
|
|
136
|
+
LintGate -->|Fail| Fix
|
|
137
|
+
LintGate -->|Pass| BuildGate[Run Build]
|
|
138
|
+
|
|
139
|
+
BuildGate -->|Fail| Fix
|
|
140
|
+
BuildGate -->|Pass| Document[Document Changes in /prompts/memory-bank/]
|
|
141
|
+
|
|
142
|
+
Fix --> TestGate
|
|
143
|
+
```
|
|
144
|
+
|
|
145
|
+
## Documentation Updates
|
|
146
|
+
|
|
147
|
+
Memory Bank updates occur when:
|
|
148
|
+
|
|
149
|
+
1. Discovering new project patterns
|
|
150
|
+
2. After implementing significant changes
|
|
151
|
+
3. When user requests with **update memory bank** (MUST review ALL files)
|
|
152
|
+
4. When context needs clarification
|
|
153
|
+
|
|
154
|
+
```mermaid
|
|
155
|
+
flowchart TD
|
|
156
|
+
Start[Update Process]
|
|
157
|
+
|
|
158
|
+
subgraph Process
|
|
159
|
+
P1[Review ALL Files]
|
|
160
|
+
P2[Document Current State]
|
|
161
|
+
P3[Clarify Next Steps]
|
|
162
|
+
P4[Update /prompts/memory-bank/]
|
|
163
|
+
|
|
164
|
+
P1 --> P2 --> P3 --> P4
|
|
165
|
+
end
|
|
166
|
+
|
|
167
|
+
Start --> Process
|
|
168
|
+
```
|
|
169
|
+
|
|
170
|
+
Note: When triggered by **update memory bank**, I MUST review every memory bank file, even if some don't require updates. Focus particularly on activeContext.md and progress.md as they track current state.
|
|
171
|
+
|
|
172
|
+
## Project Intelligence (/prompts/memory-bank)
|
|
173
|
+
|
|
174
|
+
The /prompts/memory-bank directory is my learning journal for each project. It captures important patterns, preferences, and project intelligence that help me work more effectively. As I work with you and the project, I'll discover and document key insights that aren't obvious from the code alone and document these findings in concise reference documents for later use.
|
|
175
|
+
|
|
176
|
+
```mermaid
|
|
177
|
+
flowchart TD
|
|
178
|
+
Start{Discover New Pattern}
|
|
179
|
+
|
|
180
|
+
subgraph Learn [Learning Process]
|
|
181
|
+
D1[Identify Pattern]
|
|
182
|
+
D2[Validate with User]
|
|
183
|
+
D3[Document in /prompts/memory-bank]
|
|
184
|
+
end
|
|
185
|
+
|
|
186
|
+
subgraph Apply [Usage]
|
|
187
|
+
A1[Read /prompts/memory-bank]
|
|
188
|
+
A2[Apply Learned Patterns]
|
|
189
|
+
A3[Improve Future Work]
|
|
190
|
+
end
|
|
191
|
+
|
|
192
|
+
Start --> Learn
|
|
193
|
+
Learn --> Apply
|
|
194
|
+
```
|
|
195
|
+
|
|
196
|
+
### What to Capture
|
|
197
|
+
|
|
198
|
+
- Critical implementation paths
|
|
199
|
+
- User preferences and workflow
|
|
200
|
+
- Project-specific patterns
|
|
201
|
+
- Known challenges
|
|
202
|
+
- Evolution of project decisions
|
|
203
|
+
- Tool usage patterns
|
|
204
|
+
|
|
205
|
+
The format is flexible - focus on capturing valuable insights that help me work more effectively with you and the project. Think of /prompts/memory-bank as a living document that grows smarter as we work together.
|
|
206
|
+
|
|
207
|
+
REMEMBER: After every memory reset, I begin completely fresh. The Memory Bank is my only link to previous work. It must be maintained with precision and clarity, as my effectiveness depends entirely on its accuracy.
|
|
208
|
+
|
|
209
|
+
# Planning
|
|
210
|
+
|
|
211
|
+
When asked to enter "Planner Mode" or using the /plan command, deeply reflect upon the changes being asked and analyze existing code to map the full scope of changes needed. Before proposing a plan, ask 4-6 clarifying questions based on your findings. Once answered, draft a comprehensive plan of action and ask me for approval of that plan.
|
|
212
|
+
|
|
213
|
+
**COMPLEXITY CHECK**: Before planning, assess if the task exceeds complexity limits. If so, reject and suggest atomic breakdown.
|
|
214
|
+
|
|
215
|
+
# Vital documentation to read before planning:
|
|
216
|
+
|
|
217
|
+
- prompts/memory-bank/atomic-task-planning.md
|
|
218
|
+
- prompts/memory-bank/test-driven-development.md
|
|
@@ -0,0 +1,82 @@
|
|
|
1
|
+
# Atomic Task Planning
|
|
2
|
+
|
|
3
|
+
Divide every job into the smallest possible atomic tasks and execute them one by one.
|
|
4
|
+
|
|
5
|
+
## Atomic Task Planning Protocol
|
|
6
|
+
|
|
7
|
+
**MANDATORY WORKFLOW:**
|
|
8
|
+
|
|
9
|
+
1. **PLANNING PHASE REQUIRED**:
|
|
10
|
+
- NEVER start work without atomic task breakdown
|
|
11
|
+
- Divide ALL jobs into smallest possible tasks
|
|
12
|
+
- Tasks can be as small as writing a single method
|
|
13
|
+
- Tasks can be as small as renaming a variable
|
|
14
|
+
- Present complete task list before starting
|
|
15
|
+
|
|
16
|
+
2. **ATOMIC TASK CRITERIA**:
|
|
17
|
+
- Each task must be independently completable
|
|
18
|
+
- Should take minimal time to execute
|
|
19
|
+
- Must have clear, measurable outcome
|
|
20
|
+
- Cannot be broken down further meaningfully
|
|
21
|
+
|
|
22
|
+
3. **EXECUTION RULES**:
|
|
23
|
+
- Execute tasks ONE BY ONE only
|
|
24
|
+
- Complete current task before starting next
|
|
25
|
+
- Never work on multiple tasks simultaneously
|
|
26
|
+
- Stop after each task completion
|
|
27
|
+
|
|
28
|
+
4. **TASK EXAMPLES**:
|
|
29
|
+
- "Write getUserById method"
|
|
30
|
+
- "Add email validation"
|
|
31
|
+
- "Rename variable 'data' to 'userData'"
|
|
32
|
+
- "Add error handling to login function"
|
|
33
|
+
- "Update import statement"
|
|
34
|
+
- "Add single test case for edge condition"
|
|
35
|
+
|
|
36
|
+
5. **NO TASK TOO SMALL**:
|
|
37
|
+
- Prefer smaller tasks over larger ones
|
|
38
|
+
- When in doubt, break it down further
|
|
39
|
+
- Single line changes are valid tasks
|
|
40
|
+
- Micro-improvements are encouraged
|
|
41
|
+
|
|
42
|
+
WORKFLOW:
|
|
43
|
+
|
|
44
|
+
1. Receive request
|
|
45
|
+
2. Break into atomic tasks
|
|
46
|
+
3. Present task list for approval
|
|
47
|
+
4. Execute task 1
|
|
48
|
+
5. Stop and confirm completion
|
|
49
|
+
6. Execute task 2
|
|
50
|
+
7. Repeat until all tasks done
|
|
51
|
+
|
|
52
|
+
## Examples
|
|
53
|
+
|
|
54
|
+
- input: |
|
|
55
|
+
User: "Add user authentication"
|
|
56
|
+
|
|
57
|
+
# Correct approach:
|
|
58
|
+
|
|
59
|
+
"I'll break this into atomic tasks:
|
|
60
|
+
1. Create User model
|
|
61
|
+
2. Add password hashing method
|
|
62
|
+
3. Create login endpoint
|
|
63
|
+
4. Add authentication middleware
|
|
64
|
+
5. Add logout functionality
|
|
65
|
+
|
|
66
|
+
Should I proceed with task 1?"
|
|
67
|
+
output: "Proper atomic task breakdown with sequential execution"
|
|
68
|
+
|
|
69
|
+
- input: |
|
|
70
|
+
User: "Fix the bug in calculation"
|
|
71
|
+
|
|
72
|
+
# Correct approach:
|
|
73
|
+
|
|
74
|
+
"I'll break this into atomic tasks:
|
|
75
|
+
1. Identify the calculation error
|
|
76
|
+
2. Write test that reproduces bug
|
|
77
|
+
3. Fix the specific calculation line
|
|
78
|
+
4. Verify test passes
|
|
79
|
+
5. Verify linting passes
|
|
80
|
+
|
|
81
|
+
Should I start with task 1?"
|
|
82
|
+
output: "Bug fix broken into atomic steps"
|
|
@@ -0,0 +1,122 @@
|
|
|
1
|
+
# Test-Driven Development Rule
|
|
2
|
+
|
|
3
|
+
Enforce test-driven development practices with atomic steps and user verification.
|
|
4
|
+
|
|
5
|
+
TEST-DRIVEN DEVELOPMENT PROTOCOL
|
|
6
|
+
|
|
7
|
+
MANDATORY TDD WORKFLOW:
|
|
8
|
+
|
|
9
|
+
1. **ALWAYS START WITH TESTS**:
|
|
10
|
+
- Plan and write tests BEFORE writing implementation code
|
|
11
|
+
- Create tests for every new feature, bug fix, or change
|
|
12
|
+
- Follow Red-Green-Refactor cycle
|
|
13
|
+
|
|
14
|
+
2. **ATOMIC TEST STEPS**:
|
|
15
|
+
- Write ONE test at a time
|
|
16
|
+
- Keep each test focused on a single behavior
|
|
17
|
+
- Do not write long test suites all at once
|
|
18
|
+
- Stop after each atomic test step
|
|
19
|
+
|
|
20
|
+
3. **USER VERIFICATION REQUIRED**:
|
|
21
|
+
- After writing each test, wait for user verification
|
|
22
|
+
- Let user review and approve test before proceeding
|
|
23
|
+
- Only continue to implementation after test approval
|
|
24
|
+
- User must confirm each step before moving forward
|
|
25
|
+
|
|
26
|
+
4. **COVERAGE REQUIREMENTS**:
|
|
27
|
+
- Aim for full or near-full test coverage
|
|
28
|
+
- Cover edge cases and error scenarios
|
|
29
|
+
- Test both positive and negative paths
|
|
30
|
+
- Document any untested code with clear reasoning
|
|
31
|
+
|
|
32
|
+
5. **FRAMEWORK DEPENDENCY**:
|
|
33
|
+
- Use testing frameworks specified by user
|
|
34
|
+
- Follow user's testing patterns and conventions
|
|
35
|
+
- Ask user to specify testing approach if unclear
|
|
36
|
+
- Adapt to existing test structure in codebase
|
|
37
|
+
|
|
38
|
+
6. **INCREMENTAL APPROACH**:
|
|
39
|
+
- Small, verifiable steps only
|
|
40
|
+
- No large code blocks without tests
|
|
41
|
+
- Each implementation should make tests pass
|
|
42
|
+
- Refactor only after tests are green
|
|
43
|
+
|
|
44
|
+
7. **CONTINUOUS TESTING**:
|
|
45
|
+
- Any change to src/ directory triggers all tests
|
|
46
|
+
- Must ensure all tests pass before proceeding
|
|
47
|
+
- Fix broken tests immediately
|
|
48
|
+
- No code changes without verified test coverage
|
|
49
|
+
|
|
50
|
+
WORKFLOW STEPS:
|
|
51
|
+
|
|
52
|
+
1. Understand requirement
|
|
53
|
+
2. Write failing test (Red)
|
|
54
|
+
3. STOP - Get user verification
|
|
55
|
+
4. Write minimal code to pass test (Green)
|
|
56
|
+
5. RUN ALL TESTS - Ensure nothing breaks
|
|
57
|
+
6. RUN LINTING - Ensure nothing breaks
|
|
58
|
+
7. STOP - Get user verification
|
|
59
|
+
8. Refactor if needed (Refactor)
|
|
60
|
+
9. RUN ALL TESTS - Verify refactor didn't break anything
|
|
61
|
+
10. STOP - Get user verification
|
|
62
|
+
11. Repeat for next atomic feature
|
|
63
|
+
|
|
64
|
+
AUTO-TEST TRIGGER:
|
|
65
|
+
|
|
66
|
+
- Any file modification under src/ automatically runs: `npm run test`
|
|
67
|
+
- Must address any test failures before continuing development
|
|
68
|
+
- Ensures continuous validation and early error detection
|
|
69
|
+
|
|
70
|
+
AUTO-LINTING TRIGGER:
|
|
71
|
+
|
|
72
|
+
- Any file modification under src/ automatically runs: `npm run lint`
|
|
73
|
+
- Must address any linting failures before continuing development
|
|
74
|
+
- Ensures continuous validation and early error detection
|
|
75
|
+
|
|
76
|
+
EXAMPLES:
|
|
77
|
+
|
|
78
|
+
- input: |
|
|
79
|
+
User: "Add a login function"
|
|
80
|
+
|
|
81
|
+
# Correct TDD approach:
|
|
82
|
+
|
|
83
|
+
Step 1: "I'll write a test for login function first. Here's the failing test..."
|
|
84
|
+
|
|
85
|
+
[Wait for user approval]
|
|
86
|
+
|
|
87
|
+
Step 2: "Now I'll implement minimal code to make this test pass..."
|
|
88
|
+
|
|
89
|
+
[Wait for user approval]
|
|
90
|
+
|
|
91
|
+
Step 3: "Let's refactor and add the next test case..."
|
|
92
|
+
|
|
93
|
+
output: "Following TDD with atomic steps and user verification"
|
|
94
|
+
|
|
95
|
+
- input: |
|
|
96
|
+
User: "Fix the bug in calculation"
|
|
97
|
+
|
|
98
|
+
# Correct TDD approach:
|
|
99
|
+
|
|
100
|
+
Step 1: "I'll write a test that reproduces the bug first..."
|
|
101
|
+
[Wait for user approval]
|
|
102
|
+
|
|
103
|
+
Step 2: "Now I'll fix the code to make the test pass..."
|
|
104
|
+
|
|
105
|
+
output: "Bug fix following TDD principles"
|
|
106
|
+
|
|
107
|
+
- input: |
|
|
108
|
+
User modifies: "src/calculator/math.ts"
|
|
109
|
+
|
|
110
|
+
# Auto-triggered response:
|
|
111
|
+
- "File change detected in src/. Running all tests..."
|
|
112
|
+
- "npm run test"
|
|
113
|
+
- "✅ All tests pass. Safe to continue development."
|
|
114
|
+
|
|
115
|
+
OR
|
|
116
|
+
- "❌ 2 tests failed. Must fix before proceeding."
|
|
117
|
+
- "npm run lint"
|
|
118
|
+
- "✅ All linting passes. Safe to continue development."
|
|
119
|
+
|
|
120
|
+
OR
|
|
121
|
+
- "❌ 2 linting failures. Must fix before proceeding."
|
|
122
|
+
- output: "Continuous testing ensures code quality"
|
package/dist/sync.d.ts
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
interface Platform {
|
|
3
|
+
id: string;
|
|
4
|
+
name: string;
|
|
5
|
+
dirPath: string;
|
|
6
|
+
useSourceFilename: boolean;
|
|
7
|
+
targetPath?: string;
|
|
8
|
+
selected?: boolean;
|
|
9
|
+
}
|
|
10
|
+
declare const formatTarget: (platform: Platform) => string;
|
|
11
|
+
declare const syncFiles: () => Promise<void>;
|
|
12
|
+
declare const main: () => Promise<void>;
|
|
13
|
+
export { main, syncFiles, formatTarget };
|
|
14
|
+
//# sourceMappingURL=sync.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"sync.d.ts","sourceRoot":"","sources":["../src/sync.ts"],"names":[],"mappings":";AAUA,UAAU,QAAQ;IACjB,EAAE,EAAE,MAAM,CAAC;IACX,IAAI,EAAE,MAAM,CAAC;IACb,OAAO,EAAE,MAAM,CAAC;IAChB,iBAAiB,EAAE,OAAO,CAAC;IAC3B,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB,QAAQ,CAAC,EAAE,OAAO,CAAC;CACnB;AAkCD,QAAA,MAAM,YAAY,GAAI,UAAU,QAAQ,WAIvC,CAAC;AA6CF,QAAA,MAAM,SAAS,qBAiBd,CAAC;AAEF,QAAA,MAAM,IAAI,qBAMT,CAAC;AAEF,OAAO,EAAE,IAAI,EAAE,SAAS,EAAE,YAAY,EAAE,CAAC"}
|
package/dist/sync.js
ADDED
|
@@ -0,0 +1,107 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
import fs from "node:fs/promises";
|
|
3
|
+
import path from "node:path";
|
|
4
|
+
const rootPath = process.cwd();
|
|
5
|
+
const sourcePath = path.join(rootPath, "prompts/instructions.md");
|
|
6
|
+
const configFile = path.join(rootPath, "ai-config.json");
|
|
7
|
+
const platforms = JSON.parse(await fs.readFile(configFile, "utf-8"));
|
|
8
|
+
// Resolve true if the given path is accessible, false otherwise.
// Maps fs.access's resolve/reject outcome directly onto a boolean.
const exists = (targetPath) =>
    fs.access(targetPath).then(
        () => true,
        () => false,
    );
|
|
17
|
+
// Create the directory (including any missing parents) if it does not
// already exist; a no-op when it does.
const ensureDir = async (directory) => {
    await fs.mkdir(directory, { recursive: true });
};
|
|
20
|
+
// Hard-link src to dest when the filesystem allows it; when hard-linking is
// not possible (cross-device EXDEV, or EPERM/EACCES restrictions) fall back
// to a plain file copy. Any other error is rethrown.
const linkOrCopy = async (src, dest) => {
    try {
        await fs.link(src, dest);
    }
    catch (err) {
        const recoverable = ["EXDEV", "EPERM", "EACCES"].includes(err?.code);
        if (!recoverable) {
            throw err;
        }
        await fs.copyFile(src, dest);
    }
};
|
|
36
|
+
// Compute the repo-relative target path for a platform: platforms that keep
// the source filename get "<dirPath>/<basename of sourcePath>"; all others
// use their explicit targetPath, falling back to "<ID>.MD".
const formatTarget = (platform) => {
    if (!platform.useSourceFilename) {
        return platform.targetPath || `${platform.id.toUpperCase()}.MD`;
    }
    return path.join(platform.dirPath, path.basename(sourcePath));
};
|
|
41
|
+
// Sync the instructions file to one platform's target location: remove any
// stale target, make sure its directory exists, then hard-link (or copy) the
// source into place. Never throws — failures are reported in the result
// object so one platform's error does not abort the others.
const syncPlatform = async (platform) => {
    const target = formatTarget(platform);
    const absoluteTarget = path.join(rootPath, target);
    try {
        if (await exists(absoluteTarget)) {
            await fs.unlink(absoluteTarget);
        }
        if (platform.dirPath) {
            await ensureDir(path.join(rootPath, platform.dirPath));
        }
        await linkOrCopy(sourcePath, absoluteTarget);
        return { ok: true, name: platform.name, target };
    }
    catch (err) {
        return {
            ok: false,
            name: platform.name,
            target,
            error: err.message,
        };
    }
};
|
|
61
|
+
// Pretty-prints per-platform sync results to stdout.
// An EEXIST error is reported as success: multiple tools can legitimately
// share the same target file (e.g. AGENTS.MD), so finding the file already
// present on a second sync attempt is expected.
const showResults = (results) => {
	console.log("\n=== Results ===\n");
	for (const result of results) {
		const alreadyLinked = result.error?.includes("EEXIST") ?? false;
		if (result.ok || alreadyLinked) {
			console.log(` ✅ ${result.name}: ${result.target}`);
		} else {
			console.log(` ❌ ${result.name}: ${result.error}`);
		}
	}
	console.log("");
};
|
|
79
|
+
// Syncs the source prompt file to every selected platform in parallel and
// prints a per-platform report plus a summary line.
// Exits with code 1 if the source prompt file is missing.
const syncFiles = async () => {
    if (!(await exists(sourcePath))) {
        console.error(`[ERROR] Source file not found: prompts/instructions.md`);
        process.exit(1);
    }
    const selected = platforms.filter((p) => p.selected);
    if (selected.length === 0) {
        console.log("[!] No platforms selected. Exiting.");
        return;
    }
    const results = await Promise.all(selected.map((p) => syncPlatform(p)));
    showResults(results);
    // Count EEXIST as success to stay consistent with showResults: several
    // tools share the same target file (e.g. AGENTS.MD), so "already
    // present" is not a failure. Previously such rows were shown with a ✅
    // but excluded from the summary count.
    const successCount = results.filter(
        (r) => r.ok || r.error?.includes("EEXIST"),
    ).length;
    console.log(`Synced to ${successCount}/${selected.length} platform(s)`);
};
|
|
95
|
+
// CLI entry point. syncFiles() itself validates that the source prompt
// file exists (and exits with code 1 if not), so the previously duplicated
// existence check here has been removed.
const main = async () => {
    await syncFiles();
};
|
|
102
|
+
export { main, syncFiles, formatTarget };
// NOTE(review): main() runs unconditionally at module load, so merely
// importing this module for syncFiles/formatTarget also triggers a full
// sync — confirm that is intended before reusing these exports elsewhere.
main().catch((err) => {
    console.error(err);
    process.exit(1);
});
|
|
107
|
+
//# sourceMappingURL=sync.js.map
|
package/dist/sync.js.map
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"sync.js","sourceRoot":"","sources":["../src/sync.ts"],"names":[],"mappings":";AAEA,OAAO,EAAE,MAAM,kBAAkB,CAAC;AAClC,OAAO,IAAI,MAAM,WAAW,CAAC;AAE7B,MAAM,QAAQ,GAAG,OAAO,CAAC,GAAG,EAAE,CAAC;AAC/B,MAAM,UAAU,GAAG,IAAI,CAAC,IAAI,CAAC,QAAQ,EAAE,yBAAyB,CAAC,CAAC;AAClE,MAAM,UAAU,GAAG,IAAI,CAAC,IAAI,CAAC,QAAQ,EAAE,gBAAgB,CAAC,CAAC;AACzD,MAAM,SAAS,GAAG,IAAI,CAAC,KAAK,CAAC,MAAM,EAAE,CAAC,QAAQ,CAAC,UAAU,EAAE,OAAO,CAAC,CAAC,CAAC;AAWrE,MAAM,MAAM,GAAG,KAAK,EAAE,CAAS,EAAE,EAAE;IAClC,IAAI,CAAC;QACJ,MAAM,EAAE,CAAC,MAAM,CAAC,CAAC,CAAC,CAAC;QACnB,OAAO,IAAI,CAAC;IACb,CAAC;IAAC,MAAM,CAAC;QACR,OAAO,KAAK,CAAC;IACd,CAAC;AACF,CAAC,CAAC;AAEF,MAAM,SAAS,GAAG,KAAK,EAAE,GAAW,EAAE,EAAE;IACvC,MAAM,EAAE,CAAC,KAAK,CAAC,GAAG,EAAE,EAAE,SAAS,EAAE,IAAI,EAAE,CAAC,CAAC;AAC1C,CAAC,CAAC;AAEF,MAAM,UAAU,GAAG,KAAK,EAAE,GAAW,EAAE,IAAY,EAAE,EAAE;IACtD,IAAI,CAAC;QACJ,MAAM,EAAE,CAAC,IAAI,CAAC,GAAG,EAAE,IAAI,CAAC,CAAC;IAC1B,CAAC;IAAC,OAAO,GAAY,EAAE,CAAC;QACvB,4DAA4D;QAC5D,MAAM,KAAK,GAAG,GAAwB,CAAC;QACvC,IACC,KAAK,CAAC,IAAI,KAAK,OAAO;YACtB,KAAK,CAAC,IAAI,KAAK,OAAO;YACtB,KAAK,CAAC,IAAI,KAAK,QAAQ,EACtB,CAAC;YACF,MAAM,EAAE,CAAC,QAAQ,CAAC,GAAG,EAAE,IAAI,CAAC,CAAC;YAC7B,OAAO;QACR,CAAC;QAED,MAAM,GAAG,CAAC;IACX,CAAC;AACF,CAAC,CAAC;AAEF,MAAM,YAAY,GAAG,CAAC,QAAkB,EAAE,EAAE;IAC3C,IAAI,QAAQ,CAAC,iBAAiB;QAC7B,OAAO,IAAI,CAAC,IAAI,CAAC,QAAQ,CAAC,OAAO,EAAE,IAAI,CAAC,QAAQ,CAAC,UAAU,CAAC,CAAC,CAAC;IAC/D,OAAO,QAAQ,CAAC,UAAU,IAAI,GAAG,QAAQ,CAAC,EAAE,CAAC,WAAW,EAAE,KAAK,CAAC;AACjE,CAAC,CAAC;AAEF,MAAM,YAAY,GAAG,KAAK,EAAE,QAAkB,EAAE,EAAE;IACjD,MAAM,UAAU,GAAG,YAAY,CAAC,QAAQ,CAAC,CAAC;IAC1C,MAAM,UAAU,GAAG,IAAI,CAAC,IAAI,CAAC,QAAQ,EAAE,UAAU,CAAC,CAAC;IACnD,IAAI,CAAC;QACJ,IAAI,MAAM,MAAM,CAAC,UAAU,CAAC;YAAE,MAAM,EAAE,CAAC,MAAM,CAAC,UAAU,CAAC,CAAC;QAC1D,IAAI,QAAQ,CAAC,OAAO;YACnB,MAAM,SAAS,CAAC,IAAI,CAAC,IAAI,CAAC,QAAQ,EAAE,QAAQ,CAAC,OAAO,CAAC,CAAC,CAAC;QACxD,MAAM,UAAU,CAAC,UAAU,EAAE,UAAU,CAAC,CAAC;QACzC,OAAO,EAAE,EAAE,EAAE,IAAI,EAAE,IAAI,EAAE,QAAQ,CAAC,IAAI,EAAE,MAAM,EAAE,UAAU,EAAE,CAAC;IAC9D,CAAC;IAAC,OAAO,GAAG,EAAE,CAAC;QA
Cd,OAAO;YACN,EAAE,EAAE,KAAK;YACT,IAAI,EAAE,QAAQ,CAAC,IAAI;YACnB,MAAM,EAAE,UAAU;YAClB,KAAK,EAAG,GAAa,CAAC,OAAO;SAC7B,CAAC;IACH,CAAC;AACF,CAAC,CAAC;AASF,MAAM,WAAW,GAAG,CAAC,OAAqB,EAAE,EAAE;IAC7C,OAAO,CAAC,GAAG,CAAC,qBAAqB,CAAC,CAAC;IACnC,KAAK,MAAM,CAAC,IAAI,OAAO,EAAE,CAAC;QACzB,IAAI,CAAC,CAAC,EAAE,EAAE,CAAC;YACV,OAAO,CAAC,GAAG,CAAC,OAAO,CAAC,CAAC,IAAI,KAAK,CAAC,CAAC,MAAM,EAAE,CAAC,CAAC;QAC3C,CAAC;aAAM,CAAC;YACP,mFAAmF;YACnF,IAAI,CAAC,CAAC,KAAK,EAAE,QAAQ,CAAC,QAAQ,CAAC,EAAE,CAAC;gBACjC,OAAO,CAAC,GAAG,CAAC,OAAO,CAAC,CAAC,IAAI,KAAK,CAAC,CAAC,MAAM,EAAE,CAAC,CAAC;YAC3C,CAAC;iBAAM,CAAC;gBACP,OAAO,CAAC,GAAG,CAAC,OAAO,CAAC,CAAC,IAAI,KAAK,CAAC,CAAC,KAAK,EAAE,CAAC,CAAC;YAC1C,CAAC;QACF,CAAC;IACF,CAAC;IACD,OAAO,CAAC,GAAG,CAAC,EAAE,CAAC,CAAC;AACjB,CAAC,CAAC;AAEF,MAAM,SAAS,GAAG,KAAK,IAAI,EAAE;IAC5B,IAAI,CAAC,CAAC,MAAM,MAAM,CAAC,UAAU,CAAC,CAAC,EAAE,CAAC;QACjC,OAAO,CAAC,KAAK,CAAC,wDAAwD,CAAC,CAAC;QACxE,OAAO,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC;IACjB,CAAC;IAED,MAAM,QAAQ,GAAG,SAAS,CAAC,MAAM,CAAC,CAAC,CAAW,EAAE,EAAE,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC;IAC/D,IAAI,QAAQ,CAAC,MAAM,KAAK,CAAC,EAAE,CAAC;QAC3B,OAAO,CAAC,GAAG,CAAC,qCAAqC,CAAC,CAAC;QACnD,OAAO;IACR,CAAC;IAED,MAAM,KAAK,GAAG,QAAQ,CAAC,GAAG,CAAC,CAAC,CAAW,EAAE,EAAE,CAAC,YAAY,CAAC,CAAC,CAAC,CAAC,CAAC;IAC7D,MAAM,OAAO,GAAG,MAAM,OAAO,CAAC,GAAG,CAAC,KAAK,CAAC,CAAC;IACzC,WAAW,CAAC,OAAO,CAAC,CAAC;IACrB,MAAM,YAAY,GAAG,OAAO,CAAC,MAAM,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,MAAM,CAAC;IACxD,OAAO,CAAC,GAAG,CAAC,aAAa,YAAY,IAAI,QAAQ,CAAC,MAAM,cAAc,CAAC,CAAC;AACzE,CAAC,CAAC;AAEF,MAAM,IAAI,GAAG,KAAK,IAAI,EAAE;IACvB,IAAI,CAAC,CAAC,MAAM,MAAM,CAAC,UAAU,CAAC,CAAC,EAAE,CAAC;QACjC,OAAO,CAAC,KAAK,CAAC,0DAA0D,CAAC,CAAC;QAC1E,OAAO,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC;IACjB,CAAC;IACD,MAAM,SAAS,EAAE,CAAC;AACnB,CAAC,CAAC;AAEF,OAAO,EAAE,IAAI,EAAE,SAAS,EAAE,YAAY,EAAE,CAAC;AAEzC,IAAI,EAAE,CAAC,KAAK,CAAC,CAAC,GAAG,EAAE,EAAE;IACpB,OAAO,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC;IACnB,OAAO,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC;AACjB,CAAC,CAAC,CAAC"}
|
package/package.json
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "fenchurch",
|
|
3
|
+
"version": "1.0.0",
|
|
4
|
+
"description": "A CLI tool to initialize projects with AI tooling and prompts.",
|
|
5
|
+
"homepage": "https://github.com/joemaddalone/fenchurch#readme",
|
|
6
|
+
"bugs": {
|
|
7
|
+
"url": "https://github.com/joemaddalone/fenchurch/issues"
|
|
8
|
+
},
|
|
9
|
+
"repository": {
|
|
10
|
+
"type": "git",
|
|
11
|
+
"url": "git+https://github.com/joemaddalone/fenchurch.git"
|
|
12
|
+
},
|
|
13
|
+
"license": "ISC",
|
|
14
|
+
"author": "Joe Maddalone",
|
|
15
|
+
"type": "module",
|
|
16
|
+
"main": "dist/index.js",
|
|
17
|
+
"bin": {
|
|
18
|
+
"fenchurch": "dist/index.js"
|
|
19
|
+
},
|
|
20
|
+
"files": [
|
|
21
|
+
"dist"
|
|
22
|
+
],
|
|
23
|
+
"scripts": {
|
|
24
|
+
"build": "tsc && cp src/ai-config.json dist/ && cp -r src/prompts dist/",
|
|
25
|
+
"dev": "tsc --watch",
|
|
26
|
+
"start": "node dist/index.js",
|
|
27
|
+
"clean": "rm -rf dist",
|
|
28
|
+
"prebuild": "npm run clean",
|
|
29
|
+
"lint": "biome lint .",
|
|
30
|
+
"format": "biome format --write .",
|
|
31
|
+
"fix": "biome check --fix .",
|
|
32
|
+
"test": "vitest run",
|
|
33
|
+
"test:watch": "vitest watch",
|
|
34
|
+
"coverage": "vitest run --coverage"
|
|
35
|
+
},
|
|
36
|
+
"dependencies": {
|
|
37
|
+
"@joemaddalone/filets": "^1.1.0",
|
|
38
|
+
"inquirer": "^13.2.0",
|
|
39
|
+
"ora": "^9.0.0"
|
|
40
|
+
},
|
|
41
|
+
"devDependencies": {
|
|
42
|
+
"@biomejs/biome": "^2.3.11",
|
|
43
|
+
"@types/blessed": "^0.1.27",
|
|
44
|
+
"@types/node": "^25.0.8",
|
|
45
|
+
"@vitest/coverage-v8": "^4.0.18",
|
|
46
|
+
"typescript": "^5.9.3",
|
|
47
|
+
"vitest": "^4.0.18"
|
|
48
|
+
}
|
|
49
|
+
}
|