@vpxa/aikit 0.1.8 → 0.1.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (30) hide show
  1. package/package.json +1 -1
  2. package/packages/cli/dist/commands/init/adapters.js +1 -1
  3. package/packages/cli/dist/commands/init/user.d.ts +2 -2
  4. package/packages/cli/dist/commands/init/user.js +4 -4
  5. package/packages/server/dist/server.js +2 -2
  6. package/packages/server/dist/tool-metadata.js +1 -1
  7. package/packages/server/dist/tools/flow.tools.js +1 -1
  8. package/packages/server/dist/tools/present/templates.js +6 -6
  9. package/packages/server/dist/tools/present/tool.js +3 -3
  10. package/packages/store/dist/lance-store.js +1 -1
  11. package/scaffold/adapters/copilot.mjs +4 -1
  12. package/scaffold/definitions/agents.mjs +32 -8
  13. package/scaffold/definitions/bodies.mjs +6 -2
  14. package/scaffold/definitions/protocols.mjs +37 -2
  15. package/scaffold/general/agents/Code-Reviewer-Alpha.agent.md +7 -0
  16. package/scaffold/general/agents/Code-Reviewer-Beta.agent.md +7 -0
  17. package/scaffold/general/agents/Debugger.agent.md +7 -0
  18. package/scaffold/general/agents/Documenter.agent.md +1 -0
  19. package/scaffold/general/agents/Explorer.agent.md +6 -0
  20. package/scaffold/general/agents/Frontend.agent.md +9 -0
  21. package/scaffold/general/agents/Implementer.agent.md +7 -0
  22. package/scaffold/general/agents/Orchestrator.agent.md +5 -3
  23. package/scaffold/general/agents/README.md +3 -3
  24. package/scaffold/general/agents/Refactor.agent.md +1 -0
  25. package/scaffold/general/agents/Researcher-Beta.agent.md +2 -2
  26. package/scaffold/general/agents/Researcher-Delta.agent.md +2 -2
  27. package/scaffold/general/agents/Researcher-Gamma.agent.md +2 -2
  28. package/scaffold/general/agents/Security.agent.md +7 -0
  29. package/scaffold/general/agents/_shared/code-agent-base.md +37 -2
  30. package/scaffold/general/skills/aikit/SKILL.md +122 -99
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@vpxa/aikit",
3
- "version": "0.1.8",
3
+ "version": "0.1.10",
4
4
  "type": "module",
5
5
  "description": "Local-first AI developer toolkit — knowledge base, code analysis, context management, and developer tools for LLM agents",
6
6
  "license": "MIT",
@@ -1 +1 @@
1
- import{MCP_SERVER_ENTRY as e}from"./constants.js";import{buildAgentsMd as t,buildCopilotInstructions as n}from"./templates.js";import{existsSync as r,mkdirSync as i,writeFileSync as a}from"node:fs";import{basename as o,resolve as s}from"node:path";function c(e){return r(s(e,`.cursor`))?`cursor`:r(s(e,`.claude`))?`claude-code`:r(s(e,`.windsurf`))?`windsurf`:`copilot`}function l(t){return{servers:{[t]:{...e}}}}function u(t){let{type:n,...r}=e;return{mcpServers:{[t]:r}}}const d={scaffoldDir:`general`,writeMcpConfig(e,t){let n=s(e,`.vscode`),o=s(n,`mcp.json`);r(o)||(i(n,{recursive:!0}),a(o,`${JSON.stringify(l(t),null,2)}\n`,`utf-8`),console.log(` Created .vscode/mcp.json`))},writeInstructions(e,t){let r=s(e,`.github`),c=s(r,`copilot-instructions.md`);i(r,{recursive:!0}),a(c,n(o(e),t),`utf-8`),console.log(` Updated .github/copilot-instructions.md`)},writeAgentsMd(e,n){a(s(e,`AGENTS.md`),t(o(e),n),`utf-8`),console.log(` Updated AGENTS.md`)}},f={scaffoldDir:`general`,writeMcpConfig(e,t){let n=s(e,`.mcp.json`);r(n)||(a(n,`${JSON.stringify(u(t),null,2)}\n`,`utf-8`),console.log(` Created .mcp.json`))},writeInstructions(e,r){let i=s(e,`CLAUDE.md`),c=o(e);a(i,`${n(c,r)}\n---\n\n${t(c,r)}`,`utf-8`),console.log(` Updated CLAUDE.md`)},writeAgentsMd(e,t){}},p={scaffoldDir:`general`,writeMcpConfig(e,t){let n=s(e,`.cursor`),o=s(n,`mcp.json`);r(o)||(i(n,{recursive:!0}),a(o,`${JSON.stringify(u(t),null,2)}\n`,`utf-8`),console.log(` Created .cursor/mcp.json`))},writeInstructions(e,r){let c=s(e,`.cursor`,`rules`),l=s(c,`kb.mdc`);i(c,{recursive:!0});let u=o(e);a(l,`${n(u,r)}\n---\n\n${t(u,r)}`,`utf-8`),console.log(` Updated .cursor/rules/kb.mdc`)},writeAgentsMd(e,t){}},m={scaffoldDir:`general`,writeMcpConfig(e,t){let n=s(e,`.vscode`),o=s(n,`mcp.json`);r(o)||(i(n,{recursive:!0}),a(o,`${JSON.stringify(l(t),null,2)}\n`,`utf-8`),console.log(` Created .vscode/mcp.json (Windsurf-compatible)`))},writeInstructions(e,r){let 
i=s(e,`.windsurfrules`),c=o(e);a(i,`${n(c,r)}\n---\n\n${t(c,r)}`,`utf-8`),console.log(` Updated .windsurfrules`)},writeAgentsMd(e,t){}};function h(e){switch(e){case`copilot`:return d;case`claude-code`:return f;case`cursor`:return p;case`windsurf`:return m}}export{f as claudeCodeAdapter,d as copilotAdapter,p as cursorAdapter,c as detectIde,h as getAdapter,m as windsurfAdapter};
1
+ import{MCP_SERVER_ENTRY as e}from"./constants.js";import{buildAgentsMd as t,buildCopilotInstructions as n}from"./templates.js";import{existsSync as r,mkdirSync as i,unlinkSync as a,writeFileSync as o}from"node:fs";import{basename as s,resolve as c}from"node:path";function l(e){return r(c(e,`.cursor`))?`cursor`:r(c(e,`.claude`))?`claude-code`:r(c(e,`.windsurf`))?`windsurf`:`copilot`}function u(t){return{servers:{[t]:{...e}}}}function d(t){let{type:n,...r}=e;return{mcpServers:{[t]:r}}}const f={scaffoldDir:`general`,writeMcpConfig(e,t){let n=c(e,`.vscode`),a=c(n,`mcp.json`);r(a)||(i(n,{recursive:!0}),o(a,`${JSON.stringify(u(t),null,2)}\n`,`utf-8`),console.log(` Created .vscode/mcp.json`))},writeInstructions(e,t){let r=c(e,`.github`),a=c(r,`copilot-instructions.md`);i(r,{recursive:!0}),o(a,n(s(e),t),`utf-8`),console.log(` Updated .github/copilot-instructions.md`)},writeAgentsMd(e,n){o(c(e,`AGENTS.md`),t(s(e),n),`utf-8`),console.log(` Updated AGENTS.md`)}},p={scaffoldDir:`general`,writeMcpConfig(e,t){let n=c(e,`.mcp.json`);r(n)||(o(n,`${JSON.stringify(d(t),null,2)}\n`,`utf-8`),console.log(` Created .mcp.json`))},writeInstructions(e,r){let i=c(e,`CLAUDE.md`),a=s(e);o(i,`${n(a,r)}\n---\n\n${t(a,r)}`,`utf-8`),console.log(` Updated CLAUDE.md`)},writeAgentsMd(e,t){}},m={scaffoldDir:`general`,writeMcpConfig(e,t){let n=c(e,`.cursor`),a=c(n,`mcp.json`);r(a)||(i(n,{recursive:!0}),o(a,`${JSON.stringify(d(t),null,2)}\n`,`utf-8`),console.log(` Created .cursor/mcp.json`))},writeInstructions(e,l){let u=c(e,`.cursor`,`rules`),d=c(u,`aikit.mdc`);i(u,{recursive:!0});let f=s(e);o(d,`${n(f,l)}\n---\n\n${t(f,l)}`,`utf-8`),console.log(` Updated .cursor/rules/aikit.mdc`);let p=c(u,`kb.mdc`);r(p)&&p!==d&&(a(p),console.log(` Removed legacy .cursor/rules/kb.mdc`))},writeAgentsMd(e,t){}},h={scaffoldDir:`general`,writeMcpConfig(e,t){let n=c(e,`.vscode`),a=c(n,`mcp.json`);r(a)||(i(n,{recursive:!0}),o(a,`${JSON.stringify(u(t),null,2)}\n`,`utf-8`),console.log(` Created .vscode/mcp.json 
(Windsurf-compatible)`))},writeInstructions(e,r){let i=c(e,`.windsurfrules`),a=s(e);o(i,`${n(a,r)}\n---\n\n${t(a,r)}`,`utf-8`),console.log(` Updated .windsurfrules`)},writeAgentsMd(e,t){}};function g(e){switch(e){case`copilot`:return f;case`claude-code`:return p;case`cursor`:return m;case`windsurf`:return h}}export{p as claudeCodeAdapter,f as copilotAdapter,m as cursorAdapter,l as detectIde,g as getAdapter,h as windsurfAdapter};
@@ -44,8 +44,8 @@ declare function writeVscodeSettings(idePath: UserLevelIdePath, force?: boolean)
44
44
  * Each IDE has its own global discovery path:
45
45
  * - VS Code / VSCodium: ~/.copilot/ (agents/, prompts/, skills/, instructions/)
46
46
  * - Claude Code: ~/.claude/ (CLAUDE.md, agents/)
47
- * - Cursor: ~/.cursor/ (rules/kb.mdc, agents/, prompts/, skills/)
48
- * - Windsurf: ~/.windsurf/ (rules/kb.md, agents/, prompts/, skills/)
47
+ * - Cursor: ~/.cursor/ (rules/aikit.mdc, agents/, prompts/, skills/)
48
+ * - Windsurf: ~/.windsurf/ (rules/aikit.md, agents/, prompts/, skills/)
49
49
  *
50
50
  * Multiple IDEs may share the same root (e.g. VS Code + VSCodium both use ~/.copilot/).
51
51
  * We deduplicate scaffold files but generate IDE-specific instruction files.
@@ -1,5 +1,5 @@
1
- import{FLOW_DIRS as e,MCP_SERVER_ENTRY as t,SERVER_NAME as n,SKILL_NAMES as r,VSCODE_SETTINGS as i}from"./constants.js";import{buildAgentsMd as a,buildCopilotInstructions as o}from"./templates.js";import{smartCopySubdir as s}from"./scaffold.js";import{existsSync as c,mkdirSync as l,readFileSync as u,writeFileSync as d}from"node:fs";import{dirname as f,resolve as p}from"node:path";import{fileURLToPath as m}from"node:url";import{getGlobalDataDir as h,saveRegistry as g}from"../../../../core/dist/index.js";import{homedir as _}from"node:os";function v(){let e=_(),t=process.platform,n=[],r=p(e,`.copilot`),i=p(r,`instructions`),a=p(e,`.claude`),o=p(e,`.cursor`),s=p(e,`.windsurf`);if(t===`win32`){let t=process.env.APPDATA??p(e,`AppData`,`Roaming`);n.push({ide:`VS Code`,configDir:p(t,`Code`,`User`),mcpConfigPath:p(t,`Code`,`User`,`mcp.json`),globalScaffoldRoot:r,instructionsRoot:i},{ide:`VS Code Insiders`,configDir:p(t,`Code - Insiders`,`User`),mcpConfigPath:p(t,`Code - Insiders`,`User`,`mcp.json`),globalScaffoldRoot:r,instructionsRoot:i},{ide:`VSCodium`,configDir:p(t,`VSCodium`,`User`),mcpConfigPath:p(t,`VSCodium`,`User`,`mcp.json`),globalScaffoldRoot:r,instructionsRoot:i},{ide:`Cursor`,configDir:p(t,`Cursor`,`User`),mcpConfigPath:p(t,`Cursor`,`User`,`mcp.json`),globalScaffoldRoot:o,instructionsRoot:null},{ide:`Cursor Nightly`,configDir:p(t,`Cursor Nightly`,`User`),mcpConfigPath:p(t,`Cursor Nightly`,`User`,`mcp.json`),globalScaffoldRoot:o,instructionsRoot:null},{ide:`Windsurf`,configDir:p(t,`Windsurf`,`User`),mcpConfigPath:p(t,`Windsurf`,`User`,`mcp.json`),globalScaffoldRoot:s,instructionsRoot:null})}else if(t===`darwin`){let t=p(e,`Library`,`Application Support`);n.push({ide:`VS Code`,configDir:p(t,`Code`,`User`),mcpConfigPath:p(t,`Code`,`User`,`mcp.json`),globalScaffoldRoot:r,instructionsRoot:i},{ide:`VS Code Insiders`,configDir:p(t,`Code - Insiders`,`User`),mcpConfigPath:p(t,`Code - 
Insiders`,`User`,`mcp.json`),globalScaffoldRoot:r,instructionsRoot:i},{ide:`VSCodium`,configDir:p(t,`VSCodium`,`User`),mcpConfigPath:p(t,`VSCodium`,`User`,`mcp.json`),globalScaffoldRoot:r,instructionsRoot:i},{ide:`Cursor`,configDir:p(t,`Cursor`,`User`),mcpConfigPath:p(t,`Cursor`,`User`,`mcp.json`),globalScaffoldRoot:o,instructionsRoot:null},{ide:`Cursor Nightly`,configDir:p(t,`Cursor Nightly`,`User`),mcpConfigPath:p(t,`Cursor Nightly`,`User`,`mcp.json`),globalScaffoldRoot:o,instructionsRoot:null},{ide:`Windsurf`,configDir:p(t,`Windsurf`,`User`),mcpConfigPath:p(t,`Windsurf`,`User`,`mcp.json`),globalScaffoldRoot:s,instructionsRoot:null})}else{let t=process.env.XDG_CONFIG_HOME??p(e,`.config`);n.push({ide:`VS Code`,configDir:p(t,`Code`,`User`),mcpConfigPath:p(t,`Code`,`User`,`mcp.json`),globalScaffoldRoot:r,instructionsRoot:i},{ide:`VS Code Insiders`,configDir:p(t,`Code - Insiders`,`User`),mcpConfigPath:p(t,`Code - Insiders`,`User`,`mcp.json`),globalScaffoldRoot:r,instructionsRoot:i},{ide:`VSCodium`,configDir:p(t,`VSCodium`,`User`),mcpConfigPath:p(t,`VSCodium`,`User`,`mcp.json`),globalScaffoldRoot:r,instructionsRoot:i},{ide:`Cursor`,configDir:p(t,`Cursor`,`User`),mcpConfigPath:p(t,`Cursor`,`User`,`mcp.json`),globalScaffoldRoot:o,instructionsRoot:null},{ide:`Cursor Nightly`,configDir:p(t,`Cursor Nightly`,`User`),mcpConfigPath:p(t,`Cursor Nightly`,`User`,`mcp.json`),globalScaffoldRoot:o,instructionsRoot:null},{ide:`Windsurf`,configDir:p(t,`Windsurf`,`User`),mcpConfigPath:p(t,`Windsurf`,`User`,`mcp.json`),globalScaffoldRoot:s,instructionsRoot:null})}return n.push({ide:`Claude Code`,configDir:p(e,`.claude`),mcpConfigPath:p(e,`.claude`,`mcp.json`),globalScaffoldRoot:a,instructionsRoot:null}),n.filter(e=>c(e.configDir))}function y(e,n,r=!1){let{mcpConfigPath:i,configDir:a}=e,o={...t},s={};if(c(i)){try{let e=u(i,`utf-8`);s=JSON.parse(e)}catch{let e=`${i}.bak`;d(e,u(i,`utf-8`),`utf-8`),console.log(` Backed up invalid ${i} to 
${e}`),s={}}if((s.servers??s.mcpServers??{})[n]&&!r){console.log(` ${e.ide}: ${n} already configured (use --force to update)`);return}}let f=new Set([`VS Code`,`VS Code Insiders`,`VSCodium`,`Windsurf`]).has(e.ide)?`servers`:`mcpServers`,p=s[f]??{};p[n]=o,s[f]=p,l(a,{recursive:!0}),d(i,`${JSON.stringify(s,null,2)}\n`,`utf-8`),console.log(` ${e.ide}: configured ${n} in ${i}`)}const b=new Set([`VS Code`,`VS Code Insiders`,`VSCodium`]);function x(e,t=!1){if(!b.has(e.ide))return;let n=p(e.configDir,`settings.json`),r={};if(c(n))try{let e=u(n,`utf-8`);r=JSON.parse(e)}catch{console.log(` ${e.ide}: skipped settings.json (invalid JSON)`);return}let a=!1;for(let[e,n]of Object.entries(i))if(typeof n==`object`&&n){let t=typeof r[e]==`object`&&r[e]!==null?r[e]:{},i={...t,...n};JSON.stringify(i)!==JSON.stringify(t)&&(r[e]=i,a=!0)}else (t||!(e in r))&&(r[e]=n,a=!0);a&&(d(n,`${JSON.stringify(r,null,2)}\n`,`utf-8`),console.log(` ${e.ide}: updated settings.json`))}function S(t,n,i,u,f=!1){let m=new Set;for(let e of n)e.globalScaffoldRoot&&m.add(e.globalScaffoldRoot);if(m.size===0){console.log(` No IDEs with global scaffold support detected.`);return}let h=p(t,`scaffold`,`general`);for(let n of m){s(h,n,`agents`,u,f),s(h,n,`prompts`,u,f);let i=0;for(let e of r)c(p(h,`skills`,e))&&(s(h,n,`skills/${e}`,u,f),i++);for(let r of e){let e=p(t,`scaffold`,`flows`,r);c(p(e,`skills`))&&s(e,p(n,`flows`,r),`skills`,u,f)}console.log(` ${n}: scaffold updated (${i} skills)`)}let g=new Set,_=o(`aikit`,i),v=a(`aikit`,i);for(let e of n){if(!e.globalScaffoldRoot)continue;let t=e.globalScaffoldRoot;if(e.ide===`Claude Code`){let e=p(t,`CLAUDE.md`);d(e,`${_}\n---\n\n${v}`,`utf-8`),g.add(e)}else if(e.ide===`VS Code`||e.ide===`VS Code Insiders`||e.ide===`VSCodium`){let n=e.instructionsRoot??t;l(n,{recursive:!0});let r=p(n,`aikit.instructions.md`);g.has(r)||(d(r,`---\napplyTo: "**"\n---\n\n${_}\n---\n\n${v}`,`utf-8`),g.add(r))}else if(e.ide===`Cursor`||e.ide===`Cursor Nightly`){let 
e=p(t,`rules`);l(e,{recursive:!0});let n=p(e,`aikit.mdc`);g.has(n)||(d(n,`${_}\n---\n\n${v}`,`utf-8`),g.add(n))}else if(e.ide===`Windsurf`){let e=p(t,`rules`);l(e,{recursive:!0});let n=p(e,`aikit.md`);g.has(n)||(d(n,`${_}\n---\n\n${v}`,`utf-8`),g.add(n))}}g.size>0&&console.log(` Instruction files: ${[...g].join(`, `)}`)}async function C(e){let t=n,r=p(f(m(import.meta.url)),`..`,`..`,`..`,`..`,`..`,`package.json`),i=JSON.parse(u(r,`utf-8`)).version;console.log(`Initializing @vpxa/aikit v${i}...\n`);let a=h();l(a,{recursive:!0}),console.log(` Global data store: ${a}`),g({version:1,workspaces:{}}),console.log(` Created registry.json`);let o=v();if(o.length===0)console.log(`
2
- No supported IDEs detected. You can manually add the MCP server config.`);else{console.log(`\n Detected ${o.length} IDE(s):`);for(let n of o)y(n,t,e.force),x(n,e.force)}let s=p(f(m(import.meta.url)),`..`,`..`,`..`,`..`,`..`);console.log(`
3
- Installing scaffold files:`),S(s,o,t,i,e.force),console.log(`
1
+ import{FLOW_DIRS as e,MCP_SERVER_ENTRY as t,SERVER_NAME as n,SKILL_NAMES as r,VSCODE_SETTINGS as i}from"./constants.js";import{buildAgentsMd as a,buildCopilotInstructions as o}from"./templates.js";import{smartCopySubdir as s}from"./scaffold.js";import{existsSync as c,mkdirSync as l,readFileSync as u,unlinkSync as d,writeFileSync as f}from"node:fs";import{dirname as p,resolve as m}from"node:path";import{fileURLToPath as h}from"node:url";import{getGlobalDataDir as g,saveRegistry as _}from"../../../../core/dist/index.js";import{homedir as v}from"node:os";function y(){let e=v(),t=process.platform,n=[],r=m(e,`.copilot`),i=m(r,`instructions`),a=m(e,`.claude`),o=m(e,`.cursor`),s=m(e,`.windsurf`);if(t===`win32`){let t=process.env.APPDATA??m(e,`AppData`,`Roaming`);n.push({ide:`VS Code`,configDir:m(t,`Code`,`User`),mcpConfigPath:m(t,`Code`,`User`,`mcp.json`),globalScaffoldRoot:r,instructionsRoot:i},{ide:`VS Code Insiders`,configDir:m(t,`Code - Insiders`,`User`),mcpConfigPath:m(t,`Code - Insiders`,`User`,`mcp.json`),globalScaffoldRoot:r,instructionsRoot:i},{ide:`VSCodium`,configDir:m(t,`VSCodium`,`User`),mcpConfigPath:m(t,`VSCodium`,`User`,`mcp.json`),globalScaffoldRoot:r,instructionsRoot:i},{ide:`Cursor`,configDir:m(t,`Cursor`,`User`),mcpConfigPath:m(t,`Cursor`,`User`,`mcp.json`),globalScaffoldRoot:o,instructionsRoot:null},{ide:`Cursor Nightly`,configDir:m(t,`Cursor Nightly`,`User`),mcpConfigPath:m(t,`Cursor Nightly`,`User`,`mcp.json`),globalScaffoldRoot:o,instructionsRoot:null},{ide:`Windsurf`,configDir:m(t,`Windsurf`,`User`),mcpConfigPath:m(t,`Windsurf`,`User`,`mcp.json`),globalScaffoldRoot:s,instructionsRoot:null})}else if(t===`darwin`){let t=m(e,`Library`,`Application Support`);n.push({ide:`VS Code`,configDir:m(t,`Code`,`User`),mcpConfigPath:m(t,`Code`,`User`,`mcp.json`),globalScaffoldRoot:r,instructionsRoot:i},{ide:`VS Code Insiders`,configDir:m(t,`Code - Insiders`,`User`),mcpConfigPath:m(t,`Code - 
Insiders`,`User`,`mcp.json`),globalScaffoldRoot:r,instructionsRoot:i},{ide:`VSCodium`,configDir:m(t,`VSCodium`,`User`),mcpConfigPath:m(t,`VSCodium`,`User`,`mcp.json`),globalScaffoldRoot:r,instructionsRoot:i},{ide:`Cursor`,configDir:m(t,`Cursor`,`User`),mcpConfigPath:m(t,`Cursor`,`User`,`mcp.json`),globalScaffoldRoot:o,instructionsRoot:null},{ide:`Cursor Nightly`,configDir:m(t,`Cursor Nightly`,`User`),mcpConfigPath:m(t,`Cursor Nightly`,`User`,`mcp.json`),globalScaffoldRoot:o,instructionsRoot:null},{ide:`Windsurf`,configDir:m(t,`Windsurf`,`User`),mcpConfigPath:m(t,`Windsurf`,`User`,`mcp.json`),globalScaffoldRoot:s,instructionsRoot:null})}else{let t=process.env.XDG_CONFIG_HOME??m(e,`.config`);n.push({ide:`VS Code`,configDir:m(t,`Code`,`User`),mcpConfigPath:m(t,`Code`,`User`,`mcp.json`),globalScaffoldRoot:r,instructionsRoot:i},{ide:`VS Code Insiders`,configDir:m(t,`Code - Insiders`,`User`),mcpConfigPath:m(t,`Code - Insiders`,`User`,`mcp.json`),globalScaffoldRoot:r,instructionsRoot:i},{ide:`VSCodium`,configDir:m(t,`VSCodium`,`User`),mcpConfigPath:m(t,`VSCodium`,`User`,`mcp.json`),globalScaffoldRoot:r,instructionsRoot:i},{ide:`Cursor`,configDir:m(t,`Cursor`,`User`),mcpConfigPath:m(t,`Cursor`,`User`,`mcp.json`),globalScaffoldRoot:o,instructionsRoot:null},{ide:`Cursor Nightly`,configDir:m(t,`Cursor Nightly`,`User`),mcpConfigPath:m(t,`Cursor Nightly`,`User`,`mcp.json`),globalScaffoldRoot:o,instructionsRoot:null},{ide:`Windsurf`,configDir:m(t,`Windsurf`,`User`),mcpConfigPath:m(t,`Windsurf`,`User`,`mcp.json`),globalScaffoldRoot:s,instructionsRoot:null})}return n.push({ide:`Claude Code`,configDir:m(e,`.claude`),mcpConfigPath:m(e,`.claude`,`mcp.json`),globalScaffoldRoot:a,instructionsRoot:null}),n.filter(e=>c(e.configDir))}function b(e,n,r=!1){let{mcpConfigPath:i,configDir:a}=e,o={...t},s={};if(c(i)){try{let e=u(i,`utf-8`);s=JSON.parse(e)}catch{let e=`${i}.bak`;f(e,u(i,`utf-8`),`utf-8`),console.log(` Backed up invalid ${i} to 
${e}`),s={}}if((s.servers??s.mcpServers??{})[n]&&!r){console.log(` ${e.ide}: ${n} already configured (use --force to update)`);return}}let d=new Set([`VS Code`,`VS Code Insiders`,`VSCodium`,`Windsurf`]).has(e.ide)?`servers`:`mcpServers`,p=s[d]??{};p[n]=o,s[d]=p,l(a,{recursive:!0}),f(i,`${JSON.stringify(s,null,2)}\n`,`utf-8`),console.log(` ${e.ide}: configured ${n} in ${i}`)}const x=new Set([`VS Code`,`VS Code Insiders`,`VSCodium`]);function S(e,t=!1){if(!x.has(e.ide))return;let n=m(e.configDir,`settings.json`),r={};if(c(n))try{let e=u(n,`utf-8`);r=JSON.parse(e)}catch{console.log(` ${e.ide}: skipped settings.json (invalid JSON)`);return}let a=!1;for(let[e,n]of Object.entries(i))if(typeof n==`object`&&n){let t=typeof r[e]==`object`&&r[e]!==null?r[e]:{},i={...t,...n};JSON.stringify(i)!==JSON.stringify(t)&&(r[e]=i,a=!0)}else (t||!(e in r))&&(r[e]=n,a=!0);a&&(f(n,`${JSON.stringify(r,null,2)}\n`,`utf-8`),console.log(` ${e.ide}: updated settings.json`))}function C(t,n,i,u,d=!1){let p=new Set;for(let e of n)e.globalScaffoldRoot&&p.add(e.globalScaffoldRoot);if(p.size===0){console.log(` No IDEs with global scaffold support detected.`);return}let h=m(t,`scaffold`,`general`);for(let n of p){s(h,n,`agents`,u,d),s(h,n,`prompts`,u,d);let i=0;for(let e of r)c(m(h,`skills`,e))&&(s(h,n,`skills/${e}`,u,d),i++);for(let r of e){let e=m(t,`scaffold`,`flows`,r);c(m(e,`skills`))&&s(e,m(n,`flows`,r),`skills`,u,d)}console.log(` ${n}: scaffold updated (${i} skills)`)}let g=new Set,_=o(`aikit`,i),v=a(`aikit`,i);for(let e of n){if(!e.globalScaffoldRoot)continue;let t=e.globalScaffoldRoot;if(e.ide===`Claude Code`){let e=m(t,`CLAUDE.md`);f(e,`${_}\n---\n\n${v}`,`utf-8`),g.add(e)}else if(e.ide===`VS Code`||e.ide===`VS Code Insiders`||e.ide===`VSCodium`){let n=e.instructionsRoot??t;l(n,{recursive:!0});let r=m(n,`aikit.instructions.md`);g.has(r)||(f(r,`---\napplyTo: "**"\n---\n\n${_}\n---\n\n${v}`,`utf-8`),g.add(r))}else if(e.ide===`Cursor`||e.ide===`Cursor Nightly`){let 
e=m(t,`rules`);l(e,{recursive:!0});let n=m(e,`aikit.mdc`);g.has(n)||(f(n,`${_}\n---\n\n${v}`,`utf-8`),g.add(n))}else if(e.ide===`Windsurf`){let e=m(t,`rules`);l(e,{recursive:!0});let n=m(e,`aikit.md`);g.has(n)||(f(n,`${_}\n---\n\n${v}`,`utf-8`),g.add(n))}}g.size>0&&console.log(` Instruction files: ${[...g].join(`, `)}`)}function w(e){let t=[];for(let n of e){if(!n.globalScaffoldRoot)continue;let e=n.globalScaffoldRoot;if(n.ide===`VS Code`||n.ide===`VS Code Insiders`||n.ide===`VSCodium`){let r=n.instructionsRoot??e;t.push(m(r,`kb.instructions.md`))}else n.ide===`Cursor`||n.ide===`Cursor Nightly`?t.push(m(e,`rules`,`kb.mdc`)):n.ide===`Windsurf`&&t.push(m(e,`rules`,`kb.md`))}for(let e of t)c(e)&&(d(e),console.log(` Removed legacy file: ${e}`))}async function T(e){let t=n,r=m(p(h(import.meta.url)),`..`,`..`,`..`,`..`,`..`,`package.json`),i=JSON.parse(u(r,`utf-8`)).version;console.log(`Initializing @vpxa/aikit v${i}...\n`);let a=g();l(a,{recursive:!0}),console.log(` Global data store: ${a}`),_({version:1,workspaces:{}}),console.log(` Created registry.json`);let o=y();if(o.length===0)console.log(`
2
+ No supported IDEs detected. You can manually add the MCP server config.`);else{console.log(`\n Detected ${o.length} IDE(s):`);for(let n of o)b(n,t,e.force),S(n,e.force)}let s=m(p(h(import.meta.url)),`..`,`..`,`..`,`..`,`..`);console.log(`
3
+ Installing scaffold files:`),C(s,o,t,i,e.force),w(o),console.log(`
4
4
  User-level AI Kit installation complete!`),console.log(`
5
- Next steps:`),console.log(` 1. Open any workspace in your IDE`),console.log(` 2. The AI Kit server will auto-start and index the workspace`),console.log(` 3. Agents, prompts, skills & instructions are available globally`),console.log(` 4. No per-workspace init needed — just open a project and start coding`)}export{v as detectInstalledIdes,C as initUser,S as installGlobalScaffold,y as writeUserLevelMcpConfig,x as writeVscodeSettings};
5
+ Next steps:`),console.log(` 1. Open any workspace in your IDE`),console.log(` 2. The AI Kit server will auto-start and index the workspace`),console.log(` 3. Agents, prompts, skills & instructions are available globally`),console.log(` 4. No per-workspace init needed — just open a project and start coding`)}export{y as detectInstalledIdes,T as initUser,C as installGlobalScaffold,b as writeUserLevelMcpConfig,S as writeVscodeSettings};
@@ -1,3 +1,3 @@
1
- import{BackgroundTaskScheduler as e}from"./background-task.js";import{clearCompletionCache as t}from"./completions.js";import{CuratedKnowledgeManager as n}from"./curated-manager.js";import{createElicitor as r,noopElicitor as i}from"./elicitor.js";import{IdleTimer as a}from"./idle-timer.js";import{bridgeMcpLogging as o}from"./mcp-logging.js";import{MemoryMonitor as s}from"./memory-monitor.js";import{registerPrompts as c}from"./prompts.js";import{installReplayInterceptor as l}from"./replay-interceptor.js";import{ResourceNotifier as u}from"./resources/resource-notifier.js";import{registerResources as d}from"./resources/resources.js";import{createSamplingClient as f}from"./sampling.js";import{installStructuredContentGuard as p}from"./structured-content-guard.js";import{getToolMeta as m}from"./tool-metadata.js";import{installToolPrefix as h}from"./tool-prefix.js";import{ToolTimeoutError as g,getToolTimeout as _,withTimeout as v}from"./tool-timeout.js";import{registerAnalyzeDependenciesTool as y,registerAnalyzeDiagramTool as b,registerAnalyzeEntryPointsTool as x,registerAnalyzePatternsTool as S,registerAnalyzeStructureTool as ee,registerAnalyzeSymbolsTool as C,registerBlastRadiusTool as w}from"./tools/analyze.tools.js";import{registerAuditTool as T}from"./tools/audit.tool.js";import{registerBrainstormTool as E}from"./tools/brainstorm.tool.js";import{initBridgeComponents as te,registerErPullTool as D,registerErPushTool as O,registerErSyncStatusTool as k}from"./tools/bridge.tools.js";import{registerConfigTool as ne}from"./tools/config.tool.js";import{registerCompactTool as A,registerDeadSymbolsTool as j,registerFileSummaryTool as M,registerFindTool as N,registerScopeMapTool as re,registerSymbolTool as P,registerTraceTool as F}from"./tools/context.tools.js";import{registerErEvolveReviewTool as ie}from"./tools/evolution.tools.js";import{registerBatchTool as ae,registerCheckTool as oe,registerDelegateTool as se,registerEvalTool as ce,registerParseOutputTool as 
le,registerTestRunTool as I}from"./tools/execution.tools.js";import{registerFlowTools as ue}from"./tools/flow.tools.js";import{registerDigestTool as de,registerEvidenceMapTool as L,registerForgeClassifyTool as R,registerForgeGroundTool as fe,registerStratumCardTool as pe}from"./tools/forge.tools.js";import{registerForgetTool as me}from"./tools/forget.tool.js";import{registerGraphTool as he}from"./tools/graph.tool.js";import{registerGuideTool as z,registerHealthTool as B,registerProcessTool as V,registerWatchTool as H,registerWebFetchTool as U}from"./tools/infra.tools.js";import{registerListTool as ge}from"./tools/list.tool.js";import{registerLookupTool as _e}from"./tools/lookup.tool.js";import{registerCodemodTool as W,registerDataTransformTool as G,registerDiffParseTool as K,registerGitContextTool as q,registerRenameTool as ve}from"./tools/manipulation.tools.js";import{registerOnboardTool as ye}from"./tools/onboard.tool.js";import{registerCheckpointTool as be,registerLaneTool as xe,registerQueueTool as Se,registerStashTool as Ce,registerWorksetTool as we}from"./tools/persistence.tools.js";import{registerErUpdatePolicyTool as Te}from"./tools/policy.tools.js";import{registerPresentTool as Ee}from"./tools/present/tool.js";import"./tools/present/index.js";import{registerProduceKnowledgeTool as De}from"./tools/produce.tool.js";import{registerReadTool as Oe}from"./tools/read.tool.js";import{registerReindexTool as ke}from"./tools/reindex.tool.js";import{registerRememberTool as Ae}from"./tools/remember.tool.js";import{registerReplayTool as je}from"./tools/replay.tool.js";import{registerRestoreTool as Me}from"./tools/restore.tool.js";import{registerSearchTool as Ne}from"./tools/search.tool.js";import{getCurrentVersion as Pe}from"./version-check.js";import{registerEarlyStatusTool as Fe,registerStatusTool as Ie}from"./tools/status.tool.js";import{registerUpdateTool as Le}from"./tools/update.tool.js";import{registerChangelogTool as Re,registerEncodeTool as ze,registerEnvTool 
as Be,registerHttpTool as Ve,registerMeasureTool as He,registerRegexTestTool as Ue,registerSchemaValidateTool as We,registerSnippetTool as Ge,registerTimeTool as J,registerWebSearchTool as Ke}from"./tools/utility.tools.js";import{existsSync as qe,statSync as Je}from"node:fs";import{resolve as Ye}from"node:path";import{AIKIT_PATHS as Xe,createLogger as Ze,serializeError as Y}from"../../core/dist/index.js";import{initializeWasm as Qe}from"../../chunker/dist/index.js";import{OnnxEmbedder as $e}from"../../embeddings/dist/index.js";import{EvolutionCollector as et,PolicyStore as tt}from"../../enterprise-bridge/dist/index.js";import{FileHashCache as nt,IncrementalIndexer as rt}from"../../indexer/dist/index.js";import{SqliteGraphStore as it,createStore as at}from"../../store/dist/index.js";import{FileCache as ot}from"../../tools/dist/index.js";import{completable as st}from"@modelcontextprotocol/sdk/server/completable.js";import{McpServer as ct}from"@modelcontextprotocol/sdk/server/mcp.js";import{z as lt}from"zod";const X=Ze(`server`);async function Z(e){X.info(`Initializing AI Kit components`);let[t,r,i,a]=await Promise.all([(async()=>{let t=new $e({model:e.embedding.model,dimensions:e.embedding.dimensions});return await t.initialize(),X.info(`Embedder loaded`,{modelId:t.modelId,dimensions:t.dimensions}),t})(),(async()=>{let t=await at({backend:e.store.backend,path:e.store.path});return await t.initialize(),X.info(`Store initialized`),t})(),(async()=>{let t=new it({path:e.store.path});return await t.initialize(),X.info(`Graph store initialized`),t})(),(async()=>{let e=await Qe();return e?X.info(`WASM tree-sitter enabled for AST analysis`):X.warn(`WASM tree-sitter not available; analyzers will use regex fallback`),e})()]),o=new rt(t,r),s=new nt(e.store.path);s.load(),o.setHashCache(s);let c=e.curated.path,l=new n(c,r,t);o.setGraphStore(i);let u=te(e.er),d=u?new tt(e.curated.path):void 0;d&&X.info(`Policy store initialized`,{ruleCount:d.getRules().length});let f=u?new 
et:void 0,p=Ye(e.sources[0]?.path??process.cwd(),Xe.aiKb),m=qe(p),h=e.onboardDir?qe(e.onboardDir):!1,g=m||h,_,v=m?p:e.onboardDir;if(g&&v)try{_=Je(v).mtime.toISOString()}catch{}return X.info(`Onboard state detected`,{onboardComplete:g,onboardTimestamp:_,aiKbExists:m,onboardDirExists:h}),{embedder:t,store:r,indexer:o,curated:l,graphStore:i,fileCache:new ot,bridge:u,policyStore:d,evolutionCollector:f,onboardComplete:g,onboardTimestamp:_}}function ut(e,t){let n=new ct({name:t.serverName??`aikit`,version:Pe()},{capabilities:{logging:{}}});return o(n),h(n,t.toolPrefix??``),Q(n,e,t,r(n),new u(n),f(n)),c(n,{curated:e.curated,store:e.store,graphStore:e.graphStore},t.indexMode),n}function Q(e,t,n,r,i,a,o,s){l(e),p(e),Ne(e,t.embedder,t.store,t.graphStore,t.bridge,t.evolutionCollector,a),_e(e,t.store),Ie(e,t.store,t.graphStore,t.curated,{onboardComplete:t.onboardComplete,onboardTimestamp:t.onboardTimestamp},n,o,s),ne(e,n),ke(e,t.indexer,n,t.curated,t.store,i,o),Ae(e,t.curated,t.policyStore,t.evolutionCollector,i),Le(e,t.curated,i),me(e,t.curated,i),Oe(e,t.curated),ge(e,t.curated),ee(e,t.store,t.embedder),y(e,t.store,t.embedder),C(e,t.store,t.embedder),S(e,t.store,t.embedder),x(e,t.store,t.embedder),b(e,t.store,t.embedder),w(e,t.store,t.embedder,t.graphStore),De(e,n),ye(e,t.store,t.embedder,n),he(e,t.graphStore),T(e,t.store,t.embedder);let 
c=n.sources[0]?.path??process.cwd();A(e,t.embedder,t.fileCache,c),re(e,t.embedder,t.store),N(e,t.embedder,t.store),le(e),we(e),oe(e),ae(e,t.embedder,t.store),P(e,t.embedder,t.store,t.graphStore),ce(e),I(e),Ce(e),q(e),K(e),ve(e),W(e),Me(e),M(e,t.fileCache,c),be(e),G(e),F(e,t.embedder,t.store,t.graphStore),V(e),H(e),j(e,t.embedder,t.store),se(e,a),B(e),xe(e),Se(e),U(e),z(e,o),L(e),de(e,t.embedder),R(e),pe(e,t.embedder,t.fileCache),fe(e,t.embedder,t.store),Ee(e,r),r&&E(e,r),Ke(e),Ve(e),Ue(e),ze(e),He(e),Re(e),We(e),Ge(e),Be(e),J(e),ue(e,n),t.bridge&&(O(e,t.bridge,t.evolutionCollector),D(e,t.bridge),k(e,t.bridge)),t.policyStore&&Te(e,t.policyStore),t.evolutionCollector&&ie(e,t.evolutionCollector),d(e,t.store,t.curated),je(e)}async function dt(e){let t=await Z(e),n=ut(t,e);X.info(`MCP server configured`,{toolCount:$.length,resourceCount:2});let r=async()=>{try{let n=e.sources.map(e=>e.path).join(`, `);X.info(`Running initial index`,{sourcePaths:n});let r=await t.indexer.index(e,e=>{e.phase===`crawling`||e.phase===`done`||(e.phase===`chunking`&&e.currentFile&&X.debug(`Indexing file`,{current:e.filesProcessed+1,total:e.filesTotal,file:e.currentFile}),e.phase===`cleanup`&&X.debug(`Index cleanup`,{staleEntries:e.filesTotal-e.filesProcessed}))});X.info(`Initial index complete`,{filesProcessed:r.filesProcessed,filesSkipped:r.filesSkipped,chunksCreated:r.chunksCreated,durationMs:r.durationMs});try{await t.store.createFtsIndex()}catch(e){X.warn(`FTS index creation failed`,Y(e))}try{let e=await t.curated.reindexAll();X.info(`Curated re-index complete`,{indexed:e.indexed})}catch(e){X.error(`Curated re-index failed`,Y(e))}}catch(e){X.error(`Initial index failed; will retry on aikit_reindex`,Y(e))}},i=async()=>{X.info(`Shutting down`),await Promise.all([t.embedder.shutdown().catch(()=>{}),t.graphStore.close().catch(()=>{}),t.store.close().catch(()=>{})]),process.exit(0)};process.on(`SIGINT`,i),process.on(`SIGTERM`,i);let 
a=process.ppid,o=setInterval(()=>{try{process.kill(a,0)}catch{X.info(`Parent process died; shutting down`,{parentPid:a}),clearInterval(o),i()}},5e3);return o.unref(),{server:n,runInitialIndex:r,shutdown:i}}const ft=new Set(`batch.brainstorm.changelog.check.checkpoint.codemod.compact.config.data_transform.delegate.diff_parse.digest.encode.env.eval.evidence_map.file_summary.forge_classify.git_context.graph.guide.health.http.lane.measure.onboard.parse_output.present.process.produce_knowledge.queue.read.regex_test.reindex.remember.rename.replay.restore.schema_validate.scope_map.snippet.stash.status.stratum_card.test_run.time.update.forget.list.watch.web_fetch.web_search.workset`.split(`.`)),pt=5e3,mt=new Set(`brainstorm.changelog.check.checkpoint.codemod.data_transform.delegate.diff_parse.encode.env.eval.evidence_map.forge_classify.git_context.guide.present.health.http.lane.measure.parse_output.process.produce_knowledge.queue.regex_test.rename.replay.restore.schema_validate.snippet.stash.status.test_run.time.watch.web_fetch.web_search.workset`.split(`.`));function ht(e){oe(e),ce(e),I(e),le(e),se(e),q(e),K(e),ve(e),W(e),G(e),we(e),Ce(e),be(e),Me(e),xe(e),Se(e),B(e),V(e),H(e),U(e),z(e),L(e),R(e),Ee(e),E(e,i),De(e),je(e),Fe(e),Ke(e),Ve(e),Ue(e),ze(e),He(e),Re(e),We(e),Ge(e),Be(e),J(e)}const 
$=`analyze_dependencies.analyze_diagram.analyze_entry_points.analyze_patterns.analyze_structure.analyze_symbols.audit.batch.blast_radius.brainstorm.changelog.check.checkpoint.codemod.compact.config.data_transform.dead_symbols.delegate.diff_parse.digest.encode.env.eval.evidence_map.file_summary.find.flow_info.flow_list.flow_reset.flow_start.flow_status.flow_step.forge_classify.forge_ground.forget.git_context.graph.guide.health.http.lane.list.lookup.measure.onboard.parse_output.present.process.produce_knowledge.queue.read.regex_test.reindex.remember.rename.replay.restore.schema_validate.scope_map.search.snippet.stash.status.stratum_card.symbol.test_run.time.trace.update.watch.web_fetch.web_search.workset`.split(`.`);function gt(n,i){let l=new ct({name:n.serverName??`aikit`,version:Pe()},{capabilities:{logging:{}}}),d=`initializing`,p=``,y=!1,b=null,x=null,S=null;function ee(e){if(!e||typeof e!=`object`)return[];let t=e,n=[];for(let e of[`path`,`file`,`source_path`,`sourcePath`,`filePath`]){let r=t[e];typeof r==`string`&&r&&n.push(r)}for(let e of[`changed_files`,`paths`,`files`]){let r=t[e];if(Array.isArray(r))for(let e of r){if(typeof e==`string`){n.push(e);continue}e&&typeof e==`object`&&typeof e.path==`string`&&n.push(e.path)}}if(Array.isArray(t.sources))for(let e of t.sources)e&&typeof e==`object`&&typeof e.path==`string`&&n.push(e.path);return n}let C=()=>d===`failed`?[`❌ AI Kit initialization failed — this tool is unavailable.`,``,p?`Error: ${p}`:``,``,`**35 tools are still available** and fully functional:`,`check, eval, test_run, git_context, health, measure, web_fetch, web_search,`,`regex_test, encode, stash, checkpoint, lane, process, time, env, and more.`,``,`Try restarting the MCP server to retry initialization.`].filter(Boolean).join(`
1
+ import{BackgroundTaskScheduler as e}from"./background-task.js";import{clearCompletionCache as t}from"./completions.js";import{CuratedKnowledgeManager as n}from"./curated-manager.js";import{createElicitor as r,noopElicitor as i}from"./elicitor.js";import{IdleTimer as a}from"./idle-timer.js";import{bridgeMcpLogging as o}from"./mcp-logging.js";import{MemoryMonitor as s}from"./memory-monitor.js";import{registerPrompts as c}from"./prompts.js";import{installReplayInterceptor as l}from"./replay-interceptor.js";import{ResourceNotifier as u}from"./resources/resource-notifier.js";import{registerResources as d}from"./resources/resources.js";import{createSamplingClient as f}from"./sampling.js";import{installStructuredContentGuard as p}from"./structured-content-guard.js";import{getToolMeta as m}from"./tool-metadata.js";import{installToolPrefix as h}from"./tool-prefix.js";import{ToolTimeoutError as g,getToolTimeout as _,withTimeout as v}from"./tool-timeout.js";import{registerAnalyzeDependenciesTool as y,registerAnalyzeDiagramTool as b,registerAnalyzeEntryPointsTool as x,registerAnalyzePatternsTool as S,registerAnalyzeStructureTool as ee,registerAnalyzeSymbolsTool as C,registerBlastRadiusTool as w}from"./tools/analyze.tools.js";import{registerAuditTool as T}from"./tools/audit.tool.js";import{registerBrainstormTool as E}from"./tools/brainstorm.tool.js";import{initBridgeComponents as te,registerErPullTool as D,registerErPushTool as O,registerErSyncStatusTool as k}from"./tools/bridge.tools.js";import{registerConfigTool as ne}from"./tools/config.tool.js";import{registerCompactTool as A,registerDeadSymbolsTool as j,registerFileSummaryTool as M,registerFindTool as N,registerScopeMapTool as re,registerSymbolTool as P,registerTraceTool as F}from"./tools/context.tools.js";import{registerErEvolveReviewTool as ie}from"./tools/evolution.tools.js";import{registerBatchTool as ae,registerCheckTool as oe,registerDelegateTool as se,registerEvalTool as ce,registerParseOutputTool as 
le,registerTestRunTool as I}from"./tools/execution.tools.js";import{registerFlowTools as ue}from"./tools/flow.tools.js";import{registerDigestTool as de,registerEvidenceMapTool as L,registerForgeClassifyTool as R,registerForgeGroundTool as fe,registerStratumCardTool as pe}from"./tools/forge.tools.js";import{registerForgetTool as me}from"./tools/forget.tool.js";import{registerGraphTool as he}from"./tools/graph.tool.js";import{registerGuideTool as z,registerHealthTool as B,registerProcessTool as V,registerWatchTool as H,registerWebFetchTool as U}from"./tools/infra.tools.js";import{registerListTool as ge}from"./tools/list.tool.js";import{registerLookupTool as _e}from"./tools/lookup.tool.js";import{registerCodemodTool as W,registerDataTransformTool as G,registerDiffParseTool as K,registerGitContextTool as q,registerRenameTool as ve}from"./tools/manipulation.tools.js";import{registerOnboardTool as ye}from"./tools/onboard.tool.js";import{registerCheckpointTool as be,registerLaneTool as xe,registerQueueTool as Se,registerStashTool as Ce,registerWorksetTool as we}from"./tools/persistence.tools.js";import{registerErUpdatePolicyTool as Te}from"./tools/policy.tools.js";import{registerPresentTool as Ee}from"./tools/present/tool.js";import"./tools/present/index.js";import{registerProduceKnowledgeTool as De}from"./tools/produce.tool.js";import{registerReadTool as Oe}from"./tools/read.tool.js";import{registerReindexTool as ke}from"./tools/reindex.tool.js";import{registerRememberTool as Ae}from"./tools/remember.tool.js";import{registerReplayTool as je}from"./tools/replay.tool.js";import{registerRestoreTool as Me}from"./tools/restore.tool.js";import{registerSearchTool as Ne}from"./tools/search.tool.js";import{getCurrentVersion as Pe}from"./version-check.js";import{registerEarlyStatusTool as Fe,registerStatusTool as Ie}from"./tools/status.tool.js";import{registerUpdateTool as Le}from"./tools/update.tool.js";import{registerChangelogTool as Re,registerEncodeTool as ze,registerEnvTool 
as Be,registerHttpTool as Ve,registerMeasureTool as He,registerRegexTestTool as Ue,registerSchemaValidateTool as We,registerSnippetTool as Ge,registerTimeTool as J,registerWebSearchTool as Ke}from"./tools/utility.tools.js";import{existsSync as qe,statSync as Je}from"node:fs";import{resolve as Ye}from"node:path";import{AIKIT_PATHS as Xe,createLogger as Ze,serializeError as Y}from"../../core/dist/index.js";import{initializeWasm as Qe}from"../../chunker/dist/index.js";import{OnnxEmbedder as $e}from"../../embeddings/dist/index.js";import{EvolutionCollector as et,PolicyStore as tt}from"../../enterprise-bridge/dist/index.js";import{FileHashCache as nt,IncrementalIndexer as rt}from"../../indexer/dist/index.js";import{SqliteGraphStore as it,createStore as at}from"../../store/dist/index.js";import{FileCache as ot}from"../../tools/dist/index.js";import{completable as st}from"@modelcontextprotocol/sdk/server/completable.js";import{McpServer as ct}from"@modelcontextprotocol/sdk/server/mcp.js";import{z as lt}from"zod";const X=Ze(`server`);async function Z(e){X.info(`Initializing AI Kit components`);let[t,r,i,a]=await Promise.all([(async()=>{let t=new $e({model:e.embedding.model,dimensions:e.embedding.dimensions});return await t.initialize(),X.info(`Embedder loaded`,{modelId:t.modelId,dimensions:t.dimensions}),t})(),(async()=>{let t=await at({backend:e.store.backend,path:e.store.path});return await t.initialize(),X.info(`Store initialized`),t})(),(async()=>{let t=new it({path:e.store.path});return await t.initialize(),X.info(`Graph store initialized`),t})(),(async()=>{let e=await Qe();return e?X.info(`WASM tree-sitter enabled for AST analysis`):X.warn(`WASM tree-sitter not available; analyzers will use regex fallback`),e})()]),o=new rt(t,r),s=new nt(e.store.path);s.load(),o.setHashCache(s);let c=e.curated.path,l=new n(c,r,t);o.setGraphStore(i);let u=te(e.er),d=u?new tt(e.curated.path):void 0;d&&X.info(`Policy store initialized`,{ruleCount:d.getRules().length});let f=u?new 
et:void 0,p=Ye(e.sources[0]?.path??process.cwd(),Xe.aiKb),m=qe(p),h=e.onboardDir?qe(e.onboardDir):!1,g=m||h,_,v=m?p:e.onboardDir;if(g&&v)try{_=Je(v).mtime.toISOString()}catch{}return X.info(`Onboard state detected`,{onboardComplete:g,onboardTimestamp:_,aiKbExists:m,onboardDirExists:h}),{embedder:t,store:r,indexer:o,curated:l,graphStore:i,fileCache:new ot,bridge:u,policyStore:d,evolutionCollector:f,onboardComplete:g,onboardTimestamp:_}}function ut(e,t){let n=new ct({name:t.serverName??`aikit`,version:Pe()},{capabilities:{logging:{},completions:{},prompts:{}}});return o(n),h(n,t.toolPrefix??``),Q(n,e,t,r(n),new u(n),f(n)),c(n,{curated:e.curated,store:e.store,graphStore:e.graphStore},t.indexMode),n}function Q(e,t,n,r,i,a,o,s){l(e),p(e),Ne(e,t.embedder,t.store,t.graphStore,t.bridge,t.evolutionCollector,a),_e(e,t.store),Ie(e,t.store,t.graphStore,t.curated,{onboardComplete:t.onboardComplete,onboardTimestamp:t.onboardTimestamp},n,o,s),ne(e,n),ke(e,t.indexer,n,t.curated,t.store,i,o),Ae(e,t.curated,t.policyStore,t.evolutionCollector,i),Le(e,t.curated,i),me(e,t.curated,i),Oe(e,t.curated),ge(e,t.curated),ee(e,t.store,t.embedder),y(e,t.store,t.embedder),C(e,t.store,t.embedder),S(e,t.store,t.embedder),x(e,t.store,t.embedder),b(e,t.store,t.embedder),w(e,t.store,t.embedder,t.graphStore),De(e,n),ye(e,t.store,t.embedder,n),he(e,t.graphStore),T(e,t.store,t.embedder);let 
c=n.sources[0]?.path??process.cwd();A(e,t.embedder,t.fileCache,c),re(e,t.embedder,t.store),N(e,t.embedder,t.store),le(e),we(e),oe(e),ae(e,t.embedder,t.store),P(e,t.embedder,t.store,t.graphStore),ce(e),I(e),Ce(e),q(e),K(e),ve(e),W(e),Me(e),M(e,t.fileCache,c),be(e),G(e),F(e,t.embedder,t.store,t.graphStore),V(e),H(e),j(e,t.embedder,t.store),se(e,a),B(e),xe(e),Se(e),U(e),z(e,o),L(e),de(e,t.embedder),R(e),pe(e,t.embedder,t.fileCache),fe(e,t.embedder,t.store),Ee(e,r),r&&E(e,r),Ke(e),Ve(e),Ue(e),ze(e),He(e),Re(e),We(e),Ge(e),Be(e),J(e),ue(e,n),t.bridge&&(O(e,t.bridge,t.evolutionCollector),D(e,t.bridge),k(e,t.bridge)),t.policyStore&&Te(e,t.policyStore),t.evolutionCollector&&ie(e,t.evolutionCollector),d(e,t.store,t.curated),je(e)}async function dt(e){let t=await Z(e),n=ut(t,e);X.info(`MCP server configured`,{toolCount:$.length,resourceCount:2});let r=async()=>{try{let n=e.sources.map(e=>e.path).join(`, `);X.info(`Running initial index`,{sourcePaths:n});let r=await t.indexer.index(e,e=>{e.phase===`crawling`||e.phase===`done`||(e.phase===`chunking`&&e.currentFile&&X.debug(`Indexing file`,{current:e.filesProcessed+1,total:e.filesTotal,file:e.currentFile}),e.phase===`cleanup`&&X.debug(`Index cleanup`,{staleEntries:e.filesTotal-e.filesProcessed}))});X.info(`Initial index complete`,{filesProcessed:r.filesProcessed,filesSkipped:r.filesSkipped,chunksCreated:r.chunksCreated,durationMs:r.durationMs});try{await t.store.createFtsIndex()}catch(e){X.warn(`FTS index creation failed`,Y(e))}try{let e=await t.curated.reindexAll();X.info(`Curated re-index complete`,{indexed:e.indexed})}catch(e){X.error(`Curated re-index failed`,Y(e))}}catch(e){X.error(`Initial index failed; will retry on aikit_reindex`,Y(e))}},i=async()=>{X.info(`Shutting down`),await Promise.all([t.embedder.shutdown().catch(()=>{}),t.graphStore.close().catch(()=>{}),t.store.close().catch(()=>{})]),process.exit(0)};process.on(`SIGINT`,i),process.on(`SIGTERM`,i);let 
a=process.ppid,o=setInterval(()=>{try{process.kill(a,0)}catch{X.info(`Parent process died; shutting down`,{parentPid:a}),clearInterval(o),i()}},5e3);return o.unref(),{server:n,runInitialIndex:r,shutdown:i}}const ft=new Set(`batch.brainstorm.changelog.check.checkpoint.codemod.compact.config.data_transform.delegate.diff_parse.digest.encode.env.eval.evidence_map.file_summary.forge_classify.git_context.graph.guide.health.http.lane.measure.onboard.parse_output.present.process.produce_knowledge.queue.read.regex_test.reindex.remember.rename.replay.restore.schema_validate.scope_map.snippet.stash.status.stratum_card.test_run.time.update.forget.list.watch.web_fetch.web_search.workset`.split(`.`)),pt=5e3,mt=new Set(`brainstorm.changelog.check.checkpoint.codemod.data_transform.delegate.diff_parse.encode.env.eval.evidence_map.forge_classify.git_context.guide.present.health.http.lane.measure.parse_output.process.produce_knowledge.queue.regex_test.rename.replay.restore.schema_validate.snippet.stash.status.test_run.time.watch.web_fetch.web_search.workset`.split(`.`));function ht(e){oe(e),ce(e),I(e),le(e),se(e),q(e),K(e),ve(e),W(e),G(e),we(e),Ce(e),be(e),Me(e),xe(e),Se(e),B(e),V(e),H(e),U(e),z(e),L(e),R(e),Ee(e),E(e,i),De(e),je(e),Fe(e),Ke(e),Ve(e),Ue(e),ze(e),He(e),Re(e),We(e),Ge(e),Be(e),J(e)}const 
$=`analyze_dependencies.analyze_diagram.analyze_entry_points.analyze_patterns.analyze_structure.analyze_symbols.audit.batch.blast_radius.brainstorm.changelog.check.checkpoint.codemod.compact.config.data_transform.dead_symbols.delegate.diff_parse.digest.encode.env.eval.evidence_map.file_summary.find.flow_info.flow_list.flow_reset.flow_start.flow_status.flow_step.forge_classify.forge_ground.forget.git_context.graph.guide.health.http.lane.list.lookup.measure.onboard.parse_output.present.process.produce_knowledge.queue.read.regex_test.reindex.remember.rename.replay.restore.schema_validate.scope_map.search.snippet.stash.status.stratum_card.symbol.test_run.time.trace.update.watch.web_fetch.web_search.workset`.split(`.`);function gt(n,i){let l=new ct({name:n.serverName??`aikit`,version:Pe()},{capabilities:{logging:{},completions:{},prompts:{}}}),d=`initializing`,p=``,y=!1,b=null,x=null,S=null;function ee(e){if(!e||typeof e!=`object`)return[];let t=e,n=[];for(let e of[`path`,`file`,`source_path`,`sourcePath`,`filePath`]){let r=t[e];typeof r==`string`&&r&&n.push(r)}for(let e of[`changed_files`,`paths`,`files`]){let r=t[e];if(Array.isArray(r))for(let e of r){if(typeof e==`string`){n.push(e);continue}e&&typeof e==`object`&&typeof e.path==`string`&&n.push(e.path)}}if(Array.isArray(t.sources))for(let e of t.sources)e&&typeof e==`object`&&typeof e.path==`string`&&n.push(e.path);return n}let C=()=>d===`failed`?[`❌ AI Kit initialization failed — this tool is unavailable.`,``,p?`Error: ${p}`:``,``,`**35 tools are still available** and fully functional:`,`check, eval, test_run, git_context, health, measure, web_fetch, web_search,`,`regex_test, encode, stash, checkpoint, lane, process, time, env, and more.`,``,`Try restarting the MCP server to retry initialization.`].filter(Boolean).join(`
2
2
  `):[`AI Kit is still initializing (loading embeddings model & store).`,``,`**35 tools are already available** while initialization completes — including:`,`check, eval, test_run, git_context, health, measure, web_fetch, web_search,`,`regex_test, encode, stash, checkpoint, lane, process, time, env, and more.`,``,`This tool requires the AI Kit index. Please retry in a few seconds,`,`or use one of the available tools above in the meantime.`].join(`
3
- `);o(l),h(l,n.toolPrefix??``);let w=l.sendToolListChanged.bind(l);l.sendToolListChanged=()=>{};let T=[];for(let e of $){let t=m(e),n=l.registerTool(e,{title:t.title,description:`${t.title} — initializing, available shortly`,inputSchema:{},annotations:t.annotations},async()=>({content:[{type:`text`,text:C()}]}));mt.has(e)?n.remove():T.push(n)}ht(l),l.sendToolListChanged=w;let E=l.registerResource(`aikit-status`,`aikit://status`,{description:`AI Kit status (initializing...)`,mimeType:`text/plain`},async()=>({contents:[{uri:`aikit://status`,text:`AI Kit is initializing...`,mimeType:`text/plain`}]})),te=l.registerPrompt(`_init`,{description:`AI Kit is initializing prompts...`,argsSchema:{_prime:st(lt.string().optional(),()=>[])}},async()=>({messages:[{role:`user`,content:{type:`text`,text:C()}}]})),D,O=new Promise(e=>{D=e}),k,ne=new Promise(e=>{k=e}),A=()=>k?.(),j=(async()=>{await ne;let e;try{e=await Z(n)}catch(e){d=`failed`,p=e instanceof Error?e.message:String(e),X.error(`AI Kit initialization failed — server continuing with zero-dep tools only`,{error:p});return}let o=l.sendToolListChanged.bind(l);l.sendToolListChanged=()=>{};let m=l.sendPromptListChanged.bind(l);l.sendPromptListChanged=()=>{};let h=l.sendResourceListChanged.bind(l);l.sendResourceListChanged=()=>{};for(let e of T)e.remove();E.remove(),te.remove();let C=l._registeredTools??{};for(let e of mt)C[e]?.remove();let w=new u(l),O=f(l);Q(l,e,n,r(l),w,O,i,i===`smart`?(()=>{let e=S;return e?.getState?e.getState():null}):null),c(l,{curated:e.curated,store:e.store,graphStore:e.graphStore},i),l.sendToolListChanged=o,l.sendPromptListChanged=m,l.sendResourceListChanged=h,Promise.resolve(l.sendToolListChanged()).catch(()=>{}),Promise.resolve(l.sendPromptListChanged()).catch(()=>{}),Promise.resolve(l.sendResourceListChanged()).catch(()=>{});let k=l._registeredTools??{};for(let[t,n]of Object.entries(k)){if(ft.has(t))continue;let r=n.handler;n.handler=async(...n)=>{if(!e.indexer.isIndexing)return r(...n);let 
i=y?`re-indexing`:`running initial index`,a=new Promise(e=>setTimeout(()=>e({content:[{type:`text`,text:`⏳ AI Kit is ${i}. The tool "${t}" timed out waiting for index data (${pt/1e3}s).\n\nThe existing index may be temporarily locked. Please retry shortly — indexing will complete automatically.`}]}),pt));return Promise.race([r(...n),a])}}for(let[e,t]of Object.entries(k)){let n=t.handler,r=_(e);t.handler=async(...t)=>{try{return await v(()=>n(...t),r,e)}catch(t){if(t instanceof g)return{content:[{type:`text`,text:`⏳ Tool "${e}" timed out after ${r/1e3}s. This may indicate a long-running operation. Please retry or break the task into smaller steps.`}]};throw t}}}let A=Object.keys(k).length;A<$.length&&X.warn(`ALL_TOOL_NAMES count mismatch`,{expectedToolCount:$.length,registeredToolCount:A}),X.info(`MCP server configured`,{toolCount:$.length,resourceCount:4});let j=new s;j.onPressure((e,n)=>{e===`warning`&&t(),e===`critical`&&(X.warn(`Memory pressure critical — consider restarting`,{rssMB:Math.round(n/1024/1024)}),t())}),j.start();let M=new a;x=M,M.onIdle(async()=>{if(N.isRunning||e.indexer.isIndexing){X.info(`Idle cleanup deferred — background tasks still running`),M.touch();return}X.info(`Idle cleanup: closing store and graph connections`);try{await Promise.all([e.store.close().catch(()=>{}),e.graphStore.close().catch(()=>{})])}catch{}}),M.touch();for(let e of Object.values(k)){let t=e.handler;e.handler=async(...e)=>{if(M.touch(),S){let t=ee(e[0]);t.length>0&&S.prioritize(...t)}return t(...e)}}b=e,D?.(e)})(),M=async()=>{let e=await O;x?.setBusy(!0);try{let t=n.sources.map(e=>e.path).join(`, `);X.info(`Running initial index`,{sourcePaths:t});let r=await e.indexer.index(n,e=>{e.phase===`crawling`||e.phase===`done`||(e.phase===`chunking`&&e.currentFile&&X.debug(`Indexing file`,{current:e.filesProcessed+1,total:e.filesTotal,file:e.currentFile}),e.phase===`cleanup`&&X.debug(`Index cleanup`,{staleEntries:e.filesTotal-e.filesProcessed}))});y=!0,X.info(`Initial index 
complete`,{filesProcessed:r.filesProcessed,filesSkipped:r.filesSkipped,chunksCreated:r.chunksCreated,durationMs:r.durationMs});try{await e.store.createFtsIndex()}catch(e){X.warn(`FTS index creation failed`,Y(e))}try{let t=await e.curated.reindexAll();X.info(`Curated re-index complete`,{indexed:t.indexed})}catch(e){X.error(`Curated re-index failed`,Y(e))}}catch(e){X.error(`Initial index failed; will retry on aikit_reindex`,Y(e))}finally{x?.setBusy(!1)}},N=new e,re=()=>N.schedule({name:`initial-index`,fn:M}),P=process.ppid,F=setInterval(()=>{try{process.kill(P,0)}catch{X.info(`Parent process died; shutting down`,{parentPid:P}),clearInterval(F),O.then(async e=>{await Promise.all([e.embedder.shutdown().catch(()=>{}),e.graphStore.close().catch(()=>{}),e.store.close().catch(()=>{})])}).catch(()=>{}).finally(()=>process.exit(0))}},5e3);return F.unref(),{server:l,startInit:A,ready:j,runInitialIndex:re,get kb(){return b},scheduler:N,setSmartScheduler(e){S=e}}}export{$ as ALL_TOOL_NAMES,gt as createLazyServer,ut as createMcpServer,dt as createServer,Z as initializeKnowledgeBase,Q as registerMcpTools};
3
+ `);o(l),h(l,n.toolPrefix??``);let w=l.sendToolListChanged.bind(l);l.sendToolListChanged=()=>{};let T=[];for(let e of $){let t=m(e),n=l.registerTool(e,{title:t.title,description:`${t.title} — initializing, available shortly`,inputSchema:{},annotations:t.annotations},async()=>({content:[{type:`text`,text:C()}]}));mt.has(e)?n.remove():T.push(n)}ht(l),l.sendToolListChanged=w;let E=l.registerResource(`aikit-status`,`aikit://status`,{description:`AI Kit status (initializing...)`,mimeType:`text/plain`},async()=>({contents:[{uri:`aikit://status`,text:`AI Kit is initializing...`,mimeType:`text/plain`}]})),te=l.registerPrompt(`_init`,{description:`Initializing AI Kit…`,argsSchema:{_dummy:st(lt.string(),()=>[])}},async()=>({messages:[]})),D,O=new Promise(e=>{D=e}),k,ne=new Promise(e=>{k=e}),A=()=>k?.(),j=(async()=>{await ne;let e;try{e=await Z(n)}catch(e){d=`failed`,p=e instanceof Error?e.message:String(e),X.error(`AI Kit initialization failed — server continuing with zero-dep tools only`,{error:p});return}let o=l.sendToolListChanged.bind(l);l.sendToolListChanged=()=>{};let m=l.sendPromptListChanged.bind(l);l.sendPromptListChanged=()=>{};let h=l.sendResourceListChanged.bind(l);l.sendResourceListChanged=()=>{};for(let e of T)e.remove();E.remove(),te.remove();let C=l._registeredTools??{};for(let e of mt)C[e]?.remove();let w=new u(l),O=f(l);Q(l,e,n,r(l),w,O,i,i===`smart`?(()=>{let e=S;return e?.getState?e.getState():null}):null),c(l,{curated:e.curated,store:e.store,graphStore:e.graphStore},i),l.sendToolListChanged=o,l.sendPromptListChanged=m,l.sendResourceListChanged=h,Promise.resolve(l.sendToolListChanged()).catch(()=>{}),Promise.resolve(l.sendPromptListChanged()).catch(()=>{}),Promise.resolve(l.sendResourceListChanged()).catch(()=>{});let k=l._registeredTools??{};for(let[t,n]of Object.entries(k)){if(ft.has(t))continue;let r=n.handler;n.handler=async(...n)=>{if(!e.indexer.isIndexing)return r(...n);let i=y?`re-indexing`:`running initial index`,a=new 
Promise(e=>setTimeout(()=>e({content:[{type:`text`,text:`⏳ AI Kit is ${i}. The tool "${t}" timed out waiting for index data (${pt/1e3}s).\n\nThe existing index may be temporarily locked. Please retry shortly — indexing will complete automatically.`}]}),pt));return Promise.race([r(...n),a])}}for(let[e,t]of Object.entries(k)){let n=t.handler,r=_(e);t.handler=async(...t)=>{try{return await v(()=>n(...t),r,e)}catch(t){if(t instanceof g)return{content:[{type:`text`,text:`⏳ Tool "${e}" timed out after ${r/1e3}s. This may indicate a long-running operation. Please retry or break the task into smaller steps.`}]};throw t}}}let A=Object.keys(k).length;A<$.length&&X.warn(`ALL_TOOL_NAMES count mismatch`,{expectedToolCount:$.length,registeredToolCount:A}),X.info(`MCP server configured`,{toolCount:$.length,resourceCount:4});let j=new s;j.onPressure((e,n)=>{e===`warning`&&t(),e===`critical`&&(X.warn(`Memory pressure critical — consider restarting`,{rssMB:Math.round(n/1024/1024)}),t())}),j.start();let M=new a;x=M,M.onIdle(async()=>{if(N.isRunning||e.indexer.isIndexing){X.info(`Idle cleanup deferred — background tasks still running`),M.touch();return}X.info(`Idle cleanup: closing store and graph connections`);try{await Promise.all([e.store.close().catch(()=>{}),e.graphStore.close().catch(()=>{})])}catch{}}),M.touch();for(let e of Object.values(k)){let t=e.handler;e.handler=async(...e)=>{if(M.touch(),S){let t=ee(e[0]);t.length>0&&S.prioritize(...t)}return t(...e)}}b=e,D?.(e)})(),M=async()=>{let e=await O;x?.setBusy(!0);try{let t=n.sources.map(e=>e.path).join(`, `);X.info(`Running initial index`,{sourcePaths:t});let r=await e.indexer.index(n,e=>{e.phase===`crawling`||e.phase===`done`||(e.phase===`chunking`&&e.currentFile&&X.debug(`Indexing file`,{current:e.filesProcessed+1,total:e.filesTotal,file:e.currentFile}),e.phase===`cleanup`&&X.debug(`Index cleanup`,{staleEntries:e.filesTotal-e.filesProcessed}))});y=!0,X.info(`Initial index 
complete`,{filesProcessed:r.filesProcessed,filesSkipped:r.filesSkipped,chunksCreated:r.chunksCreated,durationMs:r.durationMs});try{await e.store.createFtsIndex()}catch(e){X.warn(`FTS index creation failed`,Y(e))}try{let t=await e.curated.reindexAll();X.info(`Curated re-index complete`,{indexed:t.indexed})}catch(e){X.error(`Curated re-index failed`,Y(e))}}catch(e){X.error(`Initial index failed; will retry on aikit_reindex`,Y(e))}finally{x?.setBusy(!1)}},N=new e,re=()=>N.schedule({name:`initial-index`,fn:M}),P=process.ppid,F=setInterval(()=>{try{process.kill(P,0)}catch{X.info(`Parent process died; shutting down`,{parentPid:P}),clearInterval(F),O.then(async e=>{await Promise.all([e.embedder.shutdown().catch(()=>{}),e.graphStore.close().catch(()=>{}),e.store.close().catch(()=>{})])}).catch(()=>{}).finally(()=>process.exit(0))}},5e3);return F.unref(),{server:l,startInit:A,ready:j,runInitialIndex:re,get kb(){return b},scheduler:N,setSmartScheduler(e){S=e}}}export{$ as ALL_TOOL_NAMES,gt as createLazyServer,ut as createMcpServer,dt as createServer,Z as initializeKnowledgeBase,Q as registerMcpTools};
@@ -1 +1 @@
1
- const e={search:{title:`Hybrid Search`,annotations:{readOnlyHint:!0,idempotentHint:!0}},find:{title:`Federated Find`,annotations:{readOnlyHint:!0,idempotentHint:!0}},symbol:{title:`Symbol Resolver`,annotations:{readOnlyHint:!0,idempotentHint:!0}},trace:{title:`Data Flow Tracer`,annotations:{readOnlyHint:!0,idempotentHint:!0}},scope_map:{title:`Task Scope Map`,annotations:{readOnlyHint:!0,idempotentHint:!0}},lookup:{title:`Chunk Lookup`,annotations:{readOnlyHint:!0,idempotentHint:!0}},dead_symbols:{title:`Dead Symbol Finder`,annotations:{readOnlyHint:!0,idempotentHint:!0}},file_summary:{title:`File Summary`,annotations:{readOnlyHint:!0,idempotentHint:!0}},analyze_structure:{title:`Analyze Structure`,annotations:{readOnlyHint:!0,idempotentHint:!0}},analyze_dependencies:{title:`Analyze Dependencies`,annotations:{readOnlyHint:!0,idempotentHint:!0}},analyze_symbols:{title:`Analyze Symbols`,annotations:{readOnlyHint:!0,idempotentHint:!0}},analyze_patterns:{title:`Analyze Patterns`,annotations:{readOnlyHint:!0,idempotentHint:!0}},analyze_entry_points:{title:`Analyze Entry Points`,annotations:{readOnlyHint:!0,idempotentHint:!0}},analyze_diagram:{title:`Analyze Diagram`,annotations:{readOnlyHint:!0,idempotentHint:!0}},blast_radius:{title:`Blast Radius`,annotations:{readOnlyHint:!0,idempotentHint:!0}},brainstorm:{title:`Brainstorm Session`,annotations:{readOnlyHint:!0,idempotentHint:!0}},remember:{title:`Remember Knowledge`,annotations:{readOnlyHint:!1}},read:{title:`Read Knowledge`,annotations:{readOnlyHint:!0,idempotentHint:!0}},update:{title:`Update Knowledge`,annotations:{readOnlyHint:!1}},forget:{title:`Forget Knowledge`,annotations:{readOnlyHint:!1,destructiveHint:!0}},list:{title:`List Knowledge`,annotations:{readOnlyHint:!0,idempotentHint:!0}},produce_knowledge:{title:`Produce Knowledge`,annotations:{readOnlyHint:!0,idempotentHint:!0}},compact:{title:`Semantic Compactor`,annotations:{readOnlyHint:!0,idempotentHint:!0}},digest:{title:`Multi-Source 
Digest`,annotations:{readOnlyHint:!0,idempotentHint:!0}},stratum_card:{title:`Stratum Card`,annotations:{readOnlyHint:!0,idempotentHint:!0}},forge_ground:{title:`FORGE Ground`,annotations:{readOnlyHint:!0,idempotentHint:!0}},forge_classify:{title:`FORGE Classify`,annotations:{readOnlyHint:!0,idempotentHint:!0}},evidence_map:{title:`Evidence Map`,annotations:{readOnlyHint:!1}},present:{title:`Rich Content Presenter`,annotations:{readOnlyHint:!0,idempotentHint:!0}},check:{title:`Typecheck & Lint`,annotations:{readOnlyHint:!0,openWorldHint:!0}},test_run:{title:`Run Tests`,annotations:{readOnlyHint:!0,openWorldHint:!0}},eval:{title:`Evaluate Code`,annotations:{readOnlyHint:!1,openWorldHint:!0}},batch:{title:`Batch Operations`,annotations:{readOnlyHint:!0,idempotentHint:!0}},audit:{title:`Project Audit`,annotations:{readOnlyHint:!0,idempotentHint:!0}},rename:{title:`Rename Symbol`,annotations:{readOnlyHint:!1,destructiveHint:!0}},restore:{title:`Restore`,annotations:{readOnlyHint:!1}},codemod:{title:`Codemod`,annotations:{readOnlyHint:!1,destructiveHint:!0}},data_transform:{title:`Data Transform`,annotations:{readOnlyHint:!0,idempotentHint:!0}},stash:{title:`Stash Values`,annotations:{readOnlyHint:!1}},checkpoint:{title:`Session Checkpoint`,annotations:{readOnlyHint:!1}},workset:{title:`Workset Manager`,annotations:{readOnlyHint:!1}},lane:{title:`Exploration Lane`,annotations:{readOnlyHint:!1}},git_context:{title:`Git Context`,annotations:{readOnlyHint:!0,idempotentHint:!0}},diff_parse:{title:`Diff Parser`,annotations:{readOnlyHint:!0,idempotentHint:!0}},parse_output:{title:`Parse Build Output`,annotations:{readOnlyHint:!0,idempotentHint:!0}},process:{title:`Process Manager`,annotations:{readOnlyHint:!1,openWorldHint:!0}},watch:{title:`File Watcher`,annotations:{readOnlyHint:!1,openWorldHint:!0}},delegate:{title:`Delegate Task`,annotations:{readOnlyHint:!1,openWorldHint:!0}},config:{title:`Configuration Manager`,annotations:{readOnlyHint:!1}},status:{title:`AI Kit 
Status`,annotations:{readOnlyHint:!0,idempotentHint:!0}},health:{title:`Health Check`,annotations:{readOnlyHint:!0,idempotentHint:!0}},reindex:{title:`Reindex`,annotations:{readOnlyHint:!1}},onboard:{title:`Onboard Codebase`,annotations:{readOnlyHint:!1}},graph:{title:`Knowledge Graph`,annotations:{readOnlyHint:!1}},guide:{title:`Tool Guide`,annotations:{readOnlyHint:!0,idempotentHint:!0}},replay:{title:`Replay History`,annotations:{readOnlyHint:!0,idempotentHint:!0}},changelog:{title:`Generate Changelog`,annotations:{readOnlyHint:!0,idempotentHint:!0}},regex_test:{title:`Regex Tester`,annotations:{readOnlyHint:!0,idempotentHint:!0}},encode:{title:`Encode / Decode`,annotations:{readOnlyHint:!0,idempotentHint:!0}},measure:{title:`Code Metrics`,annotations:{readOnlyHint:!0,idempotentHint:!0}},schema_validate:{title:`Schema Validator`,annotations:{readOnlyHint:!0,idempotentHint:!0}},snippet:{title:`Code Snippets`,annotations:{readOnlyHint:!1}},env:{title:`Environment Info`,annotations:{readOnlyHint:!0,idempotentHint:!0}},time:{title:`Date & Time`,annotations:{readOnlyHint:!0,idempotentHint:!0}},web_fetch:{title:`Web Fetch`,annotations:{readOnlyHint:!0,openWorldHint:!0}},web_search:{title:`Web Search`,annotations:{readOnlyHint:!0,openWorldHint:!0}},http:{title:`HTTP Request`,annotations:{readOnlyHint:!1,openWorldHint:!0}},queue:{title:`Operation Queue`,annotations:{readOnlyHint:!1}},bridge_push:{title:`Bridge Push`,annotations:{readOnlyHint:!1}},bridge_pull:{title:`Bridge Pull`,annotations:{readOnlyHint:!0,idempotentHint:!0}},bridge_sync:{title:`Bridge Sync Status`,annotations:{readOnlyHint:!0,idempotentHint:!0}},evolution_state:{title:`Evolution State`,annotations:{readOnlyHint:!1}},policy_check:{title:`Policy Check`,annotations:{readOnlyHint:!0,idempotentHint:!0}},flow_list:{title:`Flow List`,annotations:{readOnlyHint:!0,idempotentHint:!0}},flow_info:{title:`Flow Info`,annotations:{readOnlyHint:!0,idempotentHint:!0}},flow_start:{title:`Flow 
Start`,annotations:{readOnlyHint:!1}},flow_step:{title:`Flow Step`,annotations:{readOnlyHint:!1}},flow_status:{title:`Flow Status`,annotations:{readOnlyHint:!0,idempotentHint:!0}},flow_reset:{title:`Flow Reset`,annotations:{readOnlyHint:!1}}};function t(t){return e[t]??{title:t,annotations:{readOnlyHint:!1}}}export{e as TOOL_METADATA,t as getToolMeta};
1
+ const e={search:{title:`Hybrid Search`,annotations:{readOnlyHint:!0,idempotentHint:!0}},find:{title:`Federated Find`,annotations:{readOnlyHint:!0,idempotentHint:!0}},symbol:{title:`Symbol Resolver`,annotations:{readOnlyHint:!0,idempotentHint:!0}},trace:{title:`Data Flow Tracer`,annotations:{readOnlyHint:!0,idempotentHint:!0}},scope_map:{title:`Task Scope Map`,annotations:{readOnlyHint:!0,idempotentHint:!0}},lookup:{title:`Chunk Lookup`,annotations:{readOnlyHint:!0,idempotentHint:!0}},dead_symbols:{title:`Dead Symbol Finder`,annotations:{readOnlyHint:!0,idempotentHint:!0}},file_summary:{title:`File Summary`,annotations:{readOnlyHint:!0,idempotentHint:!0}},analyze_structure:{title:`Analyze Structure`,annotations:{readOnlyHint:!0,idempotentHint:!0}},analyze_dependencies:{title:`Analyze Dependencies`,annotations:{readOnlyHint:!0,idempotentHint:!0}},analyze_symbols:{title:`Analyze Symbols`,annotations:{readOnlyHint:!0,idempotentHint:!0}},analyze_patterns:{title:`Analyze Patterns`,annotations:{readOnlyHint:!0,idempotentHint:!0}},analyze_entry_points:{title:`Analyze Entry Points`,annotations:{readOnlyHint:!0,idempotentHint:!0}},analyze_diagram:{title:`Analyze Diagram`,annotations:{readOnlyHint:!0,idempotentHint:!0}},blast_radius:{title:`Blast Radius`,annotations:{readOnlyHint:!0,idempotentHint:!0}},brainstorm:{title:`Brainstorm Session`,annotations:{readOnlyHint:!0,idempotentHint:!0}},remember:{title:`Remember Knowledge`,annotations:{readOnlyHint:!1}},read:{title:`Read Knowledge`,annotations:{readOnlyHint:!0,idempotentHint:!0}},update:{title:`Update Knowledge`,annotations:{readOnlyHint:!1}},forget:{title:`Forget Knowledge`,annotations:{readOnlyHint:!1,destructiveHint:!0}},list:{title:`List Knowledge`,annotations:{readOnlyHint:!0,idempotentHint:!0}},produce_knowledge:{title:`Produce Knowledge`,annotations:{readOnlyHint:!0,idempotentHint:!0}},compact:{title:`Semantic Compactor`,annotations:{readOnlyHint:!0,idempotentHint:!0}},digest:{title:`Multi-Source 
Digest`,annotations:{readOnlyHint:!0,idempotentHint:!0}},stratum_card:{title:`Stratum Card`,annotations:{readOnlyHint:!0,idempotentHint:!0}},forge_ground:{title:`FORGE Ground`,annotations:{readOnlyHint:!0,idempotentHint:!0}},forge_classify:{title:`FORGE Classify`,annotations:{readOnlyHint:!0,idempotentHint:!0}},evidence_map:{title:`Evidence Map`,annotations:{readOnlyHint:!1}},present:{title:`Rich Content Presenter`,annotations:{readOnlyHint:!0,idempotentHint:!0}},check:{title:`Typecheck & Lint`,annotations:{readOnlyHint:!0,openWorldHint:!0}},test_run:{title:`Run Tests`,annotations:{readOnlyHint:!0,openWorldHint:!0}},eval:{title:`Evaluate Code`,annotations:{readOnlyHint:!1,openWorldHint:!0}},batch:{title:`Batch Operations`,annotations:{readOnlyHint:!0,idempotentHint:!0}},audit:{title:`Project Audit`,annotations:{readOnlyHint:!0,idempotentHint:!0}},rename:{title:`Rename Symbol`,annotations:{readOnlyHint:!1,destructiveHint:!0}},restore:{title:`Restore`,annotations:{readOnlyHint:!1}},codemod:{title:`Codemod`,annotations:{readOnlyHint:!1,destructiveHint:!0}},data_transform:{title:`Data Transform`,annotations:{readOnlyHint:!0,idempotentHint:!0}},stash:{title:`Stash Values`,annotations:{readOnlyHint:!1}},checkpoint:{title:`Session Checkpoint`,annotations:{readOnlyHint:!1}},workset:{title:`Workset Manager`,annotations:{readOnlyHint:!1}},lane:{title:`Exploration Lane`,annotations:{readOnlyHint:!1}},git_context:{title:`Git Context`,annotations:{readOnlyHint:!0,idempotentHint:!0}},diff_parse:{title:`Diff Parser`,annotations:{readOnlyHint:!0,idempotentHint:!0}},parse_output:{title:`Parse Build Output`,annotations:{readOnlyHint:!0,idempotentHint:!0}},process:{title:`Process Manager`,annotations:{readOnlyHint:!1,openWorldHint:!0}},watch:{title:`File Watcher`,annotations:{readOnlyHint:!1,openWorldHint:!0}},delegate:{title:`Delegate Task`,annotations:{readOnlyHint:!1,openWorldHint:!0}},config:{title:`Configuration Manager`,annotations:{readOnlyHint:!1}},status:{title:`AI Kit 
Status`,annotations:{readOnlyHint:!0,idempotentHint:!0}},health:{title:`Health Check`,annotations:{readOnlyHint:!0,idempotentHint:!0}},reindex:{title:`Reindex`,annotations:{readOnlyHint:!1}},onboard:{title:`Onboard Codebase`,annotations:{readOnlyHint:!1}},graph:{title:`Knowledge Graph`,annotations:{readOnlyHint:!1}},guide:{title:`Tool Guide`,annotations:{readOnlyHint:!0,idempotentHint:!0}},replay:{title:`Replay History`,annotations:{readOnlyHint:!0,idempotentHint:!0}},changelog:{title:`Generate Changelog`,annotations:{readOnlyHint:!0,idempotentHint:!0}},regex_test:{title:`Regex Tester`,annotations:{readOnlyHint:!0,idempotentHint:!0}},encode:{title:`Encode / Decode`,annotations:{readOnlyHint:!0,idempotentHint:!0}},measure:{title:`Code Metrics`,annotations:{readOnlyHint:!0,idempotentHint:!0}},schema_validate:{title:`Schema Validator`,annotations:{readOnlyHint:!0,idempotentHint:!0}},snippet:{title:`Code Snippets`,annotations:{readOnlyHint:!1}},env:{title:`Environment Info`,annotations:{readOnlyHint:!0,idempotentHint:!0}},time:{title:`Date & Time`,annotations:{readOnlyHint:!0,idempotentHint:!0}},web_fetch:{title:`Web Fetch`,annotations:{readOnlyHint:!0,openWorldHint:!0}},web_search:{title:`Web Search`,annotations:{readOnlyHint:!0,openWorldHint:!0}},http:{title:`HTTP Request`,annotations:{readOnlyHint:!1,openWorldHint:!0}},queue:{title:`Operation Queue`,annotations:{readOnlyHint:!1}},bridge_push:{title:`Bridge Push`,annotations:{readOnlyHint:!1}},bridge_pull:{title:`Bridge Pull`,annotations:{readOnlyHint:!0,idempotentHint:!0}},bridge_sync:{title:`Bridge Sync Status`,annotations:{readOnlyHint:!0,idempotentHint:!0}},evolution_state:{title:`Evolution State`,annotations:{readOnlyHint:!1}},policy_check:{title:`Policy Check`,annotations:{readOnlyHint:!0,idempotentHint:!0}},flow_list:{title:`Flow List`,annotations:{readOnlyHint:!0,idempotentHint:!0}},flow_info:{title:`Flow Info`,annotations:{readOnlyHint:!0,idempotentHint:!0}},flow_start:{title:`Flow 
Start`,annotations:{readOnlyHint:!1}},flow_step:{title:`Flow Step`,annotations:{readOnlyHint:!1}},flow_status:{title:`Flow Status`,annotations:{readOnlyHint:!0,idempotentHint:!0}},flow_reset:{title:`Flow Reset`,annotations:{readOnlyHint:!1}},flow_read_skill:{title:`Flow Read Skill`,annotations:{readOnlyHint:!0,idempotentHint:!0}}};function t(t){return e[t]??{title:t,annotations:{readOnlyHint:!1}}}export{e as TOOL_METADATA,t as getToolMeta};
@@ -1 +1 @@
1
- import{getToolMeta as e}from"../tool-metadata.js";import{join as t}from"node:path";import{z as n}from"zod";import{createLogger as r,serializeError as i}from"../../../core/dist/index.js";const a=r(`flow-tools`);function o(e){return{content:[{type:`text`,text:e}]}}function s(e){return e instanceof Error?e.message:String(e)}function c(r,c){let l=t(c.stateDir??t(c.sources[0].path,`.aikit-state`),`flows`),u=t(l,`registry.json`),d=t(l,`state.json`);async function f(){let{FlowRegistryManager:e,FlowStateMachine:t}=await import(`../../../flows/dist/index.js`);return{registry:new e(u),stateMachine:new t(d)}}let p=e(`flow_list`);r.registerTool(`flow_list`,{title:p.title,description:`List all installed flows and their steps`,annotations:p.annotations,inputSchema:{}},async()=>{try{let{registry:e,stateMachine:t}=await f(),n=e.list(),r=t.getStatus(),i={flows:n.map(e=>({name:e.name,version:e.version,source:e.source,sourceType:e.sourceType,format:e.format,steps:e.manifest.steps.map(e=>e.id)})),activeFlow:r.success&&r.data?{flow:r.data.flow,status:r.data.status,currentStep:r.data.currentStep}:null};return o(JSON.stringify(i,null,2))}catch(e){return a.error(`flow_list failed`,i(e)),o(`Error: ${s(e)}`)}});let m=e(`flow_info`);r.registerTool(`flow_info`,{title:m.title,description:`Show detailed information about a specific flow`,annotations:m.annotations,inputSchema:{name:n.string().describe(`Flow name to get info for`)}},async({name:e})=>{try{let{registry:t}=await f(),n=t.get(e);if(!n)return o(`Flow "${e}" not found. 
Use flow_list to see available flows.`);let r={name:n.name,version:n.version,description:n.manifest.description,source:n.source,sourceType:n.sourceType,format:n.format,installPath:n.installPath,registeredAt:n.registeredAt,updatedAt:n.updatedAt,steps:n.manifest.steps.map(e=>({id:e.id,name:e.name,skill:e.skill,produces:e.produces,requires:e.requires,description:e.description})),agents:n.manifest.agents,artifactsDir:n.manifest.artifacts_dir,install:n.manifest.install};return o(JSON.stringify(r,null,2))}catch(e){return a.error(`flow_info failed`,i(e)),o(`Error: ${s(e)}`)}});let h=e(`flow_start`);r.registerTool(`flow_start`,{title:h.title,description:`Start a flow. Sets the active flow and positions at the first step.`,annotations:h.annotations,inputSchema:{flow:n.string().describe(`Flow name to start (use flow_list to see options)`)}},async({flow:e})=>{try{let{registry:t,stateMachine:n}=await f(),r=t.get(e);if(!r)return o(`Flow "${e}" not found. Use flow_list to see available flows.`);let i=n.start(r.name,r.manifest);if(!i.success||!i.data)return o(`Cannot start: ${i.error}`);let a=i.data,s=r.manifest.steps.find(e=>e.id===a.currentStep),c={started:!0,flow:a.flow,currentStep:a.currentStep,currentStepSkill:s?.skill??null,currentStepDescription:s?.description??null,totalSteps:r.manifest.steps.length,stepSequence:r.manifest.steps.map(e=>e.id),artifactsDir:r.manifest.artifacts_dir};return o(JSON.stringify(c,null,2))}catch(e){return a.error(`flow_start failed`,i(e)),o(`Error: ${s(e)}`)}});let g=e(`flow_step`);r.registerTool(`flow_step`,{title:g.title,description:`Advance the active flow: complete current step and move to next, skip current step, or redo current step.`,annotations:g.annotations,inputSchema:{action:n.enum([`next`,`skip`,`redo`]).describe(`next: mark current step done and advance. skip: skip current step. redo: repeat current step.`)}},async({action:e})=>{try{let{registry:t,stateMachine:n}=await f(),r=n.load();if(!r)return o(`No active flow. 
Use flow_start first.`);let i=t.get(r.flow);if(!i)return o(`Flow "${r.flow}" not found in registry.`);let a=n.step(e,i.manifest);if(!a.success||!a.data)return o(`Cannot ${e}: ${a.error}`);let s=a.data,c=s.currentStep?i.manifest.steps.find(e=>e.id===s.currentStep):null,l={flow:s.flow,status:s.status,action:e,currentStep:s.currentStep,currentStepSkill:c?.skill??null,currentStepDescription:c?.description??null,completedSteps:s.completedSteps,skippedSteps:s.skippedSteps,totalSteps:i.manifest.steps.length,remaining:i.manifest.steps.filter(e=>!s.completedSteps.includes(e.id)&&!s.skippedSteps.includes(e.id)&&e.id!==s.currentStep).map(e=>e.id)};return o(JSON.stringify(l,null,2))}catch(e){return a.error(`flow_step failed`,i(e)),o(`Error: ${s(e)}`)}});let _=e(`flow_status`);r.registerTool(`flow_status`,{title:_.title,description:`Show the current flow execution state — which flow is active, current step, completed steps, and artifacts.`,annotations:_.annotations,inputSchema:{}},async()=>{try{let{registry:e,stateMachine:t}=await f(),n=t.getStatus();if(!n.success||!n.data)return o(`No active flow. Use flow_start to begin one, or flow_list to see available flows.`);let r=n.data,i=e.get(r.flow),a=i?.manifest.steps.find(e=>e.id===r.currentStep),s={flow:r.flow,status:r.status,currentStep:r.currentStep,currentStepSkill:a?.skill??null,currentStepDescription:a?.description??null,completedSteps:r.completedSteps,skippedSteps:r.skippedSteps,artifacts:r.artifacts,startedAt:r.startedAt,updatedAt:r.updatedAt,totalSteps:i?.manifest.steps.length??0,progress:i?`${r.completedSteps.length+r.skippedSteps.length}/${i.manifest.steps.length}`:`unknown`};return o(JSON.stringify(s,null,2))}catch(e){return a.error(`flow_status failed`,i(e)),o(`Error: ${s(e)}`)}});let v=e(`flow_reset`);r.registerTool(`flow_reset`,{title:v.title,description:`Reset the active flow, clearing all state. 
Use to start over or switch to a different flow.`,annotations:v.annotations,inputSchema:{}},async()=>{try{let{stateMachine:e}=await f(),t=e.reset();return t.success?o(`Flow state reset. Use flow_start to begin a new flow.`):o(`Reset failed: ${t.error}`)}catch(e){return a.error(`flow_reset failed`,i(e)),o(`Error: ${s(e)}`)}})}export{c as registerFlowTools};
1
+ import{getToolMeta as e}from"../tool-metadata.js";import{basename as t,join as n,resolve as r}from"node:path";import{z as i}from"zod";import{readFile as a}from"node:fs/promises";import{createLogger as o,serializeError as s}from"../../../core/dist/index.js";const c=o(`flow-tools`);function l(e){return{content:[{type:`text`,text:e}]}}function u(e){return e instanceof Error?e.message:String(e)}function d(o,d){let f=d.sources?.[0]?.path??process.cwd(),p=n(d.stateDir??n(d.sources[0].path,`.aikit-state`),`flows`),m=n(p,`registry.json`),h=n(p,`state.json`);function g(e,n){let i;return i=e.sourceType===`builtin`?r(f,`.github`,`flows`,t(e.installPath),n):r(e.installPath,n),i.replaceAll(`\\`,`/`)}function _(e){return e.sourceType===`builtin`?r(f,`.github`,`flows`,t(e.installPath)).replaceAll(`\\`,`/`):e.installPath.replaceAll(`\\`,`/`)}async function v(){let{FlowRegistryManager:e,FlowStateMachine:t}=await import(`../../../flows/dist/index.js`);return{registry:new e(m),stateMachine:new t(h)}}let y=e(`flow_list`);o.registerTool(`flow_list`,{title:y.title,description:`List all installed flows and their steps`,annotations:y.annotations,inputSchema:{}},async()=>{try{let{registry:e,stateMachine:t}=await v(),n=e.list(),r=t.getStatus(),i={flows:n.map(e=>({name:e.name,version:e.version,source:e.source,sourceType:e.sourceType,format:e.format,steps:e.manifest.steps.map(e=>e.id)})),activeFlow:r.success&&r.data?{flow:r.data.flow,status:r.data.status,currentStep:r.data.currentStep}:null};return l(JSON.stringify(i,null,2))}catch(e){return c.error(`flow_list failed`,s(e)),l(`Error: ${u(e)}`)}});let b=e(`flow_info`);o.registerTool(`flow_info`,{title:b.title,description:`Show detailed information about a specific flow`,annotations:b.annotations,inputSchema:{name:i.string().describe(`Flow name to get info for`)}},async({name:e})=>{try{let{registry:t}=await v(),n=t.get(e);if(!n)return l(`Flow "${e}" not found. 
Use flow_list to see available flows.`);let r={name:n.name,version:n.version,description:n.manifest.description,source:n.source,sourceType:n.sourceType,format:n.format,installPath:_(n),registeredAt:n.registeredAt,updatedAt:n.updatedAt,steps:n.manifest.steps.map(e=>({id:e.id,name:e.name,skill:g(n,e.skill),produces:e.produces,requires:e.requires,description:e.description})),agents:n.manifest.agents,artifactsDir:n.manifest.artifacts_dir,install:n.manifest.install};return l(JSON.stringify(r,null,2))}catch(e){return c.error(`flow_info failed`,s(e)),l(`Error: ${u(e)}`)}});let x=e(`flow_start`);o.registerTool(`flow_start`,{title:x.title,description:`Start a flow. Sets the active flow and positions at the first step.`,annotations:x.annotations,inputSchema:{flow:i.string().describe(`Flow name to start (use flow_list to see options)`)}},async({flow:e})=>{try{let{registry:t,stateMachine:n}=await v(),r=t.get(e);if(!r)return l(`Flow "${e}" not found. Use flow_list to see available flows.`);let i=n.start(r.name,r.manifest);if(!i.success||!i.data)return l(`Cannot start: ${i.error}`);let a=i.data,o=r.manifest.steps.find(e=>e.id===a.currentStep),s={started:!0,flow:a.flow,currentStep:a.currentStep,currentStepSkill:r&&o?g(r,o.skill):null,currentStepDescription:o?.description??null,totalSteps:r.manifest.steps.length,stepSequence:r.manifest.steps.map(e=>e.id),artifactsDir:r.manifest.artifacts_dir};return l(JSON.stringify(s,null,2))}catch(e){return c.error(`flow_start failed`,s(e)),l(`Error: ${u(e)}`)}});let S=e(`flow_step`);o.registerTool(`flow_step`,{title:S.title,description:`Advance the active flow: complete current step and move to next, skip current step, or redo current step.`,annotations:S.annotations,inputSchema:{action:i.enum([`next`,`skip`,`redo`]).describe(`next: mark current step done and advance. skip: skip current step. redo: repeat current step.`)}},async({action:e})=>{try{let{registry:t,stateMachine:n}=await v(),r=n.load();if(!r)return l(`No active flow. 
Use flow_start first.`);let i=t.get(r.flow);if(!i)return l(`Flow "${r.flow}" not found in registry.`);let a=n.step(e,i.manifest);if(!a.success||!a.data)return l(`Cannot ${e}: ${a.error}`);let o=a.data,s=o.currentStep?i.manifest.steps.find(e=>e.id===o.currentStep):null,c={flow:o.flow,status:o.status,action:e,currentStep:o.currentStep,currentStepSkill:i&&s?g(i,s.skill):null,currentStepDescription:s?.description??null,completedSteps:o.completedSteps,skippedSteps:o.skippedSteps,totalSteps:i.manifest.steps.length,remaining:i.manifest.steps.filter(e=>!o.completedSteps.includes(e.id)&&!o.skippedSteps.includes(e.id)&&e.id!==o.currentStep).map(e=>e.id)};return l(JSON.stringify(c,null,2))}catch(e){return c.error(`flow_step failed`,s(e)),l(`Error: ${u(e)}`)}});let C=e(`flow_status`);o.registerTool(`flow_status`,{title:C.title,description:`Show the current flow execution state — which flow is active, current step, completed steps, and artifacts.`,annotations:C.annotations,inputSchema:{}},async()=>{try{let{registry:e,stateMachine:t}=await v(),n=t.getStatus();if(!n.success||!n.data)return l(`No active flow. Use flow_start to begin one, or flow_list to see available flows.`);let r=n.data,i=e.get(r.flow),a=i?.manifest.steps.find(e=>e.id===r.currentStep),o=i&&a?g(i,a.skill):null,s={flow:r.flow,status:r.status,currentStep:r.currentStep,currentStepSkill:o,skillPath:o,currentStepDescription:a?.description??null,completedSteps:r.completedSteps,skippedSteps:r.skippedSteps,artifacts:r.artifacts,startedAt:r.startedAt,updatedAt:r.updatedAt,totalSteps:i?.manifest.steps.length??0,progress:i?`${r.completedSteps.length+r.skippedSteps.length}/${i.manifest.steps.length}`:`unknown`};return l(JSON.stringify(s,null,2))}catch(e){return c.error(`flow_status failed`,s(e)),l(`Error: ${u(e)}`)}});let w=e(`flow_read_skill`);o.registerTool(`flow_read_skill`,{title:w.title===`flow_read_skill`?`Flow Read Skill`:w.title,description:`Read the skill or instruction content for a flow step. 
If step is omitted, reads the current step.`,annotations:w.title===`flow_read_skill`?{readOnlyHint:!0,idempotentHint:!0}:w.annotations,inputSchema:{step:i.string().optional().describe(`Step id or name to read. Defaults to the current step.`)}},async({step:e})=>{try{let{registry:t,stateMachine:n}=await v(),r=n.getStatus();if(!r.success||!r.data)return l(`No active flow. Use flow_start to begin one, or flow_list to see available flows.`);let i=r.data,o=t.get(i.flow);if(!o)return l(`Flow "${i.flow}" not found in registry.`);let s=e??i.currentStep;if(!s)return l(`No current step is available for the active flow.`);let c=o.manifest.steps.find(e=>e.id===s||e.name===s);return l(c?await a(g(o,c.skill),`utf-8`):`Step "${s}" not found in flow "${i.flow}".`)}catch(e){return c.error(`flow_read_skill failed`,s(e)),e instanceof Error&&`code`in e&&e.code===`ENOENT`?l(`Could not read skill file: ${e.message}`):l(`Error: ${u(e)}`)}});let T=e(`flow_reset`);o.registerTool(`flow_reset`,{title:T.title,description:`Reset the active flow, clearing all state. Use to start over or switch to a different flow.`,annotations:T.annotations,inputSchema:{}},async()=>{try{let{stateMachine:e}=await v(),t=e.reset();return t.success?l(`Flow state reset. Use flow_start to begin a new flow.`):l(`Reset failed: ${t.error}`)}catch(e){return c.error(`flow_reset failed`,s(e)),l(`Error: ${u(e)}`)}})}export{d as registerFlowTools};
@@ -63,8 +63,8 @@ import{escHtml as e}from"../present-utils.js";function t(t,f){let p=typeof f==`s
63
63
  });
64
64
  const order = [...list.children].map(c => c.dataset.id);
65
65
  document.getElementById('sortStatus').textContent = 'Reordered: ' + order.join(', ');
66
- if(window.__kbCallback) {
67
- fetch(window.__kbCallback, {method:'POST',headers:{'Content-Type':'application/json'},
66
+ if(window.__aikitCallback) {
67
+ fetch(window.__aikitCallback, {method:'POST',headers:{'Content-Type':'application/json'},
68
68
  body: JSON.stringify({actionId:'reorder',value:JSON.stringify(order)})}).catch(()=>{});
69
69
  }
70
70
  });
@@ -211,8 +211,8 @@ ${a}
211
211
  });
212
212
  document.getElementById('pkApply').addEventListener('click', () => {
213
213
  const selected = [...list.querySelectorAll('input:checked')].map(cb => cb.dataset.id);
214
- if(window.__kbCallback) {
215
- fetch(window.__kbCallback, {method:'POST',headers:{'Content-Type':'application/json'},
214
+ if(window.__aikitCallback) {
215
+ fetch(window.__aikitCallback, {method:'POST',headers:{'Content-Type':'application/json'},
216
216
  body:JSON.stringify({actionId:'pick',value:JSON.stringify(selected)})}).catch(()=>{});
217
217
  }
218
218
  });
@@ -347,8 +347,8 @@ ${a}
347
347
  updatePreview();
348
348
 
349
349
  document.getElementById('fmSubmit').addEventListener('click', () => {
350
- if(window.__kbCallback) {
351
- fetch(window.__kbCallback, {method:'POST',headers:{'Content-Type':'application/json'},
350
+ if(window.__aikitCallback) {
351
+ fetch(window.__aikitCallback, {method:'POST',headers:{'Content-Type':'application/json'},
352
352
  body:JSON.stringify({actionId:'submit',value:JSON.stringify(getValues())})}).catch(()=>{});
353
353
  }
354
354
  });
@@ -1,4 +1,4 @@
1
- import{getToolMeta as e}from"../../tool-metadata.js";import{buildBrowserHtml as t}from"./browser.js";import{buildMarkdown as n}from"./markdown.js";import{readFileSync as r}from"node:fs";import{dirname as i,join as a}from"node:path";import{fileURLToPath as o}from"node:url";import{z as s}from"zod";import{RESOURCE_MIME_TYPE as c,registerAppResource as l,registerAppTool as u}from"@modelcontextprotocol/ext-apps/server";import{exec as d}from"node:child_process";import{createServer as f}from"node:http";import{createUIResource as p}from"@mcp-ui/server";const m=import.meta.dirname??i(o(import.meta.url)),h=`ui://kb/present.html`,g=s.object({type:s.enum([`button`,`select`]).describe(`Action type`),id:s.string().describe(`Unique action identifier`),label:s.string().describe(`Display label`),variant:s.enum([`primary`,`danger`,`default`]).optional().describe(`Button style variant`),options:s.array(s.union([s.string(),s.object({label:s.string(),value:s.string()})])).optional().describe(`Select options (for type=select)`)}),_={format:s.enum([`html`,`browser`]).default(`html`).describe(`Output format.
1
+ import{getToolMeta as e}from"../../tool-metadata.js";import{buildBrowserHtml as t}from"./browser.js";import{buildMarkdown as n}from"./markdown.js";import{readFileSync as r}from"node:fs";import{dirname as i,join as a}from"node:path";import{fileURLToPath as o}from"node:url";import{z as s}from"zod";import{RESOURCE_MIME_TYPE as c,registerAppResource as l,registerAppTool as u}from"@modelcontextprotocol/ext-apps/server";import{exec as d}from"node:child_process";import{createServer as f}from"node:http";import{createUIResource as p}from"@mcp-ui/server";const m=import.meta.dirname??i(o(import.meta.url)),h=`ui://aikit/present.html`,g=s.object({type:s.enum([`button`,`select`]).describe(`Action type`),id:s.string().describe(`Unique action identifier`),label:s.string().describe(`Display label`),variant:s.enum([`primary`,`danger`,`default`]).optional().describe(`Button style variant`),options:s.array(s.union([s.string(),s.object({label:s.string(),value:s.string()})])).optional().describe(`Select options (for type=select)`)}),_={format:s.enum([`html`,`browser`]).default(`html`).describe(`Output format.
2
2
  - "html" (default): Rich markdown in chat + embedded UIResource for MCP-UI hosts. Actions shown as numbered choices.
3
3
  - "browser": Rich markdown in chat + opens beautiful themed dashboard in the browser. When actions are provided, the browser shows interactive buttons and the tool blocks until user clicks, returning their selection.`),title:s.string().optional().describe(`Optional heading`),content:s.any().describe(`Content to present. Accepts: markdown string, array of objects (→ table), { nodes, edges } (→ mermaid graph), typed blocks [{ type, value }], or any JSON (→ tree).`),actions:s.array(g).optional().describe(`Interactive actions (buttons/selects). In html mode, shown as numbered choices. In browser mode, rendered as clickable buttons and the tool blocks until user clicks.`),template:s.enum([`auto`,`list-sort`,`data-table`,`picker`,`flame-graph`,`form`,`timeline`,`kanban`,`tree`,`diff-view`,`dashboard`]).optional().describe(`UI template for interactive display in MCP Apps hosts.
4
4
  - auto (default): detect from content shape
@@ -15,5 +15,5 @@ import{getToolMeta as e}from"../../tool-metadata.js";import{buildBrowserHtml as
15
15
  - "html" (default): Rich markdown in chat + embedded UIResource. Use for display-only content (tables, charts, reports, status boards) where no user interaction is needed.
16
16
  - "browser": Serves a themed dashboard on a local URL. Use ONLY when you need user interaction back (confirmations, selections, form input). The tool blocks until user clicks an action button, then returns their selection.
17
17
  FORMAT RULE: If no user interaction is needed → use "html". If you need user input back → use "browser".
18
- BROWSER WORKFLOW: After calling present with format "browser", you MUST extract the URL from the response and call openBrowserPage({ url }) to open it in VS Code Simple Browser. A system browser fallback also opens automatically, but always call openBrowserPage yourself.`,annotations:r.annotations,inputSchema:_,_meta:{ui:{resourceUri:h}}},async({format:e,title:t,content:r,actions:i,template:a})=>(e??`html`)===`browser`?await w(t,r,i,n,a):T(t,r,i,a))}async function w(e,r,i,a,o){let s=n(e,r),c=t(e,r,i,o),l=p({uri:`ui://kb/present-browser.html`,content:{type:`rawHtml`,htmlString:c},encoding:`text`,adapters:{mcpApps:{enabled:!0}}}),u,m,h=Array.isArray(i)?i:[],g=``,_;try{b&&=(b.close(),null),h.length>0&&(u=new Promise(e=>{m=e}));let e=!1;g=await new Promise((t,n)=>{let r=f((t,n)=>{if(e||(e=!0,_&&clearTimeout(_)),t.method===`POST`&&t.url===`/callback`){let e=``;t.on(`data`,t=>{e+=t.toString()}),t.on(`end`,()=>{n.writeHead(200,{"Content-Type":`application/json`,"Access-Control-Allow-Origin":`*`}),n.end(`{"ok":true}`);try{let t=JSON.parse(e);m?.(t)}catch{}});return}if(t.method===`OPTIONS`){n.writeHead(204,{"Access-Control-Allow-Origin":`*`,"Access-Control-Allow-Methods":`POST`,"Access-Control-Allow-Headers":`Content-Type`}),n.end();return}n.writeHead(200,{"Content-Type":`text/html; charset=utf-8`}),n.end(c)});r.listen(0,`127.0.0.1`,()=>{let i=r.address();if(typeof i==`object`&&i){b=r;let n=`http://127.0.0.1:${i.port}`;_=setTimeout(()=>{if(!e)try{d(process.platform===`win32`?`start "" "${n}"`:process.platform===`darwin`?`open "${n}"`:`xdg-open "${n}"`)}catch{}},8e3),_.unref(),t(n)}else n(Error(`Failed to start present server`))}),setTimeout(()=>{r.close(),b===r&&(b=null)},300*1e3).unref()})}catch{}let v=g?`${s}\n\n---\n🌐 **Dashboard opened in browser:** ${g}\n\n**IMPORTANT:** You MUST now open this URL using the \`openBrowserPage\` tool (Simple Browser). If that tool is unavailable, open the URL in the system browser as a fallback. 
If neither works, ask the user to open the URL manually.`:s;if(h.length>0&&a?.available&&u)try{let e=await Promise.race([u,new Promise((e,t)=>setTimeout(()=>t(Error(`timeout`)),300*1e3))]);return{content:[{type:`text`,text:`${v}\n\n✅ **Selected:** ${e.actionId} = \`${e.value}\``},l]}}catch{return{content:[{type:`text`,text:`${v}\n\n⚠️ *No selection received (timed out).*`},l]}}return{content:[{type:`text`,text:v},l]}}function T(e,r,i,a){let o=Array.isArray(i)?i:[],s=n(e,r);if(o.length>0){let e=[``];for(let t=0;t<o.length;t++){let n=o[t],r=typeof n.label==`string`?n.label:`Action ${t+1}`;if(n.type===`select`&&Array.isArray(n.options)){let i=n.options.map(e=>typeof e==`string`?e:e.label).join(`, `);e.push(`${t+1}. **${r}** — choose: ${i}`)}else e.push(`${t+1}. **${r}**`)}s+=`\n${e.join(`
19
- `)}`}let c=p({uri:`ui://kb/present-static.html`,content:{type:`rawHtml`,htmlString:t(e,r,i,a)},encoding:`text`,adapters:{mcpApps:{enabled:!0}}});return{content:[{type:`text`,text:s},c],structuredContent:{title:e,content:r,actions:o}}}export{w as formatAsBrowser,T as formatAsHtml,S as getPresentHtml,C as registerPresentTool,x as resolvePresentHtml};
18
+ BROWSER WORKFLOW: After calling present with format "browser", you MUST extract the URL from the response and call openBrowserPage({ url }) to open it in VS Code Simple Browser. A system browser fallback also opens automatically, but always call openBrowserPage yourself.`,annotations:r.annotations,inputSchema:_,_meta:{ui:{resourceUri:h}}},async({format:e,title:t,content:r,actions:i,template:a})=>(e??`html`)===`browser`?await w(t,r,i,n,a):T(t,r,i,a))}async function w(e,r,i,a,o){let s=n(e,r),c=t(e,r,i,o),l=p({uri:`ui://aikit/present-browser.html`,content:{type:`rawHtml`,htmlString:c},encoding:`text`,adapters:{mcpApps:{enabled:!0}}}),u,m,h=Array.isArray(i)?i:[],g=``,_;try{b&&=(b.close(),null),h.length>0&&(u=new Promise(e=>{m=e}));let e=!1;g=await new Promise((t,n)=>{let r=f((t,n)=>{if(e||(e=!0,_&&clearTimeout(_)),t.method===`POST`&&t.url===`/callback`){let e=``;t.on(`data`,t=>{e+=t.toString()}),t.on(`end`,()=>{n.writeHead(200,{"Content-Type":`application/json`,"Access-Control-Allow-Origin":`*`}),n.end(`{"ok":true}`);try{let t=JSON.parse(e);m?.(t)}catch{}});return}if(t.method===`OPTIONS`){n.writeHead(204,{"Access-Control-Allow-Origin":`*`,"Access-Control-Allow-Methods":`POST`,"Access-Control-Allow-Headers":`Content-Type`}),n.end();return}n.writeHead(200,{"Content-Type":`text/html; charset=utf-8`}),n.end(c)});r.listen(0,`127.0.0.1`,()=>{let i=r.address();if(typeof i==`object`&&i){b=r;let n=`http://127.0.0.1:${i.port}`;_=setTimeout(()=>{if(!e)try{d(process.platform===`win32`?`start "" "${n}"`:process.platform===`darwin`?`open "${n}"`:`xdg-open "${n}"`)}catch{}},8e3),_.unref(),t(n)}else n(Error(`Failed to start present server`))}),setTimeout(()=>{r.close(),b===r&&(b=null)},300*1e3).unref()})}catch{}let v=g?`${s}\n\n---\n🌐 **Dashboard opened in browser:** ${g}\n\n**IMPORTANT:** You MUST now open this URL using the \`openBrowserPage\` tool (Simple Browser). If that tool is unavailable, open the URL in the system browser as a fallback. 
If neither works, ask the user to open the URL manually.`:s;if(h.length>0&&a?.available&&u)try{let e=await Promise.race([u,new Promise((e,t)=>setTimeout(()=>t(Error(`timeout`)),300*1e3))]);return{content:[{type:`text`,text:`${v}\n\n✅ **Selected:** ${e.actionId} = \`${e.value}\``},l]}}catch{return{content:[{type:`text`,text:`${v}\n\n⚠️ *No selection received (timed out).*`},l]}}return{content:[{type:`text`,text:v},l]}}function T(e,r,i,a){let o=Array.isArray(i)?i:[],s=n(e,r);if(o.length>0){let e=[``];for(let t=0;t<o.length;t++){let n=o[t],r=typeof n.label==`string`?n.label:`Action ${t+1}`;if(n.type===`select`&&Array.isArray(n.options)){let i=n.options.map(e=>typeof e==`string`?e:e.label).join(`, `);e.push(`${t+1}. **${r}** — choose: ${i}`)}else e.push(`${t+1}. **${r}**`)}s+=`\n${e.join(`
19
+ `)}`}let c=p({uri:`ui://aikit/present-static.html`,content:{type:`rawHtml`,htmlString:t(e,r,i,a)},encoding:`text`,adapters:{mcpApps:{enabled:!0}}});return{content:[{type:`text`,text:s},c],structuredContent:{title:e,content:r,actions:o}}}export{w as formatAsBrowser,T as formatAsHtml,S as getPresentHtml,C as registerPresentTool,x as resolvePresentHtml};
@@ -1 +1 @@
1
- import{EMBEDDING_DEFAULTS as e,SEARCH_DEFAULTS as t,STORE_DEFAULTS as n,createLogger as r,serializeError as i,sourceTypeContentTypes as a}from"../../core/dist/index.js";import{Index as o,connect as s}from"@lancedb/lancedb";function c(e){if(!e)return[];try{let t=JSON.parse(e);return Array.isArray(t)?t:[]}catch{return[]}}const l=/^[\w.\-/ ]+$/,u=r(`store`);function d(e,t){if(!l.test(e))throw Error(`Invalid ${t} filter value: contains disallowed characters`);return e.replace(/'/g,`''`)}var f=class{db=null;table=null;dbPath;tableName;_draining=!1;_priorityQueue=[];_normalQueue=[];_ftsReady=!1;enqueueWrite(e,t=!1){return new Promise((n,r)=>{let i=async()=>{try{n(await e())}catch(e){r(e)}};t?this._priorityQueue.push(i):this._normalQueue.push(i),this._drain()})}async _drain(){if(!this._draining){this._draining=!0;try{for(;this._priorityQueue.length>0||this._normalQueue.length>0;){let e=this._priorityQueue.shift()??this._normalQueue.shift();e&&await e()}}finally{this._draining=!1}}}constructor(e){this.dbPath=e?.path??n.path,this.tableName=e?.tableName??n.tableName}async initialize(){this.db=await s(this.dbPath),(await this.db.tableNames()).includes(this.tableName)&&(this.table=await this.db.openTable(this.tableName),await this.createFtsIndex())}async upsert(e,t){if(e.length!==0){if(e.length!==t.length)throw Error(`Record count (${e.length}) does not match vector count (${t.length})`);return this.enqueueWrite(()=>this._upsertImpl(e,t))}}async upsertInteractive(e,t){if(e.length!==0){if(e.length!==t.length)throw Error(`Record count (${e.length}) does not match vector count (${t.length})`);return this.enqueueWrite(()=>this._upsertImpl(e,t),!0)}}async _upsertImpl(e,t){let 
n=e.map((e,n)=>({id:e.id,vector:Array.from(t[n]),content:e.content,sourcePath:e.sourcePath,contentType:e.contentType,headingPath:e.headingPath??``,chunkIndex:e.chunkIndex,totalChunks:e.totalChunks,startLine:e.startLine,endLine:e.endLine,fileHash:e.fileHash,indexedAt:e.indexedAt,origin:e.origin,tags:JSON.stringify(e.tags),category:e.category??``,version:e.version}));if(this.table){let t=[...new Set(e.map(e=>e.sourcePath))];for(let e of t)try{await this.table.delete(`sourcePath = '${d(e,`sourcePath`)}'`)}catch{}await this.table.add(n)}else try{this.table=await this.db?.createTable(this.tableName,n)??null}catch(e){if(String(e).includes(`already exists`)&&this.db)this.table=await this.db.openTable(this.tableName),await this.table.add(n);else throw e}}async search(e,n){if(!this.table)return[];let r=n?.limit??t.maxResults,i=n?.minScore??t.minScore,a=this.table.search(e).limit(r*2),o=this.buildFilterString(n);return o&&(a=a.where(o)),(await a.toArray()).map(e=>({record:this.fromLanceRecord(e),score:1-(e._distance??1)})).filter(e=>e.score>=i).slice(0,r)}async createFtsIndex(){return this.enqueueWrite(()=>this._createFtsIndexImpl())}async _createFtsIndexImpl(){if(this.table)try{await this.table.createIndex(`content`,{config:o.fts(),replace:!0}),this._ftsReady=!0,u.info(`FTS index created/updated`,{column:`content`})}catch(e){u.warn(`FTS index creation failed`,i(e))}}async ftsSearch(e,n){if(!this.table||!this._ftsReady)return[];let r=n?.limit??t.maxResults;try{let t=this.table.search(e).limit(r*2),i=this.buildFilterString(n);return i&&(t=t.where(i)),(await t.toArray()).map(e=>({record:this.fromLanceRecord(e),score:e._score??e._relevance_score??0}))}catch(e){return(e instanceof Error?e.message:String(e)).includes(`INVERTED index`)?(u.debug(`FTS search skipped — index not yet available`),this._ftsReady=!1):u.warn(`FTS search failed`,i(e)),[]}}async getById(e){if(!this.table)return null;let t=await this.table.query().where(`id = '${d(e,`id`)}'`).limit(1).toArray();return 
t.length===0?null:this.fromLanceRecord(t[0])}async deleteBySourcePath(e){return this.enqueueWrite(()=>this._deleteBySourcePathImpl(e))}async _deleteBySourcePathImpl(e){if(!this.table)return 0;let t=await this.getBySourcePath(e);return t.length===0?0:(await this.table.delete(`sourcePath = '${d(e,`sourcePath`)}'`),t.length)}async deleteById(e){return this.enqueueWrite(()=>this._deleteByIdImpl(e))}async deleteByIdInteractive(e){return this.enqueueWrite(()=>this._deleteByIdImpl(e),!0)}async _deleteByIdImpl(e){return!this.table||!await this.getById(e)?!1:(await this.table.delete(`id = '${d(e,`id`)}'`),!0)}async getBySourcePath(e){return this.table?(await this.table.query().where(`sourcePath = '${d(e,`sourcePath`)}'`).limit(1e3).toArray()).map(e=>this.fromLanceRecord(e)):[]}async getStats(){if(!this.table)return{totalRecords:0,totalFiles:0,contentTypeBreakdown:{},lastIndexedAt:null,storeBackend:`lancedb`,embeddingModel:e.model};let t=await this.table.countRows(),n=await this.table.query().select([`sourcePath`,`contentType`,`indexedAt`]).limit(1e5).toArray(),r={},i=new Set,a=null;for(let e of n){let t=e;r[t.contentType]=(r[t.contentType]??0)+1,i.add(t.sourcePath),(!a||t.indexedAt>a)&&(a=t.indexedAt)}return{totalRecords:t,totalFiles:i.size,contentTypeBreakdown:r,lastIndexedAt:a,storeBackend:`lancedb`,embeddingModel:e.model}}async listSourcePaths(){if(!this.table)return[];let e=await this.table.query().select([`sourcePath`]).limit(1e5).toArray();return[...new Set(e.map(e=>e.sourcePath))]}async dropTable(){return this.enqueueWrite(()=>this._dropTableImpl())}async _dropTableImpl(){if(this.db&&(await this.db.tableNames()).includes(this.tableName))for(let e=1;e<=3;e++)try{await this.db.dropTable(this.tableName);break}catch(t){if(e===3)throw t;let n=e*500;u.warn(`dropTable attempt failed, retrying`,{attempt:e,delayMs:n}),await new Promise(e=>setTimeout(e,n))}this.table=null}async close(){try{this.db&&typeof this.db.close==`function`&&await 
this.db.close()}catch{}this.table=null,this.db=null}buildFilterString(e){let t=[];if(e?.contentType&&t.push(`contentType = '${d(e.contentType,`contentType`)}'`),e?.sourceType){let n=a(e.sourceType);if(n.length>0){let e=n.map(e=>`'${d(e,`sourceType`)}'`).join(`, `);t.push(`contentType IN (${e})`)}}if(e?.origin&&t.push(`origin = '${d(e.origin,`origin`)}'`),e?.category&&t.push(`category = '${d(e.category,`category`)}'`),e?.tags&&e.tags.length>0){let n=e.tags.map(e=>`tags LIKE '%${d(e,`tag`)}%'`);t.push(`(${n.join(` OR `)})`)}return t.length>0?t.join(` AND `):null}fromLanceRecord(e){return{id:e.id,content:e.content,sourcePath:e.sourcePath,contentType:e.contentType,headingPath:e.headingPath||void 0,chunkIndex:e.chunkIndex,totalChunks:e.totalChunks,startLine:e.startLine,endLine:e.endLine,fileHash:e.fileHash,indexedAt:e.indexedAt,origin:e.origin,tags:c(e.tags),category:e.category||void 0,version:e.version}}};export{f as LanceStore};
1
+ import{EMBEDDING_DEFAULTS as e,SEARCH_DEFAULTS as t,STORE_DEFAULTS as n,createLogger as r,serializeError as i,sourceTypeContentTypes as a}from"../../core/dist/index.js";import{Index as o,connect as s}from"@lancedb/lancedb";function c(e){if(!e)return[];try{let t=JSON.parse(e);return Array.isArray(t)?t:[]}catch{return[]}}const l=/^[\w.\-/ ]+$/,u=r(`store`);function d(e,t){if(!l.test(e))throw Error(`Invalid ${t} filter value: contains disallowed characters`);return e.replace(/'/g,`''`)}var f=class{db=null;table=null;dbPath;tableName;_draining=!1;_priorityQueue=[];_normalQueue=[];_ftsReady=!1;enqueueWrite(e,t=!1){return new Promise((n,r)=>{let i=async()=>{try{n(await e())}catch(e){r(e)}};t?this._priorityQueue.push(i):this._normalQueue.push(i),this._drain()})}async _drain(){if(!this._draining){this._draining=!0;try{for(;this._priorityQueue.length>0||this._normalQueue.length>0;){let e=this._priorityQueue.shift()??this._normalQueue.shift();e&&await e()}}finally{this._draining=!1}}}constructor(e){this.dbPath=e?.path??n.path,this.tableName=e?.tableName??n.tableName}async initialize(){this.db=await s(this.dbPath),(await this.db.tableNames()).includes(this.tableName)&&(this.table=await this.db.openTable(this.tableName),await this.createFtsIndex())}async upsert(e,t){if(e.length!==0){if(e.length!==t.length)throw Error(`Record count (${e.length}) does not match vector count (${t.length})`);return this.enqueueWrite(()=>this._upsertImpl(e,t))}}async upsertInteractive(e,t){if(e.length!==0){if(e.length!==t.length)throw Error(`Record count (${e.length}) does not match vector count (${t.length})`);return this.enqueueWrite(()=>this._upsertImpl(e,t),!0)}}async _upsertImpl(e,t){let 
n=e.map((e,n)=>({id:e.id,vector:Array.from(t[n]),content:e.content,sourcePath:e.sourcePath,contentType:e.contentType,headingPath:e.headingPath??``,chunkIndex:e.chunkIndex,totalChunks:e.totalChunks,startLine:e.startLine,endLine:e.endLine,fileHash:e.fileHash,indexedAt:e.indexedAt,origin:e.origin,tags:JSON.stringify(e.tags),category:e.category??``,version:e.version}));if(this.table){let t=[...new Set(e.map(e=>e.sourcePath))];for(let e of t)try{await this.table.delete(`sourcePath = '${d(e,`sourcePath`)}'`)}catch{}await this.table.add(n)}else try{this.table=await this.db?.createTable(this.tableName,n)??null}catch(e){if(String(e).includes(`already exists`)&&this.db)this.table=await this.db.openTable(this.tableName),await this.table.add(n);else throw e}}async search(e,n){if(!this.table)return[];let r=n?.limit??t.maxResults,i=n?.minScore??t.minScore,a=this.table.search(e).limit(r*2),o=this.buildFilterString(n);return o&&(a=a.where(o)),(await a.toArray()).map(e=>({record:this.fromLanceRecord(e),score:1-(e._distance??1)})).filter(e=>e.score>=i).slice(0,r)}async createFtsIndex(){return this.enqueueWrite(()=>this._createFtsIndexImpl())}async _createFtsIndexImpl(){if(this.table)try{await this.table.createIndex(`content`,{config:o.fts({withPosition:!0}),replace:!0}),this._ftsReady=!0,u.info(`FTS index created/updated`,{column:`content`})}catch(e){u.warn(`FTS index creation failed`,i(e))}}async ftsSearch(e,n){if(!this.table||!this._ftsReady)return[];let r=n?.limit??t.maxResults;try{let t=this.table.search(e).limit(r*2),i=this.buildFilterString(n);return i&&(t=t.where(i)),(await t.toArray()).map(e=>({record:this.fromLanceRecord(e),score:e._score??e._relevance_score??0}))}catch(e){return(e instanceof Error?e.message:String(e)).includes(`INVERTED index`)?(u.debug(`FTS search skipped — index not yet available`),this._ftsReady=!1):u.warn(`FTS search failed`,i(e)),[]}}async getById(e){if(!this.table)return null;let t=await this.table.query().where(`id = 
'${d(e,`id`)}'`).limit(1).toArray();return t.length===0?null:this.fromLanceRecord(t[0])}async deleteBySourcePath(e){return this.enqueueWrite(()=>this._deleteBySourcePathImpl(e))}async _deleteBySourcePathImpl(e){if(!this.table)return 0;let t=await this.getBySourcePath(e);return t.length===0?0:(await this.table.delete(`sourcePath = '${d(e,`sourcePath`)}'`),t.length)}async deleteById(e){return this.enqueueWrite(()=>this._deleteByIdImpl(e))}async deleteByIdInteractive(e){return this.enqueueWrite(()=>this._deleteByIdImpl(e),!0)}async _deleteByIdImpl(e){return!this.table||!await this.getById(e)?!1:(await this.table.delete(`id = '${d(e,`id`)}'`),!0)}async getBySourcePath(e){return this.table?(await this.table.query().where(`sourcePath = '${d(e,`sourcePath`)}'`).limit(1e3).toArray()).map(e=>this.fromLanceRecord(e)):[]}async getStats(){if(!this.table)return{totalRecords:0,totalFiles:0,contentTypeBreakdown:{},lastIndexedAt:null,storeBackend:`lancedb`,embeddingModel:e.model};let t=await this.table.countRows(),n=await this.table.query().select([`sourcePath`,`contentType`,`indexedAt`]).limit(1e5).toArray(),r={},i=new Set,a=null;for(let e of n){let t=e;r[t.contentType]=(r[t.contentType]??0)+1,i.add(t.sourcePath),(!a||t.indexedAt>a)&&(a=t.indexedAt)}return{totalRecords:t,totalFiles:i.size,contentTypeBreakdown:r,lastIndexedAt:a,storeBackend:`lancedb`,embeddingModel:e.model}}async listSourcePaths(){if(!this.table)return[];let e=await this.table.query().select([`sourcePath`]).limit(1e5).toArray();return[...new Set(e.map(e=>e.sourcePath))]}async dropTable(){return this.enqueueWrite(()=>this._dropTableImpl())}async _dropTableImpl(){if(this.db&&(await this.db.tableNames()).includes(this.tableName))for(let e=1;e<=3;e++)try{await this.db.dropTable(this.tableName);break}catch(t){if(e===3)throw t;let n=e*500;u.warn(`dropTable attempt failed, retrying`,{attempt:e,delayMs:n}),await new Promise(e=>setTimeout(e,n))}this.table=null}async close(){try{this.db&&typeof 
this.db.close==`function`&&await this.db.close()}catch{}this.table=null,this.db=null}buildFilterString(e){let t=[];if(e?.contentType&&t.push(`contentType = '${d(e.contentType,`contentType`)}'`),e?.sourceType){let n=a(e.sourceType);if(n.length>0){let e=n.map(e=>`'${d(e,`sourceType`)}'`).join(`, `);t.push(`contentType IN (${e})`)}}if(e?.origin&&t.push(`origin = '${d(e.origin,`origin`)}'`),e?.category&&t.push(`category = '${d(e.category,`category`)}'`),e?.tags&&e.tags.length>0){let n=e.tags.map(e=>`tags LIKE '%${d(e,`tag`)}%'`);t.push(`(${n.join(` OR `)})`)}return t.length>0?t.join(` AND `):null}fromLanceRecord(e){return{id:e.id,content:e.content,sourcePath:e.sourcePath,contentType:e.contentType,headingPath:e.headingPath||void 0,chunkIndex:e.chunkIndex,totalChunks:e.totalChunks,startLine:e.startLine,endLine:e.endLine,fileHash:e.fileHash,indexedAt:e.indexedAt,origin:e.origin,tags:c(e.tags),category:e.category||void 0,version:e.version}}};export{f as LanceStore};
@@ -143,6 +143,9 @@ function generateSingleAgent(name, def) {
143
143
  : AGENT_BODIES[name] || '';
144
144
 
145
145
  const title = def.title || name;
146
+ const skillsSection = def.skills?.length
147
+ ? `\n## Skills (load on demand)\n\n| Skill | When to load |\n|-------|--------------|\n${def.skills.map(([s, w]) => `| ${s} | ${w} |`).join('\n')}\n`
148
+ : '';
146
149
 
147
150
  return `---
148
151
  description: '${def.description}'
@@ -154,7 +157,7 @@ model: ${model}
154
157
 
155
158
  You are the **${name}**, ${def.description.toLowerCase().replace(/^./, (c) => c.toLowerCase())}
156
159
 
157
- ${body}
160
+ ${body}${skillsSection}
158
161
 
159
162
  ${FLOWS_SECTION}
160
163
  `;
@@ -42,6 +42,10 @@ export const AGENTS = {
42
42
  toolRole: 'codeAgent',
43
43
  sharedBase: 'code-agent-base',
44
44
  category: 'implementation',
45
+ skills: [
46
+ ['aikit', '**Always** — AI Kit tool signatures, search, analysis'],
47
+ ['typescript', 'When writing TypeScript code — type patterns, generics, utility types'],
48
+ ],
45
49
  },
46
50
 
47
51
  Frontend: {
@@ -52,6 +56,15 @@ export const AGENTS = {
52
56
  toolRole: 'codeAgent',
53
57
  sharedBase: 'code-agent-base',
54
58
  category: 'implementation',
59
+ skills: [
60
+ ['aikit', '**Always** — AI Kit tool signatures, search, analysis'],
61
+ ['react', 'When building React components — hooks, patterns, Server Components'],
62
+ ['typescript', 'When writing TypeScript code — type patterns, generics, utility types'],
63
+ [
64
+ 'frontend-design',
65
+ 'When implementing UI/UX — design systems, accessibility, responsive patterns',
66
+ ],
67
+ ],
55
68
  },
56
69
 
57
70
  Refactor: {
@@ -73,6 +86,10 @@ export const AGENTS = {
73
86
  toolRole: 'debugger',
74
87
  sharedBase: 'code-agent-base',
75
88
  category: 'diagnostics',
89
+ skills: [
90
+ ['aikit', '**Always** — AI Kit tool signatures, search, analysis'],
91
+ ['typescript', 'When writing TypeScript code — type patterns, generics, utility types'],
92
+ ],
76
93
  },
77
94
 
78
95
  Security: {
@@ -82,6 +99,10 @@ export const AGENTS = {
82
99
  toolRole: 'security',
83
100
  sharedBase: null,
84
101
  category: 'diagnostics',
102
+ skills: [
103
+ ['aikit', '**Always** — AI Kit tool signatures, search, analysis'],
104
+ ['typescript', 'When reviewing code — security patterns, type safety'],
105
+ ],
85
106
  },
86
107
 
87
108
  // ─── Documentation ────────────────────────────────────────────────────
@@ -104,6 +125,7 @@ export const AGENTS = {
104
125
  toolRole: 'explorer',
105
126
  sharedBase: null,
106
127
  category: 'exploration',
128
+ skills: [['aikit', '**Always** — AI Kit tool signatures, search, analysis']],
107
129
  },
108
130
 
109
131
  // ─── Multi-variant roles ──────────────────────────────────────────────
@@ -129,21 +151,19 @@ export const AGENTS = {
129
151
  },
130
152
  Beta: {
131
153
  description:
132
- 'Research variant for multi-model decision protocol different LLM perspective',
154
+ 'Research variant pragmatic analysis with focus on trade-offs and edge cases',
133
155
  identity:
134
- ', a variant of the Researcher agent. You exist to provide a **different LLM perspective** during multi-model decision sessions. Approach problems with the same rigor but bring your own reasoning style.',
156
+ ', a variant of the Researcher agent optimized for **pragmatic analysis**. Focus on trade-offs, edge cases, and practical constraints. Challenge assumptions and highlight risks the primary researcher may overlook.',
135
157
  },
136
158
  Gamma: {
137
- description:
138
- 'Research variant for multi-model decision protocol — different LLM perspective',
159
+ description: 'Research variant — broad pattern matching across domains and technologies',
139
160
  identity:
140
- ', a variant of the Researcher agent. You exist to provide a **different LLM perspective** during multi-model decision sessions. Approach problems with the same rigor but bring your own reasoning style.',
161
+ ', a variant of the Researcher agent optimized for **cross-domain pattern matching**. Draw connections from other domains, frameworks, and industries. Bring breadth where Alpha brings depth.',
141
162
  },
142
163
  Delta: {
143
- description:
144
- 'Research variant for multi-model decision protocol — different LLM perspective',
164
+ description: 'Research variant — implementation feasibility and performance implications',
145
165
  identity:
146
- ', a variant of the Researcher agent. You exist to provide a **different LLM perspective** during multi-model decision sessions. Approach problems with the same rigor but bring your own reasoning style.',
166
+ ', a variant of the Researcher agent optimized for **implementation feasibility**. Focus on performance implications, scaling concerns, and concrete implementation paths. Ground theoretical proposals in practical reality.',
147
167
  },
148
168
  },
149
169
  },
@@ -156,6 +176,10 @@ export const AGENTS = {
156
176
  toolRole: 'reviewer',
157
177
  sharedBase: 'code-reviewer-base',
158
178
  category: 'review',
179
+ skills: [
180
+ ['aikit', '**Always** — AI Kit tool signatures, search, analysis'],
181
+ ['typescript', 'When reviewing TypeScript code — type patterns, best practices'],
182
+ ],
159
183
  variants: {
160
184
  Alpha: { description: 'Primary code reviewer' },
161
185
  Beta: { description: 'Code reviewer variant — different LLM perspective for dual review' },
@@ -170,6 +170,8 @@ Before every tool call, verify:
170
170
  | \`session-handoff\` | Context filling up, session ending, or major milestone |
171
171
  | \`lesson-learned\` | After completing work — extract engineering principles |
172
172
 
173
+ **When dispatching subagents**, include relevant skill names in the prompt so subagents know which skills to load (e.g., "Load the \`react\` and \`typescript\` skills for this task").
174
+
173
175
  ## Flow-Aware Routing
174
176
 
175
177
  At session start, check for an active flow:
@@ -366,7 +368,8 @@ When subagents complete, their visual outputs (from \`present\`) are NOT visible
366
368
 
367
369
  | Skill | When to load |
368
370
  |-------|--------------|
369
- | \`lesson-learned\` | After completing a refactor — extract principles from the before/after diff |`,
371
+ | \`lesson-learned\` | After completing a refactor — extract principles from the before/after diff |
372
+ | \`typescript\` | When refactoring TypeScript code — type patterns, generics, utility types |`,
370
373
 
371
374
  Security: `**Read \`AGENTS.md\`** in the workspace root for project conventions and AI Kit protocol.
372
375
 
@@ -454,7 +457,8 @@ When subagents complete, their visual outputs (from \`present\`) are NOT visible
454
457
  | Skill | When to load |
455
458
  |-------|--------------|
456
459
  | \`c4-architecture\` | When documenting system architecture — generate C4 Mermaid diagrams |
457
- | \`adr-skill\` | When documenting architecture decisions — create or update ADRs |`,
460
+ | \`adr-skill\` | When documenting architecture decisions — create or update ADRs |
461
+ | \`typescript\` | When documenting TypeScript APIs — type signatures, JSDoc patterns |`,
458
462
 
459
463
  Explorer: `**Read \`AGENTS.md\`** in the workspace root for project conventions and AI Kit protocol.
460
464
 
@@ -45,9 +45,9 @@ You may be invoked in two modes:
45
45
 
46
46
  ---
47
47
 
48
- ## MANDATORY FIRST ACTION — Knowledge Base Initialization
48
+ ## MANDATORY FIRST ACTION — AI Kit Initialization
49
49
 
50
- **Before ANY other work**, check the knowledge base:
50
+ **Before ANY other work**, check the AI Kit index:
51
51
 
52
52
  1. Run \`status({})\` — check **Onboard Status** and note the **Onboard Directory** path
53
53
  2. If onboard shows ❌:
@@ -62,6 +62,41 @@ You may be invoked in two modes:
62
62
 
63
63
  ---
64
64
 
65
+ ## Session Protocol
66
+
67
+ ### Start (do ALL)
68
+
69
+ \`\`\`
70
+ flow_status({}) # Check/resume active flow FIRST
71
+ # If flow active → flow_read_skill({ step }) → follow skill instructions
72
+ status({}) # Check AI Kit health + onboard state
73
+ # If onboard not run → onboard({ path: "." }) # First-time codebase analysis
74
+ flow_list({}) # See available flows
75
+ # Select flow based on task → flow_start({ flow: "<name>" }) # Start flow if appropriate
76
+ list() # See stored knowledge
77
+ search({ query: "SESSION CHECKPOINT", origin: "curated" }) # Resume prior work
78
+ \`\`\`
79
+
80
+ ## MCP Tool Categories
81
+
82
+ | Category | Tools | Purpose |
83
+ |----------|-------|---------|
84
+ | Flows | \`flow_list\`, \`flow_info\`, \`flow_start\`, \`flow_step\`, \`flow_status\`, \`flow_read_skill\`, \`flow_reset\` | Structured multi-step workflows |
85
+
86
+ ---
87
+
88
+ ## Domain Skills
89
+
90
+ Your agent file lists domain-specific skills in the **Skills** section. Load them as needed:
91
+
92
+ 1. Check if the current task matches a listed skill trigger
93
+ 2. If yes → load the skill file before starting implementation
94
+ 3. The \`aikit\` skill is **always loaded** — do not re-load it
95
+
96
+ > If no skills are listed for your agent, rely on AI Kit tools and onboard artifacts.
97
+
98
+ ---
99
+
65
100
  ## Information Lookup Order (MANDATORY)
66
101
 
67
102
  Always follow this order when you need to understand something. **Never skip to step 3 without checking steps 1-2 first.**
@@ -11,6 +11,13 @@ You are **Code-Reviewer-Alpha**, the primary Code-Reviewer agent.
11
11
 
12
12
  **Read .github/agents/_shared/code-reviewer-base.md NOW** — it contains your complete workflow and guidelines. All instructions there apply to you.
13
13
 
14
+ ## Skills (load on demand)
15
+
16
+ | Skill | When to load |
17
+ |-------|--------------|
18
+ | aikit | **Always** — AI Kit tool signatures, search, analysis |
19
+ | typescript | When reviewing TypeScript code — type patterns, best practices |
20
+
14
21
  ## Flows
15
22
 
16
23
  This project uses aikit's pluggable flow system. Check flow status with the `flow_status` MCP tool.
@@ -11,6 +11,13 @@ You are **Code-Reviewer-Beta**, a variant of Code-Reviewer. Same responsibilitie
11
11
 
12
12
  **Read .github/agents/_shared/code-reviewer-base.md NOW** — it contains your complete workflow and guidelines. All instructions there apply to you.
13
13
 
14
+ ## Skills (load on demand)
15
+
16
+ | Skill | When to load |
17
+ |-------|--------------|
18
+ | aikit | **Always** — AI Kit tool signatures, search, analysis |
19
+ | typescript | When reviewing TypeScript code — type patterns, best practices |
20
+
14
21
  ## Flows
15
22
 
16
23
  This project uses aikit's pluggable flow system. Check flow status with the `flow_status` MCP tool.
@@ -32,6 +32,13 @@ You are the **Debugger**, expert debugger that diagnoses issues, traces errors,
32
32
  - **Test the fix** — Every fix must have a test that would have caught the bug
33
33
  - **Verify before asserting** — Don't claim a function has a certain signature without checking via `symbol`. Don't reference a config option without confirming it exists in the codebase
34
34
  - **Break debug loops** — If you apply a fix, test, and get the same error 3 times: your hypothesis is wrong. STOP, discard your current theory, re-examine the error output and trace from a different entry point. Return `ESCALATE` if a fresh approach also fails
35
+ ## Skills (load on demand)
36
+
37
+ | Skill | When to load |
38
+ |-------|--------------|
39
+ | aikit | **Always** — AI Kit tool signatures, search, analysis |
40
+ | typescript | When writing TypeScript code — type patterns, generics, utility types |
41
+
35
42
 
36
43
  ## Flows
37
44
 
@@ -51,6 +51,7 @@ You are the **Documenter**, documentation specialist that creates and maintains
51
51
  |-------|--------------|
52
52
  | `c4-architecture` | When documenting system architecture — generate C4 Mermaid diagrams |
53
53
  | `adr-skill` | When documenting architecture decisions — create or update ADRs |
54
+ | `typescript` | When documenting TypeScript APIs — type signatures, JSDoc patterns |
54
55
 
55
56
  ## Flows
56
57
 
@@ -61,6 +61,12 @@ You are the **Explorer**, rapid codebase exploration to find files, usages, depe
61
61
  - **Speed over depth** — Provide a useful map quickly, not an exhaustive analysis
62
62
  - **Read-only** — Never create, edit, or delete files
63
63
  - **Structured output** — Always return findings in the format above
64
+ ## Skills (load on demand)
65
+
66
+ | Skill | When to load |
67
+ |-------|--------------|
68
+ | aikit | **Always** — AI Kit tool signatures, search, analysis |
69
+
64
70
 
65
71
  ## Flows
66
72
 
@@ -27,6 +27,15 @@ You are the **Frontend**, ui/ux specialist for react, styling, responsive design
27
27
  - **Follow design system** — Use existing tokens, don't create one-off values
28
28
  - **Responsive by default** — Mobile-first, test all breakpoints
29
29
  - **Test-first** — Component tests before implementation
30
+ ## Skills (load on demand)
31
+
32
+ | Skill | When to load |
33
+ |-------|--------------|
34
+ | aikit | **Always** — AI Kit tool signatures, search, analysis |
35
+ | react | When building React components — hooks, patterns, Server Components |
36
+ | typescript | When writing TypeScript code — type patterns, generics, utility types |
37
+ | frontend-design | When implementing UI/UX — design systems, accessibility, responsive patterns |
38
+
30
39
 
31
40
  ## Flows
32
41
 
@@ -31,6 +31,13 @@ You are the **Implementer**, persistent implementation agent that writes code fo
31
31
  - **Run `check` after every change** — Catch errors early
32
32
  - **Loop-break** — If the same test fails 3 times with the same error after your fixes, STOP. Re-read the error from scratch, check your assumptions with `trace` or `symbol`, and try a fundamentally different approach. Do not attempt a 4th fix in the same direction
33
33
  - **Think-first for complex tasks** — If a task involves 3+ files or non-obvious logic, outline your approach before writing code. Check existing patterns with `search` first. Design, then implement
34
+ ## Skills (load on demand)
35
+
36
+ | Skill | When to load |
37
+ |-------|--------------|
38
+ | aikit | **Always** — AI Kit tool signatures, search, analysis |
39
+ | typescript | When writing TypeScript code — type patterns, generics, utility types |
40
+
34
41
 
35
42
  ## Flows
36
43
 
@@ -31,9 +31,9 @@ You orchestrate the full development lifecycle: **planning → implementation
31
31
  | **Documenter** | Documentation specialist that creates and maintains comprehensive project documentation | GPT-5.4 | documentation |
32
32
  | **Explorer** | Rapid codebase exploration to find files, usages, dependencies, and structural context | Gemini 3 Flash (Preview) | exploration |
33
33
  | **Researcher-Alpha** | Primary deep research agent — also serves as default Researcher | Claude Opus 4.6 | research |
34
- | **Researcher-Beta** | Research variant for multi-model decision protocol different LLM perspective | Claude Sonnet 4.6 | research |
35
- | **Researcher-Gamma** | Research variant for multi-model decision protocol different LLM perspective | GPT-5.4 | research |
36
- | **Researcher-Delta** | Research variant for multi-model decision protocol different LLM perspective | Gemini 3.1 Pro (Preview) | research |
34
+ | **Researcher-Beta** | Research variant — pragmatic analysis with focus on trade-offs and edge cases | Claude Sonnet 4.6 | research |
35
+ | **Researcher-Gamma** | Research variant — broad pattern matching across domains and technologies | GPT-5.4 | research |
36
+ | **Researcher-Delta** | Research variant — implementation feasibility and performance implications | Gemini 3.1 Pro (Preview) | research |
37
37
  | **Code-Reviewer-Alpha** | Primary code reviewer | GPT-5.4 | review |
38
38
  | **Code-Reviewer-Beta** | Code reviewer variant — different LLM perspective for dual review | Claude Opus 4.6 | review |
39
39
  | **Architect-Reviewer-Alpha** | Primary architecture reviewer | GPT-5.4 | review |
@@ -187,6 +187,8 @@ Before every tool call, verify:
187
187
  | `session-handoff` | Context filling up, session ending, or major milestone |
188
188
  | `lesson-learned` | After completing work — extract engineering principles |
189
189
 
190
+ **When dispatching subagents**, include relevant skill names in the prompt so subagents know which skills to load (e.g., "Load the `react` and `typescript` skills for this task").
191
+
190
192
  ## Flow-Aware Routing
191
193
 
192
194
  At session start, check for an active flow:
@@ -16,9 +16,9 @@ This directory contains AI agent definitions generated by `@vpxa/aikit init`.
16
16
  | **Documenter** | Documentation specialist that creates and maintains comprehensive project documentation | GPT-5.4 | documentation |
17
17
  | **Explorer** | Rapid codebase exploration to find files, usages, dependencies, and structural context | Gemini 3 Flash (Preview) | exploration |
18
18
  | **Researcher-Alpha** | Primary deep research agent — also serves as default Researcher | Claude Opus 4.6 | research |
19
- | **Researcher-Beta** | Research variant for multi-model decision protocol different LLM perspective | Claude Sonnet 4.6 | research |
20
- | **Researcher-Gamma** | Research variant for multi-model decision protocol different LLM perspective | GPT-5.4 | research |
21
- | **Researcher-Delta** | Research variant for multi-model decision protocol different LLM perspective | Gemini 3.1 Pro (Preview) | research |
19
+ | **Researcher-Beta** | Research variant — pragmatic analysis with focus on trade-offs and edge cases | Claude Sonnet 4.6 | research |
20
+ | **Researcher-Gamma** | Research variant — broad pattern matching across domains and technologies | GPT-5.4 | research |
21
+ | **Researcher-Delta** | Research variant — implementation feasibility and performance implications | Gemini 3.1 Pro (Preview) | research |
22
22
  | **Code-Reviewer-Alpha** | Primary code reviewer | GPT-5.4 | review |
23
23
  | **Code-Reviewer-Beta** | Code reviewer variant — different LLM perspective for dual review | Claude Opus 4.6 | review |
24
24
  | **Architect-Reviewer-Alpha** | Primary architecture reviewer | GPT-5.4 | review |
@@ -34,6 +34,7 @@ You are the **Refactor**, code refactoring specialist that improves structure, r
34
34
  | Skill | When to load |
35
35
  |-------|--------------|
36
36
  | `lesson-learned` | After completing a refactor — extract principles from the before/after diff |
37
+ | `typescript` | When refactoring TypeScript code — type patterns, generics, utility types |
37
38
 
38
39
  ## Flows
39
40
 
@@ -1,5 +1,5 @@
1
1
  ---
2
- description: 'Research variant for multi-model decision protocol different LLM perspective'
2
+ description: 'Research variant — pragmatic analysis with focus on trade-offs and edge cases'
3
3
  argument-hint: Research question, problem statement, or subsystem to investigate
4
4
  tools: [execute/runInTerminal, read/problems, read/readFile, read/terminalLastCommand, agent/runSubagent, search/changes, search/codebase, search/usages, web/fetch, web/githubRepo, browser/openBrowserPage, browser/readPage, browser/screenshotPage, browser/navigatePage, browser/clickElement, browser/dragElement, browser/hoverElement, browser/typeInPage, browser/runPlaywrightCode, browser/handleDialog, aikit/*]
5
5
  model: Claude Sonnet 4.6 (copilot)
@@ -7,7 +7,7 @@ model: Claude Sonnet 4.6 (copilot)
7
7
 
8
8
  # Researcher-Beta - The Context Gatherer
9
9
 
10
- You are **Researcher-Beta**, a variant of the Researcher agent. You exist to provide a **different LLM perspective** during multi-model decision sessions. Approach problems with the same rigor but bring your own reasoning style.
10
+ You are **Researcher-Beta**, a variant of the Researcher agent optimized for **pragmatic analysis**. Focus on trade-offs, edge cases, and practical constraints. Challenge assumptions and highlight risks the primary researcher may overlook.
11
11
 
12
12
  **Read .github/agents/_shared/researcher-base.md NOW** — it contains your complete workflow and guidelines. All instructions there apply to you.
13
13
 
@@ -1,5 +1,5 @@
1
1
  ---
2
- description: 'Research variant for multi-model decision protocol different LLM perspective'
2
+ description: 'Research variant — implementation feasibility and performance implications'
3
3
  argument-hint: Research question, problem statement, or subsystem to investigate
4
4
  tools: [execute/runInTerminal, read/problems, read/readFile, read/terminalLastCommand, agent/runSubagent, search/changes, search/codebase, search/usages, web/fetch, web/githubRepo, browser/openBrowserPage, browser/readPage, browser/screenshotPage, browser/navigatePage, browser/clickElement, browser/dragElement, browser/hoverElement, browser/typeInPage, browser/runPlaywrightCode, browser/handleDialog, aikit/*]
5
5
  model: Gemini 3.1 Pro (Preview) (copilot)
@@ -7,7 +7,7 @@ model: Gemini 3.1 Pro (Preview) (copilot)
7
7
 
8
8
  # Researcher-Delta - The Context Gatherer
9
9
 
10
- You are **Researcher-Delta**, a variant of the Researcher agent. You exist to provide a **different LLM perspective** during multi-model decision sessions. Approach problems with the same rigor but bring your own reasoning style.
10
+ You are **Researcher-Delta**, a variant of the Researcher agent optimized for **implementation feasibility**. Focus on performance implications, scaling concerns, and concrete implementation paths. Ground theoretical proposals in practical reality.
11
11
 
12
12
  **Read .github/agents/_shared/researcher-base.md NOW** — it contains your complete workflow and guidelines. All instructions there apply to you.
13
13
 
@@ -1,5 +1,5 @@
1
1
  ---
2
- description: 'Research variant for multi-model decision protocol different LLM perspective'
2
+ description: 'Research variant — broad pattern matching across domains and technologies'
3
3
  argument-hint: Research question, problem statement, or subsystem to investigate
4
4
  tools: [execute/runInTerminal, read/problems, read/readFile, read/terminalLastCommand, agent/runSubagent, search/changes, search/codebase, search/usages, web/fetch, web/githubRepo, browser/openBrowserPage, browser/readPage, browser/screenshotPage, browser/navigatePage, browser/clickElement, browser/dragElement, browser/hoverElement, browser/typeInPage, browser/runPlaywrightCode, browser/handleDialog, aikit/*]
5
5
  model: GPT-5.4 (copilot)
@@ -7,7 +7,7 @@ model: GPT-5.4 (copilot)
7
7
 
8
8
  # Researcher-Gamma - The Context Gatherer
9
9
 
10
- You are **Researcher-Gamma**, a variant of the Researcher agent. You exist to provide a **different LLM perspective** during multi-model decision sessions. Approach problems with the same rigor but bring your own reasoning style.
10
+ You are **Researcher-Gamma**, a variant of the Researcher agent optimized for **cross-domain pattern matching**. Draw connections from other domains, frameworks, and industries. Bring breadth where Alpha brings depth.
11
11
 
12
12
  **Read .github/agents/_shared/researcher-base.md NOW** — it contains your complete workflow and guidelines. All instructions there apply to you.
13
13
 
@@ -53,6 +53,13 @@ You are the **Security**, security specialist that analyzes code for vulnerabili
53
53
  ### Findings
54
54
  1. **[SEVERITY]** Title — Description, file:line, remediation
55
55
  ```
56
+ ## Skills (load on demand)
57
+
58
+ | Skill | When to load |
59
+ |-------|--------------|
60
+ | aikit | **Always** — AI Kit tool signatures, search, analysis |
61
+ | typescript | When reviewing code — security patterns, type safety |
62
+
56
63
 
57
64
  ## Flows
58
65
 
@@ -37,9 +37,9 @@ You may be invoked in two modes:
37
37
 
38
38
  ---
39
39
 
40
- ## MANDATORY FIRST ACTION — Knowledge Base Initialization
40
+ ## MANDATORY FIRST ACTION — AI Kit Initialization
41
41
 
42
- **Before ANY other work**, check the knowledge base:
42
+ **Before ANY other work**, check the AI Kit index:
43
43
 
44
44
  1. Run `status({})` — check **Onboard Status** and note the **Onboard Directory** path
45
45
  2. If onboard shows ❌:
@@ -54,6 +54,41 @@ You may be invoked in two modes:
54
54
 
55
55
  ---
56
56
 
57
+ ## Session Protocol
58
+
59
+ ### Start (do ALL)
60
+
61
+ ```
62
+ flow_status({}) # Check/resume active flow FIRST
63
+ # If flow active → flow_read_skill({ step }) → follow skill instructions
64
+ status({}) # Check AI Kit health + onboard state
65
+ # If onboard not run → onboard({ path: "." }) # First-time codebase analysis
66
+ flow_list({}) # See available flows
67
+ # Select flow based on task → flow_start({ flow: "<name>" }) # Start flow if appropriate
68
+ list() # See stored knowledge
69
+ search({ query: "SESSION CHECKPOINT", origin: "curated" }) # Resume prior work
70
+ ```
71
+
72
+ ## MCP Tool Categories
73
+
74
+ | Category | Tools | Purpose |
75
+ |----------|-------|---------|
76
+ | Flows | `flow_list`, `flow_info`, `flow_start`, `flow_step`, `flow_status`, `flow_read_skill`, `flow_reset` | Structured multi-step workflows |
77
+
78
+ ---
79
+
80
+ ## Domain Skills
81
+
82
+ Your agent file lists domain-specific skills in the **Skills** section. Load them as needed:
83
+
84
+ 1. Check if the current task matches a listed skill trigger
85
+ 2. If yes → load the skill file before starting implementation
86
+ 3. The `aikit` skill is **always loaded** — do not re-load it
87
+
88
+ > If no skills are listed for your agent, rely on AI Kit tools and onboard artifacts.
89
+
90
+ ---
91
+
57
92
  ## Information Lookup Order (MANDATORY)
58
93
 
59
94
  Always follow this order when you need to understand something. **Never skip to step 3 without checking steps 1-2 first.**
@@ -1,9 +1,9 @@
1
1
  ---
2
2
  name: aikit
3
- description: "Use the @vpxa/aikit Knowledge Base MCP server for codebase search, analysis, and persistent memory. Load this skill when using aikit_search, aikit_remember, aikit_analyze_*, or any aikit_* tool. Covers all 67 MCP tools: search (hybrid/semantic/keyword), code analysis (structure, dependencies, symbols, patterns, entry points, diagrams, blast radius), knowledge graph (auto-populated module/symbol/import graph with traversal), context management (worksets, stash, checkpoints, restore, lanes), code manipulation (rename, codemod, eval), knowledge management (remember/read/update/forget), web access (fetch, search, http), FORGE protocol (ground, classify, evidence map, stratum cards, digest), brainstorming (interactive ideation sessions), presentation (rich dashboards via present tool), onboarding (full codebase analysis in one call), and developer utilities (regex, encode, measure, changelog, schema-validate, snippet, env, time)."
3
+ description: "Use the @vpxa/aikit AI Kit MCP server for codebase search, analysis, and persistent memory. Load this skill when using aikit_search, aikit_remember, aikit_analyze_*, or any aikit_* tool. Covers all 67 MCP tools: search (hybrid/semantic/keyword), code analysis (structure, dependencies, symbols, patterns, entry points, diagrams, blast radius), knowledge graph (auto-populated module/symbol/import graph with traversal), context management (worksets, stash, checkpoints, restore, lanes), code manipulation (rename, codemod, eval), knowledge management (remember/read/update/forget), web access (fetch, search, http), FORGE protocol (ground, classify, evidence map, stratum cards, digest), brainstorming (interactive ideation sessions), presentation (rich dashboards via present tool), onboarding (full codebase analysis in one call), and developer utilities (regex, encode, measure, changelog, schema-validate, snippet, env, time)."
4
4
  ---
5
5
 
6
- # @vpxa/aikit — Knowledge Base Toolkit
6
+ # @vpxa/aikit — AI Kit
7
7
 
8
8
  Local-first AI developer toolkit — 67 MCP tools for search, analysis, context compression, FORGE quality gates, knowledge management, code manipulation, execution, web access, brainstorming, presentation, and developer utilities.
9
9
 
@@ -40,11 +40,16 @@ core → store → embeddings → chunker → indexer → analyzers → tools
40
40
 
41
41
  ## Session Protocol (MANDATORY)
42
42
 
43
- ### Start of Session
43
+ ### Start (do ALL)
44
44
  ```
45
- status()
46
- search({ query: "SESSION CHECKPOINT", origin: "curated" })
47
- list({ category: "conventions" })
45
+ flow_status({}) # Check/resume active flow FIRST
46
+ # If flow active → flow_read_skill({ step }) → follow skill instructions
47
+ status({}) # Check AI Kit health + onboard state
48
+ # If onboard not run → onboard({ path: "." }) # First-time codebase analysis
49
+ flow_list({}) # See available flows
50
+ # Select flow based on task → flow_start({ flow: "<name>" }) # Start flow if appropriate
51
+ list() # See stored knowledge
52
+ search({ query: "SESSION CHECKPOINT", origin: "curated" }) # Resume prior work
48
53
  ```
49
54
 
50
55
  ### During Session
@@ -176,6 +181,17 @@ Lane actions: `create` (copy files to lane), `list`, `status` (modified/added/de
176
181
  | `queue` | `aikit queue` | Task queue for sequential agent operations (create/push/next/done/fail) |
177
182
  | `replay` | `aikit replay` | View or clear the audit trail of tool invocations (action: list/clear) |
178
183
 
184
+ ### Flows (7)
185
+ | Tool | CLI | Purpose |
186
+ |------|-----|---------|
187
+ | `flow_list` | `aikit flow list` | List available flows |
188
+ | `flow_info` | `aikit flow info` | Get flow details including steps and skill paths |
189
+ | `flow_start` | `aikit flow start` | Start a named flow |
190
+ | `flow_step` | `aikit flow step` | Advance, skip, or redo the current step |
191
+ | `flow_status` | `aikit flow status` | Check if a flow is active and view the current step |
192
+ | `flow_read_skill` | `aikit flow read-skill` | Read the current step's skill file content directly |
193
+ | `flow_reset` | `aikit flow reset` | Clear flow state to start over |
194
+
179
195
  ### Presentation (1)
180
196
  | Tool | CLI | Purpose |
181
197
  |------|-----|---------|
@@ -186,7 +202,79 @@ Lane actions: `create` (copy files to lane), `list`, `status` (modified/added/de
186
202
  |------|-----|---------|
187
203
  | `brainstorm` | — | Interactive brainstorming and ideation sessions with structured output |
188
204
 
189
- ## Search Strategy
205
+ ## Flow System
206
+
207
+ Flows are multi-step guided workflows that structure complex tasks. Each step has a skill file with detailed instructions, required artifacts, and agent assignments.
208
+
209
+ ### Built-in Flows
210
+
211
+ | Flow | Steps | Use When |
212
+ |------|-------|----------|
213
+ | `aikit:basic` | assess → implement → verify | Bug fixes, config changes, small features |
214
+ | `aikit:advanced` | spec → plan → task → execute → verify | New modules, cross-service changes, architectural work |
215
+
216
+ ### Flow Lifecycle
217
+
218
+ ```text
219
+ flow_list() # See available flows
220
+ flow_info({ flow: "aikit:basic" }) # View steps, skills, agents
221
+ flow_start({ flow: "aikit:basic" }) # Start — sets current step to first
222
+ flow_read_skill({ step: "assess" }) # Read current step's skill instructions
223
+ # ... do the work described in the skill ...
224
+ flow_step({ action: "next" }) # Advance to next step
225
+ flow_step({ action: "skip" }) # Skip current step
226
+ flow_step({ action: "redo" }) # Redo current step
227
+ flow_status() # Check progress
228
+ flow_reset() # Clear state, start over
229
+ ```
230
+
231
+ ### Creating Custom Flows
232
+
233
+ 1. Create a directory under `.github/flows/<flow-name>/`
234
+ 2. Add `manifest.yaml`:
235
+
236
+ ```yaml
237
+ name: my-flow
238
+ version: "1.0.0"
239
+ description: "My custom flow"
240
+ artifacts_dir: .spec
241
+ steps:
242
+ - id: design
243
+ name: Design
244
+ skill: skills/design/SKILL.md
245
+ description: "Create the design document"
246
+ produces: [design.md]
247
+ requires: []
248
+ agents: [Planner]
249
+ - id: implement
250
+ name: Implement
251
+ skill: skills/implement/SKILL.md
252
+ description: "Implement the design"
253
+ produces: [code]
254
+ requires: [design]
255
+ agents: [Implementer]
256
+ agents:
257
+ - agents/planner.agent.md
258
+ install: []
259
+ ```
260
+
261
+ 3. Add skill files under `.github/flows/<flow-name>/skills/<step-id>/SKILL.md`
262
+ 4. The flow appears in `flow_list()` automatically
263
+
264
+ ### How Flows Are Delivered
265
+
266
+ - **Built-in flows** ship with the AI Kit MCP server in `scaffold/flows/`
267
+ - `aikit init` copies them to `.github/flows/` in your workspace
268
+ - At runtime, flow tools resolve paths to `.github/flows/` (workspace-local copies)
269
+ - Custom flows placed in `.github/flows/` are discovered alongside built-in ones
270
+
271
+ ### Agent-Flow Integration
272
+
273
+ - All code agents have a "Flows" section instructing them to check `flow_status()` first
274
+ - If a flow is active, the agent follows the current step's skill via `flow_read_skill()`
275
+ - After completing a step's work, advance with `flow_step({ action: "next" })`
276
+ - The **Orchestrator** selects and starts flows; other agents follow them
277
+ - The **Orchestrator** specifies skill loading when dispatching — all agents should load the `aikit` skill to access flow tools
190
278
 
191
279
  ## CRITICAL: Use AI Kit Tools Instead of Native IDE Tools
192
280
 
@@ -420,102 +508,37 @@ node bin/aikit.mjs serve --http --port 3210
420
508
 
421
509
  ---
422
510
 
423
- ## Flow System
424
-
425
- Flows are **pluggable, multi-step development workflows** that orchestrate agents through structured phases. Each flow is a directory containing a manifest (`flow.json`) and step skills (`skills/<step>/SKILL.md`).
426
-
427
- ### Architecture
428
-
429
- ```
430
- Flow Layer — Pluggable workflows (git repos, local dirs, built-in)
431
- Foundation Layer — Agents, MCP tools, skills (what you already have)
432
- Base Layer — aikit MCP server, CLI, index engine
433
- ```
434
-
435
- Flows compose Foundation Layer components (agents + skills + tools) into repeatable processes. They do NOT replace the foundation — they orchestrate it.
436
-
437
- ### Built-in Flows
511
+ ## Flows
438
512
 
439
- | Flow | Steps | Use When |
440
- |------|-------|----------|
441
- | `aikit:basic` | assess → implement → verify | Bug fixes, small features, refactoring |
442
- | `aikit:advanced` | spec → plan → task → execute → verify | New features, API design, architecture changes |
513
+ Flows are structured multi-step workflows that guide agents through complex tasks. They are the **primary workflow system** — use them instead of ad-hoc planning when a matching flow exists.
443
514
 
444
- ### Flow Manifest (`flow.json`)
515
+ ### Flow Tools
445
516
 
446
- ```json
447
- {
448
- "name": "aikit:basic",
449
- "version": "0.1.0",
450
- "description": "Quick development flow",
451
- "steps": [
452
- {
453
- "id": "assess",
454
- "name": "Assessment",
455
- "skill": "skills/assess/SKILL.md",
456
- "produces": ["assessment.md"],
457
- "requires": [],
458
- "agents": ["Explorer", "Researcher-Alpha"],
459
- "description": "Understand scope, analyze codebase"
460
- }
461
- ],
462
- "artifacts_dir": ".spec",
463
- "install": []
464
- }
465
- ```
466
-
467
- **Key fields:**
468
- - `artifacts_dir` — root folder for all flow artifacts (default: `.spec`). Each step writes to a work subfolder: `.spec/<step-slug>/`
469
- - `install` — external skills needed by this flow. Empty for built-ins. For custom flows: `[{ "name": "my-skill", "repo": "org/repo", "agent": "copilot" }]`
470
- - `steps[].produces` / `steps[].requires` — declare artifact dependencies between steps. A step can only start when all its `requires` artifacts exist.
471
-
472
- ### CLI Commands
517
+ | Tool | Purpose |
518
+ |------|---------|
519
+ | `flow_status` | Check if a flow is active + current step |
520
+ | `flow_list` | List available flows |
521
+ | `flow_info` | Get flow details including steps and skill paths |
522
+ | `flow_start` | Start a named flow |
523
+ | `flow_step` | Advance: `next`, `skip`, or `redo` current step |
524
+ | `flow_read_skill` | Read the current step's skill file content directly |
525
+ | `flow_reset` | Clear flow state to start over |
473
526
 
474
- | Command | Purpose |
475
- |---------|---------|
476
- | `aikit flow add <source>` | Install flow from git URL, local path, or built-in name |
477
- | `aikit flow remove <name>` | Remove an installed flow |
478
- | `aikit flow list` | List installed flows |
479
- | `aikit flow use <name>` | Set active flow for current project |
480
- | `aikit flow status` | Show current step, artifacts, progress |
481
- | `aikit flow start [step]` | Begin flow (or resume from step) |
482
- | `aikit flow reset` | Reset flow state to beginning |
527
+ ### Flow Selection
483
528
 
484
- ### State Machine
529
+ | Task Type | Flow | Why |
530
+ |-----------|------|-----|
531
+ | Bug fix, config change, small refactor | `aikit:basic` | Known scope, low risk |
532
+ | New feature in existing module | `aikit:basic` | Clear boundaries |
533
+ | New system/service/module | `aikit:advanced` | Needs spec + planning |
534
+ | Cross-service changes | `aikit:advanced` | Multiple boundaries |
535
+ | Architectural change | `aikit:advanced` | High impact |
536
+ | Unclear scope or exploratory | No flow | Use agent's native workflow |
485
537
 
486
- Flow progress is tracked in `.spec/flow-state.json`:
487
-
488
- ```json
489
- {
490
- "flow": "aikit:basic",
491
- "current_step": "implement",
492
- "steps": {
493
- "assess": { "status": "completed", "artifacts": ["assessment.md"] },
494
- "implement": { "status": "in_progress", "artifacts": [] },
495
- "verify": { "status": "pending", "artifacts": [] }
496
- }
497
- }
498
- ```
499
-
500
- Step transitions: `pending` → `in_progress` → `completed` (or `failed` → retry). A step can only start when all its `requires` artifacts exist.
501
-
502
- ### Flow Sources
503
-
504
- Flows can be installed from three sources:
505
-
506
- 1. **Built-in** — Ship with aikit (`aikit:basic`, `aikit:advanced`)
507
- 2. **Git** — `aikit flow add https://github.com/org/my-flow.git`
508
- 3. **Local** — `aikit flow add ./my-custom-flow`
509
-
510
- ### Creating Custom Flows
511
-
512
- ```
513
- my-flow/
514
- flow.json # Manifest with steps, agents, dependencies
515
- skills/
516
- step-one/SKILL.md
517
- step-two/SKILL.md
518
- README.md # Optional documentation
519
- ```
538
+ ### Flow Lifecycle
520
539
 
521
- Each step skill follows standard SKILL.md format with sections: Purpose, Inputs, Process, Outputs, Agents, Completion Criteria. Steps produce artifacts into `.spec/` that downstream steps consume.
540
+ 1. **Start**: `flow_list({})` → choose flow → `flow_start({ flow: "<name>" })`
541
+ 2. **Each step**: `flow_read_skill({ step: "<name>" })` → follow skill instructions → complete work
542
+ 3. **Advance**: `flow_step({ action: "next" })` → repeat from step 2
543
+ 4. **Resume**: `flow_status({})` → if active, `flow_read_skill` for current step → continue
544
+ 5. **Reset**: `flow_reset({})` if you need to start over