@vpxa/aikit 0.1.19 → 0.1.21

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. package/package.json +1 -1
  2. package/packages/core/dist/types.d.ts +2 -0
  3. package/packages/flows/dist/adapters/claude-plugin.js +1 -1
  4. package/packages/flows/dist/adapters/copilot.js +1 -1
  5. package/packages/flows/dist/builtins.js +1 -1
  6. package/packages/flows/dist/loader.js +1 -1
  7. package/packages/flows/dist/types.d.ts +2 -2
  8. package/packages/indexer/dist/smart-index-scheduler.d.ts +4 -1
  9. package/packages/indexer/dist/smart-index-scheduler.js +1 -1
  10. package/packages/server/dist/index.js +1 -1
  11. package/packages/server/dist/tool-metadata.js +1 -1
  12. package/packages/server/dist/tools/flow.tools.js +1 -1
  13. package/packages/store/dist/lance-store.d.ts +2 -0
  14. package/packages/store/dist/lance-store.js +1 -1
  15. package/scaffold/adapters/claude-code.mjs +4 -22
  16. package/scaffold/definitions/bodies.mjs +74 -62
  17. package/scaffold/definitions/plugins.mjs +92 -0
  18. package/scaffold/definitions/protocols.mjs +2 -2
  19. package/scaffold/flows/aikit-advanced/README.md +70 -0
  20. package/scaffold/flows/aikit-advanced/flow.json +15 -6
  21. package/scaffold/flows/aikit-advanced/steps/design/README.md +164 -0
  22. package/scaffold/flows/aikit-advanced/{skills/execute/SKILL.md → steps/execute/README.md} +19 -0
  23. package/scaffold/flows/aikit-advanced/{skills/plan/SKILL.md → steps/plan/README.md} +20 -0
  24. package/scaffold/flows/aikit-advanced/{skills/spec/SKILL.md → steps/spec/README.md} +19 -0
  25. package/scaffold/flows/aikit-advanced/{skills/task/SKILL.md → steps/task/README.md} +18 -0
  26. package/scaffold/flows/aikit-advanced/{skills/verify/SKILL.md → steps/verify/README.md} +21 -0
  27. package/scaffold/flows/aikit-basic/README.md +51 -0
  28. package/scaffold/flows/aikit-basic/flow.json +13 -4
  29. package/scaffold/flows/aikit-basic/{skills/assess/SKILL.md → steps/assess/README.md} +25 -0
  30. package/scaffold/flows/aikit-basic/steps/design/README.md +107 -0
  31. package/scaffold/flows/aikit-basic/{skills/implement/SKILL.md → steps/implement/README.md} +24 -0
  32. package/scaffold/flows/aikit-basic/{skills/verify/SKILL.md → steps/verify/README.md} +25 -0
  33. package/scaffold/general/agents/Orchestrator.agent.md +61 -53
  34. package/scaffold/general/agents/Planner.agent.md +12 -8
  35. package/scaffold/general/agents/_shared/code-agent-base.md +2 -2
  36. package/scaffold/general/skills/adr-skill/SKILL.md +6 -6
  37. package/scaffold/general/skills/aikit/SKILL.md +10 -10
  38. package/scaffold/general/skills/brainstorming/SKILL.md +11 -13
  39. package/scaffold/general/skills/c4-architecture/SKILL.md +1 -0
  40. package/scaffold/general/skills/requirements-clarity/SKILL.md +5 -3
  41. package/scaffold/general/skills/session-handoff/SKILL.md +2 -0
  42. package/scaffold/general/skills/brainstorming/scripts/frame-template.html +0 -365
  43. package/scaffold/general/skills/brainstorming/scripts/helper.js +0 -216
  44. package/scaffold/general/skills/brainstorming/scripts/server.cjs +0 -9
  45. package/scaffold/general/skills/brainstorming/scripts/server.src.cjs +0 -249
  46. package/scaffold/general/skills/brainstorming/visual-companion.md +0 -430
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@vpxa/aikit",
3
- "version": "0.1.19",
3
+ "version": "0.1.21",
4
4
  "type": "module",
5
5
  "description": "Local-first AI developer toolkit — knowledge base, code analysis, context management, and developer tools for LLM agents",
6
6
  "license": "MIT",
@@ -108,6 +108,8 @@ interface KBConfig {
108
108
  chunkOverlap: number;
109
109
  minChunkSize: number; /** Max files processed concurrently. Defaults to half of available CPU cores. */
110
110
  concurrency?: number;
111
+ trickleIntervalMs?: number;
112
+ trickleBatchSize?: number;
111
113
  };
112
114
  embedding: {
113
115
  model: string;
@@ -1 +1 @@
1
- import{existsSync as e,readFileSync as t,readdirSync as n}from"node:fs";import{basename as r,join as i}from"node:path";const a=[`spec`,`plan`,`task`,`execute`,`verify`];var o=class{format=`claude-plugin`;detect(t){return e(i(t,`.claude-plugin`,`plugin.json`))}async parse(e){let n=i(e,`.claude-plugin`,`plugin.json`),a=JSON.parse(t(n,`utf-8`)),o=this.discoverSteps(e),s=this.discoverAgents(e);return{name:a.name??r(e),version:a.version??`1.0.0`,description:a.description??``,author:a.author,steps:o,agents:s,artifacts_dir:`.spec`,install:[]}}discoverSteps(t){let r=i(t,`skills`);if(!e(r))return[];let o=n(r,{withFileTypes:!0}).filter(e=>e.isDirectory()).map(e=>e.name).sort((e,t)=>{let n=a.indexOf(e),r=a.indexOf(t);return n!==-1&&r!==-1?n-r:n===-1?r===-1?e.localeCompare(t):1:-1});return o.map((e,t)=>({id:e,name:e.charAt(0).toUpperCase()+e.slice(1),skill:`skills/${e}/SKILL.md`,produces:[`${e}.md`],requires:t>0?[o[t-1]]:[],agents:[],description:`${e} step`}))}discoverAgents(t){let r=i(t,`agents`);return e(r)?n(r,{withFileTypes:!0}).filter(e=>e.isFile()&&e.name.endsWith(`.md`)).map(e=>`agents/${e.name}`):[]}};export{o as ClaudePluginAdapter};
1
+ import{existsSync as e,readFileSync as t,readdirSync as n}from"node:fs";import{basename as r,join as i}from"node:path";const a=[`spec`,`plan`,`task`,`execute`,`verify`];var o=class{format=`claude-plugin`;detect(t){return e(i(t,`.claude-plugin`,`plugin.json`))}async parse(e){let n=i(e,`.claude-plugin`,`plugin.json`),a=JSON.parse(t(n,`utf-8`)),o=this.discoverSteps(e),s=this.discoverAgents(e);return{name:a.name??r(e),version:a.version??`1.0.0`,description:a.description??``,author:a.author,steps:o,agents:s,artifacts_dir:`.spec`,install:[]}}discoverSteps(t){let r=i(t,`skills`);if(!e(r))return[];let o=n(r,{withFileTypes:!0}).filter(e=>e.isDirectory()).map(e=>e.name).sort((e,t)=>{let n=a.indexOf(e),r=a.indexOf(t);return n!==-1&&r!==-1?n-r:n===-1?r===-1?e.localeCompare(t):1:-1});return o.map((e,t)=>({id:e,name:e.charAt(0).toUpperCase()+e.slice(1),instruction:`steps/${e}/README.md`,produces:[`${e}.md`],requires:t>0?[o[t-1]]:[],agents:[],description:`${e} step`}))}discoverAgents(t){let r=i(t,`agents`);return e(r)?n(r,{withFileTypes:!0}).filter(e=>e.isFile()&&e.name.endsWith(`.md`)).map(e=>`agents/${e.name}`):[]}};export{o as ClaudePluginAdapter};
@@ -1 +1 @@
1
- import{existsSync as e,readdirSync as t}from"node:fs";import{basename as n,join as r}from"node:path";var i=class{format=`copilot`;detect(t){return e(r(t,`.github`,`agents`))}async parse(e){let i=t(r(e,`.github`,`agents`),{withFileTypes:!0}).filter(e=>e.isFile()&&e.name.endsWith(`.md`)).map(e=>`.github/agents/${e.name}`),a=i.map((e,t)=>{let r=n(e,`.md`).toLowerCase().replace(/\.agent$/,``);return{id:r,name:r.charAt(0).toUpperCase()+r.slice(1),skill:e,produces:[`${r}.md`],requires:t>0?[i[t-1].replace(/.*\//,``).replace(/\.md$/,``).toLowerCase()]:[],agents:[e],description:`${r} agent step`}});return{name:n(e),version:`1.0.0`,description:`Copilot agents flow from ${n(e)}`,steps:a,agents:i,artifacts_dir:`.spec`,install:[]}}};export{i as CopilotAdapter};
1
+ import{existsSync as e,readdirSync as t}from"node:fs";import{basename as n,join as r}from"node:path";var i=class{format=`copilot`;detect(t){return e(r(t,`.github`,`agents`))}async parse(e){let i=t(r(e,`.github`,`agents`),{withFileTypes:!0}).filter(e=>e.isFile()&&e.name.endsWith(`.md`)).map(e=>`.github/agents/${e.name}`),a=i.map((e,t)=>{let r=n(e,`.md`).toLowerCase().replace(/\.agent$/,``);return{id:r,name:r.charAt(0).toUpperCase()+r.slice(1),instruction:e,produces:[`${r}.md`],requires:t>0?[i[t-1].replace(/.*\//,``).replace(/\.md$/,``).toLowerCase()]:[],agents:[e],description:`${r} agent step`}});return{name:n(e),version:`1.0.0`,description:`Copilot agents flow from ${n(e)}`,steps:a,agents:i,artifacts_dir:`.spec`,install:[]}}};export{i as CopilotAdapter};
@@ -1 +1 @@
1
- const e={name:`aikit:basic`,version:`0.1.0`,description:`Quick development flow for bug fixes, small features, and refactoring`,steps:[{id:`assess`,name:`Assessment`,skill:`skills/assess/SKILL.md`,produces:[`assessment.md`],requires:[],agents:[`Explorer`,`Researcher-Alpha`],description:`Understand scope, analyze codebase, identify approach`},{id:`implement`,name:`Implementation`,skill:`skills/implement/SKILL.md`,produces:[`progress.md`],requires:[`assessment.md`],agents:[`Implementer`,`Frontend`],description:`Write code following the assessment plan`},{id:`verify`,name:`Verification`,skill:`skills/verify/SKILL.md`,produces:[`verify-report.md`],requires:[`progress.md`],agents:[`Code-Reviewer-Alpha`,`Security`],description:`Review code, run tests, validate changes`}],agents:[],artifacts_dir:`.spec`,install:[]},t={name:`aikit:advanced`,version:`0.1.0`,description:`Full development flow for new features, API design, and architecture changes`,steps:[{id:`spec`,name:`Specification`,skill:`skills/spec/SKILL.md`,produces:[`spec.md`],requires:[],agents:[`Researcher-Alpha`],description:`Elicit requirements, clarify scope, define acceptance criteria`},{id:`plan`,name:`Planning`,skill:`skills/plan/SKILL.md`,produces:[`plan.md`],requires:[`spec.md`],agents:[`Planner`,`Explorer`],description:`Analyze codebase, design architecture, create implementation plan`},{id:`task`,name:`Task Breakdown`,skill:`skills/task/SKILL.md`,produces:[`tasks.md`],requires:[`plan.md`],agents:[`Planner`,`Architect-Reviewer-Alpha`],description:`Break plan into ordered implementation tasks with dependencies`},{id:`execute`,name:`Execution`,skill:`skills/execute/SKILL.md`,produces:[`progress.md`],requires:[`tasks.md`],agents:[`Orchestrator`,`Implementer`,`Frontend`,`Refactor`],description:`Implement all tasks, write code, write 
tests`},{id:`verify`,name:`Verification`,skill:`skills/verify/SKILL.md`,produces:[`verify-report.md`],requires:[`progress.md`],agents:[`Code-Reviewer-Alpha`,`Code-Reviewer-Beta`,`Architect-Reviewer-Alpha`,`Architect-Reviewer-Beta`,`Security`],description:`Dual code review, architecture review, security review, test validation`}],agents:[],artifacts_dir:`.spec`,install:[]};function n(){return[{manifest:e,scaffoldDir:`scaffold/flows/aikit-basic`},{manifest:t,scaffoldDir:`scaffold/flows/aikit-advanced`}]}export{n as getBuiltinFlows};
1
+ const e={name:`aikit:basic`,version:`0.1.0`,description:`Quick development flow for bug fixes, small features, and refactoring`,steps:[{id:`design`,name:`Design Gate`,instruction:`steps/design/README.md`,produces:[`design-decisions.md`],requires:[],agents:[`Researcher-Alpha`,`Researcher-Beta`,`Researcher-Gamma`,`Researcher-Delta`],description:`Evaluate task type, run brainstorming for features, FORGE classification. Auto-skips for bug fixes and refactors.`},{id:`assess`,name:`Assessment`,instruction:`steps/assess/README.md`,produces:[`assessment.md`],requires:[`design-decisions.md`],agents:[`Explorer`,`Researcher-Alpha`],description:`Understand scope, analyze codebase, identify approach`},{id:`implement`,name:`Implementation`,instruction:`steps/implement/README.md`,produces:[`progress.md`],requires:[`assessment.md`],agents:[`Implementer`,`Frontend`],description:`Write code following the assessment plan`},{id:`verify`,name:`Verification`,instruction:`steps/verify/README.md`,produces:[`verify-report.md`],requires:[`progress.md`],agents:[`Code-Reviewer-Alpha`,`Security`],description:`Review code, run tests, validate changes`}],agents:[],artifacts_dir:`.spec`,install:[]},t={name:`aikit:advanced`,version:`0.1.0`,description:`Full development flow for new features, API design, and architecture changes`,steps:[{id:`design`,name:`Design Gate`,instruction:`steps/design/README.md`,produces:[`design-decisions.md`],requires:[],agents:[`Researcher-Alpha`,`Researcher-Beta`,`Researcher-Gamma`,`Researcher-Delta`],description:`Full brainstorming, FORGE classification, decision protocol with parallel research. 
ADR for critical-tier tasks.`},{id:`spec`,name:`Specification`,instruction:`steps/spec/README.md`,produces:[`spec.md`],requires:[`design-decisions.md`],agents:[`Researcher-Alpha`],description:`Elicit requirements, clarify scope, define acceptance criteria`},{id:`plan`,name:`Planning`,instruction:`steps/plan/README.md`,produces:[`plan.md`],requires:[`spec.md`],agents:[`Planner`,`Explorer`],description:`Analyze codebase, design architecture, create implementation plan`},{id:`task`,name:`Task Breakdown`,instruction:`steps/task/README.md`,produces:[`tasks.md`],requires:[`plan.md`],agents:[`Planner`,`Architect-Reviewer-Alpha`],description:`Break plan into ordered implementation tasks with dependencies`},{id:`execute`,name:`Execution`,instruction:`steps/execute/README.md`,produces:[`progress.md`],requires:[`tasks.md`],agents:[`Orchestrator`,`Implementer`,`Frontend`,`Refactor`],description:`Implement all tasks, write code, write tests`},{id:`verify`,name:`Verification`,instruction:`steps/verify/README.md`,produces:[`verify-report.md`],requires:[`progress.md`],agents:[`Code-Reviewer-Alpha`,`Code-Reviewer-Beta`,`Architect-Reviewer-Alpha`,`Architect-Reviewer-Beta`,`Security`],description:`Dual code review, architecture review, security review, test validation`}],agents:[],artifacts_dir:`.spec`,install:[]};function n(){return[{manifest:e,scaffoldDir:`scaffold/flows/aikit-basic`},{manifest:t,scaffoldDir:`scaffold/flows/aikit-advanced`}]}export{n as getBuiltinFlows};
@@ -1,2 +1,2 @@
1
- import{getAdapter as e,getAdapterForSource as t}from"./adapters/index.js";import{existsSync as n}from"node:fs";var r=class{async load(e){if(!n(e))return{success:!1,error:`Source directory not found: ${e}`};let r=t(e);if(!r)return{success:!1,error:`No format adapter matches source: ${e}`};try{let t=await r.parse(e),n=this.validate(t);return n.success?{success:!0,data:{manifest:t,format:r.format}}:n}catch(e){return{success:!1,error:`Failed to parse flow: ${e instanceof Error?e.message:String(e)}`}}}async loadWithFormat(t,n){let r=e(n);try{let e=await r.parse(t),n=this.validate(e);return n.success?{success:!0,data:e}:n}catch(e){return{success:!1,error:`Failed to parse flow: ${e instanceof Error?e.message:String(e)}`}}}validate(e){let t=[];e.name?.trim()||t.push(`Missing flow name`),e.version?.trim()||t.push(`Missing flow version`),e.steps?.length||t.push(`Flow must have at least one step`);let n=new Set(e.steps.map(e=>e.id));for(let r of e.steps??[]){r.id?.trim()||t.push(`Step missing id`),r.skill?.trim()||t.push(`Step "${r.id}" missing skill path`);for(let e of r.requires??[])n.has(e)||t.push(`Step "${r.id}" requires unknown step "${e}"`)}return n.size!==(e.steps?.length??0)&&t.push(`Duplicate step IDs found`),t.length>0?{success:!1,error:`Validation failed:\n${t.join(`
1
+ import{getAdapter as e,getAdapterForSource as t}from"./adapters/index.js";import{existsSync as n}from"node:fs";var r=class{async load(e){if(!n(e))return{success:!1,error:`Source directory not found: ${e}`};let r=t(e);if(!r)return{success:!1,error:`No format adapter matches source: ${e}`};try{let t=await r.parse(e),n=this.validate(t);return n.success?{success:!0,data:{manifest:t,format:r.format}}:n}catch(e){return{success:!1,error:`Failed to parse flow: ${e instanceof Error?e.message:String(e)}`}}}async loadWithFormat(t,n){let r=e(n);try{let e=await r.parse(t),n=this.validate(e);return n.success?{success:!0,data:e}:n}catch(e){return{success:!1,error:`Failed to parse flow: ${e instanceof Error?e.message:String(e)}`}}}validate(e){let t=[];e.name?.trim()||t.push(`Missing flow name`),e.version?.trim()||t.push(`Missing flow version`),e.steps?.length||t.push(`Flow must have at least one step`);let n=new Set(e.steps.map(e=>e.id));for(let r of e.steps??[]){r.id?.trim()||t.push(`Step missing id`),r.instruction?.trim()||t.push(`Step "${r.id}" missing instruction path`);for(let e of r.requires??[])n.has(e)||t.push(`Step "${r.id}" requires unknown step "${e}"`)}return n.size!==(e.steps?.length??0)&&t.push(`Duplicate step IDs found`),t.length>0?{success:!1,error:`Validation failed:\n${t.join(`
2
2
  `)}`}:{success:!0}}};export{r as FlowLoader};
@@ -8,8 +8,8 @@ interface FlowStep {
8
8
  id: string;
9
9
  /** Human-readable step name */
10
10
  name: string;
11
- /** Relative path to the step's SKILL.md file */
12
- skill: string;
11
+ /** Relative path to the step's instruction file */
12
+ instruction: string;
13
13
  /** Artifact filenames this step produces */
14
14
  produces: string[];
15
15
  /** Step IDs this step depends on (must be completed first) */
@@ -9,6 +9,7 @@ import { KBConfig } from "../../core/dist/index.js";
9
9
  declare class SmartIndexScheduler {
10
10
  private readonly indexer;
11
11
  private readonly config;
12
+ private readonly store?;
12
13
  private trickleTimer;
13
14
  private readonly trickleIntervalMs;
14
15
  private readonly batchSize;
@@ -16,7 +17,9 @@ declare class SmartIndexScheduler {
16
17
  private changedFiles;
17
18
  private lastRefreshTime;
18
19
  private refreshing;
19
- constructor(indexer: IncrementalIndexer, config: KBConfig);
20
+ constructor(indexer: IncrementalIndexer, config: KBConfig, store?: {
21
+ createFtsIndex: () => Promise<void>;
22
+ } | undefined);
20
23
  /** Start the trickle indexing loop. */
21
24
  start(): void;
22
25
  /** Stop the scheduler and clear all timers. */
@@ -1 +1 @@
1
- import{statSync as e}from"node:fs";import{createLogger as t}from"../../core/dist/index.js";import{availableParallelism as n,loadavg as r}from"node:os";const i=t(`smart-index`),a=1.5;var o=class{trickleTimer=null;trickleIntervalMs;batchSize;priorityQueue=[];changedFiles=[];lastRefreshTime=0;refreshing=!1;constructor(e,t){this.indexer=e,this.config=t,this.trickleIntervalMs=this.readPositiveIntEnv(`AIKIT_SMART_TRICKLE_MS`,12e4),this.batchSize=this.readPositiveIntEnv(`AIKIT_SMART_BATCH_SIZE`,1)}start(){this.stop(),i.info(`Smart index scheduler started (trickle mode)`,{intervalMs:this.trickleIntervalMs,batchSize:this.batchSize}),this.scheduleTick()}stop(){this.trickleTimer&&=(clearTimeout(this.trickleTimer),null)}prioritize(...t){let n=[...new Set(t.filter(Boolean))].filter(t=>{try{return!e(t).isDirectory()}catch{return console.debug(`smart-index: skipping non-existent path: ${t}`),!1}});for(let e of n){let t=this.priorityQueue.indexOf(e);t>=0&&this.priorityQueue.splice(t,1)}for(let e of n.reverse())this.priorityQueue.unshift(e);this.priorityQueue.length>500&&(this.priorityQueue.length=500),n.length>0&&i.info(`Files prioritized for trickle indexing`,{added:n.length,queued:this.priorityQueue.length})}getState(){return{mode:`smart`,queueSize:this.priorityQueue.length,changedFilesSize:this.changedFiles.length,intervalMs:this.trickleIntervalMs,batchSize:this.batchSize,running:this.trickleTimer!==null}}readPositiveIntEnv(e,t){let n=Number(process.env[e]);return Number.isFinite(n)&&n>0?n:t}scheduleTick(){this.trickleTimer=setTimeout(()=>void this.tick(),this.trickleIntervalMs),this.trickleTimer.unref&&this.trickleTimer.unref()}async tick(){try{if(this.indexer.isIndexing){i.info(`Skipping trickle tick — indexing already in progress`);return}let e=this.getCpuCount(),t=r()[0];if(e>0&&t/e>a){i.info(`Skipping trickle tick — system load too high`,{load:t.toFixed(2),cpuCount:e,threshold:a});return}let n=await this.pickFiles();if(n.length===0){await 
this.maybeRefreshChangedFiles();return}i.info(`Trickle indexing tick started`,{count:n.length,files:n});let o=await this.indexer.indexFiles(this.config,n);this.changedFiles=this.changedFiles.filter(e=>!n.includes(e)),i.info(`Trickle indexing tick complete`,{filesProcessed:o.filesProcessed,filesSkipped:o.filesSkipped,chunksCreated:o.chunksCreated})}catch(e){i.error(`Trickle indexing tick failed`,{error:String(e)})}finally{this.scheduleTick()}}getCpuCount(){try{return typeof n==`function`?n():4}catch{return 4}}async pickFiles(){let e=[];for(;e.length<this.batchSize&&this.priorityQueue.length>0;){let t=this.priorityQueue.shift();t&&!e.includes(t)&&e.push(t)}if(e.length<this.batchSize)for(await this.maybeRefreshChangedFiles();e.length<this.batchSize&&this.changedFiles.length>0;){let t=this.changedFiles.shift();t&&!e.includes(t)&&e.push(t)}return e}async maybeRefreshChangedFiles(){let e=Date.now();if(!(this.refreshing||this.changedFiles.length>0&&e-this.lastRefreshTime<6e5)){this.refreshing=!0;try{this.changedFiles=await this.indexer.getChangedFiles(this.config),this.lastRefreshTime=e,this.changedFiles.length>0&&i.info(`Refreshed changed files for trickle indexing`,{count:this.changedFiles.length})}catch(e){i.error(`Failed to refresh changed files for trickle indexing`,{error:String(e)})}finally{this.refreshing=!1}}}};export{o as SmartIndexScheduler};
1
+ import{statSync as e}from"node:fs";import{createLogger as t}from"../../core/dist/index.js";import{availableParallelism as n,loadavg as r}from"node:os";const i=t(`smart-index`),a=1.5;var o=class{trickleTimer=null;trickleIntervalMs;batchSize;priorityQueue=[];changedFiles=[];lastRefreshTime=0;refreshing=!1;constructor(e,t,n){this.indexer=e,this.config=t,this.store=n,this.trickleIntervalMs=t.indexing.trickleIntervalMs??this.readPositiveIntEnv(`AIKIT_SMART_TRICKLE_MS`,3e4),this.batchSize=t.indexing.trickleBatchSize??this.readPositiveIntEnv(`AIKIT_SMART_BATCH_SIZE`,1)}start(){this.stop(),i.info(`Smart index scheduler started (trickle mode)`,{intervalMs:this.trickleIntervalMs,batchSize:this.batchSize}),this.scheduleTick()}stop(){this.trickleTimer&&=(clearTimeout(this.trickleTimer),null)}prioritize(...t){let n=[...new Set(t.filter(Boolean))].filter(t=>{try{return!e(t).isDirectory()}catch{return console.debug(`smart-index: skipping non-existent path: ${t}`),!1}});for(let e of n){let t=this.priorityQueue.indexOf(e);t>=0&&this.priorityQueue.splice(t,1)}for(let e of n.reverse())this.priorityQueue.unshift(e);this.priorityQueue.length>500&&(this.priorityQueue.length=500),n.length>0&&i.info(`Files prioritized for trickle indexing`,{added:n.length,queued:this.priorityQueue.length})}getState(){return{mode:`smart`,queueSize:this.priorityQueue.length,changedFilesSize:this.changedFiles.length,intervalMs:this.trickleIntervalMs,batchSize:this.batchSize,running:this.trickleTimer!==null}}readPositiveIntEnv(e,t){let n=Number(process.env[e]);return Number.isFinite(n)&&n>0?n:t}scheduleTick(){this.trickleTimer=setTimeout(()=>void this.tick(),this.trickleIntervalMs),this.trickleTimer.unref&&this.trickleTimer.unref()}async tick(){try{if(this.indexer.isIndexing){i.info(`Skipping trickle tick — indexing already in progress`);return}let e=this.getCpuCount(),t=r()[0];if(e>0&&t/e>a){i.info(`Skipping trickle tick — system load too high`,{load:t.toFixed(2),cpuCount:e,threshold:a});return}let n=await 
this.pickFiles();if(n.length===0){await this.maybeRefreshChangedFiles();return}i.info(`Trickle indexing tick started`,{count:n.length,files:n});let o=await this.indexer.indexFiles(this.config,n);if(this.store)try{await this.store.createFtsIndex()}catch(e){i.warn(`FTS index rebuild failed after trickle tick`,{error:String(e)})}this.changedFiles=this.changedFiles.filter(e=>!n.includes(e)),i.info(`Trickle indexing tick complete`,{filesProcessed:o.filesProcessed,filesSkipped:o.filesSkipped,chunksCreated:o.chunksCreated})}catch(e){i.error(`Trickle indexing tick failed`,{error:String(e)})}finally{this.scheduleTick()}}getCpuCount(){try{return typeof n==`function`?n():4}catch{return 4}}async pickFiles(){let e=[];for(;e.length<this.batchSize&&this.priorityQueue.length>0;){let t=this.priorityQueue.shift();t&&!e.includes(t)&&e.push(t)}if(e.length<this.batchSize)for(await this.maybeRefreshChangedFiles();e.length<this.batchSize&&this.changedFiles.length>0;){let t=this.changedFiles.shift();t&&!e.includes(t)&&e.push(t)}return e}async maybeRefreshChangedFiles(){let e=Date.now();if(!(this.refreshing||this.changedFiles.length>0&&e-this.lastRefreshTime<6e5)){this.refreshing=!0;try{this.changedFiles=await this.indexer.getChangedFiles(this.config),this.lastRefreshTime=e,this.changedFiles.length>0&&i.info(`Refreshed changed files for trickle indexing`,{count:this.changedFiles.length})}catch(e){i.error(`Failed to refresh changed files for trickle indexing`,{error:String(e)})}finally{this.refreshing=!1}}}};export{o as SmartIndexScheduler};
@@ -1 +1 @@
1
- import{readFileSync as e}from"node:fs";import{dirname as t,resolve as n}from"node:path";import{fileURLToPath as r}from"node:url";import{createLogger as i,serializeError as a}from"../../core/dist/index.js";import{parseArgs as o}from"node:util";const s=t(r(import.meta.url)),c=(()=>{try{let t=n(s,`..`,`..`,`..`,`package.json`);return JSON.parse(e(t,`utf-8`)).version??`0.0.0`}catch{return`0.0.0`}})(),l=i(`server`),{values:u}=o({options:{transport:{type:`string`,default:process.env.AIKIT_TRANSPORT??`stdio`},port:{type:`string`,default:process.env.AIKIT_PORT??`3210`}}});async function d(){if(process.on(`unhandledRejection`,e=>{l.error(`Unhandled rejection`,a(e))}),l.info(`Starting MCP AI Kit server`,{version:c}),u.transport===`http`){let[{default:e},{loadConfig:t,resolveIndexMode:n},{registerDashboardRoutes:r,resolveDashboardDir:i}]=await Promise.all([import(`express`),import(`./config.js`),import(`./dashboard-static.js`)]),o=t();l.info(`Config loaded`,{sourceCount:o.sources.length,storePath:o.store.path});let s=e();s.use(e.json());let c=Number(u.port);s.use((e,t,n)=>{if(t.setHeader(`Access-Control-Allow-Origin`,process.env.AIKIT_CORS_ORIGIN??`http://localhost:${c}`),t.setHeader(`Access-Control-Allow-Methods`,`GET, POST, DELETE, OPTIONS`),t.setHeader(`Access-Control-Allow-Headers`,`Content-Type, Authorization`),e.method===`OPTIONS`){t.status(204).end();return}n()}),r(s,i(),l),s.get(`/health`,(e,t)=>{t.json({status:`ok`})});let d=!1,f=null,p=null,m=null,h=Promise.resolve();s.post(`/mcp`,async(e,t)=>{if(!d||!p||!m){t.status(503).json({jsonrpc:`2.0`,error:{code:-32603,message:`Server initializing — please retry in a few seconds`},id:null});return}let n=h,r;h=new Promise(e=>{r=e}),await n;try{let n=new m({sessionIdGenerator:void 0});await p.connect(n),await n.handleRequest(e,t,e.body),n.close()}catch(e){if(l.error(`MCP handler error`,a(e)),!t.headersSent){let n=e instanceof Error?e.message:String(e),r=n.includes(`Not 
Acceptable`);t.status(r?406:500).json({jsonrpc:`2.0`,error:{code:r?-32e3:-32603,message:r?n:`Internal server error`},id:null})}}finally{r()}}),s.get(`/mcp`,(e,t)=>{t.writeHead(405).end(JSON.stringify({jsonrpc:`2.0`,error:{code:-32e3,message:`Method not allowed.`},id:null}))}),s.delete(`/mcp`,(e,t)=>{t.writeHead(405).end(JSON.stringify({jsonrpc:`2.0`,error:{code:-32e3,message:`Method not allowed.`},id:null}))});let g=s.listen(c,`127.0.0.1`,()=>{l.info(`MCP server listening`,{url:`http://127.0.0.1:${c}/mcp`,port:c}),setTimeout(async()=>{try{let[{createLazyServer:e,ALL_TOOL_NAMES:t},{StreamableHTTPServerTransport:r},{checkForUpdates:i,autoUpgradeScaffold:s}]=await Promise.all([import(`./server.js`),import(`@modelcontextprotocol/sdk/server/streamableHttp.js`),import(`./version-check.js`)]);i(),s();let c=n(o),u=e(o,c);p=u.server,m=r,d=!0,l.info(`MCP server configured (lazy — AI Kit initializing in background)`,{toolCount:t.length,resourceCount:2}),u.startInit(),c===`auto`?u.ready.then(async()=>{try{let e=o.sources.map(e=>e.path).join(`, `);l.info(`Running initial index`,{sourcePaths:e}),await u.runInitialIndex(),l.info(`Initial index complete`)}catch(e){l.error(`Initial index failed; will retry on aikit_reindex`,a(e))}}).catch(e=>l.error(`AI Kit init or indexing failed`,a(e))):c===`smart`?u.ready.then(async()=>{try{if(!u.kb)throw Error(`AI Kit components are not available after initialization`);let{SmartIndexScheduler:e}=await import(`../../indexer/dist/index.js`);f=new e(u.kb.indexer,o),f.start(),u.setSmartScheduler(f),l.info(`Smart index scheduler started (HTTP mode)`)}catch(e){l.error(`Failed to start smart index scheduler`,a(e))}}).catch(e=>l.error(`AI Kit initialization failed`,a(e))):(u.ready.catch(e=>l.error(`AI Kit initialization failed`,a(e))),l.info(`Initial full indexing skipped in HTTP mode`,{indexMode:c}))}catch(e){l.error(`Failed to load server modules`,a(e))}},100)}),_=async e=>{l.info(`Shutdown signal received`,{signal:e}),f?.stop(),g.close(),p&&await 
p.close(),process.exit(0)};process.on(`SIGINT`,()=>_(`SIGINT`)),process.on(`SIGTERM`,()=>_(`SIGTERM`))}else{let[{loadConfig:e,reconfigureForWorkspace:t,resolveIndexMode:n},{createLazyServer:i},{checkForUpdates:o,autoUpgradeScaffold:s},{RootsListChangedNotificationSchema:c}]=await Promise.all([import(`./config.js`),import(`./server.js`),import(`./version-check.js`),import(`@modelcontextprotocol/sdk/types.js`)]),u=e();l.info(`Config loaded`,{sourceCount:u.sources.length,storePath:u.store.path}),o(),s();let d=n(u),f=i(u,d),{server:p,startInit:m,ready:h,runInitialIndex:g}=f,{StdioServerTransport:_}=await import(`@modelcontextprotocol/sdk/server/stdio.js`),v=new _;await p.connect(v),l.info(`MCP server started`,{transport:`stdio`});let y=e=>{if(e.length===0)return!1;let n=e[0].uri,i=n.startsWith(`file://`)?r(n):n;return l.info(`MCP roots resolved`,{rootUri:n,rootPath:i,rootCount:e.length}),t(u,i),!0},b=!1;try{b=y((await p.server.listRoots()).roots),b||l.info(`No MCP roots yet; waiting for roots/list_changed notification`)}catch(e){l.warn(`MCP roots/list not supported by client; using cwd fallback`,{cwd:process.cwd(),...a(e)}),b=!0}b||=await new Promise(e=>{let t=setTimeout(()=>{l.warn(`Timed out waiting for MCP roots/list_changed; using cwd fallback`,{cwd:process.cwd()}),e(!1)},5e3);p.server.setNotificationHandler(c,async()=>{clearTimeout(t);try{e(y((await p.server.listRoots()).roots))}catch(t){l.warn(`roots/list retry failed after notification`,a(t)),e(!1)}})}),m();let x=null,S=()=>{x&&clearTimeout(x),x=setTimeout(()=>{l.info(`Auto-shutdown: no activity for 30 minutes — exiting`),process.exit(0)},18e5),x.unref&&x.unref()};S(),process.stdin.on(`data`,()=>S()),h.catch(e=>{l.error(`Initialization failed — server will continue with limited tools`,a(e))}),d===`auto`?g().catch(e=>l.error(`Initial index failed`,a(e))):d===`smart`?h.then(async()=>{try{if(!f.kb)throw Error(`AI Kit components are not available after initialization`);let{SmartIndexScheduler:e}=await 
import(`../../indexer/dist/index.js`),t=new e(f.kb.indexer,u);t.start(),f.setSmartScheduler(t),l.info(`Smart index scheduler started (stdio mode)`)}catch(e){l.error(`Failed to start smart index scheduler`,a(e))}}).catch(e=>l.error(`AI Kit init failed for smart scheduler`,a(e))):l.warn(`Initial full indexing skipped; use aikit_reindex to index manually`,{indexMode:d})}}d().catch(e=>{l.error(`Fatal error`,a(e)),process.exit(1)});export{};
1
+ import{readFileSync as e}from"node:fs";import{dirname as t,resolve as n}from"node:path";import{fileURLToPath as r}from"node:url";import{createLogger as i,serializeError as a}from"../../core/dist/index.js";import{parseArgs as o}from"node:util";const s=t(r(import.meta.url)),c=(()=>{try{let t=n(s,`..`,`..`,`..`,`package.json`);return JSON.parse(e(t,`utf-8`)).version??`0.0.0`}catch{return`0.0.0`}})(),l=i(`server`),{values:u}=o({options:{transport:{type:`string`,default:process.env.AIKIT_TRANSPORT??`stdio`},port:{type:`string`,default:process.env.AIKIT_PORT??`3210`}}});async function d(){if(process.on(`unhandledRejection`,e=>{l.error(`Unhandled rejection`,a(e))}),l.info(`Starting MCP AI Kit server`,{version:c}),u.transport===`http`){let[{default:e},{loadConfig:t,resolveIndexMode:n},{registerDashboardRoutes:r,resolveDashboardDir:i}]=await Promise.all([import(`express`),import(`./config.js`),import(`./dashboard-static.js`)]),o=t();l.info(`Config loaded`,{sourceCount:o.sources.length,storePath:o.store.path});let s=e();s.use(e.json());let c=Number(u.port);s.use((e,t,n)=>{if(t.setHeader(`Access-Control-Allow-Origin`,process.env.AIKIT_CORS_ORIGIN??`http://localhost:${c}`),t.setHeader(`Access-Control-Allow-Methods`,`GET, POST, DELETE, OPTIONS`),t.setHeader(`Access-Control-Allow-Headers`,`Content-Type, Authorization`),e.method===`OPTIONS`){t.status(204).end();return}n()}),r(s,i(),l),s.get(`/health`,(e,t)=>{t.json({status:`ok`})});let d=!1,f=null,p=null,m=null,h=Promise.resolve();s.post(`/mcp`,async(e,t)=>{if(!d||!p||!m){t.status(503).json({jsonrpc:`2.0`,error:{code:-32603,message:`Server initializing — please retry in a few seconds`},id:null});return}let n=h,r;h=new Promise(e=>{r=e}),await n;try{let n=new m({sessionIdGenerator:void 0});await p.connect(n),await n.handleRequest(e,t,e.body),n.close()}catch(e){if(l.error(`MCP handler error`,a(e)),!t.headersSent){let n=e instanceof Error?e.message:String(e),r=n.includes(`Not 
Acceptable`);t.status(r?406:500).json({jsonrpc:`2.0`,error:{code:r?-32e3:-32603,message:r?n:`Internal server error`},id:null})}}finally{r()}}),s.get(`/mcp`,(e,t)=>{t.writeHead(405).end(JSON.stringify({jsonrpc:`2.0`,error:{code:-32e3,message:`Method not allowed.`},id:null}))}),s.delete(`/mcp`,(e,t)=>{t.writeHead(405).end(JSON.stringify({jsonrpc:`2.0`,error:{code:-32e3,message:`Method not allowed.`},id:null}))});let g=s.listen(c,`127.0.0.1`,()=>{l.info(`MCP server listening`,{url:`http://127.0.0.1:${c}/mcp`,port:c}),setTimeout(async()=>{try{let[{createLazyServer:e,ALL_TOOL_NAMES:t},{StreamableHTTPServerTransport:r},{checkForUpdates:i,autoUpgradeScaffold:s}]=await Promise.all([import(`./server.js`),import(`@modelcontextprotocol/sdk/server/streamableHttp.js`),import(`./version-check.js`)]);i(),s();let c=n(o),u=e(o,c);p=u.server,m=r,d=!0,l.info(`MCP server configured (lazy — AI Kit initializing in background)`,{toolCount:t.length,resourceCount:2}),u.startInit(),c===`auto`?u.ready.then(async()=>{try{let e=o.sources.map(e=>e.path).join(`, `);l.info(`Running initial index`,{sourcePaths:e}),await u.runInitialIndex(),l.info(`Initial index complete`)}catch(e){l.error(`Initial index failed; will retry on aikit_reindex`,a(e))}}).catch(e=>l.error(`AI Kit init or indexing failed`,a(e))):c===`smart`?u.ready.then(async()=>{try{if(!u.kb)throw Error(`AI Kit components are not available after initialization`);let{SmartIndexScheduler:e}=await import(`../../indexer/dist/index.js`);f=new e(u.kb.indexer,o,u.kb.store),f.start(),u.setSmartScheduler(f),l.info(`Smart index scheduler started (HTTP mode)`)}catch(e){l.error(`Failed to start smart index scheduler`,a(e))}}).catch(e=>l.error(`AI Kit initialization failed`,a(e))):(u.ready.catch(e=>l.error(`AI Kit initialization failed`,a(e))),l.info(`Initial full indexing skipped in HTTP mode`,{indexMode:c}))}catch(e){l.error(`Failed to load server modules`,a(e))}},100)}),_=async e=>{l.info(`Shutdown signal 
received`,{signal:e}),f?.stop(),g.close(),p&&await p.close(),process.exit(0)};process.on(`SIGINT`,()=>_(`SIGINT`)),process.on(`SIGTERM`,()=>_(`SIGTERM`))}else{let[{loadConfig:e,reconfigureForWorkspace:t,resolveIndexMode:n},{createLazyServer:i},{checkForUpdates:o,autoUpgradeScaffold:s},{RootsListChangedNotificationSchema:c}]=await Promise.all([import(`./config.js`),import(`./server.js`),import(`./version-check.js`),import(`@modelcontextprotocol/sdk/types.js`)]),u=e();l.info(`Config loaded`,{sourceCount:u.sources.length,storePath:u.store.path}),o(),s();let d=n(u),f=i(u,d),{server:p,startInit:m,ready:h,runInitialIndex:g}=f,{StdioServerTransport:_}=await import(`@modelcontextprotocol/sdk/server/stdio.js`),v=new _;await p.connect(v),l.info(`MCP server started`,{transport:`stdio`});let y=e=>{if(e.length===0)return!1;let n=e[0].uri,i=n.startsWith(`file://`)?r(n):n;return l.info(`MCP roots resolved`,{rootUri:n,rootPath:i,rootCount:e.length}),t(u,i),!0},b=!1;try{b=y((await p.server.listRoots()).roots),b||l.info(`No MCP roots yet; waiting for roots/list_changed notification`)}catch(e){l.warn(`MCP roots/list not supported by client; using cwd fallback`,{cwd:process.cwd(),...a(e)}),b=!0}b||=await new Promise(e=>{let t=setTimeout(()=>{l.warn(`Timed out waiting for MCP roots/list_changed; using cwd fallback`,{cwd:process.cwd()}),e(!1)},5e3);p.server.setNotificationHandler(c,async()=>{clearTimeout(t);try{e(y((await p.server.listRoots()).roots))}catch(t){l.warn(`roots/list retry failed after notification`,a(t)),e(!1)}})}),m();let x=null,S=()=>{x&&clearTimeout(x),x=setTimeout(()=>{l.info(`Auto-shutdown: no activity for 30 minutes — exiting`),process.exit(0)},18e5),x.unref&&x.unref()};S(),process.stdin.on(`data`,()=>S()),h.catch(e=>{l.error(`Initialization failed — server will continue with limited tools`,a(e))}),d===`auto`?g().catch(e=>l.error(`Initial index failed`,a(e))):d===`smart`?h.then(async()=>{try{if(!f.kb)throw Error(`AI Kit components are not available after 
initialization`);let{SmartIndexScheduler:e}=await import(`../../indexer/dist/index.js`),t=new e(f.kb.indexer,u,f.kb.store);t.start(),f.setSmartScheduler(t),l.info(`Smart index scheduler started (stdio mode)`)}catch(e){l.error(`Failed to start smart index scheduler`,a(e))}}).catch(e=>l.error(`AI Kit init failed for smart scheduler`,a(e))):l.warn(`Initial full indexing skipped; use aikit_reindex to index manually`,{indexMode:d})}}d().catch(e=>{l.error(`Fatal error`,a(e)),process.exit(1)});export{};
@@ -1 +1 @@
1
- const e={search:{title:`Hybrid Search`,annotations:{readOnlyHint:!0,idempotentHint:!0}},find:{title:`Federated Find`,annotations:{readOnlyHint:!0,idempotentHint:!0}},symbol:{title:`Symbol Resolver`,annotations:{readOnlyHint:!0,idempotentHint:!0}},trace:{title:`Data Flow Tracer`,annotations:{readOnlyHint:!0,idempotentHint:!0}},scope_map:{title:`Task Scope Map`,annotations:{readOnlyHint:!0,idempotentHint:!0}},lookup:{title:`Chunk Lookup`,annotations:{readOnlyHint:!0,idempotentHint:!0}},dead_symbols:{title:`Dead Symbol Finder`,annotations:{readOnlyHint:!0,idempotentHint:!0}},file_summary:{title:`File Summary`,annotations:{readOnlyHint:!0,idempotentHint:!0}},analyze_structure:{title:`Analyze Structure`,annotations:{readOnlyHint:!0,idempotentHint:!0}},analyze_dependencies:{title:`Analyze Dependencies`,annotations:{readOnlyHint:!0,idempotentHint:!0}},analyze_symbols:{title:`Analyze Symbols`,annotations:{readOnlyHint:!0,idempotentHint:!0}},analyze_patterns:{title:`Analyze Patterns`,annotations:{readOnlyHint:!0,idempotentHint:!0}},analyze_entry_points:{title:`Analyze Entry Points`,annotations:{readOnlyHint:!0,idempotentHint:!0}},analyze_diagram:{title:`Analyze Diagram`,annotations:{readOnlyHint:!0,idempotentHint:!0}},blast_radius:{title:`Blast Radius`,annotations:{readOnlyHint:!0,idempotentHint:!0}},brainstorm:{title:`Brainstorm Session`,annotations:{readOnlyHint:!0,idempotentHint:!0}},remember:{title:`Remember Knowledge`,annotations:{readOnlyHint:!1}},read:{title:`Read Knowledge`,annotations:{readOnlyHint:!0,idempotentHint:!0}},update:{title:`Update Knowledge`,annotations:{readOnlyHint:!1}},forget:{title:`Forget Knowledge`,annotations:{readOnlyHint:!1,destructiveHint:!0}},list:{title:`List Knowledge`,annotations:{readOnlyHint:!0,idempotentHint:!0}},produce_knowledge:{title:`Produce Knowledge`,annotations:{readOnlyHint:!0,idempotentHint:!0}},compact:{title:`Semantic Compactor`,annotations:{readOnlyHint:!0,idempotentHint:!0}},digest:{title:`Multi-Source 
Digest`,annotations:{readOnlyHint:!0,idempotentHint:!0}},stratum_card:{title:`Stratum Card`,annotations:{readOnlyHint:!0,idempotentHint:!0}},forge_ground:{title:`FORGE Ground`,annotations:{readOnlyHint:!0,idempotentHint:!0}},forge_classify:{title:`FORGE Classify`,annotations:{readOnlyHint:!0,idempotentHint:!0}},evidence_map:{title:`Evidence Map`,annotations:{readOnlyHint:!1}},present:{title:`Rich Content Presenter`,annotations:{readOnlyHint:!0,idempotentHint:!0}},check:{title:`Typecheck & Lint`,annotations:{readOnlyHint:!0,openWorldHint:!0}},test_run:{title:`Run Tests`,annotations:{readOnlyHint:!0,openWorldHint:!0}},eval:{title:`Evaluate Code`,annotations:{readOnlyHint:!1,openWorldHint:!0}},batch:{title:`Batch Operations`,annotations:{readOnlyHint:!0,idempotentHint:!0}},audit:{title:`Project Audit`,annotations:{readOnlyHint:!0,idempotentHint:!0}},rename:{title:`Rename Symbol`,annotations:{readOnlyHint:!1,destructiveHint:!0}},restore:{title:`Restore`,annotations:{readOnlyHint:!1}},codemod:{title:`Codemod`,annotations:{readOnlyHint:!1,destructiveHint:!0}},data_transform:{title:`Data Transform`,annotations:{readOnlyHint:!0,idempotentHint:!0}},stash:{title:`Stash Values`,annotations:{readOnlyHint:!1}},checkpoint:{title:`Session Checkpoint`,annotations:{readOnlyHint:!1}},workset:{title:`Workset Manager`,annotations:{readOnlyHint:!1}},lane:{title:`Exploration Lane`,annotations:{readOnlyHint:!1}},git_context:{title:`Git Context`,annotations:{readOnlyHint:!0,idempotentHint:!0}},diff_parse:{title:`Diff Parser`,annotations:{readOnlyHint:!0,idempotentHint:!0}},parse_output:{title:`Parse Build Output`,annotations:{readOnlyHint:!0,idempotentHint:!0}},process:{title:`Process Manager`,annotations:{readOnlyHint:!1,openWorldHint:!0}},watch:{title:`File Watcher`,annotations:{readOnlyHint:!1,openWorldHint:!0}},delegate:{title:`Delegate Task`,annotations:{readOnlyHint:!1,openWorldHint:!0}},config:{title:`Configuration Manager`,annotations:{readOnlyHint:!1}},status:{title:`AI Kit 
Status`,annotations:{readOnlyHint:!0,idempotentHint:!0}},health:{title:`Health Check`,annotations:{readOnlyHint:!0,idempotentHint:!0}},reindex:{title:`Reindex`,annotations:{readOnlyHint:!1}},onboard:{title:`Onboard Codebase`,annotations:{readOnlyHint:!1}},graph:{title:`Knowledge Graph`,annotations:{readOnlyHint:!1}},guide:{title:`Tool Guide`,annotations:{readOnlyHint:!0,idempotentHint:!0}},replay:{title:`Replay History`,annotations:{readOnlyHint:!0,idempotentHint:!0}},changelog:{title:`Generate Changelog`,annotations:{readOnlyHint:!0,idempotentHint:!0}},regex_test:{title:`Regex Tester`,annotations:{readOnlyHint:!0,idempotentHint:!0}},encode:{title:`Encode / Decode`,annotations:{readOnlyHint:!0,idempotentHint:!0}},measure:{title:`Code Metrics`,annotations:{readOnlyHint:!0,idempotentHint:!0}},schema_validate:{title:`Schema Validator`,annotations:{readOnlyHint:!0,idempotentHint:!0}},snippet:{title:`Code Snippets`,annotations:{readOnlyHint:!1}},env:{title:`Environment Info`,annotations:{readOnlyHint:!0,idempotentHint:!0}},time:{title:`Date & Time`,annotations:{readOnlyHint:!0,idempotentHint:!0}},web_fetch:{title:`Web Fetch`,annotations:{readOnlyHint:!0,openWorldHint:!0}},web_search:{title:`Web Search`,annotations:{readOnlyHint:!0,openWorldHint:!0}},http:{title:`HTTP Request`,annotations:{readOnlyHint:!1,openWorldHint:!0}},queue:{title:`Operation Queue`,annotations:{readOnlyHint:!1}},bridge_push:{title:`Bridge Push`,annotations:{readOnlyHint:!1}},bridge_pull:{title:`Bridge Pull`,annotations:{readOnlyHint:!0,idempotentHint:!0}},bridge_sync:{title:`Bridge Sync Status`,annotations:{readOnlyHint:!0,idempotentHint:!0}},evolution_state:{title:`Evolution State`,annotations:{readOnlyHint:!1}},policy_check:{title:`Policy Check`,annotations:{readOnlyHint:!0,idempotentHint:!0}},flow_list:{title:`Flow List`,annotations:{readOnlyHint:!0,idempotentHint:!0}},flow_info:{title:`Flow Info`,annotations:{readOnlyHint:!0,idempotentHint:!0}},flow_start:{title:`Flow 
Start`,annotations:{readOnlyHint:!1}},flow_step:{title:`Flow Step`,annotations:{readOnlyHint:!1}},flow_status:{title:`Flow Status`,annotations:{readOnlyHint:!0,idempotentHint:!0}},flow_reset:{title:`Flow Reset`,annotations:{readOnlyHint:!1}},flow_read_skill:{title:`Flow Read Skill`,annotations:{readOnlyHint:!0,idempotentHint:!0}}};function t(t){return e[t]??{title:t,annotations:{readOnlyHint:!1}}}export{e as TOOL_METADATA,t as getToolMeta};
1
+ const e={search:{title:`Hybrid Search`,annotations:{readOnlyHint:!0,idempotentHint:!0}},find:{title:`Federated Find`,annotations:{readOnlyHint:!0,idempotentHint:!0}},symbol:{title:`Symbol Resolver`,annotations:{readOnlyHint:!0,idempotentHint:!0}},trace:{title:`Data Flow Tracer`,annotations:{readOnlyHint:!0,idempotentHint:!0}},scope_map:{title:`Task Scope Map`,annotations:{readOnlyHint:!0,idempotentHint:!0}},lookup:{title:`Chunk Lookup`,annotations:{readOnlyHint:!0,idempotentHint:!0}},dead_symbols:{title:`Dead Symbol Finder`,annotations:{readOnlyHint:!0,idempotentHint:!0}},file_summary:{title:`File Summary`,annotations:{readOnlyHint:!0,idempotentHint:!0}},analyze_structure:{title:`Analyze Structure`,annotations:{readOnlyHint:!0,idempotentHint:!0}},analyze_dependencies:{title:`Analyze Dependencies`,annotations:{readOnlyHint:!0,idempotentHint:!0}},analyze_symbols:{title:`Analyze Symbols`,annotations:{readOnlyHint:!0,idempotentHint:!0}},analyze_patterns:{title:`Analyze Patterns`,annotations:{readOnlyHint:!0,idempotentHint:!0}},analyze_entry_points:{title:`Analyze Entry Points`,annotations:{readOnlyHint:!0,idempotentHint:!0}},analyze_diagram:{title:`Analyze Diagram`,annotations:{readOnlyHint:!0,idempotentHint:!0}},blast_radius:{title:`Blast Radius`,annotations:{readOnlyHint:!0,idempotentHint:!0}},brainstorm:{title:`Brainstorm Session`,annotations:{readOnlyHint:!0,idempotentHint:!0}},remember:{title:`Remember Knowledge`,annotations:{readOnlyHint:!1}},read:{title:`Read Knowledge`,annotations:{readOnlyHint:!0,idempotentHint:!0}},update:{title:`Update Knowledge`,annotations:{readOnlyHint:!1}},forget:{title:`Forget Knowledge`,annotations:{readOnlyHint:!1,destructiveHint:!0}},list:{title:`List Knowledge`,annotations:{readOnlyHint:!0,idempotentHint:!0}},produce_knowledge:{title:`Produce Knowledge`,annotations:{readOnlyHint:!0,idempotentHint:!0}},compact:{title:`Semantic Compactor`,annotations:{readOnlyHint:!0,idempotentHint:!0}},digest:{title:`Multi-Source 
Digest`,annotations:{readOnlyHint:!0,idempotentHint:!0}},stratum_card:{title:`Stratum Card`,annotations:{readOnlyHint:!0,idempotentHint:!0}},forge_ground:{title:`FORGE Ground`,annotations:{readOnlyHint:!0,idempotentHint:!0}},forge_classify:{title:`FORGE Classify`,annotations:{readOnlyHint:!0,idempotentHint:!0}},evidence_map:{title:`Evidence Map`,annotations:{readOnlyHint:!1}},present:{title:`Rich Content Presenter`,annotations:{readOnlyHint:!0,idempotentHint:!0}},check:{title:`Typecheck & Lint`,annotations:{readOnlyHint:!0,openWorldHint:!0}},test_run:{title:`Run Tests`,annotations:{readOnlyHint:!0,openWorldHint:!0}},eval:{title:`Evaluate Code`,annotations:{readOnlyHint:!1,openWorldHint:!0}},batch:{title:`Batch Operations`,annotations:{readOnlyHint:!0,idempotentHint:!0}},audit:{title:`Project Audit`,annotations:{readOnlyHint:!0,idempotentHint:!0}},rename:{title:`Rename Symbol`,annotations:{readOnlyHint:!1,destructiveHint:!0}},restore:{title:`Restore`,annotations:{readOnlyHint:!1}},codemod:{title:`Codemod`,annotations:{readOnlyHint:!1,destructiveHint:!0}},data_transform:{title:`Data Transform`,annotations:{readOnlyHint:!0,idempotentHint:!0}},stash:{title:`Stash Values`,annotations:{readOnlyHint:!1}},checkpoint:{title:`Session Checkpoint`,annotations:{readOnlyHint:!1}},workset:{title:`Workset Manager`,annotations:{readOnlyHint:!1}},lane:{title:`Exploration Lane`,annotations:{readOnlyHint:!1}},git_context:{title:`Git Context`,annotations:{readOnlyHint:!0,idempotentHint:!0}},diff_parse:{title:`Diff Parser`,annotations:{readOnlyHint:!0,idempotentHint:!0}},parse_output:{title:`Parse Build Output`,annotations:{readOnlyHint:!0,idempotentHint:!0}},process:{title:`Process Manager`,annotations:{readOnlyHint:!1,openWorldHint:!0}},watch:{title:`File Watcher`,annotations:{readOnlyHint:!1,openWorldHint:!0}},delegate:{title:`Delegate Task`,annotations:{readOnlyHint:!1,openWorldHint:!0}},config:{title:`Configuration Manager`,annotations:{readOnlyHint:!1}},status:{title:`AI Kit 
Status`,annotations:{readOnlyHint:!0,idempotentHint:!0}},health:{title:`Health Check`,annotations:{readOnlyHint:!0,idempotentHint:!0}},reindex:{title:`Reindex`,annotations:{readOnlyHint:!1}},onboard:{title:`Onboard Codebase`,annotations:{readOnlyHint:!1}},graph:{title:`Knowledge Graph`,annotations:{readOnlyHint:!1}},guide:{title:`Tool Guide`,annotations:{readOnlyHint:!0,idempotentHint:!0}},replay:{title:`Replay History`,annotations:{readOnlyHint:!0,idempotentHint:!0}},changelog:{title:`Generate Changelog`,annotations:{readOnlyHint:!0,idempotentHint:!0}},regex_test:{title:`Regex Tester`,annotations:{readOnlyHint:!0,idempotentHint:!0}},encode:{title:`Encode / Decode`,annotations:{readOnlyHint:!0,idempotentHint:!0}},measure:{title:`Code Metrics`,annotations:{readOnlyHint:!0,idempotentHint:!0}},schema_validate:{title:`Schema Validator`,annotations:{readOnlyHint:!0,idempotentHint:!0}},snippet:{title:`Code Snippets`,annotations:{readOnlyHint:!1}},env:{title:`Environment Info`,annotations:{readOnlyHint:!0,idempotentHint:!0}},time:{title:`Date & Time`,annotations:{readOnlyHint:!0,idempotentHint:!0}},web_fetch:{title:`Web Fetch`,annotations:{readOnlyHint:!0,openWorldHint:!0}},web_search:{title:`Web Search`,annotations:{readOnlyHint:!0,openWorldHint:!0}},http:{title:`HTTP Request`,annotations:{readOnlyHint:!1,openWorldHint:!0}},queue:{title:`Operation Queue`,annotations:{readOnlyHint:!1}},bridge_push:{title:`Bridge Push`,annotations:{readOnlyHint:!1}},bridge_pull:{title:`Bridge Pull`,annotations:{readOnlyHint:!0,idempotentHint:!0}},bridge_sync:{title:`Bridge Sync Status`,annotations:{readOnlyHint:!0,idempotentHint:!0}},evolution_state:{title:`Evolution State`,annotations:{readOnlyHint:!1}},policy_check:{title:`Policy Check`,annotations:{readOnlyHint:!0,idempotentHint:!0}},flow_list:{title:`Flow List`,annotations:{readOnlyHint:!0,idempotentHint:!0}},flow_info:{title:`Flow Info`,annotations:{readOnlyHint:!0,idempotentHint:!0}},flow_start:{title:`Flow 
Start`,annotations:{readOnlyHint:!1}},flow_step:{title:`Flow Step`,annotations:{readOnlyHint:!1}},flow_status:{title:`Flow Status`,annotations:{readOnlyHint:!0,idempotentHint:!0}},flow_reset:{title:`Flow Reset`,annotations:{readOnlyHint:!1}},flow_read_instruction:{title:`Flow Read Instruction`,annotations:{readOnlyHint:!0,idempotentHint:!0}}};function t(t){return e[t]??{title:t,annotations:{readOnlyHint:!1}}}export{e as TOOL_METADATA,t as getToolMeta};
@@ -1 +1 @@
1
- import{getToolMeta as e}from"../tool-metadata.js";import{existsSync as t}from"node:fs";import{basename as n,join as r,resolve as i}from"node:path";import{z as a}from"zod";import{readFile as o}from"node:fs/promises";import{createLogger as s,serializeError as c}from"../../../core/dist/index.js";import{homedir as l}from"node:os";const u=s(`flow-tools`);function d(e){return{content:[{type:`text`,text:e}]}}function f(e){return e instanceof Error?e.message:String(e)}function p(s,p){let m=p.sources?.[0]?.path??process.cwd(),h=r(p.stateDir??r(p.sources[0].path,`.aikit-state`),`flows`),g=r(h,`registry.json`),_=r(h,`state.json`);function v(e,t){return i(y(e),t).replaceAll(`\\`,`/`)}function y(e){if(e.sourceType===`builtin`){let r=n(e.installPath),a=i(m,`.github`,`flows`,r);if(t(a))return a.replaceAll(`\\`,`/`);let o=i(l(),`.copilot`,`flows`,r);return t(o)?o.replaceAll(`\\`,`/`):a.replaceAll(`\\`,`/`)}return e.installPath.replaceAll(`\\`,`/`)}async function b(){let{FlowRegistryManager:e,FlowStateMachine:t}=await import(`../../../flows/dist/index.js`);return{registry:new e(g),stateMachine:new t(_)}}let x=e(`flow_list`);s.registerTool(`flow_list`,{title:x.title,description:`List all installed flows and their steps`,annotations:x.annotations,inputSchema:{}},async()=>{try{let{registry:e,stateMachine:t}=await b(),n=e.list(),r=t.getStatus(),i={flows:n.map(e=>({name:e.name,version:e.version,source:e.source,sourceType:e.sourceType,format:e.format,steps:e.manifest.steps.map(e=>e.id)})),activeFlow:r.success&&r.data?{flow:r.data.flow,status:r.data.status,currentStep:r.data.currentStep}:null};return d(JSON.stringify(i,null,2))}catch(e){return u.error(`flow_list failed`,c(e)),d(`Error: ${f(e)}`)}});let S=e(`flow_info`);s.registerTool(`flow_info`,{title:S.title,description:`Show detailed information about a specific flow`,annotations:S.annotations,inputSchema:{name:a.string().describe(`Flow name to get info for`)}},async({name:e})=>{try{let{registry:t}=await b(),n=t.get(e);if(!n)return 
d(`Flow "${e}" not found. Use flow_list to see available flows.`);let r={name:n.name,version:n.version,description:n.manifest.description,source:n.source,sourceType:n.sourceType,format:n.format,installPath:y(n),registeredAt:n.registeredAt,updatedAt:n.updatedAt,steps:n.manifest.steps.map(e=>({id:e.id,name:e.name,skill:v(n,e.skill),produces:e.produces,requires:e.requires,description:e.description})),agents:n.manifest.agents,artifactsDir:n.manifest.artifacts_dir,install:n.manifest.install};return d(JSON.stringify(r,null,2))}catch(e){return u.error(`flow_info failed`,c(e)),d(`Error: ${f(e)}`)}});let C=e(`flow_start`);s.registerTool(`flow_start`,{title:C.title,description:`Start a flow. Sets the active flow and positions at the first step.`,annotations:C.annotations,inputSchema:{flow:a.string().describe(`Flow name to start (use flow_list to see options)`)}},async({flow:e})=>{try{let{registry:t,stateMachine:n}=await b(),r=t.get(e);if(!r)return d(`Flow "${e}" not found. Use flow_list to see available flows.`);let i=n.start(r.name,r.manifest);if(!i.success||!i.data)return d(`Cannot start: ${i.error}`);let a=i.data,o=r.manifest.steps.find(e=>e.id===a.currentStep),s={started:!0,flow:a.flow,currentStep:a.currentStep,currentStepSkill:r&&o?v(r,o.skill):null,currentStepDescription:o?.description??null,totalSteps:r.manifest.steps.length,stepSequence:r.manifest.steps.map(e=>e.id),artifactsDir:r.manifest.artifacts_dir};return d(JSON.stringify(s,null,2))}catch(e){return u.error(`flow_start failed`,c(e)),d(`Error: ${f(e)}`)}});let w=e(`flow_step`);s.registerTool(`flow_step`,{title:w.title,description:`Advance the active flow: complete current step and move to next, skip current step, or redo current step.`,annotations:w.annotations,inputSchema:{action:a.enum([`next`,`skip`,`redo`]).describe(`next: mark current step done and advance. skip: skip current step. 
redo: repeat current step.`)}},async({action:e})=>{try{let{registry:t,stateMachine:n}=await b(),r=n.load();if(!r)return d(`No active flow. Use flow_start first.`);let i=t.get(r.flow);if(!i)return d(`Flow "${r.flow}" not found in registry.`);let a=n.step(e,i.manifest);if(!a.success||!a.data)return d(`Cannot ${e}: ${a.error}`);let o=a.data,s=o.currentStep?i.manifest.steps.find(e=>e.id===o.currentStep):null,c={flow:o.flow,status:o.status,action:e,currentStep:o.currentStep,currentStepSkill:i&&s?v(i,s.skill):null,currentStepDescription:s?.description??null,completedSteps:o.completedSteps,skippedSteps:o.skippedSteps,totalSteps:i.manifest.steps.length,remaining:i.manifest.steps.filter(e=>!o.completedSteps.includes(e.id)&&!o.skippedSteps.includes(e.id)&&e.id!==o.currentStep).map(e=>e.id)};return d(JSON.stringify(c,null,2))}catch(e){return u.error(`flow_step failed`,c(e)),d(`Error: ${f(e)}`)}});let T=e(`flow_status`);s.registerTool(`flow_status`,{title:T.title,description:`Show the current flow execution state — which flow is active, current step, completed steps, and artifacts.`,annotations:T.annotations,inputSchema:{}},async()=>{try{let{registry:e,stateMachine:t}=await b(),n=t.getStatus();if(!n.success||!n.data)return d(`No active flow. 
Use flow_start to begin one, or flow_list to see available flows.`);let r=n.data,i=e.get(r.flow),a=i?.manifest.steps.find(e=>e.id===r.currentStep),o=i&&a?v(i,a.skill):null,s={flow:r.flow,status:r.status,currentStep:r.currentStep,currentStepSkill:o,skillPath:o,currentStepDescription:a?.description??null,completedSteps:r.completedSteps,skippedSteps:r.skippedSteps,artifacts:r.artifacts,startedAt:r.startedAt,updatedAt:r.updatedAt,totalSteps:i?.manifest.steps.length??0,progress:i?`${r.completedSteps.length+r.skippedSteps.length}/${i.manifest.steps.length}`:`unknown`};return d(JSON.stringify(s,null,2))}catch(e){return u.error(`flow_status failed`,c(e)),d(`Error: ${f(e)}`)}});let E=e(`flow_read_skill`);s.registerTool(`flow_read_skill`,{title:E.title===`flow_read_skill`?`Flow Read Skill`:E.title,description:`Read the skill or instruction content for a flow step. If step is omitted, reads the current step.`,annotations:E.title===`flow_read_skill`?{readOnlyHint:!0,idempotentHint:!0}:E.annotations,inputSchema:{step:a.string().optional().describe(`Step id or name to read. Defaults to the current step.`)}},async({step:e})=>{try{let{registry:t,stateMachine:n}=await b(),r=n.getStatus();if(!r.success||!r.data)return d(`No active flow. Use flow_start to begin one, or flow_list to see available flows.`);let i=r.data,a=t.get(i.flow);if(!a)return d(`Flow "${i.flow}" not found in registry.`);let s=e??i.currentStep;if(!s)return d(`No current step is available for the active flow.`);let c=a.manifest.steps.find(e=>e.id===s||e.name===s);return d(c?await o(v(a,c.skill),`utf-8`):`Step "${s}" not found in flow "${i.flow}".`)}catch(e){return u.error(`flow_read_skill failed`,c(e)),e instanceof Error&&`code`in e&&e.code===`ENOENT`?d(`Could not read skill file: ${e.message}`):d(`Error: ${f(e)}`)}});let D=e(`flow_reset`);s.registerTool(`flow_reset`,{title:D.title,description:`Reset the active flow, clearing all state. 
Use to start over or switch to a different flow.`,annotations:D.annotations,inputSchema:{}},async()=>{try{let{stateMachine:e}=await b(),t=e.reset();return t.success?d(`Flow state reset. Use flow_start to begin a new flow.`):d(`Reset failed: ${t.error}`)}catch(e){return u.error(`flow_reset failed`,c(e)),d(`Error: ${f(e)}`)}})}export{p as registerFlowTools};
1
+ import{getToolMeta as e}from"../tool-metadata.js";import{existsSync as t}from"node:fs";import{basename as n,join as r,resolve as i}from"node:path";import{z as a}from"zod";import{readFile as o}from"node:fs/promises";import{createLogger as s,serializeError as c}from"../../../core/dist/index.js";import{homedir as l}from"node:os";const u=s(`flow-tools`);function d(e){return{content:[{type:`text`,text:e}]}}function f(e){return e instanceof Error?e.message:String(e)}function p(s,p){let m=p.sources?.[0]?.path??process.cwd(),h=r(p.stateDir??r(p.sources[0].path,`.aikit-state`),`flows`),g=r(h,`registry.json`),_=r(h,`state.json`);function v(e,t){return i(y(e),t).replaceAll(`\\`,`/`)}function y(e){if(e.sourceType===`builtin`){let r=n(e.installPath),a=i(m,`.github`,`flows`,r);if(t(a))return a.replaceAll(`\\`,`/`);let o=i(l(),`.copilot`,`flows`,r);return t(o)?o.replaceAll(`\\`,`/`):a.replaceAll(`\\`,`/`)}return e.installPath.replaceAll(`\\`,`/`)}async function b(){let{FlowRegistryManager:e,FlowStateMachine:t}=await import(`../../../flows/dist/index.js`);return{registry:new e(g),stateMachine:new t(_)}}let x=e(`flow_list`);s.registerTool(`flow_list`,{title:x.title,description:`List all installed flows and their steps`,annotations:x.annotations,inputSchema:{}},async()=>{try{let{registry:e,stateMachine:t}=await b(),n=e.list(),r=t.getStatus(),i={flows:n.map(e=>({name:e.name,version:e.version,source:e.source,sourceType:e.sourceType,format:e.format,steps:e.manifest.steps.map(e=>e.id)})),activeFlow:r.success&&r.data?{flow:r.data.flow,status:r.data.status,currentStep:r.data.currentStep}:null};return d(JSON.stringify(i,null,2))}catch(e){return u.error(`flow_list failed`,c(e)),d(`Error: ${f(e)}`)}});let S=e(`flow_info`);s.registerTool(`flow_info`,{title:S.title,description:`Show detailed information about a specific flow`,annotations:S.annotations,inputSchema:{name:a.string().describe(`Flow name to get info for`)}},async({name:e})=>{try{let{registry:t}=await b(),n=t.get(e);if(!n)return 
d(`Flow "${e}" not found. Use flow_list to see available flows.`);let r={name:n.name,version:n.version,description:n.manifest.description,source:n.source,sourceType:n.sourceType,format:n.format,installPath:y(n),registeredAt:n.registeredAt,updatedAt:n.updatedAt,steps:n.manifest.steps.map(e=>({id:e.id,name:e.name,instruction:v(n,e.instruction),produces:e.produces,requires:e.requires,description:e.description})),agents:n.manifest.agents,artifactsDir:n.manifest.artifacts_dir,install:n.manifest.install};return d(JSON.stringify(r,null,2))}catch(e){return u.error(`flow_info failed`,c(e)),d(`Error: ${f(e)}`)}});let C=e(`flow_start`);s.registerTool(`flow_start`,{title:C.title,description:`Start a flow. Sets the active flow and positions at the first step.`,annotations:C.annotations,inputSchema:{flow:a.string().describe(`Flow name to start (use flow_list to see options)`)}},async({flow:e})=>{try{let{registry:t,stateMachine:n}=await b(),r=t.get(e);if(!r)return d(`Flow "${e}" not found. Use flow_list to see available flows.`);let i=n.start(r.name,r.manifest);if(!i.success||!i.data)return d(`Cannot start: ${i.error}`);let a=i.data,o=r.manifest.steps.find(e=>e.id===a.currentStep),s={started:!0,flow:a.flow,currentStep:a.currentStep,currentStepInstruction:r&&o?v(r,o.instruction):null,currentStepDescription:o?.description??null,totalSteps:r.manifest.steps.length,stepSequence:r.manifest.steps.map(e=>e.id),artifactsDir:r.manifest.artifacts_dir};return d(JSON.stringify(s,null,2))}catch(e){return u.error(`flow_start failed`,c(e)),d(`Error: ${f(e)}`)}});let w=e(`flow_step`);s.registerTool(`flow_step`,{title:w.title,description:`Advance the active flow: complete current step and move to next, skip current step, or redo current step.`,annotations:w.annotations,inputSchema:{action:a.enum([`next`,`skip`,`redo`]).describe(`next: mark current step done and advance. skip: skip current step. 
redo: repeat current step.`)}},async({action:e})=>{try{let{registry:t,stateMachine:n}=await b(),r=n.load();if(!r)return d(`No active flow. Use flow_start first.`);let i=t.get(r.flow);if(!i)return d(`Flow "${r.flow}" not found in registry.`);let a=n.step(e,i.manifest);if(!a.success||!a.data)return d(`Cannot ${e}: ${a.error}`);let o=a.data,s=o.currentStep?i.manifest.steps.find(e=>e.id===o.currentStep):null,c={flow:o.flow,status:o.status,action:e,currentStep:o.currentStep,currentStepInstruction:i&&s?v(i,s.instruction):null,currentStepDescription:s?.description??null,completedSteps:o.completedSteps,skippedSteps:o.skippedSteps,totalSteps:i.manifest.steps.length,remaining:i.manifest.steps.filter(e=>!o.completedSteps.includes(e.id)&&!o.skippedSteps.includes(e.id)&&e.id!==o.currentStep).map(e=>e.id)};return d(JSON.stringify(c,null,2))}catch(e){return u.error(`flow_step failed`,c(e)),d(`Error: ${f(e)}`)}});let T=e(`flow_status`);s.registerTool(`flow_status`,{title:T.title,description:`Show the current flow execution state — which flow is active, current step, completed steps, and artifacts.`,annotations:T.annotations,inputSchema:{}},async()=>{try{let{registry:e,stateMachine:t}=await b(),n=t.getStatus();if(!n.success||!n.data)return d(`No active flow. 
Use flow_start to begin one, or flow_list to see available flows.`);let r=n.data,i=e.get(r.flow),a=i?.manifest.steps.find(e=>e.id===r.currentStep),o=i&&a?v(i,a.instruction):null,s={flow:r.flow,status:r.status,currentStep:r.currentStep,currentStepInstruction:o,instructionPath:o,currentStepDescription:a?.description??null,completedSteps:r.completedSteps,skippedSteps:r.skippedSteps,artifacts:r.artifacts,startedAt:r.startedAt,updatedAt:r.updatedAt,totalSteps:i?.manifest.steps.length??0,progress:i?`${r.completedSteps.length+r.skippedSteps.length}/${i.manifest.steps.length}`:`unknown`};return d(JSON.stringify(s,null,2))}catch(e){return u.error(`flow_status failed`,c(e)),d(`Error: ${f(e)}`)}});let E=e(`flow_read_instruction`);s.registerTool(`flow_read_instruction`,{title:E.title===`flow_read_instruction`?`Flow Read Instruction`:E.title,description:`Read the instruction content for a flow step. If step is omitted, reads the current step.`,annotations:E.title===`flow_read_instruction`?{readOnlyHint:!0,idempotentHint:!0}:E.annotations,inputSchema:{step:a.string().optional().describe(`Step id or name to read. Defaults to the current step.`)}},async({step:e})=>{try{let{registry:t,stateMachine:n}=await b(),r=n.getStatus();if(!r.success||!r.data)return d(`No active flow. Use flow_start to begin one, or flow_list to see available flows.`);let i=r.data,a=t.get(i.flow);if(!a)return d(`Flow "${i.flow}" not found in registry.`);let s=e??i.currentStep;if(!s)return d(`No current step is available for the active flow.`);let c=a.manifest.steps.find(e=>e.id===s||e.name===s);return d(c?await o(v(a,c.instruction),`utf-8`):`Step "${s}" not found in flow "${i.flow}".`)}catch(e){return u.error(`flow_read_instruction failed`,c(e)),e instanceof Error&&`code`in e&&e.code===`ENOENT`?d(`Could not read instruction file: ${e.message}`):d(`Error: ${f(e)}`)}});let D=e(`flow_reset`);s.registerTool(`flow_reset`,{title:D.title,description:`Reset the active flow, clearing all state. 
Use to start over or switch to a different flow.`,annotations:D.annotations,inputSchema:{}},async()=>{try{let{stateMachine:e}=await b(),t=e.reset();return t.success?d(`Flow state reset. Use flow_start to begin a new flow.`):d(`Reset failed: ${t.error}`)}catch(e){return u.error(`flow_reset failed`,c(e)),d(`Error: ${f(e)}`)}})}export{p as registerFlowTools};
@@ -11,6 +11,8 @@ declare class LanceStore implements IKnowledgeStore {
11
11
  private _priorityQueue;
12
12
  private _normalQueue;
13
13
  private _ftsReady;
14
+ private _ftsRecoveryAttemptAt;
15
+ private static readonly FTS_RECOVERY_COOLDOWN_MS;
14
16
  private enqueueWrite;
15
17
  private _drain;
16
18
  constructor(options?: {
@@ -1 +1 @@
1
- import{EMBEDDING_DEFAULTS as e,SEARCH_DEFAULTS as t,STORE_DEFAULTS as n,createLogger as r,serializeError as i,sourceTypeContentTypes as a}from"../../core/dist/index.js";import{Index as o,connect as s}from"@lancedb/lancedb";function c(e){if(!e)return[];try{let t=JSON.parse(e);return Array.isArray(t)?t:[]}catch{return[]}}const l=/^[\w.\-/ ]+$/,u=r(`store`);function d(e,t){if(!l.test(e))throw Error(`Invalid ${t} filter value: contains disallowed characters`);return e.replace(/'/g,`''`)}var f=class{db=null;table=null;dbPath;tableName;_draining=!1;_priorityQueue=[];_normalQueue=[];_ftsReady=!1;enqueueWrite(e,t=!1){return new Promise((n,r)=>{let i=async()=>{try{n(await e())}catch(e){r(e)}};t?this._priorityQueue.push(i):this._normalQueue.push(i),this._drain()})}async _drain(){if(!this._draining){this._draining=!0;try{for(;this._priorityQueue.length>0||this._normalQueue.length>0;){let e=this._priorityQueue.shift()??this._normalQueue.shift();e&&await e()}}finally{this._draining=!1}}}constructor(e){this.dbPath=e?.path??n.path,this.tableName=e?.tableName??n.tableName}async initialize(){this.db=await s(this.dbPath),(await this.db.tableNames()).includes(this.tableName)&&(this.table=await this.db.openTable(this.tableName),await this.createFtsIndex())}async upsert(e,t){if(e.length!==0){if(e.length!==t.length)throw Error(`Record count (${e.length}) does not match vector count (${t.length})`);return this.enqueueWrite(()=>this._upsertImpl(e,t))}}async upsertInteractive(e,t){if(e.length!==0){if(e.length!==t.length)throw Error(`Record count (${e.length}) does not match vector count (${t.length})`);return this.enqueueWrite(()=>this._upsertImpl(e,t),!0)}}async _upsertImpl(e,t){let 
n=e.map((e,n)=>({id:e.id,vector:Array.from(t[n]),content:e.content,sourcePath:e.sourcePath,contentType:e.contentType,headingPath:e.headingPath??``,chunkIndex:e.chunkIndex,totalChunks:e.totalChunks,startLine:e.startLine,endLine:e.endLine,fileHash:e.fileHash,indexedAt:e.indexedAt,origin:e.origin,tags:JSON.stringify(e.tags),category:e.category??``,version:e.version}));if(this.table){let t=[...new Set(e.map(e=>e.sourcePath))];for(let e of t)try{await this.table.delete(`sourcePath = '${d(e,`sourcePath`)}'`)}catch{}await this.table.add(n)}else try{this.table=await this.db?.createTable(this.tableName,n)??null}catch(e){if(String(e).includes(`already exists`)&&this.db)this.table=await this.db.openTable(this.tableName),await this.table.add(n);else throw e}}async search(e,n){if(!this.table)return[];let r=n?.limit??t.maxResults,i=n?.minScore??t.minScore,a=this.table.search(e).limit(r*2),o=this.buildFilterString(n);return o&&(a=a.where(o)),(await a.toArray()).map(e=>({record:this.fromLanceRecord(e),score:1-(e._distance??1)})).filter(e=>e.score>=i).slice(0,r)}async createFtsIndex(){return this.enqueueWrite(()=>this._createFtsIndexImpl())}async _createFtsIndexImpl(){if(this.table)try{await this.table.createIndex(`content`,{config:o.fts({withPosition:!0}),replace:!0}),this._ftsReady=!0,u.info(`FTS index created/updated`,{column:`content`})}catch(e){u.warn(`FTS index creation failed`,i(e))}}async ftsSearch(e,n){if(!this.table||!this._ftsReady)return[];let r=n?.limit??t.maxResults;try{let t=this.table.search(e).limit(r*2),i=this.buildFilterString(n);return i&&(t=t.where(i)),(await t.toArray()).map(e=>({record:this.fromLanceRecord(e),score:e._score??e._relevance_score??0}))}catch(e){return(e instanceof Error?e.message:String(e)).includes(`INVERTED index`)?(u.debug(`FTS search skipped — index not yet available`),this._ftsReady=!1):u.warn(`FTS search failed`,i(e)),[]}}async getById(e){if(!this.table)return null;let t=await this.table.query().where(`id = 
'${d(e,`id`)}'`).limit(1).toArray();return t.length===0?null:this.fromLanceRecord(t[0])}async deleteBySourcePath(e){return this.enqueueWrite(()=>this._deleteBySourcePathImpl(e))}async _deleteBySourcePathImpl(e){if(!this.table)return 0;let t=await this.getBySourcePath(e);return t.length===0?0:(await this.table.delete(`sourcePath = '${d(e,`sourcePath`)}'`),t.length)}async deleteById(e){return this.enqueueWrite(()=>this._deleteByIdImpl(e))}async deleteByIdInteractive(e){return this.enqueueWrite(()=>this._deleteByIdImpl(e),!0)}async _deleteByIdImpl(e){return!this.table||!await this.getById(e)?!1:(await this.table.delete(`id = '${d(e,`id`)}'`),!0)}async getBySourcePath(e){return this.table?(await this.table.query().where(`sourcePath = '${d(e,`sourcePath`)}'`).limit(1e3).toArray()).map(e=>this.fromLanceRecord(e)):[]}async getStats(){if(!this.table)return{totalRecords:0,totalFiles:0,contentTypeBreakdown:{},lastIndexedAt:null,storeBackend:`lancedb`,embeddingModel:e.model};let t=await this.table.countRows(),n=await this.table.query().select([`sourcePath`,`contentType`,`indexedAt`]).limit(1e5).toArray(),r={},i=new Set,a=null;for(let e of n){let t=e;r[t.contentType]=(r[t.contentType]??0)+1,i.add(t.sourcePath),(!a||t.indexedAt>a)&&(a=t.indexedAt)}return{totalRecords:t,totalFiles:i.size,contentTypeBreakdown:r,lastIndexedAt:a,storeBackend:`lancedb`,embeddingModel:e.model}}async listSourcePaths(){if(!this.table)return[];let e=await this.table.query().select([`sourcePath`]).limit(1e5).toArray();return[...new Set(e.map(e=>e.sourcePath))]}async dropTable(){return this.enqueueWrite(()=>this._dropTableImpl())}async _dropTableImpl(){if(this.db&&(await this.db.tableNames()).includes(this.tableName))for(let e=1;e<=3;e++)try{await this.db.dropTable(this.tableName);break}catch(t){if(e===3)throw t;let n=e*500;u.warn(`dropTable attempt failed, retrying`,{attempt:e,delayMs:n}),await new Promise(e=>setTimeout(e,n))}this.table=null}async close(){try{this.db&&typeof 
this.db.close==`function`&&await this.db.close()}catch{}this.table=null,this.db=null}buildFilterString(e){let t=[];if(e?.contentType&&t.push(`contentType = '${d(e.contentType,`contentType`)}'`),e?.sourceType){let n=a(e.sourceType);if(n.length>0){let e=n.map(e=>`'${d(e,`sourceType`)}'`).join(`, `);t.push(`contentType IN (${e})`)}}if(e?.origin&&t.push(`origin = '${d(e.origin,`origin`)}'`),e?.category&&t.push(`category = '${d(e.category,`category`)}'`),e?.tags&&e.tags.length>0){let n=e.tags.map(e=>`tags LIKE '%${d(e,`tag`)}%'`);t.push(`(${n.join(` OR `)})`)}return t.length>0?t.join(` AND `):null}fromLanceRecord(e){return{id:e.id,content:e.content,sourcePath:e.sourcePath,contentType:e.contentType,headingPath:e.headingPath||void 0,chunkIndex:e.chunkIndex,totalChunks:e.totalChunks,startLine:e.startLine,endLine:e.endLine,fileHash:e.fileHash,indexedAt:e.indexedAt,origin:e.origin,tags:c(e.tags),category:e.category||void 0,version:e.version}}};export{f as LanceStore};
1
+ import{EMBEDDING_DEFAULTS as e,SEARCH_DEFAULTS as t,STORE_DEFAULTS as n,createLogger as r,serializeError as i,sourceTypeContentTypes as a}from"../../core/dist/index.js";import{Index as o,connect as s}from"@lancedb/lancedb";function c(e){if(!e)return[];try{let t=JSON.parse(e);return Array.isArray(t)?t:[]}catch{return[]}}const l=/^[\w.\-/ ]+$/,u=r(`store`);function d(e,t){if(!l.test(e))throw Error(`Invalid ${t} filter value: contains disallowed characters`);return e.replace(/'/g,`''`)}var f=class r{db=null;table=null;dbPath;tableName;_draining=!1;_priorityQueue=[];_normalQueue=[];_ftsReady=!1;_ftsRecoveryAttemptAt=0;static FTS_RECOVERY_COOLDOWN_MS=300*1e3;enqueueWrite(e,t=!1){return new Promise((n,r)=>{let i=async()=>{try{n(await e())}catch(e){r(e)}};t?this._priorityQueue.push(i):this._normalQueue.push(i),this._drain()})}async _drain(){if(!this._draining){this._draining=!0;try{for(;this._priorityQueue.length>0||this._normalQueue.length>0;){let e=this._priorityQueue.shift()??this._normalQueue.shift();e&&await e()}}finally{this._draining=!1}}}constructor(e){this.dbPath=e?.path??n.path,this.tableName=e?.tableName??n.tableName}async initialize(){this.db=await s(this.dbPath),(await this.db.tableNames()).includes(this.tableName)&&(this.table=await this.db.openTable(this.tableName),await this.createFtsIndex())}async upsert(e,t){if(e.length!==0){if(e.length!==t.length)throw Error(`Record count (${e.length}) does not match vector count (${t.length})`);return this.enqueueWrite(()=>this._upsertImpl(e,t))}}async upsertInteractive(e,t){if(e.length!==0){if(e.length!==t.length)throw Error(`Record count (${e.length}) does not match vector count (${t.length})`);return this.enqueueWrite(()=>this._upsertImpl(e,t),!0)}}async _upsertImpl(e,t){let 
n=e.map((e,n)=>({id:e.id,vector:Array.from(t[n]),content:e.content,sourcePath:e.sourcePath,contentType:e.contentType,headingPath:e.headingPath??``,chunkIndex:e.chunkIndex,totalChunks:e.totalChunks,startLine:e.startLine,endLine:e.endLine,fileHash:e.fileHash,indexedAt:e.indexedAt,origin:e.origin,tags:JSON.stringify(e.tags),category:e.category??``,version:e.version}));if(this.table){let t=[...new Set(e.map(e=>e.sourcePath))];for(let e of t)try{await this.table.delete(`sourcePath = '${d(e,`sourcePath`)}'`)}catch{}await this.table.add(n)}else try{this.table=await this.db?.createTable(this.tableName,n)??null}catch(e){if(String(e).includes(`already exists`)&&this.db)this.table=await this.db.openTable(this.tableName),await this.table.add(n);else throw e}}async search(e,n){if(!this.table)return[];let r=n?.limit??t.maxResults,i=n?.minScore??t.minScore,a=this.table.search(e).limit(r*2),o=this.buildFilterString(n);return o&&(a=a.where(o)),(await a.toArray()).map(e=>({record:this.fromLanceRecord(e),score:1-(e._distance??1)})).filter(e=>e.score>=i).slice(0,r)}async createFtsIndex(){return this.enqueueWrite(()=>this._createFtsIndexImpl())}async _createFtsIndexImpl(){if(this.table)try{await this.table.createIndex(`content`,{config:o.fts({withPosition:!0}),replace:!0}),this._ftsReady=!0,this._ftsRecoveryAttemptAt=0,u.info(`FTS index created/updated`,{column:`content`})}catch(e){u.warn(`FTS index creation failed`,i(e))}}async ftsSearch(e,n){if(!this.table)return[];if(!this._ftsReady){let e=Date.now();if(e-this._ftsRecoveryAttemptAt<r.FTS_RECOVERY_COOLDOWN_MS)return[];this._ftsRecoveryAttemptAt=e;try{await this.createFtsIndex()}catch{return[]}if(!this._ftsReady)return[]}let a=n?.limit??t.maxResults;try{let t=this.table.search(e).limit(a*2),r=this.buildFilterString(n);return r&&(t=t.where(r)),(await t.toArray()).map(e=>({record:this.fromLanceRecord(e),score:e._score??e._relevance_score??0}))}catch(e){return(e instanceof Error?e.message:String(e)).includes(`INVERTED 
index`)?(u.debug(`FTS search skipped — index not yet available`),this._ftsReady=!1):u.warn(`FTS search failed`,i(e)),[]}}async getById(e){if(!this.table)return null;let t=await this.table.query().where(`id = '${d(e,`id`)}'`).limit(1).toArray();return t.length===0?null:this.fromLanceRecord(t[0])}async deleteBySourcePath(e){return this.enqueueWrite(()=>this._deleteBySourcePathImpl(e))}async _deleteBySourcePathImpl(e){if(!this.table)return 0;let t=await this.getBySourcePath(e);return t.length===0?0:(await this.table.delete(`sourcePath = '${d(e,`sourcePath`)}'`),t.length)}async deleteById(e){return this.enqueueWrite(()=>this._deleteByIdImpl(e))}async deleteByIdInteractive(e){return this.enqueueWrite(()=>this._deleteByIdImpl(e),!0)}async _deleteByIdImpl(e){return!this.table||!await this.getById(e)?!1:(await this.table.delete(`id = '${d(e,`id`)}'`),!0)}async getBySourcePath(e){return this.table?(await this.table.query().where(`sourcePath = '${d(e,`sourcePath`)}'`).limit(1e3).toArray()).map(e=>this.fromLanceRecord(e)):[]}async getStats(){if(!this.table)return{totalRecords:0,totalFiles:0,contentTypeBreakdown:{},lastIndexedAt:null,storeBackend:`lancedb`,embeddingModel:e.model};let t=await this.table.countRows(),n=await this.table.query().select([`sourcePath`,`contentType`,`indexedAt`]).limit(1e5).toArray(),r={},i=new Set,a=null;for(let e of n){let t=e;r[t.contentType]=(r[t.contentType]??0)+1,i.add(t.sourcePath),(!a||t.indexedAt>a)&&(a=t.indexedAt)}return{totalRecords:t,totalFiles:i.size,contentTypeBreakdown:r,lastIndexedAt:a,storeBackend:`lancedb`,embeddingModel:e.model}}async listSourcePaths(){if(!this.table)return[];let e=await this.table.query().select([`sourcePath`]).limit(1e5).toArray();return[...new Set(e.map(e=>e.sourcePath))]}async dropTable(){return this.enqueueWrite(()=>this._dropTableImpl())}async _dropTableImpl(){if(this.db&&(await this.db.tableNames()).includes(this.tableName))for(let e=1;e<=3;e++)try{await 
this.db.dropTable(this.tableName);break}catch(t){if(e===3)throw t;let n=e*500;u.warn(`dropTable attempt failed, retrying`,{attempt:e,delayMs:n}),await new Promise(e=>setTimeout(e,n))}this.table=null}async close(){try{this.db&&typeof this.db.close==`function`&&await this.db.close()}catch{}this.table=null,this.db=null}buildFilterString(e){let t=[];if(e?.contentType&&t.push(`contentType = '${d(e.contentType,`contentType`)}'`),e?.sourceType){let n=a(e.sourceType);if(n.length>0){let e=n.map(e=>`'${d(e,`sourceType`)}'`).join(`, `);t.push(`contentType IN (${e})`)}}if(e?.origin&&t.push(`origin = '${d(e.origin,`origin`)}'`),e?.category&&t.push(`category = '${d(e.category,`category`)}'`),e?.tags&&e.tags.length>0){let n=e.tags.map(e=>`tags LIKE '%${d(e,`tag`)}%'`);t.push(`(${n.join(` OR `)})`)}return t.length>0?t.join(` AND `):null}fromLanceRecord(e){return{id:e.id,content:e.content,sourcePath:e.sourcePath,contentType:e.contentType,headingPath:e.headingPath||void 0,chunkIndex:e.chunkIndex,totalChunks:e.totalChunks,startLine:e.startLine,endLine:e.endLine,fileHash:e.fileHash,indexedAt:e.indexedAt,origin:e.origin,tags:c(e.tags),category:e.category||void 0,version:e.version}}};export{f as LanceStore};
@@ -19,29 +19,11 @@ export const CLAUDE_FLOWS_SECTION = [
19
19
  ].join('\n');
20
20
 
21
21
  export const CLAUDE_ORCHESTRATOR_FLOW_ROUTING_SECTION = [
22
- '## Flow-Aware Routing',
23
- '',
24
- 'At session start, check for an active flow:',
25
- '1. Call `flow_status` to check if a flow is active',
26
- "2. If active and status is 'active':",
27
- ' - Note the current step name and skill path',
28
- " - Load the current step's skill file",
29
- ' - Follow its instructions for this step',
30
- " - When step is complete, call `flow_step({ action: 'next' })`",
31
- '3. If no active flow:',
32
- ' - Check `flow_list` for available flows',
33
- ' - Suggest starting a flow based on the task type',
34
- " - Use `flow_start({ flow: '<name>' })` to begin",
22
+ '## Flows',
35
23
  '',
36
- '### Flow MCP Tools',
37
- '| Tool | Purpose |',
38
- '|------|---------|',
39
- '| `flow_list` | List installed flows and active flow |',
40
- '| `flow_info` | Get detailed flow info including steps |',
41
- '| `flow_start` | Start a named flow |',
42
- '| `flow_step` | Advance: next, skip, or redo current step |',
43
- '| `flow_status` | Check current execution state |',
44
- '| `flow_reset` | Clear flow state to start over |',
24
+ "This project uses aikit's pluggable flow system. Check flow status with the `flow_status` MCP tool.",
25
+ "If a flow is active, follow the current step's skill instructions. Advance with `flow_step({ action: 'next' })`.",
26
+ 'Use `flow_list` to see available flows and `flow_start` to begin one.',
45
27
  ].join('\n');
46
28
 
47
29
  export function generateClaudeCode() {
@@ -24,15 +24,6 @@ ${agentTable}
24
24
 
25
25
  **Parallelism**: Read-only agents run in parallel freely. File-modifying agents run in parallel ONLY on completely different files. Max 4 concurrent file-modifying agents.
26
26
 
27
- ## Phase 0: Design Gate
28
-
29
- | Situation | Route |
30
- |-----------|-------|
31
- | New feature/component/behavior | **Brainstorming skill** → user dialogue → design doc |
32
- | Non-trivial technical decision | **Decision protocol** → 4 Researchers parallel → synthesize → ADR |
33
- | Both | Brainstorming first → escalate unresolved decisions to protocol |
34
- | Bug fix / refactor / explicit skip | **→ Phase 1** |
35
-
36
27
  ## FORGE Protocol
37
28
 
38
29
  1. \`forge_classify({ task, files })\` → determine tier (Floor/Standard/Critical)
@@ -40,32 +31,66 @@ ${agentTable}
40
31
  3. After review: \`evidence_map({ action: "gate", task_id })\` → YIELD/HOLD/HARD_BLOCK
41
32
  4. Auto-upgrade tier if unknowns reveal contract/security issues
42
33
 
43
- ## Flow-Driven Development
34
+ ## Flow-Driven Development (PRIMARY BEHAVIOR)
44
35
 
45
- Orchestrator uses the flow system for structured development. Flows define the step sequence — Orchestrator adds multi-agent orchestration, quality gates, and review protocols on top.
36
+ **After bootstrap, the Orchestrator MUST select and start a flow.** Flows define the step sequence — Orchestrator adds multi-agent orchestration, quality gates, and review protocols on top. Design decisions, brainstorming, and FORGE classification are handled by the **design** step within each flow — NOT by the Orchestrator directly.
46
37
 
47
- ### Flow Selection
38
+ ### Flow Activation (MANDATORY after bootstrap)
48
39
 
49
- | Situation | Flow | Steps |
50
- |-----------|------|-------|
51
- | Bug fix, small feature, refactoring | \`aikit:basic\` | assess → implement → verify |
52
- | New feature, major change, multi-file | \`aikit:advanced\` | spec → plan → task → execute → verify |
53
- | Custom/specialized work | Check \`flow_list\` | Follow flow-specific steps |
40
+ 1. \`flow_status\` check for an active flow from a previous session
41
+ 2. **If active flow exists:**
42
+ - Note current step name and instruction path
43
+ - Read the current step instruction with \`flow_read_instruction\`
44
+ - Follow its instructions
45
+ - When complete: \`flow_step({ action: 'next' })\`
46
+ 3. **If NO active flow:**
47
+ - \`flow_list\` — retrieve ALL available flows (builtin AND custom)
48
+ - **Auto-select** the flow when the task clearly matches:
54
49
 
55
- **If multiple flows could apply and user hasn't specified → ask user to choose.**
50
+ | Task signal | Auto-activate flow |
51
+ |-------------|--------------------|
52
+ | Bug fix, typo, hotfix, "fix ...", error reproduction | \`aikit:basic\` |
53
+ | Small feature (≤3 files), refactoring, cleanup, dependency update | \`aikit:basic\` |
54
+ | New feature, API design, architecture change, multi-component work | \`aikit:advanced\` |
55
+ | Task matches a custom flow's description/tags exactly | That custom flow |
56
56
 
57
- ### Session Start Flow Check
57
+ - **Auto-start:** When exactly one flow matches, start it immediately — \`flow_start({ flow: '<matched>' })\` — and inform the user which flow was activated and why.
58
+ - **Ask only when ambiguous:** If the task could fit multiple flows, or no flow clearly matches, present the options and let the user choose.
59
+ - Do NOT present a menu for obvious cases. Speed matters.
60
+ 4. **Every task goes through a flow.** There is no flowless path.
58
61
 
59
- 1. \`flow_status\` check for active flow
60
- 2. If active:
61
- - Note current step name and skill path
62
- - Read the current step skill with \`flow_read_skill\`
63
- - Follow its instructions
64
- - When complete: \`flow_step({ action: 'next' })\`
65
- 3. If no active flow:
66
- - \`flow_list\` check ALL available flows (builtin + custom)
67
- - Recommend appropriate flow based on task scope
68
- - \`flow_start({ flow: '<name>' })\` after user confirms
62
+ ### Flow Execution Loop
63
+
64
+ For EACH step in the active flow:
65
+
66
+ 1. `flow_read_instruction` — read the current step's README.md
67
+ 2. Follow the step's instructions — delegate work to the appropriate agents
68
+ 3. Apply **Orchestrator Protocols** (PRE-DISPATCH GATE, FORGE, review cycle) during execution
69
+ 4. When the step is complete and results are approved:
70
+ - \`flow_step({ action: 'next' })\` to advance
71
+ 5. Repeat until the flow is complete
72
+
73
+ **Custom flows work identically** — \`flow_list\` returns them alongside builtins. The execution loop is the same for ALL flows.
74
+
75
+ ### Flow Completion & Cleanup
76
+
77
+ Flows MUST be driven to completion. A flow left active forever blocks future work.
78
+
79
+ **Normal completion:**
80
+ - When the last step's \`flow_step({ action: 'next' })\` is called, the flow finishes automatically
81
+ - After completion: run post-implementation protocol (\`check\` → \`test_run\` → \`blast_radius\` → \`reindex\` → \`produce_knowledge\` → \`remember\`)
82
+ - Inform the user the flow is complete with a summary of artifacts produced
83
+
84
+ **Stale flow detection** (check at session start when \`flow_status\` returns an active flow):
85
+ - If the active flow's current step has no matching work context in the conversation → **ask the user**: "A flow \`<name>\` is active at step \`<step>\`. Continue, or reset to start fresh?"
86
+ - If the user says reset → \`flow_reset()\` then activate a new flow for the current task
87
+ - If the user says continue → resume from the current step
88
+
89
+ **Abandoned step recovery:**
90
+ - If a step has been attempted ≥ 2 times with \`BLOCKED\` status → escalate to user with diagnostics, offer to \`flow_step({ action: 'skip' })\` or \`flow_reset()\`
91
+ - Never silently retry a blocked step indefinitely
92
+
93
+ **One active flow at a time.** To switch tasks, the current flow must be completed or reset first.
69
94
 
70
95
  ### Orchestrator Protocols (apply during ALL flow steps)
71
96
 
@@ -114,7 +139,7 @@ Batch 2 (after batch 1):
114
139
  | \`flow_step\` | Advance: next, skip, or redo current step |
115
140
  | \`flow_status\` | Check current execution state |
116
141
  | \`flow_reset\` | Clear flow state to start over |
117
- | \`flow_read_skill\` | Read the skill content for the current step |
142
+ | \`flow_read_instruction\` | Read the instruction content for the current step |
118
143
 
119
144
  ## Emergency: STOP → ASSESS → CONTAIN → RECOVER → DOCUMENT
120
145
 
@@ -160,7 +185,7 @@ When subagents complete, their visual outputs (from \`present\`) are NOT visible
160
185
  3. **Maximize parallelism** — independent tasks MUST run as parallel \`runSubagent\` calls in the SAME function block. Sequential dispatch of parallelizable tasks is a protocol violation.
161
186
  4. **Fresh context per subagent** — paste relevant code, don't reference conversation history
162
187
  5. **Search AI Kit before planning** — check past decisions with \`search()\`
163
- 6. **Route correctly** — brainstorming for design, decision protocol for tech choices
188
+ 6. **Always use flows** — every task goes through a flow; design decisions happen in the flow's design step
164
189
  7. **Never proceed without user approval** at 🛑 stops
165
190
  8. **Max 2 retries** then escalate to user
166
191
 
@@ -197,35 +222,18 @@ Before every tool call, verify:
197
222
  |-------|--------------|
198
223
  | \`multi-agents-development\` | **Before any delegation** — task decomposition, dispatch templates, review pipeline, recovery patterns |
199
224
  | \`present\` | When presenting plans, findings, or visual content to the user — dashboards, tables, charts, timelines |
200
- | \`brainstorming\` | Before creative/design work (Phase 0) |
225
+ | \`brainstorming\` | When a flow's design step requires creative/design work |
201
226
  | \`session-handoff\` | Context filling up, session ending, or major milestone |
202
227
  | \`lesson-learned\` | After completing work — extract engineering principles |
203
228
 
204
229
  **When dispatching subagents**, include relevant skill names in the prompt so subagents know which skills to load (e.g., "Load the \`react\` and \`typescript\` skills for this task").
205
230
 
206
- ## Flow-Aware Routing
207
-
208
- At session start, check for an active flow:
209
- 1. Call \`flow_status\` to check if a flow is active
210
- 2. If active and status is 'active':
211
- - Note the current step name and skill path
212
- - Load the current step's skill file
213
- - Follow its instructions for this step
214
- - When step is complete, call \`flow_step({ action: 'next' })\`
215
- 3. If no active flow:
216
- - Check \`flow_list\` for available flows
217
- - Suggest starting a flow based on the task type
218
- - Use \`flow_start({ flow: '<name>' })\` to begin
231
+ ## Flows
219
232
 
220
- ### Flow MCP Tools
221
- | Tool | Purpose |
222
- |------|---------|
223
- | \`flow_list\` | List installed flows and active flow |
224
- | \`flow_info\` | Get detailed flow info including steps |
225
- | \`flow_start\` | Start a named flow |
226
- | \`flow_step\` | Advance: next, skip, or redo current step |
227
- | \`flow_status\` | Check current execution state |
228
- | \`flow_reset\` | Clear flow state to start over |`,
233
+ This project uses aikit's pluggable flow system. Check flow status with the \`flow_status\` MCP tool.
234
+ If a flow is active, follow the current step's instructions. Advance with \`flow_step({ action: 'next' })\`.
235
+ Use \`flow_list\` to see available flows and \`flow_start\` to begin one.
236
+ `,
229
237
 
230
238
  Planner: `**Read \`AGENTS.md\`** in the workspace root for project conventions and AI Kit protocol.
231
239
 
@@ -257,16 +265,20 @@ At session start, check for an active flow:
257
265
  5. **Dependency Graph** — For each phase, list dependencies. Group into parallel batches
258
266
  6. **Present** — Show plan with open questions, complexity estimate, parallel batch layout
259
267
 
260
- ## Flow Integration
268
+ ## Flow Integration (PRIMARY MODE)
269
+
270
+ The Planner is typically activated by the Orchestrator as part of a flow step (e.g., \`aikit:advanced\` plan step, \`aikit:basic\` assess step, or a custom flow's planning step).
261
271
 
262
- When activated as part of a flow (e.g., \`aikit:advanced\` plan step or \`aikit:basic\` assess step):
263
- 1. Check \`flow_status\` for current step context
264
- 2. Read the step's skill file for specific instructions
265
- 3. Follow skill instructions while applying Planner methodology
266
- 4. Produce required artifacts (as specified by the flow step's \`produces\` field)
267
- 5. When complete, report to Orchestrator (do NOT call \`flow_step\` — let Orchestrator advance)
272
+ **When activated as part of a flow:**
273
+ 1. \`flow_status\` check current step context and which flow is active
274
+ 2. \`flow_read_instruction\` — read the current step's README.md for specific instructions
275
+ 3. Follow the step's instructions as the primary guide, applying Planner methodology on top
276
+ 4. Read the flow's README.md for overall context on how the flow works
277
+ 5. Produce required artifacts (as specified by the flow step's \`produces\` field)
278
+ 6. When complete, report status to Orchestrator: \`DONE\` | \`DONE_WITH_CONCERNS\` | \`NEEDS_CONTEXT\` | \`BLOCKED\`
279
+ 7. Do NOT call \`flow_step\` — the Orchestrator controls flow advancement
268
280
 
269
- When no flow is active, operate autonomously following normal Planner methodology.
281
+ **When no flow is active** (standalone mode), operate autonomously following normal Planner methodology.
270
282
 
271
283
  ## Subagent Output Relay
272
284
 
@@ -20,6 +20,7 @@ export const PLUGINS = {
20
20
  description: 'Brainstorming & design exploration workflow',
21
21
  source: 'scaffold/general/skills/brainstorming/SKILL.md',
22
22
  required: true,
23
+ sidecars: ['scaffold/general/skills/brainstorming/spec-document-reviewer-prompt.md'],
23
24
  },
24
25
 
25
26
  'multi-agents-development': {
@@ -35,4 +36,95 @@ export const PLUGINS = {
35
36
  'scaffold/general/skills/multi-agents-development/parallel-dispatch-example.md',
36
37
  ],
37
38
  },
39
+
40
+ 'adr-skill': {
41
+ description:
42
+ 'Architecture Decision Records — create, maintain, and review ADRs for significant technical decisions',
43
+ source: 'scaffold/general/skills/adr-skill/SKILL.md',
44
+ required: true,
45
+ sidecars: [
46
+ 'scaffold/general/skills/adr-skill/assets/templates/adr-madr.md',
47
+ 'scaffold/general/skills/adr-skill/assets/templates/adr-readme.md',
48
+ 'scaffold/general/skills/adr-skill/assets/templates/adr-simple.md',
49
+ 'scaffold/general/skills/adr-skill/references/adr-conventions.md',
50
+ 'scaffold/general/skills/adr-skill/references/examples.md',
51
+ 'scaffold/general/skills/adr-skill/references/review-checklist.md',
52
+ 'scaffold/general/skills/adr-skill/references/template-variants.md',
53
+ 'scaffold/general/skills/adr-skill/scripts/bootstrap_adr.js',
54
+ 'scaffold/general/skills/adr-skill/scripts/new_adr.js',
55
+ 'scaffold/general/skills/adr-skill/scripts/set_adr_status.js',
56
+ ],
57
+ },
58
+
59
+ 'c4-architecture': {
60
+ description:
61
+ 'C4 model architecture diagrams using Mermaid — system context, container, component, and deployment views',
62
+ source: 'scaffold/general/skills/c4-architecture/SKILL.md',
63
+ required: true,
64
+ sidecars: [
65
+ 'scaffold/general/skills/c4-architecture/references/advanced-patterns.md',
66
+ 'scaffold/general/skills/c4-architecture/references/c4-syntax.md',
67
+ 'scaffold/general/skills/c4-architecture/references/common-mistakes.md',
68
+ ],
69
+ },
70
+
71
+ 'frontend-design': {
72
+ description:
73
+ 'Frontend design system — visual design thinking, typography, color, layout, motion, accessibility, and anti-pattern detection',
74
+ source: 'scaffold/general/skills/frontend-design/SKILL.md',
75
+ required: false,
76
+ },
77
+
78
+ 'lesson-learned': {
79
+ description: 'Extract engineering lessons from recent code changes via git history analysis',
80
+ source: 'scaffold/general/skills/lesson-learned/SKILL.md',
81
+ required: true,
82
+ sidecars: [
83
+ 'scaffold/general/skills/lesson-learned/references/anti-patterns.md',
84
+ 'scaffold/general/skills/lesson-learned/references/se-principles.md',
85
+ ],
86
+ },
87
+
88
+ present: {
89
+ description:
90
+ 'Rich interactive dashboards, charts, tables, timelines, and data visualizations via the present MCP tool',
91
+ source: 'scaffold/general/skills/present/SKILL.md',
92
+ required: true,
93
+ },
94
+
95
+ react: {
96
+ description:
97
+ 'React development patterns — component architecture, React 19 APIs, Server Components, TypeScript integration',
98
+ source: 'scaffold/general/skills/react/SKILL.md',
99
+ required: false,
100
+ },
101
+
102
+ 'requirements-clarity': {
103
+ description:
104
+ 'Clarify ambiguous requirements through focused dialogue — score 0-100 until ≥90 before implementation',
105
+ source: 'scaffold/general/skills/requirements-clarity/SKILL.md',
106
+ required: true,
107
+ },
108
+
109
+ 'session-handoff': {
110
+ description:
111
+ 'Comprehensive handoff documents for seamless AI agent session transfers and context preservation',
112
+ source: 'scaffold/general/skills/session-handoff/SKILL.md',
113
+ required: true,
114
+ sidecars: [
115
+ 'scaffold/general/skills/session-handoff/references/handoff-template.md',
116
+ 'scaffold/general/skills/session-handoff/references/resume-checklist.md',
117
+ 'scaffold/general/skills/session-handoff/scripts/check_staleness.js',
118
+ 'scaffold/general/skills/session-handoff/scripts/create_handoff.js',
119
+ 'scaffold/general/skills/session-handoff/scripts/list_handoffs.js',
120
+ 'scaffold/general/skills/session-handoff/scripts/validate_handoff.js',
121
+ ],
122
+ },
123
+
124
+ typescript: {
125
+ description:
126
+ 'TypeScript development patterns — type system, compiler config, advanced types, async patterns, module organization',
127
+ source: 'scaffold/general/skills/typescript/SKILL.md',
128
+ required: false,
129
+ },
38
130
  };
@@ -68,7 +68,7 @@ You may be invoked in two modes:
68
68
 
69
69
  \`\`\`
70
70
  flow_status({}) # Check/resume active flow FIRST
71
- # If flow active → flow_read_skill({ step }) → follow skill instructions
71
+ # If flow active → flow_read_instruction({ step }) → follow step instructions
72
72
  status({}) # Check AI Kit health + onboard state
73
73
  # If onboard not run → onboard({ path: "." }) # First-time codebase analysis
74
74
  flow_list({}) # See available flows
@@ -81,7 +81,7 @@ search({ query: "SESSION CHECKPOINT", origin: "curated" }) # Resume prior wo
81
81
 
82
82
  | Category | Tools | Purpose |
83
83
  |----------|-------|---------|
84
- | Flows | \`flow_list\`, \`flow_info\`, \`flow_start\`, \`flow_step\`, \`flow_status\`, \`flow_read_skill\`, \`flow_reset\` | Structured multi-step workflows |
84
+ | Flows | \`flow_list\`, \`flow_info\`, \`flow_start\`, \`flow_step\`, \`flow_status\`, \`flow_read_instruction\`, \`flow_reset\` | Structured multi-step workflows |
85
85
 
86
86
  ---
87
87