@vpxa/aikit 0.1.20 → 0.1.21

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. package/package.json +1 -1
  2. package/packages/core/dist/types.d.ts +2 -0
  3. package/packages/flows/dist/adapters/claude-plugin.js +1 -1
  4. package/packages/flows/dist/adapters/copilot.js +1 -1
  5. package/packages/flows/dist/builtins.js +1 -1
  6. package/packages/flows/dist/loader.js +1 -1
  7. package/packages/flows/dist/types.d.ts +2 -2
  8. package/packages/indexer/dist/smart-index-scheduler.d.ts +4 -1
  9. package/packages/indexer/dist/smart-index-scheduler.js +1 -1
  10. package/packages/server/dist/index.js +1 -1
  11. package/packages/server/dist/tool-metadata.js +1 -1
  12. package/packages/server/dist/tools/flow.tools.js +1 -1
  13. package/packages/store/dist/lance-store.d.ts +2 -0
  14. package/packages/store/dist/lance-store.js +1 -1
  15. package/scaffold/definitions/bodies.mjs +8 -8
  16. package/scaffold/definitions/plugins.mjs +92 -0
  17. package/scaffold/definitions/protocols.mjs +2 -2
  18. package/scaffold/flows/aikit-advanced/README.md +10 -10
  19. package/scaffold/flows/aikit-advanced/flow.json +6 -6
  20. package/scaffold/flows/aikit-advanced/{skills/design/SKILL.md → steps/design/README.md} +30 -0
  21. package/scaffold/flows/aikit-advanced/{skills/execute/SKILL.md → steps/execute/README.md} +19 -0
  22. package/scaffold/flows/aikit-advanced/{skills/plan/SKILL.md → steps/plan/README.md} +20 -0
  23. package/scaffold/flows/aikit-advanced/{skills/spec/SKILL.md → steps/spec/README.md} +19 -0
  24. package/scaffold/flows/aikit-advanced/{skills/task/SKILL.md → steps/task/README.md} +18 -0
  25. package/scaffold/flows/aikit-advanced/{skills/verify/SKILL.md → steps/verify/README.md} +21 -0
  26. package/scaffold/flows/aikit-basic/README.md +8 -8
  27. package/scaffold/flows/aikit-basic/flow.json +4 -4
  28. package/scaffold/flows/aikit-basic/{skills/assess/SKILL.md → steps/assess/README.md} +25 -0
  29. package/scaffold/flows/aikit-basic/{skills/design/SKILL.md → steps/design/README.md} +32 -0
  30. package/scaffold/flows/aikit-basic/{skills/implement/SKILL.md → steps/implement/README.md} +24 -0
  31. package/scaffold/flows/aikit-basic/{skills/verify/SKILL.md → steps/verify/README.md} +25 -0
  32. package/scaffold/general/agents/Orchestrator.agent.md +6 -6
  33. package/scaffold/general/agents/Planner.agent.md +2 -2
  34. package/scaffold/general/agents/_shared/code-agent-base.md +2 -2
  35. package/scaffold/general/skills/adr-skill/SKILL.md +6 -6
  36. package/scaffold/general/skills/aikit/SKILL.md +10 -10
  37. package/scaffold/general/skills/brainstorming/SKILL.md +11 -13
  38. package/scaffold/general/skills/c4-architecture/SKILL.md +1 -0
  39. package/scaffold/general/skills/requirements-clarity/SKILL.md +5 -3
  40. package/scaffold/general/skills/session-handoff/SKILL.md +2 -0
  41. package/scaffold/general/skills/brainstorming/scripts/frame-template.html +0 -365
  42. package/scaffold/general/skills/brainstorming/scripts/helper.js +0 -216
  43. package/scaffold/general/skills/brainstorming/scripts/server.cjs +0 -9
  44. package/scaffold/general/skills/brainstorming/scripts/server.src.cjs +0 -249
  45. package/scaffold/general/skills/brainstorming/visual-companion.md +0 -430
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@vpxa/aikit",
3
- "version": "0.1.20",
3
+ "version": "0.1.21",
4
4
  "type": "module",
5
5
  "description": "Local-first AI developer toolkit — knowledge base, code analysis, context management, and developer tools for LLM agents",
6
6
  "license": "MIT",
@@ -108,6 +108,8 @@ interface KBConfig {
108
108
  chunkOverlap: number;
109
109
  minChunkSize: number; /** Max files processed concurrently. Defaults to half of available CPU cores. */
110
110
  concurrency?: number;
111
+ trickleIntervalMs?: number;
112
+ trickleBatchSize?: number;
111
113
  };
112
114
  embedding: {
113
115
  model: string;
@@ -1 +1 @@
1
- import{existsSync as e,readFileSync as t,readdirSync as n}from"node:fs";import{basename as r,join as i}from"node:path";const a=[`spec`,`plan`,`task`,`execute`,`verify`];var o=class{format=`claude-plugin`;detect(t){return e(i(t,`.claude-plugin`,`plugin.json`))}async parse(e){let n=i(e,`.claude-plugin`,`plugin.json`),a=JSON.parse(t(n,`utf-8`)),o=this.discoverSteps(e),s=this.discoverAgents(e);return{name:a.name??r(e),version:a.version??`1.0.0`,description:a.description??``,author:a.author,steps:o,agents:s,artifacts_dir:`.spec`,install:[]}}discoverSteps(t){let r=i(t,`skills`);if(!e(r))return[];let o=n(r,{withFileTypes:!0}).filter(e=>e.isDirectory()).map(e=>e.name).sort((e,t)=>{let n=a.indexOf(e),r=a.indexOf(t);return n!==-1&&r!==-1?n-r:n===-1?r===-1?e.localeCompare(t):1:-1});return o.map((e,t)=>({id:e,name:e.charAt(0).toUpperCase()+e.slice(1),skill:`skills/${e}/SKILL.md`,produces:[`${e}.md`],requires:t>0?[o[t-1]]:[],agents:[],description:`${e} step`}))}discoverAgents(t){let r=i(t,`agents`);return e(r)?n(r,{withFileTypes:!0}).filter(e=>e.isFile()&&e.name.endsWith(`.md`)).map(e=>`agents/${e.name}`):[]}};export{o as ClaudePluginAdapter};
1
+ import{existsSync as e,readFileSync as t,readdirSync as n}from"node:fs";import{basename as r,join as i}from"node:path";const a=[`spec`,`plan`,`task`,`execute`,`verify`];var o=class{format=`claude-plugin`;detect(t){return e(i(t,`.claude-plugin`,`plugin.json`))}async parse(e){let n=i(e,`.claude-plugin`,`plugin.json`),a=JSON.parse(t(n,`utf-8`)),o=this.discoverSteps(e),s=this.discoverAgents(e);return{name:a.name??r(e),version:a.version??`1.0.0`,description:a.description??``,author:a.author,steps:o,agents:s,artifacts_dir:`.spec`,install:[]}}discoverSteps(t){let r=i(t,`skills`);if(!e(r))return[];let o=n(r,{withFileTypes:!0}).filter(e=>e.isDirectory()).map(e=>e.name).sort((e,t)=>{let n=a.indexOf(e),r=a.indexOf(t);return n!==-1&&r!==-1?n-r:n===-1?r===-1?e.localeCompare(t):1:-1});return o.map((e,t)=>({id:e,name:e.charAt(0).toUpperCase()+e.slice(1),instruction:`steps/${e}/README.md`,produces:[`${e}.md`],requires:t>0?[o[t-1]]:[],agents:[],description:`${e} step`}))}discoverAgents(t){let r=i(t,`agents`);return e(r)?n(r,{withFileTypes:!0}).filter(e=>e.isFile()&&e.name.endsWith(`.md`)).map(e=>`agents/${e.name}`):[]}};export{o as ClaudePluginAdapter};
@@ -1 +1 @@
1
- import{existsSync as e,readdirSync as t}from"node:fs";import{basename as n,join as r}from"node:path";var i=class{format=`copilot`;detect(t){return e(r(t,`.github`,`agents`))}async parse(e){let i=t(r(e,`.github`,`agents`),{withFileTypes:!0}).filter(e=>e.isFile()&&e.name.endsWith(`.md`)).map(e=>`.github/agents/${e.name}`),a=i.map((e,t)=>{let r=n(e,`.md`).toLowerCase().replace(/\.agent$/,``);return{id:r,name:r.charAt(0).toUpperCase()+r.slice(1),skill:e,produces:[`${r}.md`],requires:t>0?[i[t-1].replace(/.*\//,``).replace(/\.md$/,``).toLowerCase()]:[],agents:[e],description:`${r} agent step`}});return{name:n(e),version:`1.0.0`,description:`Copilot agents flow from ${n(e)}`,steps:a,agents:i,artifacts_dir:`.spec`,install:[]}}};export{i as CopilotAdapter};
1
+ import{existsSync as e,readdirSync as t}from"node:fs";import{basename as n,join as r}from"node:path";var i=class{format=`copilot`;detect(t){return e(r(t,`.github`,`agents`))}async parse(e){let i=t(r(e,`.github`,`agents`),{withFileTypes:!0}).filter(e=>e.isFile()&&e.name.endsWith(`.md`)).map(e=>`.github/agents/${e.name}`),a=i.map((e,t)=>{let r=n(e,`.md`).toLowerCase().replace(/\.agent$/,``);return{id:r,name:r.charAt(0).toUpperCase()+r.slice(1),instruction:e,produces:[`${r}.md`],requires:t>0?[i[t-1].replace(/.*\//,``).replace(/\.md$/,``).toLowerCase()]:[],agents:[e],description:`${r} agent step`}});return{name:n(e),version:`1.0.0`,description:`Copilot agents flow from ${n(e)}`,steps:a,agents:i,artifacts_dir:`.spec`,install:[]}}};export{i as CopilotAdapter};
@@ -1 +1 @@
1
- const e={name:`aikit:basic`,version:`0.1.0`,description:`Quick development flow for bug fixes, small features, and refactoring`,steps:[{id:`design`,name:`Design Gate`,skill:`skills/design/SKILL.md`,produces:[`design-decisions.md`],requires:[],agents:[`Researcher-Alpha`,`Researcher-Beta`,`Researcher-Gamma`,`Researcher-Delta`],description:`Evaluate task type, run brainstorming for features, FORGE classification. Auto-skips for bug fixes and refactors.`},{id:`assess`,name:`Assessment`,skill:`skills/assess/SKILL.md`,produces:[`assessment.md`],requires:[`design-decisions.md`],agents:[`Explorer`,`Researcher-Alpha`],description:`Understand scope, analyze codebase, identify approach`},{id:`implement`,name:`Implementation`,skill:`skills/implement/SKILL.md`,produces:[`progress.md`],requires:[`assessment.md`],agents:[`Implementer`,`Frontend`],description:`Write code following the assessment plan`},{id:`verify`,name:`Verification`,skill:`skills/verify/SKILL.md`,produces:[`verify-report.md`],requires:[`progress.md`],agents:[`Code-Reviewer-Alpha`,`Security`],description:`Review code, run tests, validate changes`}],agents:[],artifacts_dir:`.spec`,install:[]},t={name:`aikit:advanced`,version:`0.1.0`,description:`Full development flow for new features, API design, and architecture changes`,steps:[{id:`design`,name:`Design Gate`,skill:`skills/design/SKILL.md`,produces:[`design-decisions.md`],requires:[],agents:[`Researcher-Alpha`,`Researcher-Beta`,`Researcher-Gamma`,`Researcher-Delta`],description:`Full brainstorming, FORGE classification, decision protocol with parallel research. 
ADR for critical-tier tasks.`},{id:`spec`,name:`Specification`,skill:`skills/spec/SKILL.md`,produces:[`spec.md`],requires:[`design-decisions.md`],agents:[`Researcher-Alpha`],description:`Elicit requirements, clarify scope, define acceptance criteria`},{id:`plan`,name:`Planning`,skill:`skills/plan/SKILL.md`,produces:[`plan.md`],requires:[`spec.md`],agents:[`Planner`,`Explorer`],description:`Analyze codebase, design architecture, create implementation plan`},{id:`task`,name:`Task Breakdown`,skill:`skills/task/SKILL.md`,produces:[`tasks.md`],requires:[`plan.md`],agents:[`Planner`,`Architect-Reviewer-Alpha`],description:`Break plan into ordered implementation tasks with dependencies`},{id:`execute`,name:`Execution`,skill:`skills/execute/SKILL.md`,produces:[`progress.md`],requires:[`tasks.md`],agents:[`Orchestrator`,`Implementer`,`Frontend`,`Refactor`],description:`Implement all tasks, write code, write tests`},{id:`verify`,name:`Verification`,skill:`skills/verify/SKILL.md`,produces:[`verify-report.md`],requires:[`progress.md`],agents:[`Code-Reviewer-Alpha`,`Code-Reviewer-Beta`,`Architect-Reviewer-Alpha`,`Architect-Reviewer-Beta`,`Security`],description:`Dual code review, architecture review, security review, test validation`}],agents:[],artifacts_dir:`.spec`,install:[]};function n(){return[{manifest:e,scaffoldDir:`scaffold/flows/aikit-basic`},{manifest:t,scaffoldDir:`scaffold/flows/aikit-advanced`}]}export{n as getBuiltinFlows};
1
+ const e={name:`aikit:basic`,version:`0.1.0`,description:`Quick development flow for bug fixes, small features, and refactoring`,steps:[{id:`design`,name:`Design Gate`,instruction:`steps/design/README.md`,produces:[`design-decisions.md`],requires:[],agents:[`Researcher-Alpha`,`Researcher-Beta`,`Researcher-Gamma`,`Researcher-Delta`],description:`Evaluate task type, run brainstorming for features, FORGE classification. Auto-skips for bug fixes and refactors.`},{id:`assess`,name:`Assessment`,instruction:`steps/assess/README.md`,produces:[`assessment.md`],requires:[`design-decisions.md`],agents:[`Explorer`,`Researcher-Alpha`],description:`Understand scope, analyze codebase, identify approach`},{id:`implement`,name:`Implementation`,instruction:`steps/implement/README.md`,produces:[`progress.md`],requires:[`assessment.md`],agents:[`Implementer`,`Frontend`],description:`Write code following the assessment plan`},{id:`verify`,name:`Verification`,instruction:`steps/verify/README.md`,produces:[`verify-report.md`],requires:[`progress.md`],agents:[`Code-Reviewer-Alpha`,`Security`],description:`Review code, run tests, validate changes`}],agents:[],artifacts_dir:`.spec`,install:[]},t={name:`aikit:advanced`,version:`0.1.0`,description:`Full development flow for new features, API design, and architecture changes`,steps:[{id:`design`,name:`Design Gate`,instruction:`steps/design/README.md`,produces:[`design-decisions.md`],requires:[],agents:[`Researcher-Alpha`,`Researcher-Beta`,`Researcher-Gamma`,`Researcher-Delta`],description:`Full brainstorming, FORGE classification, decision protocol with parallel research. 
ADR for critical-tier tasks.`},{id:`spec`,name:`Specification`,instruction:`steps/spec/README.md`,produces:[`spec.md`],requires:[`design-decisions.md`],agents:[`Researcher-Alpha`],description:`Elicit requirements, clarify scope, define acceptance criteria`},{id:`plan`,name:`Planning`,instruction:`steps/plan/README.md`,produces:[`plan.md`],requires:[`spec.md`],agents:[`Planner`,`Explorer`],description:`Analyze codebase, design architecture, create implementation plan`},{id:`task`,name:`Task Breakdown`,instruction:`steps/task/README.md`,produces:[`tasks.md`],requires:[`plan.md`],agents:[`Planner`,`Architect-Reviewer-Alpha`],description:`Break plan into ordered implementation tasks with dependencies`},{id:`execute`,name:`Execution`,instruction:`steps/execute/README.md`,produces:[`progress.md`],requires:[`tasks.md`],agents:[`Orchestrator`,`Implementer`,`Frontend`,`Refactor`],description:`Implement all tasks, write code, write tests`},{id:`verify`,name:`Verification`,instruction:`steps/verify/README.md`,produces:[`verify-report.md`],requires:[`progress.md`],agents:[`Code-Reviewer-Alpha`,`Code-Reviewer-Beta`,`Architect-Reviewer-Alpha`,`Architect-Reviewer-Beta`,`Security`],description:`Dual code review, architecture review, security review, test validation`}],agents:[],artifacts_dir:`.spec`,install:[]};function n(){return[{manifest:e,scaffoldDir:`scaffold/flows/aikit-basic`},{manifest:t,scaffoldDir:`scaffold/flows/aikit-advanced`}]}export{n as getBuiltinFlows};
@@ -1,2 +1,2 @@
1
- import{getAdapter as e,getAdapterForSource as t}from"./adapters/index.js";import{existsSync as n}from"node:fs";var r=class{async load(e){if(!n(e))return{success:!1,error:`Source directory not found: ${e}`};let r=t(e);if(!r)return{success:!1,error:`No format adapter matches source: ${e}`};try{let t=await r.parse(e),n=this.validate(t);return n.success?{success:!0,data:{manifest:t,format:r.format}}:n}catch(e){return{success:!1,error:`Failed to parse flow: ${e instanceof Error?e.message:String(e)}`}}}async loadWithFormat(t,n){let r=e(n);try{let e=await r.parse(t),n=this.validate(e);return n.success?{success:!0,data:e}:n}catch(e){return{success:!1,error:`Failed to parse flow: ${e instanceof Error?e.message:String(e)}`}}}validate(e){let t=[];e.name?.trim()||t.push(`Missing flow name`),e.version?.trim()||t.push(`Missing flow version`),e.steps?.length||t.push(`Flow must have at least one step`);let n=new Set(e.steps.map(e=>e.id));for(let r of e.steps??[]){r.id?.trim()||t.push(`Step missing id`),r.skill?.trim()||t.push(`Step "${r.id}" missing skill path`);for(let e of r.requires??[])n.has(e)||t.push(`Step "${r.id}" requires unknown step "${e}"`)}return n.size!==(e.steps?.length??0)&&t.push(`Duplicate step IDs found`),t.length>0?{success:!1,error:`Validation failed:\n${t.join(`
1
+ import{getAdapter as e,getAdapterForSource as t}from"./adapters/index.js";import{existsSync as n}from"node:fs";var r=class{async load(e){if(!n(e))return{success:!1,error:`Source directory not found: ${e}`};let r=t(e);if(!r)return{success:!1,error:`No format adapter matches source: ${e}`};try{let t=await r.parse(e),n=this.validate(t);return n.success?{success:!0,data:{manifest:t,format:r.format}}:n}catch(e){return{success:!1,error:`Failed to parse flow: ${e instanceof Error?e.message:String(e)}`}}}async loadWithFormat(t,n){let r=e(n);try{let e=await r.parse(t),n=this.validate(e);return n.success?{success:!0,data:e}:n}catch(e){return{success:!1,error:`Failed to parse flow: ${e instanceof Error?e.message:String(e)}`}}}validate(e){let t=[];e.name?.trim()||t.push(`Missing flow name`),e.version?.trim()||t.push(`Missing flow version`),e.steps?.length||t.push(`Flow must have at least one step`);let n=new Set(e.steps.map(e=>e.id));for(let r of e.steps??[]){r.id?.trim()||t.push(`Step missing id`),r.instruction?.trim()||t.push(`Step "${r.id}" missing instruction path`);for(let e of r.requires??[])n.has(e)||t.push(`Step "${r.id}" requires unknown step "${e}"`)}return n.size!==(e.steps?.length??0)&&t.push(`Duplicate step IDs found`),t.length>0?{success:!1,error:`Validation failed:\n${t.join(`
2
2
  `)}`}:{success:!0}}};export{r as FlowLoader};
@@ -8,8 +8,8 @@ interface FlowStep {
8
8
  id: string;
9
9
  /** Human-readable step name */
10
10
  name: string;
11
- /** Relative path to the step's SKILL.md file */
12
- skill: string;
11
+ /** Relative path to the step's instruction file */
12
+ instruction: string;
13
13
  /** Artifact filenames this step produces */
14
14
  produces: string[];
15
15
  /** Step IDs this step depends on (must be completed first) */
@@ -9,6 +9,7 @@ import { KBConfig } from "../../core/dist/index.js";
9
9
  declare class SmartIndexScheduler {
10
10
  private readonly indexer;
11
11
  private readonly config;
12
+ private readonly store?;
12
13
  private trickleTimer;
13
14
  private readonly trickleIntervalMs;
14
15
  private readonly batchSize;
@@ -16,7 +17,9 @@ declare class SmartIndexScheduler {
16
17
  private changedFiles;
17
18
  private lastRefreshTime;
18
19
  private refreshing;
19
- constructor(indexer: IncrementalIndexer, config: KBConfig);
20
+ constructor(indexer: IncrementalIndexer, config: KBConfig, store?: {
21
+ createFtsIndex: () => Promise<void>;
22
+ } | undefined);
20
23
  /** Start the trickle indexing loop. */
21
24
  start(): void;
22
25
  /** Stop the scheduler and clear all timers. */
@@ -1 +1 @@
1
- import{statSync as e}from"node:fs";import{createLogger as t}from"../../core/dist/index.js";import{availableParallelism as n,loadavg as r}from"node:os";const i=t(`smart-index`),a=1.5;var o=class{trickleTimer=null;trickleIntervalMs;batchSize;priorityQueue=[];changedFiles=[];lastRefreshTime=0;refreshing=!1;constructor(e,t){this.indexer=e,this.config=t,this.trickleIntervalMs=this.readPositiveIntEnv(`AIKIT_SMART_TRICKLE_MS`,12e4),this.batchSize=this.readPositiveIntEnv(`AIKIT_SMART_BATCH_SIZE`,1)}start(){this.stop(),i.info(`Smart index scheduler started (trickle mode)`,{intervalMs:this.trickleIntervalMs,batchSize:this.batchSize}),this.scheduleTick()}stop(){this.trickleTimer&&=(clearTimeout(this.trickleTimer),null)}prioritize(...t){let n=[...new Set(t.filter(Boolean))].filter(t=>{try{return!e(t).isDirectory()}catch{return console.debug(`smart-index: skipping non-existent path: ${t}`),!1}});for(let e of n){let t=this.priorityQueue.indexOf(e);t>=0&&this.priorityQueue.splice(t,1)}for(let e of n.reverse())this.priorityQueue.unshift(e);this.priorityQueue.length>500&&(this.priorityQueue.length=500),n.length>0&&i.info(`Files prioritized for trickle indexing`,{added:n.length,queued:this.priorityQueue.length})}getState(){return{mode:`smart`,queueSize:this.priorityQueue.length,changedFilesSize:this.changedFiles.length,intervalMs:this.trickleIntervalMs,batchSize:this.batchSize,running:this.trickleTimer!==null}}readPositiveIntEnv(e,t){let n=Number(process.env[e]);return Number.isFinite(n)&&n>0?n:t}scheduleTick(){this.trickleTimer=setTimeout(()=>void this.tick(),this.trickleIntervalMs),this.trickleTimer.unref&&this.trickleTimer.unref()}async tick(){try{if(this.indexer.isIndexing){i.info(`Skipping trickle tick — indexing already in progress`);return}let e=this.getCpuCount(),t=r()[0];if(e>0&&t/e>a){i.info(`Skipping trickle tick — system load too high`,{load:t.toFixed(2),cpuCount:e,threshold:a});return}let n=await this.pickFiles();if(n.length===0){await 
this.maybeRefreshChangedFiles();return}i.info(`Trickle indexing tick started`,{count:n.length,files:n});let o=await this.indexer.indexFiles(this.config,n);this.changedFiles=this.changedFiles.filter(e=>!n.includes(e)),i.info(`Trickle indexing tick complete`,{filesProcessed:o.filesProcessed,filesSkipped:o.filesSkipped,chunksCreated:o.chunksCreated})}catch(e){i.error(`Trickle indexing tick failed`,{error:String(e)})}finally{this.scheduleTick()}}getCpuCount(){try{return typeof n==`function`?n():4}catch{return 4}}async pickFiles(){let e=[];for(;e.length<this.batchSize&&this.priorityQueue.length>0;){let t=this.priorityQueue.shift();t&&!e.includes(t)&&e.push(t)}if(e.length<this.batchSize)for(await this.maybeRefreshChangedFiles();e.length<this.batchSize&&this.changedFiles.length>0;){let t=this.changedFiles.shift();t&&!e.includes(t)&&e.push(t)}return e}async maybeRefreshChangedFiles(){let e=Date.now();if(!(this.refreshing||this.changedFiles.length>0&&e-this.lastRefreshTime<6e5)){this.refreshing=!0;try{this.changedFiles=await this.indexer.getChangedFiles(this.config),this.lastRefreshTime=e,this.changedFiles.length>0&&i.info(`Refreshed changed files for trickle indexing`,{count:this.changedFiles.length})}catch(e){i.error(`Failed to refresh changed files for trickle indexing`,{error:String(e)})}finally{this.refreshing=!1}}}};export{o as SmartIndexScheduler};
1
+ import{statSync as e}from"node:fs";import{createLogger as t}from"../../core/dist/index.js";import{availableParallelism as n,loadavg as r}from"node:os";const i=t(`smart-index`),a=1.5;var o=class{trickleTimer=null;trickleIntervalMs;batchSize;priorityQueue=[];changedFiles=[];lastRefreshTime=0;refreshing=!1;constructor(e,t,n){this.indexer=e,this.config=t,this.store=n,this.trickleIntervalMs=t.indexing.trickleIntervalMs??this.readPositiveIntEnv(`AIKIT_SMART_TRICKLE_MS`,3e4),this.batchSize=t.indexing.trickleBatchSize??this.readPositiveIntEnv(`AIKIT_SMART_BATCH_SIZE`,1)}start(){this.stop(),i.info(`Smart index scheduler started (trickle mode)`,{intervalMs:this.trickleIntervalMs,batchSize:this.batchSize}),this.scheduleTick()}stop(){this.trickleTimer&&=(clearTimeout(this.trickleTimer),null)}prioritize(...t){let n=[...new Set(t.filter(Boolean))].filter(t=>{try{return!e(t).isDirectory()}catch{return console.debug(`smart-index: skipping non-existent path: ${t}`),!1}});for(let e of n){let t=this.priorityQueue.indexOf(e);t>=0&&this.priorityQueue.splice(t,1)}for(let e of n.reverse())this.priorityQueue.unshift(e);this.priorityQueue.length>500&&(this.priorityQueue.length=500),n.length>0&&i.info(`Files prioritized for trickle indexing`,{added:n.length,queued:this.priorityQueue.length})}getState(){return{mode:`smart`,queueSize:this.priorityQueue.length,changedFilesSize:this.changedFiles.length,intervalMs:this.trickleIntervalMs,batchSize:this.batchSize,running:this.trickleTimer!==null}}readPositiveIntEnv(e,t){let n=Number(process.env[e]);return Number.isFinite(n)&&n>0?n:t}scheduleTick(){this.trickleTimer=setTimeout(()=>void this.tick(),this.trickleIntervalMs),this.trickleTimer.unref&&this.trickleTimer.unref()}async tick(){try{if(this.indexer.isIndexing){i.info(`Skipping trickle tick — indexing already in progress`);return}let e=this.getCpuCount(),t=r()[0];if(e>0&&t/e>a){i.info(`Skipping trickle tick — system load too high`,{load:t.toFixed(2),cpuCount:e,threshold:a});return}let n=await 
this.pickFiles();if(n.length===0){await this.maybeRefreshChangedFiles();return}i.info(`Trickle indexing tick started`,{count:n.length,files:n});let o=await this.indexer.indexFiles(this.config,n);if(this.store)try{await this.store.createFtsIndex()}catch(e){i.warn(`FTS index rebuild failed after trickle tick`,{error:String(e)})}this.changedFiles=this.changedFiles.filter(e=>!n.includes(e)),i.info(`Trickle indexing tick complete`,{filesProcessed:o.filesProcessed,filesSkipped:o.filesSkipped,chunksCreated:o.chunksCreated})}catch(e){i.error(`Trickle indexing tick failed`,{error:String(e)})}finally{this.scheduleTick()}}getCpuCount(){try{return typeof n==`function`?n():4}catch{return 4}}async pickFiles(){let e=[];for(;e.length<this.batchSize&&this.priorityQueue.length>0;){let t=this.priorityQueue.shift();t&&!e.includes(t)&&e.push(t)}if(e.length<this.batchSize)for(await this.maybeRefreshChangedFiles();e.length<this.batchSize&&this.changedFiles.length>0;){let t=this.changedFiles.shift();t&&!e.includes(t)&&e.push(t)}return e}async maybeRefreshChangedFiles(){let e=Date.now();if(!(this.refreshing||this.changedFiles.length>0&&e-this.lastRefreshTime<6e5)){this.refreshing=!0;try{this.changedFiles=await this.indexer.getChangedFiles(this.config),this.lastRefreshTime=e,this.changedFiles.length>0&&i.info(`Refreshed changed files for trickle indexing`,{count:this.changedFiles.length})}catch(e){i.error(`Failed to refresh changed files for trickle indexing`,{error:String(e)})}finally{this.refreshing=!1}}}};export{o as SmartIndexScheduler};
@@ -1 +1 @@
1
- import{readFileSync as e}from"node:fs";import{dirname as t,resolve as n}from"node:path";import{fileURLToPath as r}from"node:url";import{createLogger as i,serializeError as a}from"../../core/dist/index.js";import{parseArgs as o}from"node:util";const s=t(r(import.meta.url)),c=(()=>{try{let t=n(s,`..`,`..`,`..`,`package.json`);return JSON.parse(e(t,`utf-8`)).version??`0.0.0`}catch{return`0.0.0`}})(),l=i(`server`),{values:u}=o({options:{transport:{type:`string`,default:process.env.AIKIT_TRANSPORT??`stdio`},port:{type:`string`,default:process.env.AIKIT_PORT??`3210`}}});async function d(){if(process.on(`unhandledRejection`,e=>{l.error(`Unhandled rejection`,a(e))}),l.info(`Starting MCP AI Kit server`,{version:c}),u.transport===`http`){let[{default:e},{loadConfig:t,resolveIndexMode:n},{registerDashboardRoutes:r,resolveDashboardDir:i}]=await Promise.all([import(`express`),import(`./config.js`),import(`./dashboard-static.js`)]),o=t();l.info(`Config loaded`,{sourceCount:o.sources.length,storePath:o.store.path});let s=e();s.use(e.json());let c=Number(u.port);s.use((e,t,n)=>{if(t.setHeader(`Access-Control-Allow-Origin`,process.env.AIKIT_CORS_ORIGIN??`http://localhost:${c}`),t.setHeader(`Access-Control-Allow-Methods`,`GET, POST, DELETE, OPTIONS`),t.setHeader(`Access-Control-Allow-Headers`,`Content-Type, Authorization`),e.method===`OPTIONS`){t.status(204).end();return}n()}),r(s,i(),l),s.get(`/health`,(e,t)=>{t.json({status:`ok`})});let d=!1,f=null,p=null,m=null,h=Promise.resolve();s.post(`/mcp`,async(e,t)=>{if(!d||!p||!m){t.status(503).json({jsonrpc:`2.0`,error:{code:-32603,message:`Server initializing — please retry in a few seconds`},id:null});return}let n=h,r;h=new Promise(e=>{r=e}),await n;try{let n=new m({sessionIdGenerator:void 0});await p.connect(n),await n.handleRequest(e,t,e.body),n.close()}catch(e){if(l.error(`MCP handler error`,a(e)),!t.headersSent){let n=e instanceof Error?e.message:String(e),r=n.includes(`Not 
Acceptable`);t.status(r?406:500).json({jsonrpc:`2.0`,error:{code:r?-32e3:-32603,message:r?n:`Internal server error`},id:null})}}finally{r()}}),s.get(`/mcp`,(e,t)=>{t.writeHead(405).end(JSON.stringify({jsonrpc:`2.0`,error:{code:-32e3,message:`Method not allowed.`},id:null}))}),s.delete(`/mcp`,(e,t)=>{t.writeHead(405).end(JSON.stringify({jsonrpc:`2.0`,error:{code:-32e3,message:`Method not allowed.`},id:null}))});let g=s.listen(c,`127.0.0.1`,()=>{l.info(`MCP server listening`,{url:`http://127.0.0.1:${c}/mcp`,port:c}),setTimeout(async()=>{try{let[{createLazyServer:e,ALL_TOOL_NAMES:t},{StreamableHTTPServerTransport:r},{checkForUpdates:i,autoUpgradeScaffold:s}]=await Promise.all([import(`./server.js`),import(`@modelcontextprotocol/sdk/server/streamableHttp.js`),import(`./version-check.js`)]);i(),s();let c=n(o),u=e(o,c);p=u.server,m=r,d=!0,l.info(`MCP server configured (lazy — AI Kit initializing in background)`,{toolCount:t.length,resourceCount:2}),u.startInit(),c===`auto`?u.ready.then(async()=>{try{let e=o.sources.map(e=>e.path).join(`, `);l.info(`Running initial index`,{sourcePaths:e}),await u.runInitialIndex(),l.info(`Initial index complete`)}catch(e){l.error(`Initial index failed; will retry on aikit_reindex`,a(e))}}).catch(e=>l.error(`AI Kit init or indexing failed`,a(e))):c===`smart`?u.ready.then(async()=>{try{if(!u.kb)throw Error(`AI Kit components are not available after initialization`);let{SmartIndexScheduler:e}=await import(`../../indexer/dist/index.js`);f=new e(u.kb.indexer,o),f.start(),u.setSmartScheduler(f),l.info(`Smart index scheduler started (HTTP mode)`)}catch(e){l.error(`Failed to start smart index scheduler`,a(e))}}).catch(e=>l.error(`AI Kit initialization failed`,a(e))):(u.ready.catch(e=>l.error(`AI Kit initialization failed`,a(e))),l.info(`Initial full indexing skipped in HTTP mode`,{indexMode:c}))}catch(e){l.error(`Failed to load server modules`,a(e))}},100)}),_=async e=>{l.info(`Shutdown signal received`,{signal:e}),f?.stop(),g.close(),p&&await 
p.close(),process.exit(0)};process.on(`SIGINT`,()=>_(`SIGINT`)),process.on(`SIGTERM`,()=>_(`SIGTERM`))}else{let[{loadConfig:e,reconfigureForWorkspace:t,resolveIndexMode:n},{createLazyServer:i},{checkForUpdates:o,autoUpgradeScaffold:s},{RootsListChangedNotificationSchema:c}]=await Promise.all([import(`./config.js`),import(`./server.js`),import(`./version-check.js`),import(`@modelcontextprotocol/sdk/types.js`)]),u=e();l.info(`Config loaded`,{sourceCount:u.sources.length,storePath:u.store.path}),o(),s();let d=n(u),f=i(u,d),{server:p,startInit:m,ready:h,runInitialIndex:g}=f,{StdioServerTransport:_}=await import(`@modelcontextprotocol/sdk/server/stdio.js`),v=new _;await p.connect(v),l.info(`MCP server started`,{transport:`stdio`});let y=e=>{if(e.length===0)return!1;let n=e[0].uri,i=n.startsWith(`file://`)?r(n):n;return l.info(`MCP roots resolved`,{rootUri:n,rootPath:i,rootCount:e.length}),t(u,i),!0},b=!1;try{b=y((await p.server.listRoots()).roots),b||l.info(`No MCP roots yet; waiting for roots/list_changed notification`)}catch(e){l.warn(`MCP roots/list not supported by client; using cwd fallback`,{cwd:process.cwd(),...a(e)}),b=!0}b||=await new Promise(e=>{let t=setTimeout(()=>{l.warn(`Timed out waiting for MCP roots/list_changed; using cwd fallback`,{cwd:process.cwd()}),e(!1)},5e3);p.server.setNotificationHandler(c,async()=>{clearTimeout(t);try{e(y((await p.server.listRoots()).roots))}catch(t){l.warn(`roots/list retry failed after notification`,a(t)),e(!1)}})}),m();let x=null,S=()=>{x&&clearTimeout(x),x=setTimeout(()=>{l.info(`Auto-shutdown: no activity for 30 minutes — exiting`),process.exit(0)},18e5),x.unref&&x.unref()};S(),process.stdin.on(`data`,()=>S()),h.catch(e=>{l.error(`Initialization failed — server will continue with limited tools`,a(e))}),d===`auto`?g().catch(e=>l.error(`Initial index failed`,a(e))):d===`smart`?h.then(async()=>{try{if(!f.kb)throw Error(`AI Kit components are not available after initialization`);let{SmartIndexScheduler:e}=await 
import(`../../indexer/dist/index.js`),t=new e(f.kb.indexer,u);t.start(),f.setSmartScheduler(t),l.info(`Smart index scheduler started (stdio mode)`)}catch(e){l.error(`Failed to start smart index scheduler`,a(e))}}).catch(e=>l.error(`AI Kit init failed for smart scheduler`,a(e))):l.warn(`Initial full indexing skipped; use aikit_reindex to index manually`,{indexMode:d})}}d().catch(e=>{l.error(`Fatal error`,a(e)),process.exit(1)});export{};
1
+ import{readFileSync as e}from"node:fs";import{dirname as t,resolve as n}from"node:path";import{fileURLToPath as r}from"node:url";import{createLogger as i,serializeError as a}from"../../core/dist/index.js";import{parseArgs as o}from"node:util";const s=t(r(import.meta.url)),c=(()=>{try{let t=n(s,`..`,`..`,`..`,`package.json`);return JSON.parse(e(t,`utf-8`)).version??`0.0.0`}catch{return`0.0.0`}})(),l=i(`server`),{values:u}=o({options:{transport:{type:`string`,default:process.env.AIKIT_TRANSPORT??`stdio`},port:{type:`string`,default:process.env.AIKIT_PORT??`3210`}}});async function d(){if(process.on(`unhandledRejection`,e=>{l.error(`Unhandled rejection`,a(e))}),l.info(`Starting MCP AI Kit server`,{version:c}),u.transport===`http`){let[{default:e},{loadConfig:t,resolveIndexMode:n},{registerDashboardRoutes:r,resolveDashboardDir:i}]=await Promise.all([import(`express`),import(`./config.js`),import(`./dashboard-static.js`)]),o=t();l.info(`Config loaded`,{sourceCount:o.sources.length,storePath:o.store.path});let s=e();s.use(e.json());let c=Number(u.port);s.use((e,t,n)=>{if(t.setHeader(`Access-Control-Allow-Origin`,process.env.AIKIT_CORS_ORIGIN??`http://localhost:${c}`),t.setHeader(`Access-Control-Allow-Methods`,`GET, POST, DELETE, OPTIONS`),t.setHeader(`Access-Control-Allow-Headers`,`Content-Type, Authorization`),e.method===`OPTIONS`){t.status(204).end();return}n()}),r(s,i(),l),s.get(`/health`,(e,t)=>{t.json({status:`ok`})});let d=!1,f=null,p=null,m=null,h=Promise.resolve();s.post(`/mcp`,async(e,t)=>{if(!d||!p||!m){t.status(503).json({jsonrpc:`2.0`,error:{code:-32603,message:`Server initializing — please retry in a few seconds`},id:null});return}let n=h,r;h=new Promise(e=>{r=e}),await n;try{let n=new m({sessionIdGenerator:void 0});await p.connect(n),await n.handleRequest(e,t,e.body),n.close()}catch(e){if(l.error(`MCP handler error`,a(e)),!t.headersSent){let n=e instanceof Error?e.message:String(e),r=n.includes(`Not 
Acceptable`);t.status(r?406:500).json({jsonrpc:`2.0`,error:{code:r?-32e3:-32603,message:r?n:`Internal server error`},id:null})}}finally{r()}}),s.get(`/mcp`,(e,t)=>{t.writeHead(405).end(JSON.stringify({jsonrpc:`2.0`,error:{code:-32e3,message:`Method not allowed.`},id:null}))}),s.delete(`/mcp`,(e,t)=>{t.writeHead(405).end(JSON.stringify({jsonrpc:`2.0`,error:{code:-32e3,message:`Method not allowed.`},id:null}))});let g=s.listen(c,`127.0.0.1`,()=>{l.info(`MCP server listening`,{url:`http://127.0.0.1:${c}/mcp`,port:c}),setTimeout(async()=>{try{let[{createLazyServer:e,ALL_TOOL_NAMES:t},{StreamableHTTPServerTransport:r},{checkForUpdates:i,autoUpgradeScaffold:s}]=await Promise.all([import(`./server.js`),import(`@modelcontextprotocol/sdk/server/streamableHttp.js`),import(`./version-check.js`)]);i(),s();let c=n(o),u=e(o,c);p=u.server,m=r,d=!0,l.info(`MCP server configured (lazy — AI Kit initializing in background)`,{toolCount:t.length,resourceCount:2}),u.startInit(),c===`auto`?u.ready.then(async()=>{try{let e=o.sources.map(e=>e.path).join(`, `);l.info(`Running initial index`,{sourcePaths:e}),await u.runInitialIndex(),l.info(`Initial index complete`)}catch(e){l.error(`Initial index failed; will retry on aikit_reindex`,a(e))}}).catch(e=>l.error(`AI Kit init or indexing failed`,a(e))):c===`smart`?u.ready.then(async()=>{try{if(!u.kb)throw Error(`AI Kit components are not available after initialization`);let{SmartIndexScheduler:e}=await import(`../../indexer/dist/index.js`);f=new e(u.kb.indexer,o,u.kb.store),f.start(),u.setSmartScheduler(f),l.info(`Smart index scheduler started (HTTP mode)`)}catch(e){l.error(`Failed to start smart index scheduler`,a(e))}}).catch(e=>l.error(`AI Kit initialization failed`,a(e))):(u.ready.catch(e=>l.error(`AI Kit initialization failed`,a(e))),l.info(`Initial full indexing skipped in HTTP mode`,{indexMode:c}))}catch(e){l.error(`Failed to load server modules`,a(e))}},100)}),_=async e=>{l.info(`Shutdown signal 
received`,{signal:e}),f?.stop(),g.close(),p&&await p.close(),process.exit(0)};process.on(`SIGINT`,()=>_(`SIGINT`)),process.on(`SIGTERM`,()=>_(`SIGTERM`))}else{let[{loadConfig:e,reconfigureForWorkspace:t,resolveIndexMode:n},{createLazyServer:i},{checkForUpdates:o,autoUpgradeScaffold:s},{RootsListChangedNotificationSchema:c}]=await Promise.all([import(`./config.js`),import(`./server.js`),import(`./version-check.js`),import(`@modelcontextprotocol/sdk/types.js`)]),u=e();l.info(`Config loaded`,{sourceCount:u.sources.length,storePath:u.store.path}),o(),s();let d=n(u),f=i(u,d),{server:p,startInit:m,ready:h,runInitialIndex:g}=f,{StdioServerTransport:_}=await import(`@modelcontextprotocol/sdk/server/stdio.js`),v=new _;await p.connect(v),l.info(`MCP server started`,{transport:`stdio`});let y=e=>{if(e.length===0)return!1;let n=e[0].uri,i=n.startsWith(`file://`)?r(n):n;return l.info(`MCP roots resolved`,{rootUri:n,rootPath:i,rootCount:e.length}),t(u,i),!0},b=!1;try{b=y((await p.server.listRoots()).roots),b||l.info(`No MCP roots yet; waiting for roots/list_changed notification`)}catch(e){l.warn(`MCP roots/list not supported by client; using cwd fallback`,{cwd:process.cwd(),...a(e)}),b=!0}b||=await new Promise(e=>{let t=setTimeout(()=>{l.warn(`Timed out waiting for MCP roots/list_changed; using cwd fallback`,{cwd:process.cwd()}),e(!1)},5e3);p.server.setNotificationHandler(c,async()=>{clearTimeout(t);try{e(y((await p.server.listRoots()).roots))}catch(t){l.warn(`roots/list retry failed after notification`,a(t)),e(!1)}})}),m();let x=null,S=()=>{x&&clearTimeout(x),x=setTimeout(()=>{l.info(`Auto-shutdown: no activity for 30 minutes — exiting`),process.exit(0)},18e5),x.unref&&x.unref()};S(),process.stdin.on(`data`,()=>S()),h.catch(e=>{l.error(`Initialization failed — server will continue with limited tools`,a(e))}),d===`auto`?g().catch(e=>l.error(`Initial index failed`,a(e))):d===`smart`?h.then(async()=>{try{if(!f.kb)throw Error(`AI Kit components are not available after 
initialization`);let{SmartIndexScheduler:e}=await import(`../../indexer/dist/index.js`),t=new e(f.kb.indexer,u,f.kb.store);t.start(),f.setSmartScheduler(t),l.info(`Smart index scheduler started (stdio mode)`)}catch(e){l.error(`Failed to start smart index scheduler`,a(e))}}).catch(e=>l.error(`AI Kit init failed for smart scheduler`,a(e))):l.warn(`Initial full indexing skipped; use aikit_reindex to index manually`,{indexMode:d})}}d().catch(e=>{l.error(`Fatal error`,a(e)),process.exit(1)});export{};
@@ -1 +1 @@
1
- const e={search:{title:`Hybrid Search`,annotations:{readOnlyHint:!0,idempotentHint:!0}},find:{title:`Federated Find`,annotations:{readOnlyHint:!0,idempotentHint:!0}},symbol:{title:`Symbol Resolver`,annotations:{readOnlyHint:!0,idempotentHint:!0}},trace:{title:`Data Flow Tracer`,annotations:{readOnlyHint:!0,idempotentHint:!0}},scope_map:{title:`Task Scope Map`,annotations:{readOnlyHint:!0,idempotentHint:!0}},lookup:{title:`Chunk Lookup`,annotations:{readOnlyHint:!0,idempotentHint:!0}},dead_symbols:{title:`Dead Symbol Finder`,annotations:{readOnlyHint:!0,idempotentHint:!0}},file_summary:{title:`File Summary`,annotations:{readOnlyHint:!0,idempotentHint:!0}},analyze_structure:{title:`Analyze Structure`,annotations:{readOnlyHint:!0,idempotentHint:!0}},analyze_dependencies:{title:`Analyze Dependencies`,annotations:{readOnlyHint:!0,idempotentHint:!0}},analyze_symbols:{title:`Analyze Symbols`,annotations:{readOnlyHint:!0,idempotentHint:!0}},analyze_patterns:{title:`Analyze Patterns`,annotations:{readOnlyHint:!0,idempotentHint:!0}},analyze_entry_points:{title:`Analyze Entry Points`,annotations:{readOnlyHint:!0,idempotentHint:!0}},analyze_diagram:{title:`Analyze Diagram`,annotations:{readOnlyHint:!0,idempotentHint:!0}},blast_radius:{title:`Blast Radius`,annotations:{readOnlyHint:!0,idempotentHint:!0}},brainstorm:{title:`Brainstorm Session`,annotations:{readOnlyHint:!0,idempotentHint:!0}},remember:{title:`Remember Knowledge`,annotations:{readOnlyHint:!1}},read:{title:`Read Knowledge`,annotations:{readOnlyHint:!0,idempotentHint:!0}},update:{title:`Update Knowledge`,annotations:{readOnlyHint:!1}},forget:{title:`Forget Knowledge`,annotations:{readOnlyHint:!1,destructiveHint:!0}},list:{title:`List Knowledge`,annotations:{readOnlyHint:!0,idempotentHint:!0}},produce_knowledge:{title:`Produce Knowledge`,annotations:{readOnlyHint:!0,idempotentHint:!0}},compact:{title:`Semantic Compactor`,annotations:{readOnlyHint:!0,idempotentHint:!0}},digest:{title:`Multi-Source 
Digest`,annotations:{readOnlyHint:!0,idempotentHint:!0}},stratum_card:{title:`Stratum Card`,annotations:{readOnlyHint:!0,idempotentHint:!0}},forge_ground:{title:`FORGE Ground`,annotations:{readOnlyHint:!0,idempotentHint:!0}},forge_classify:{title:`FORGE Classify`,annotations:{readOnlyHint:!0,idempotentHint:!0}},evidence_map:{title:`Evidence Map`,annotations:{readOnlyHint:!1}},present:{title:`Rich Content Presenter`,annotations:{readOnlyHint:!0,idempotentHint:!0}},check:{title:`Typecheck & Lint`,annotations:{readOnlyHint:!0,openWorldHint:!0}},test_run:{title:`Run Tests`,annotations:{readOnlyHint:!0,openWorldHint:!0}},eval:{title:`Evaluate Code`,annotations:{readOnlyHint:!1,openWorldHint:!0}},batch:{title:`Batch Operations`,annotations:{readOnlyHint:!0,idempotentHint:!0}},audit:{title:`Project Audit`,annotations:{readOnlyHint:!0,idempotentHint:!0}},rename:{title:`Rename Symbol`,annotations:{readOnlyHint:!1,destructiveHint:!0}},restore:{title:`Restore`,annotations:{readOnlyHint:!1}},codemod:{title:`Codemod`,annotations:{readOnlyHint:!1,destructiveHint:!0}},data_transform:{title:`Data Transform`,annotations:{readOnlyHint:!0,idempotentHint:!0}},stash:{title:`Stash Values`,annotations:{readOnlyHint:!1}},checkpoint:{title:`Session Checkpoint`,annotations:{readOnlyHint:!1}},workset:{title:`Workset Manager`,annotations:{readOnlyHint:!1}},lane:{title:`Exploration Lane`,annotations:{readOnlyHint:!1}},git_context:{title:`Git Context`,annotations:{readOnlyHint:!0,idempotentHint:!0}},diff_parse:{title:`Diff Parser`,annotations:{readOnlyHint:!0,idempotentHint:!0}},parse_output:{title:`Parse Build Output`,annotations:{readOnlyHint:!0,idempotentHint:!0}},process:{title:`Process Manager`,annotations:{readOnlyHint:!1,openWorldHint:!0}},watch:{title:`File Watcher`,annotations:{readOnlyHint:!1,openWorldHint:!0}},delegate:{title:`Delegate Task`,annotations:{readOnlyHint:!1,openWorldHint:!0}},config:{title:`Configuration Manager`,annotations:{readOnlyHint:!1}},status:{title:`AI Kit 
Status`,annotations:{readOnlyHint:!0,idempotentHint:!0}},health:{title:`Health Check`,annotations:{readOnlyHint:!0,idempotentHint:!0}},reindex:{title:`Reindex`,annotations:{readOnlyHint:!1}},onboard:{title:`Onboard Codebase`,annotations:{readOnlyHint:!1}},graph:{title:`Knowledge Graph`,annotations:{readOnlyHint:!1}},guide:{title:`Tool Guide`,annotations:{readOnlyHint:!0,idempotentHint:!0}},replay:{title:`Replay History`,annotations:{readOnlyHint:!0,idempotentHint:!0}},changelog:{title:`Generate Changelog`,annotations:{readOnlyHint:!0,idempotentHint:!0}},regex_test:{title:`Regex Tester`,annotations:{readOnlyHint:!0,idempotentHint:!0}},encode:{title:`Encode / Decode`,annotations:{readOnlyHint:!0,idempotentHint:!0}},measure:{title:`Code Metrics`,annotations:{readOnlyHint:!0,idempotentHint:!0}},schema_validate:{title:`Schema Validator`,annotations:{readOnlyHint:!0,idempotentHint:!0}},snippet:{title:`Code Snippets`,annotations:{readOnlyHint:!1}},env:{title:`Environment Info`,annotations:{readOnlyHint:!0,idempotentHint:!0}},time:{title:`Date & Time`,annotations:{readOnlyHint:!0,idempotentHint:!0}},web_fetch:{title:`Web Fetch`,annotations:{readOnlyHint:!0,openWorldHint:!0}},web_search:{title:`Web Search`,annotations:{readOnlyHint:!0,openWorldHint:!0}},http:{title:`HTTP Request`,annotations:{readOnlyHint:!1,openWorldHint:!0}},queue:{title:`Operation Queue`,annotations:{readOnlyHint:!1}},bridge_push:{title:`Bridge Push`,annotations:{readOnlyHint:!1}},bridge_pull:{title:`Bridge Pull`,annotations:{readOnlyHint:!0,idempotentHint:!0}},bridge_sync:{title:`Bridge Sync Status`,annotations:{readOnlyHint:!0,idempotentHint:!0}},evolution_state:{title:`Evolution State`,annotations:{readOnlyHint:!1}},policy_check:{title:`Policy Check`,annotations:{readOnlyHint:!0,idempotentHint:!0}},flow_list:{title:`Flow List`,annotations:{readOnlyHint:!0,idempotentHint:!0}},flow_info:{title:`Flow Info`,annotations:{readOnlyHint:!0,idempotentHint:!0}},flow_start:{title:`Flow 
Start`,annotations:{readOnlyHint:!1}},flow_step:{title:`Flow Step`,annotations:{readOnlyHint:!1}},flow_status:{title:`Flow Status`,annotations:{readOnlyHint:!0,idempotentHint:!0}},flow_reset:{title:`Flow Reset`,annotations:{readOnlyHint:!1}},flow_read_skill:{title:`Flow Read Skill`,annotations:{readOnlyHint:!0,idempotentHint:!0}}};function t(t){return e[t]??{title:t,annotations:{readOnlyHint:!1}}}export{e as TOOL_METADATA,t as getToolMeta};
1
+ const e={search:{title:`Hybrid Search`,annotations:{readOnlyHint:!0,idempotentHint:!0}},find:{title:`Federated Find`,annotations:{readOnlyHint:!0,idempotentHint:!0}},symbol:{title:`Symbol Resolver`,annotations:{readOnlyHint:!0,idempotentHint:!0}},trace:{title:`Data Flow Tracer`,annotations:{readOnlyHint:!0,idempotentHint:!0}},scope_map:{title:`Task Scope Map`,annotations:{readOnlyHint:!0,idempotentHint:!0}},lookup:{title:`Chunk Lookup`,annotations:{readOnlyHint:!0,idempotentHint:!0}},dead_symbols:{title:`Dead Symbol Finder`,annotations:{readOnlyHint:!0,idempotentHint:!0}},file_summary:{title:`File Summary`,annotations:{readOnlyHint:!0,idempotentHint:!0}},analyze_structure:{title:`Analyze Structure`,annotations:{readOnlyHint:!0,idempotentHint:!0}},analyze_dependencies:{title:`Analyze Dependencies`,annotations:{readOnlyHint:!0,idempotentHint:!0}},analyze_symbols:{title:`Analyze Symbols`,annotations:{readOnlyHint:!0,idempotentHint:!0}},analyze_patterns:{title:`Analyze Patterns`,annotations:{readOnlyHint:!0,idempotentHint:!0}},analyze_entry_points:{title:`Analyze Entry Points`,annotations:{readOnlyHint:!0,idempotentHint:!0}},analyze_diagram:{title:`Analyze Diagram`,annotations:{readOnlyHint:!0,idempotentHint:!0}},blast_radius:{title:`Blast Radius`,annotations:{readOnlyHint:!0,idempotentHint:!0}},brainstorm:{title:`Brainstorm Session`,annotations:{readOnlyHint:!0,idempotentHint:!0}},remember:{title:`Remember Knowledge`,annotations:{readOnlyHint:!1}},read:{title:`Read Knowledge`,annotations:{readOnlyHint:!0,idempotentHint:!0}},update:{title:`Update Knowledge`,annotations:{readOnlyHint:!1}},forget:{title:`Forget Knowledge`,annotations:{readOnlyHint:!1,destructiveHint:!0}},list:{title:`List Knowledge`,annotations:{readOnlyHint:!0,idempotentHint:!0}},produce_knowledge:{title:`Produce Knowledge`,annotations:{readOnlyHint:!0,idempotentHint:!0}},compact:{title:`Semantic Compactor`,annotations:{readOnlyHint:!0,idempotentHint:!0}},digest:{title:`Multi-Source 
Digest`,annotations:{readOnlyHint:!0,idempotentHint:!0}},stratum_card:{title:`Stratum Card`,annotations:{readOnlyHint:!0,idempotentHint:!0}},forge_ground:{title:`FORGE Ground`,annotations:{readOnlyHint:!0,idempotentHint:!0}},forge_classify:{title:`FORGE Classify`,annotations:{readOnlyHint:!0,idempotentHint:!0}},evidence_map:{title:`Evidence Map`,annotations:{readOnlyHint:!1}},present:{title:`Rich Content Presenter`,annotations:{readOnlyHint:!0,idempotentHint:!0}},check:{title:`Typecheck & Lint`,annotations:{readOnlyHint:!0,openWorldHint:!0}},test_run:{title:`Run Tests`,annotations:{readOnlyHint:!0,openWorldHint:!0}},eval:{title:`Evaluate Code`,annotations:{readOnlyHint:!1,openWorldHint:!0}},batch:{title:`Batch Operations`,annotations:{readOnlyHint:!0,idempotentHint:!0}},audit:{title:`Project Audit`,annotations:{readOnlyHint:!0,idempotentHint:!0}},rename:{title:`Rename Symbol`,annotations:{readOnlyHint:!1,destructiveHint:!0}},restore:{title:`Restore`,annotations:{readOnlyHint:!1}},codemod:{title:`Codemod`,annotations:{readOnlyHint:!1,destructiveHint:!0}},data_transform:{title:`Data Transform`,annotations:{readOnlyHint:!0,idempotentHint:!0}},stash:{title:`Stash Values`,annotations:{readOnlyHint:!1}},checkpoint:{title:`Session Checkpoint`,annotations:{readOnlyHint:!1}},workset:{title:`Workset Manager`,annotations:{readOnlyHint:!1}},lane:{title:`Exploration Lane`,annotations:{readOnlyHint:!1}},git_context:{title:`Git Context`,annotations:{readOnlyHint:!0,idempotentHint:!0}},diff_parse:{title:`Diff Parser`,annotations:{readOnlyHint:!0,idempotentHint:!0}},parse_output:{title:`Parse Build Output`,annotations:{readOnlyHint:!0,idempotentHint:!0}},process:{title:`Process Manager`,annotations:{readOnlyHint:!1,openWorldHint:!0}},watch:{title:`File Watcher`,annotations:{readOnlyHint:!1,openWorldHint:!0}},delegate:{title:`Delegate Task`,annotations:{readOnlyHint:!1,openWorldHint:!0}},config:{title:`Configuration Manager`,annotations:{readOnlyHint:!1}},status:{title:`AI Kit 
Status`,annotations:{readOnlyHint:!0,idempotentHint:!0}},health:{title:`Health Check`,annotations:{readOnlyHint:!0,idempotentHint:!0}},reindex:{title:`Reindex`,annotations:{readOnlyHint:!1}},onboard:{title:`Onboard Codebase`,annotations:{readOnlyHint:!1}},graph:{title:`Knowledge Graph`,annotations:{readOnlyHint:!1}},guide:{title:`Tool Guide`,annotations:{readOnlyHint:!0,idempotentHint:!0}},replay:{title:`Replay History`,annotations:{readOnlyHint:!0,idempotentHint:!0}},changelog:{title:`Generate Changelog`,annotations:{readOnlyHint:!0,idempotentHint:!0}},regex_test:{title:`Regex Tester`,annotations:{readOnlyHint:!0,idempotentHint:!0}},encode:{title:`Encode / Decode`,annotations:{readOnlyHint:!0,idempotentHint:!0}},measure:{title:`Code Metrics`,annotations:{readOnlyHint:!0,idempotentHint:!0}},schema_validate:{title:`Schema Validator`,annotations:{readOnlyHint:!0,idempotentHint:!0}},snippet:{title:`Code Snippets`,annotations:{readOnlyHint:!1}},env:{title:`Environment Info`,annotations:{readOnlyHint:!0,idempotentHint:!0}},time:{title:`Date & Time`,annotations:{readOnlyHint:!0,idempotentHint:!0}},web_fetch:{title:`Web Fetch`,annotations:{readOnlyHint:!0,openWorldHint:!0}},web_search:{title:`Web Search`,annotations:{readOnlyHint:!0,openWorldHint:!0}},http:{title:`HTTP Request`,annotations:{readOnlyHint:!1,openWorldHint:!0}},queue:{title:`Operation Queue`,annotations:{readOnlyHint:!1}},bridge_push:{title:`Bridge Push`,annotations:{readOnlyHint:!1}},bridge_pull:{title:`Bridge Pull`,annotations:{readOnlyHint:!0,idempotentHint:!0}},bridge_sync:{title:`Bridge Sync Status`,annotations:{readOnlyHint:!0,idempotentHint:!0}},evolution_state:{title:`Evolution State`,annotations:{readOnlyHint:!1}},policy_check:{title:`Policy Check`,annotations:{readOnlyHint:!0,idempotentHint:!0}},flow_list:{title:`Flow List`,annotations:{readOnlyHint:!0,idempotentHint:!0}},flow_info:{title:`Flow Info`,annotations:{readOnlyHint:!0,idempotentHint:!0}},flow_start:{title:`Flow 
Start`,annotations:{readOnlyHint:!1}},flow_step:{title:`Flow Step`,annotations:{readOnlyHint:!1}},flow_status:{title:`Flow Status`,annotations:{readOnlyHint:!0,idempotentHint:!0}},flow_reset:{title:`Flow Reset`,annotations:{readOnlyHint:!1}},flow_read_instruction:{title:`Flow Read Instruction`,annotations:{readOnlyHint:!0,idempotentHint:!0}}};function t(t){return e[t]??{title:t,annotations:{readOnlyHint:!1}}}export{e as TOOL_METADATA,t as getToolMeta};
@@ -1 +1 @@
1
- import{getToolMeta as e}from"../tool-metadata.js";import{existsSync as t}from"node:fs";import{basename as n,join as r,resolve as i}from"node:path";import{z as a}from"zod";import{readFile as o}from"node:fs/promises";import{createLogger as s,serializeError as c}from"../../../core/dist/index.js";import{homedir as l}from"node:os";const u=s(`flow-tools`);function d(e){return{content:[{type:`text`,text:e}]}}function f(e){return e instanceof Error?e.message:String(e)}function p(s,p){let m=p.sources?.[0]?.path??process.cwd(),h=r(p.stateDir??r(p.sources[0].path,`.aikit-state`),`flows`),g=r(h,`registry.json`),_=r(h,`state.json`);function v(e,t){return i(y(e),t).replaceAll(`\\`,`/`)}function y(e){if(e.sourceType===`builtin`){let r=n(e.installPath),a=i(m,`.github`,`flows`,r);if(t(a))return a.replaceAll(`\\`,`/`);let o=i(l(),`.copilot`,`flows`,r);return t(o)?o.replaceAll(`\\`,`/`):a.replaceAll(`\\`,`/`)}return e.installPath.replaceAll(`\\`,`/`)}async function b(){let{FlowRegistryManager:e,FlowStateMachine:t}=await import(`../../../flows/dist/index.js`);return{registry:new e(g),stateMachine:new t(_)}}let x=e(`flow_list`);s.registerTool(`flow_list`,{title:x.title,description:`List all installed flows and their steps`,annotations:x.annotations,inputSchema:{}},async()=>{try{let{registry:e,stateMachine:t}=await b(),n=e.list(),r=t.getStatus(),i={flows:n.map(e=>({name:e.name,version:e.version,source:e.source,sourceType:e.sourceType,format:e.format,steps:e.manifest.steps.map(e=>e.id)})),activeFlow:r.success&&r.data?{flow:r.data.flow,status:r.data.status,currentStep:r.data.currentStep}:null};return d(JSON.stringify(i,null,2))}catch(e){return u.error(`flow_list failed`,c(e)),d(`Error: ${f(e)}`)}});let S=e(`flow_info`);s.registerTool(`flow_info`,{title:S.title,description:`Show detailed information about a specific flow`,annotations:S.annotations,inputSchema:{name:a.string().describe(`Flow name to get info for`)}},async({name:e})=>{try{let{registry:t}=await b(),n=t.get(e);if(!n)return 
d(`Flow "${e}" not found. Use flow_list to see available flows.`);let r={name:n.name,version:n.version,description:n.manifest.description,source:n.source,sourceType:n.sourceType,format:n.format,installPath:y(n),registeredAt:n.registeredAt,updatedAt:n.updatedAt,steps:n.manifest.steps.map(e=>({id:e.id,name:e.name,skill:v(n,e.skill),produces:e.produces,requires:e.requires,description:e.description})),agents:n.manifest.agents,artifactsDir:n.manifest.artifacts_dir,install:n.manifest.install};return d(JSON.stringify(r,null,2))}catch(e){return u.error(`flow_info failed`,c(e)),d(`Error: ${f(e)}`)}});let C=e(`flow_start`);s.registerTool(`flow_start`,{title:C.title,description:`Start a flow. Sets the active flow and positions at the first step.`,annotations:C.annotations,inputSchema:{flow:a.string().describe(`Flow name to start (use flow_list to see options)`)}},async({flow:e})=>{try{let{registry:t,stateMachine:n}=await b(),r=t.get(e);if(!r)return d(`Flow "${e}" not found. Use flow_list to see available flows.`);let i=n.start(r.name,r.manifest);if(!i.success||!i.data)return d(`Cannot start: ${i.error}`);let a=i.data,o=r.manifest.steps.find(e=>e.id===a.currentStep),s={started:!0,flow:a.flow,currentStep:a.currentStep,currentStepSkill:r&&o?v(r,o.skill):null,currentStepDescription:o?.description??null,totalSteps:r.manifest.steps.length,stepSequence:r.manifest.steps.map(e=>e.id),artifactsDir:r.manifest.artifacts_dir};return d(JSON.stringify(s,null,2))}catch(e){return u.error(`flow_start failed`,c(e)),d(`Error: ${f(e)}`)}});let w=e(`flow_step`);s.registerTool(`flow_step`,{title:w.title,description:`Advance the active flow: complete current step and move to next, skip current step, or redo current step.`,annotations:w.annotations,inputSchema:{action:a.enum([`next`,`skip`,`redo`]).describe(`next: mark current step done and advance. skip: skip current step. 
redo: repeat current step.`)}},async({action:e})=>{try{let{registry:t,stateMachine:n}=await b(),r=n.load();if(!r)return d(`No active flow. Use flow_start first.`);let i=t.get(r.flow);if(!i)return d(`Flow "${r.flow}" not found in registry.`);let a=n.step(e,i.manifest);if(!a.success||!a.data)return d(`Cannot ${e}: ${a.error}`);let o=a.data,s=o.currentStep?i.manifest.steps.find(e=>e.id===o.currentStep):null,c={flow:o.flow,status:o.status,action:e,currentStep:o.currentStep,currentStepSkill:i&&s?v(i,s.skill):null,currentStepDescription:s?.description??null,completedSteps:o.completedSteps,skippedSteps:o.skippedSteps,totalSteps:i.manifest.steps.length,remaining:i.manifest.steps.filter(e=>!o.completedSteps.includes(e.id)&&!o.skippedSteps.includes(e.id)&&e.id!==o.currentStep).map(e=>e.id)};return d(JSON.stringify(c,null,2))}catch(e){return u.error(`flow_step failed`,c(e)),d(`Error: ${f(e)}`)}});let T=e(`flow_status`);s.registerTool(`flow_status`,{title:T.title,description:`Show the current flow execution state — which flow is active, current step, completed steps, and artifacts.`,annotations:T.annotations,inputSchema:{}},async()=>{try{let{registry:e,stateMachine:t}=await b(),n=t.getStatus();if(!n.success||!n.data)return d(`No active flow. 
Use flow_start to begin one, or flow_list to see available flows.`);let r=n.data,i=e.get(r.flow),a=i?.manifest.steps.find(e=>e.id===r.currentStep),o=i&&a?v(i,a.skill):null,s={flow:r.flow,status:r.status,currentStep:r.currentStep,currentStepSkill:o,skillPath:o,currentStepDescription:a?.description??null,completedSteps:r.completedSteps,skippedSteps:r.skippedSteps,artifacts:r.artifacts,startedAt:r.startedAt,updatedAt:r.updatedAt,totalSteps:i?.manifest.steps.length??0,progress:i?`${r.completedSteps.length+r.skippedSteps.length}/${i.manifest.steps.length}`:`unknown`};return d(JSON.stringify(s,null,2))}catch(e){return u.error(`flow_status failed`,c(e)),d(`Error: ${f(e)}`)}});let E=e(`flow_read_skill`);s.registerTool(`flow_read_skill`,{title:E.title===`flow_read_skill`?`Flow Read Skill`:E.title,description:`Read the skill or instruction content for a flow step. If step is omitted, reads the current step.`,annotations:E.title===`flow_read_skill`?{readOnlyHint:!0,idempotentHint:!0}:E.annotations,inputSchema:{step:a.string().optional().describe(`Step id or name to read. Defaults to the current step.`)}},async({step:e})=>{try{let{registry:t,stateMachine:n}=await b(),r=n.getStatus();if(!r.success||!r.data)return d(`No active flow. Use flow_start to begin one, or flow_list to see available flows.`);let i=r.data,a=t.get(i.flow);if(!a)return d(`Flow "${i.flow}" not found in registry.`);let s=e??i.currentStep;if(!s)return d(`No current step is available for the active flow.`);let c=a.manifest.steps.find(e=>e.id===s||e.name===s);return d(c?await o(v(a,c.skill),`utf-8`):`Step "${s}" not found in flow "${i.flow}".`)}catch(e){return u.error(`flow_read_skill failed`,c(e)),e instanceof Error&&`code`in e&&e.code===`ENOENT`?d(`Could not read skill file: ${e.message}`):d(`Error: ${f(e)}`)}});let D=e(`flow_reset`);s.registerTool(`flow_reset`,{title:D.title,description:`Reset the active flow, clearing all state. 
Use to start over or switch to a different flow.`,annotations:D.annotations,inputSchema:{}},async()=>{try{let{stateMachine:e}=await b(),t=e.reset();return t.success?d(`Flow state reset. Use flow_start to begin a new flow.`):d(`Reset failed: ${t.error}`)}catch(e){return u.error(`flow_reset failed`,c(e)),d(`Error: ${f(e)}`)}})}export{p as registerFlowTools};
1
+ import{getToolMeta as e}from"../tool-metadata.js";import{existsSync as t}from"node:fs";import{basename as n,join as r,resolve as i}from"node:path";import{z as a}from"zod";import{readFile as o}from"node:fs/promises";import{createLogger as s,serializeError as c}from"../../../core/dist/index.js";import{homedir as l}from"node:os";const u=s(`flow-tools`);function d(e){return{content:[{type:`text`,text:e}]}}function f(e){return e instanceof Error?e.message:String(e)}function p(s,p){let m=p.sources?.[0]?.path??process.cwd(),h=r(p.stateDir??r(p.sources[0].path,`.aikit-state`),`flows`),g=r(h,`registry.json`),_=r(h,`state.json`);function v(e,t){return i(y(e),t).replaceAll(`\\`,`/`)}function y(e){if(e.sourceType===`builtin`){let r=n(e.installPath),a=i(m,`.github`,`flows`,r);if(t(a))return a.replaceAll(`\\`,`/`);let o=i(l(),`.copilot`,`flows`,r);return t(o)?o.replaceAll(`\\`,`/`):a.replaceAll(`\\`,`/`)}return e.installPath.replaceAll(`\\`,`/`)}async function b(){let{FlowRegistryManager:e,FlowStateMachine:t}=await import(`../../../flows/dist/index.js`);return{registry:new e(g),stateMachine:new t(_)}}let x=e(`flow_list`);s.registerTool(`flow_list`,{title:x.title,description:`List all installed flows and their steps`,annotations:x.annotations,inputSchema:{}},async()=>{try{let{registry:e,stateMachine:t}=await b(),n=e.list(),r=t.getStatus(),i={flows:n.map(e=>({name:e.name,version:e.version,source:e.source,sourceType:e.sourceType,format:e.format,steps:e.manifest.steps.map(e=>e.id)})),activeFlow:r.success&&r.data?{flow:r.data.flow,status:r.data.status,currentStep:r.data.currentStep}:null};return d(JSON.stringify(i,null,2))}catch(e){return u.error(`flow_list failed`,c(e)),d(`Error: ${f(e)}`)}});let S=e(`flow_info`);s.registerTool(`flow_info`,{title:S.title,description:`Show detailed information about a specific flow`,annotations:S.annotations,inputSchema:{name:a.string().describe(`Flow name to get info for`)}},async({name:e})=>{try{let{registry:t}=await b(),n=t.get(e);if(!n)return 
d(`Flow "${e}" not found. Use flow_list to see available flows.`);let r={name:n.name,version:n.version,description:n.manifest.description,source:n.source,sourceType:n.sourceType,format:n.format,installPath:y(n),registeredAt:n.registeredAt,updatedAt:n.updatedAt,steps:n.manifest.steps.map(e=>({id:e.id,name:e.name,instruction:v(n,e.instruction),produces:e.produces,requires:e.requires,description:e.description})),agents:n.manifest.agents,artifactsDir:n.manifest.artifacts_dir,install:n.manifest.install};return d(JSON.stringify(r,null,2))}catch(e){return u.error(`flow_info failed`,c(e)),d(`Error: ${f(e)}`)}});let C=e(`flow_start`);s.registerTool(`flow_start`,{title:C.title,description:`Start a flow. Sets the active flow and positions at the first step.`,annotations:C.annotations,inputSchema:{flow:a.string().describe(`Flow name to start (use flow_list to see options)`)}},async({flow:e})=>{try{let{registry:t,stateMachine:n}=await b(),r=t.get(e);if(!r)return d(`Flow "${e}" not found. Use flow_list to see available flows.`);let i=n.start(r.name,r.manifest);if(!i.success||!i.data)return d(`Cannot start: ${i.error}`);let a=i.data,o=r.manifest.steps.find(e=>e.id===a.currentStep),s={started:!0,flow:a.flow,currentStep:a.currentStep,currentStepInstruction:r&&o?v(r,o.instruction):null,currentStepDescription:o?.description??null,totalSteps:r.manifest.steps.length,stepSequence:r.manifest.steps.map(e=>e.id),artifactsDir:r.manifest.artifacts_dir};return d(JSON.stringify(s,null,2))}catch(e){return u.error(`flow_start failed`,c(e)),d(`Error: ${f(e)}`)}});let w=e(`flow_step`);s.registerTool(`flow_step`,{title:w.title,description:`Advance the active flow: complete current step and move to next, skip current step, or redo current step.`,annotations:w.annotations,inputSchema:{action:a.enum([`next`,`skip`,`redo`]).describe(`next: mark current step done and advance. skip: skip current step. 
redo: repeat current step.`)}},async({action:e})=>{try{let{registry:t,stateMachine:n}=await b(),r=n.load();if(!r)return d(`No active flow. Use flow_start first.`);let i=t.get(r.flow);if(!i)return d(`Flow "${r.flow}" not found in registry.`);let a=n.step(e,i.manifest);if(!a.success||!a.data)return d(`Cannot ${e}: ${a.error}`);let o=a.data,s=o.currentStep?i.manifest.steps.find(e=>e.id===o.currentStep):null,c={flow:o.flow,status:o.status,action:e,currentStep:o.currentStep,currentStepInstruction:i&&s?v(i,s.instruction):null,currentStepDescription:s?.description??null,completedSteps:o.completedSteps,skippedSteps:o.skippedSteps,totalSteps:i.manifest.steps.length,remaining:i.manifest.steps.filter(e=>!o.completedSteps.includes(e.id)&&!o.skippedSteps.includes(e.id)&&e.id!==o.currentStep).map(e=>e.id)};return d(JSON.stringify(c,null,2))}catch(e){return u.error(`flow_step failed`,c(e)),d(`Error: ${f(e)}`)}});let T=e(`flow_status`);s.registerTool(`flow_status`,{title:T.title,description:`Show the current flow execution state — which flow is active, current step, completed steps, and artifacts.`,annotations:T.annotations,inputSchema:{}},async()=>{try{let{registry:e,stateMachine:t}=await b(),n=t.getStatus();if(!n.success||!n.data)return d(`No active flow. 
Use flow_start to begin one, or flow_list to see available flows.`);let r=n.data,i=e.get(r.flow),a=i?.manifest.steps.find(e=>e.id===r.currentStep),o=i&&a?v(i,a.instruction):null,s={flow:r.flow,status:r.status,currentStep:r.currentStep,currentStepInstruction:o,instructionPath:o,currentStepDescription:a?.description??null,completedSteps:r.completedSteps,skippedSteps:r.skippedSteps,artifacts:r.artifacts,startedAt:r.startedAt,updatedAt:r.updatedAt,totalSteps:i?.manifest.steps.length??0,progress:i?`${r.completedSteps.length+r.skippedSteps.length}/${i.manifest.steps.length}`:`unknown`};return d(JSON.stringify(s,null,2))}catch(e){return u.error(`flow_status failed`,c(e)),d(`Error: ${f(e)}`)}});let E=e(`flow_read_instruction`);s.registerTool(`flow_read_instruction`,{title:E.title===`flow_read_instruction`?`Flow Read Instruction`:E.title,description:`Read the instruction content for a flow step. If step is omitted, reads the current step.`,annotations:E.title===`flow_read_instruction`?{readOnlyHint:!0,idempotentHint:!0}:E.annotations,inputSchema:{step:a.string().optional().describe(`Step id or name to read. Defaults to the current step.`)}},async({step:e})=>{try{let{registry:t,stateMachine:n}=await b(),r=n.getStatus();if(!r.success||!r.data)return d(`No active flow. Use flow_start to begin one, or flow_list to see available flows.`);let i=r.data,a=t.get(i.flow);if(!a)return d(`Flow "${i.flow}" not found in registry.`);let s=e??i.currentStep;if(!s)return d(`No current step is available for the active flow.`);let c=a.manifest.steps.find(e=>e.id===s||e.name===s);return d(c?await o(v(a,c.instruction),`utf-8`):`Step "${s}" not found in flow "${i.flow}".`)}catch(e){return u.error(`flow_read_instruction failed`,c(e)),e instanceof Error&&`code`in e&&e.code===`ENOENT`?d(`Could not read instruction file: ${e.message}`):d(`Error: ${f(e)}`)}});let D=e(`flow_reset`);s.registerTool(`flow_reset`,{title:D.title,description:`Reset the active flow, clearing all state. 
Use to start over or switch to a different flow.`,annotations:D.annotations,inputSchema:{}},async()=>{try{let{stateMachine:e}=await b(),t=e.reset();return t.success?d(`Flow state reset. Use flow_start to begin a new flow.`):d(`Reset failed: ${t.error}`)}catch(e){return u.error(`flow_reset failed`,c(e)),d(`Error: ${f(e)}`)}})}export{p as registerFlowTools};
@@ -11,6 +11,8 @@ declare class LanceStore implements IKnowledgeStore {
11
11
  private _priorityQueue;
12
12
  private _normalQueue;
13
13
  private _ftsReady;
14
+ private _ftsRecoveryAttemptAt;
15
+ private static readonly FTS_RECOVERY_COOLDOWN_MS;
14
16
  private enqueueWrite;
15
17
  private _drain;
16
18
  constructor(options?: {
@@ -1 +1 @@
1
- import{EMBEDDING_DEFAULTS as e,SEARCH_DEFAULTS as t,STORE_DEFAULTS as n,createLogger as r,serializeError as i,sourceTypeContentTypes as a}from"../../core/dist/index.js";import{Index as o,connect as s}from"@lancedb/lancedb";function c(e){if(!e)return[];try{let t=JSON.parse(e);return Array.isArray(t)?t:[]}catch{return[]}}const l=/^[\w.\-/ ]+$/,u=r(`store`);function d(e,t){if(!l.test(e))throw Error(`Invalid ${t} filter value: contains disallowed characters`);return e.replace(/'/g,`''`)}var f=class{db=null;table=null;dbPath;tableName;_draining=!1;_priorityQueue=[];_normalQueue=[];_ftsReady=!1;enqueueWrite(e,t=!1){return new Promise((n,r)=>{let i=async()=>{try{n(await e())}catch(e){r(e)}};t?this._priorityQueue.push(i):this._normalQueue.push(i),this._drain()})}async _drain(){if(!this._draining){this._draining=!0;try{for(;this._priorityQueue.length>0||this._normalQueue.length>0;){let e=this._priorityQueue.shift()??this._normalQueue.shift();e&&await e()}}finally{this._draining=!1}}}constructor(e){this.dbPath=e?.path??n.path,this.tableName=e?.tableName??n.tableName}async initialize(){this.db=await s(this.dbPath),(await this.db.tableNames()).includes(this.tableName)&&(this.table=await this.db.openTable(this.tableName),await this.createFtsIndex())}async upsert(e,t){if(e.length!==0){if(e.length!==t.length)throw Error(`Record count (${e.length}) does not match vector count (${t.length})`);return this.enqueueWrite(()=>this._upsertImpl(e,t))}}async upsertInteractive(e,t){if(e.length!==0){if(e.length!==t.length)throw Error(`Record count (${e.length}) does not match vector count (${t.length})`);return this.enqueueWrite(()=>this._upsertImpl(e,t),!0)}}async _upsertImpl(e,t){let 
n=e.map((e,n)=>({id:e.id,vector:Array.from(t[n]),content:e.content,sourcePath:e.sourcePath,contentType:e.contentType,headingPath:e.headingPath??``,chunkIndex:e.chunkIndex,totalChunks:e.totalChunks,startLine:e.startLine,endLine:e.endLine,fileHash:e.fileHash,indexedAt:e.indexedAt,origin:e.origin,tags:JSON.stringify(e.tags),category:e.category??``,version:e.version}));if(this.table){let t=[...new Set(e.map(e=>e.sourcePath))];for(let e of t)try{await this.table.delete(`sourcePath = '${d(e,`sourcePath`)}'`)}catch{}await this.table.add(n)}else try{this.table=await this.db?.createTable(this.tableName,n)??null}catch(e){if(String(e).includes(`already exists`)&&this.db)this.table=await this.db.openTable(this.tableName),await this.table.add(n);else throw e}}async search(e,n){if(!this.table)return[];let r=n?.limit??t.maxResults,i=n?.minScore??t.minScore,a=this.table.search(e).limit(r*2),o=this.buildFilterString(n);return o&&(a=a.where(o)),(await a.toArray()).map(e=>({record:this.fromLanceRecord(e),score:1-(e._distance??1)})).filter(e=>e.score>=i).slice(0,r)}async createFtsIndex(){return this.enqueueWrite(()=>this._createFtsIndexImpl())}async _createFtsIndexImpl(){if(this.table)try{await this.table.createIndex(`content`,{config:o.fts({withPosition:!0}),replace:!0}),this._ftsReady=!0,u.info(`FTS index created/updated`,{column:`content`})}catch(e){u.warn(`FTS index creation failed`,i(e))}}async ftsSearch(e,n){if(!this.table||!this._ftsReady)return[];let r=n?.limit??t.maxResults;try{let t=this.table.search(e).limit(r*2),i=this.buildFilterString(n);return i&&(t=t.where(i)),(await t.toArray()).map(e=>({record:this.fromLanceRecord(e),score:e._score??e._relevance_score??0}))}catch(e){return(e instanceof Error?e.message:String(e)).includes(`INVERTED index`)?(u.debug(`FTS search skipped — index not yet available`),this._ftsReady=!1):u.warn(`FTS search failed`,i(e)),[]}}async getById(e){if(!this.table)return null;let t=await this.table.query().where(`id = 
'${d(e,`id`)}'`).limit(1).toArray();return t.length===0?null:this.fromLanceRecord(t[0])}async deleteBySourcePath(e){return this.enqueueWrite(()=>this._deleteBySourcePathImpl(e))}async _deleteBySourcePathImpl(e){if(!this.table)return 0;let t=await this.getBySourcePath(e);return t.length===0?0:(await this.table.delete(`sourcePath = '${d(e,`sourcePath`)}'`),t.length)}async deleteById(e){return this.enqueueWrite(()=>this._deleteByIdImpl(e))}async deleteByIdInteractive(e){return this.enqueueWrite(()=>this._deleteByIdImpl(e),!0)}async _deleteByIdImpl(e){return!this.table||!await this.getById(e)?!1:(await this.table.delete(`id = '${d(e,`id`)}'`),!0)}async getBySourcePath(e){return this.table?(await this.table.query().where(`sourcePath = '${d(e,`sourcePath`)}'`).limit(1e3).toArray()).map(e=>this.fromLanceRecord(e)):[]}async getStats(){if(!this.table)return{totalRecords:0,totalFiles:0,contentTypeBreakdown:{},lastIndexedAt:null,storeBackend:`lancedb`,embeddingModel:e.model};let t=await this.table.countRows(),n=await this.table.query().select([`sourcePath`,`contentType`,`indexedAt`]).limit(1e5).toArray(),r={},i=new Set,a=null;for(let e of n){let t=e;r[t.contentType]=(r[t.contentType]??0)+1,i.add(t.sourcePath),(!a||t.indexedAt>a)&&(a=t.indexedAt)}return{totalRecords:t,totalFiles:i.size,contentTypeBreakdown:r,lastIndexedAt:a,storeBackend:`lancedb`,embeddingModel:e.model}}async listSourcePaths(){if(!this.table)return[];let e=await this.table.query().select([`sourcePath`]).limit(1e5).toArray();return[...new Set(e.map(e=>e.sourcePath))]}async dropTable(){return this.enqueueWrite(()=>this._dropTableImpl())}async _dropTableImpl(){if(this.db&&(await this.db.tableNames()).includes(this.tableName))for(let e=1;e<=3;e++)try{await this.db.dropTable(this.tableName);break}catch(t){if(e===3)throw t;let n=e*500;u.warn(`dropTable attempt failed, retrying`,{attempt:e,delayMs:n}),await new Promise(e=>setTimeout(e,n))}this.table=null}async close(){try{this.db&&typeof 
this.db.close==`function`&&await this.db.close()}catch{}this.table=null,this.db=null}buildFilterString(e){let t=[];if(e?.contentType&&t.push(`contentType = '${d(e.contentType,`contentType`)}'`),e?.sourceType){let n=a(e.sourceType);if(n.length>0){let e=n.map(e=>`'${d(e,`sourceType`)}'`).join(`, `);t.push(`contentType IN (${e})`)}}if(e?.origin&&t.push(`origin = '${d(e.origin,`origin`)}'`),e?.category&&t.push(`category = '${d(e.category,`category`)}'`),e?.tags&&e.tags.length>0){let n=e.tags.map(e=>`tags LIKE '%${d(e,`tag`)}%'`);t.push(`(${n.join(` OR `)})`)}return t.length>0?t.join(` AND `):null}fromLanceRecord(e){return{id:e.id,content:e.content,sourcePath:e.sourcePath,contentType:e.contentType,headingPath:e.headingPath||void 0,chunkIndex:e.chunkIndex,totalChunks:e.totalChunks,startLine:e.startLine,endLine:e.endLine,fileHash:e.fileHash,indexedAt:e.indexedAt,origin:e.origin,tags:c(e.tags),category:e.category||void 0,version:e.version}}};export{f as LanceStore};
1
+ import{EMBEDDING_DEFAULTS as e,SEARCH_DEFAULTS as t,STORE_DEFAULTS as n,createLogger as r,serializeError as i,sourceTypeContentTypes as a}from"../../core/dist/index.js";import{Index as o,connect as s}from"@lancedb/lancedb";function c(e){if(!e)return[];try{let t=JSON.parse(e);return Array.isArray(t)?t:[]}catch{return[]}}const l=/^[\w.\-/ ]+$/,u=r(`store`);function d(e,t){if(!l.test(e))throw Error(`Invalid ${t} filter value: contains disallowed characters`);return e.replace(/'/g,`''`)}var f=class r{db=null;table=null;dbPath;tableName;_draining=!1;_priorityQueue=[];_normalQueue=[];_ftsReady=!1;_ftsRecoveryAttemptAt=0;static FTS_RECOVERY_COOLDOWN_MS=300*1e3;enqueueWrite(e,t=!1){return new Promise((n,r)=>{let i=async()=>{try{n(await e())}catch(e){r(e)}};t?this._priorityQueue.push(i):this._normalQueue.push(i),this._drain()})}async _drain(){if(!this._draining){this._draining=!0;try{for(;this._priorityQueue.length>0||this._normalQueue.length>0;){let e=this._priorityQueue.shift()??this._normalQueue.shift();e&&await e()}}finally{this._draining=!1}}}constructor(e){this.dbPath=e?.path??n.path,this.tableName=e?.tableName??n.tableName}async initialize(){this.db=await s(this.dbPath),(await this.db.tableNames()).includes(this.tableName)&&(this.table=await this.db.openTable(this.tableName),await this.createFtsIndex())}async upsert(e,t){if(e.length!==0){if(e.length!==t.length)throw Error(`Record count (${e.length}) does not match vector count (${t.length})`);return this.enqueueWrite(()=>this._upsertImpl(e,t))}}async upsertInteractive(e,t){if(e.length!==0){if(e.length!==t.length)throw Error(`Record count (${e.length}) does not match vector count (${t.length})`);return this.enqueueWrite(()=>this._upsertImpl(e,t),!0)}}async _upsertImpl(e,t){let 
n=e.map((e,n)=>({id:e.id,vector:Array.from(t[n]),content:e.content,sourcePath:e.sourcePath,contentType:e.contentType,headingPath:e.headingPath??``,chunkIndex:e.chunkIndex,totalChunks:e.totalChunks,startLine:e.startLine,endLine:e.endLine,fileHash:e.fileHash,indexedAt:e.indexedAt,origin:e.origin,tags:JSON.stringify(e.tags),category:e.category??``,version:e.version}));if(this.table){let t=[...new Set(e.map(e=>e.sourcePath))];for(let e of t)try{await this.table.delete(`sourcePath = '${d(e,`sourcePath`)}'`)}catch{}await this.table.add(n)}else try{this.table=await this.db?.createTable(this.tableName,n)??null}catch(e){if(String(e).includes(`already exists`)&&this.db)this.table=await this.db.openTable(this.tableName),await this.table.add(n);else throw e}}async search(e,n){if(!this.table)return[];let r=n?.limit??t.maxResults,i=n?.minScore??t.minScore,a=this.table.search(e).limit(r*2),o=this.buildFilterString(n);return o&&(a=a.where(o)),(await a.toArray()).map(e=>({record:this.fromLanceRecord(e),score:1-(e._distance??1)})).filter(e=>e.score>=i).slice(0,r)}async createFtsIndex(){return this.enqueueWrite(()=>this._createFtsIndexImpl())}async _createFtsIndexImpl(){if(this.table)try{await this.table.createIndex(`content`,{config:o.fts({withPosition:!0}),replace:!0}),this._ftsReady=!0,this._ftsRecoveryAttemptAt=0,u.info(`FTS index created/updated`,{column:`content`})}catch(e){u.warn(`FTS index creation failed`,i(e))}}async ftsSearch(e,n){if(!this.table)return[];if(!this._ftsReady){let e=Date.now();if(e-this._ftsRecoveryAttemptAt<r.FTS_RECOVERY_COOLDOWN_MS)return[];this._ftsRecoveryAttemptAt=e;try{await this.createFtsIndex()}catch{return[]}if(!this._ftsReady)return[]}let a=n?.limit??t.maxResults;try{let t=this.table.search(e).limit(a*2),r=this.buildFilterString(n);return r&&(t=t.where(r)),(await t.toArray()).map(e=>({record:this.fromLanceRecord(e),score:e._score??e._relevance_score??0}))}catch(e){return(e instanceof Error?e.message:String(e)).includes(`INVERTED 
index`)?(u.debug(`FTS search skipped — index not yet available`),this._ftsReady=!1):u.warn(`FTS search failed`,i(e)),[]}}async getById(e){if(!this.table)return null;let t=await this.table.query().where(`id = '${d(e,`id`)}'`).limit(1).toArray();return t.length===0?null:this.fromLanceRecord(t[0])}async deleteBySourcePath(e){return this.enqueueWrite(()=>this._deleteBySourcePathImpl(e))}async _deleteBySourcePathImpl(e){if(!this.table)return 0;let t=await this.getBySourcePath(e);return t.length===0?0:(await this.table.delete(`sourcePath = '${d(e,`sourcePath`)}'`),t.length)}async deleteById(e){return this.enqueueWrite(()=>this._deleteByIdImpl(e))}async deleteByIdInteractive(e){return this.enqueueWrite(()=>this._deleteByIdImpl(e),!0)}async _deleteByIdImpl(e){return!this.table||!await this.getById(e)?!1:(await this.table.delete(`id = '${d(e,`id`)}'`),!0)}async getBySourcePath(e){return this.table?(await this.table.query().where(`sourcePath = '${d(e,`sourcePath`)}'`).limit(1e3).toArray()).map(e=>this.fromLanceRecord(e)):[]}async getStats(){if(!this.table)return{totalRecords:0,totalFiles:0,contentTypeBreakdown:{},lastIndexedAt:null,storeBackend:`lancedb`,embeddingModel:e.model};let t=await this.table.countRows(),n=await this.table.query().select([`sourcePath`,`contentType`,`indexedAt`]).limit(1e5).toArray(),r={},i=new Set,a=null;for(let e of n){let t=e;r[t.contentType]=(r[t.contentType]??0)+1,i.add(t.sourcePath),(!a||t.indexedAt>a)&&(a=t.indexedAt)}return{totalRecords:t,totalFiles:i.size,contentTypeBreakdown:r,lastIndexedAt:a,storeBackend:`lancedb`,embeddingModel:e.model}}async listSourcePaths(){if(!this.table)return[];let e=await this.table.query().select([`sourcePath`]).limit(1e5).toArray();return[...new Set(e.map(e=>e.sourcePath))]}async dropTable(){return this.enqueueWrite(()=>this._dropTableImpl())}async _dropTableImpl(){if(this.db&&(await this.db.tableNames()).includes(this.tableName))for(let e=1;e<=3;e++)try{await 
this.db.dropTable(this.tableName);break}catch(t){if(e===3)throw t;let n=e*500;u.warn(`dropTable attempt failed, retrying`,{attempt:e,delayMs:n}),await new Promise(e=>setTimeout(e,n))}this.table=null}async close(){try{this.db&&typeof this.db.close==`function`&&await this.db.close()}catch{}this.table=null,this.db=null}buildFilterString(e){let t=[];if(e?.contentType&&t.push(`contentType = '${d(e.contentType,`contentType`)}'`),e?.sourceType){let n=a(e.sourceType);if(n.length>0){let e=n.map(e=>`'${d(e,`sourceType`)}'`).join(`, `);t.push(`contentType IN (${e})`)}}if(e?.origin&&t.push(`origin = '${d(e.origin,`origin`)}'`),e?.category&&t.push(`category = '${d(e.category,`category`)}'`),e?.tags&&e.tags.length>0){let n=e.tags.map(e=>`tags LIKE '%${d(e,`tag`)}%'`);t.push(`(${n.join(` OR `)})`)}return t.length>0?t.join(` AND `):null}fromLanceRecord(e){return{id:e.id,content:e.content,sourcePath:e.sourcePath,contentType:e.contentType,headingPath:e.headingPath||void 0,chunkIndex:e.chunkIndex,totalChunks:e.totalChunks,startLine:e.startLine,endLine:e.endLine,fileHash:e.fileHash,indexedAt:e.indexedAt,origin:e.origin,tags:c(e.tags),category:e.category||void 0,version:e.version}}};export{f as LanceStore};
@@ -39,8 +39,8 @@ ${agentTable}
39
39
 
40
40
  1. \`flow_status\` — check for an active flow from a previous session
41
41
  2. **If active flow exists:**
42
- - Note current step name and skill path
43
- - Read the current step skill with \`flow_read_skill\`
42
+ - Note current step name and instruction path
43
+ - Read the current step instruction with \`flow_read_instruction\`
44
44
  - Follow its instructions
45
45
  - When complete: \`flow_step({ action: 'next' })\`
46
46
  3. **If NO active flow:**
@@ -63,8 +63,8 @@ ${agentTable}
63
63
 
64
64
  For EACH step in the active flow:
65
65
 
66
- 1. \`flow_read_skill\` — read the current step's SKILL.md
67
- 2. Follow the skill's instructions — delegate work to the appropriate agents
66
+ 1. \`flow_read_instruction\` — read the current step's README.md
67
+ 2. Follow the step's instructions — delegate work to the appropriate agents
68
68
  3. Apply **Orchestrator Protocols** (PRE-DISPATCH GATE, FORGE, review cycle) during execution
69
69
  4. When the step is complete and results are approved:
70
70
  - \`flow_step({ action: 'next' })\` to advance
@@ -139,7 +139,7 @@ Batch 2 (after batch 1):
139
139
  | \`flow_step\` | Advance: next, skip, or redo current step |
140
140
  | \`flow_status\` | Check current execution state |
141
141
  | \`flow_reset\` | Clear flow state to start over |
142
- | \`flow_read_skill\` | Read the skill content for the current step |
142
+ | \`flow_read_instruction\` | Read the instruction content for the current step |
143
143
 
144
144
  ## Emergency: STOP → ASSESS → CONTAIN → RECOVER → DOCUMENT
145
145
 
@@ -231,7 +231,7 @@ Before every tool call, verify:
231
231
  ## Flows
232
232
 
233
233
  This project uses aikit's pluggable flow system. Check flow status with the \`flow_status\` MCP tool.
234
- If a flow is active, follow the current step's skill instructions. Advance with \`flow_step({ action: 'next' })\`.
234
+ If a flow is active, follow the current step's instructions. Advance with \`flow_step({ action: 'next' })\`.
235
235
  Use \`flow_list\` to see available flows and \`flow_start\` to begin one.
236
236
  `,
237
237
 
@@ -271,8 +271,8 @@ The Planner is typically activated by the Orchestrator as part of a flow step (e
271
271
 
272
272
  **When activated as part of a flow:**
273
273
  1. \`flow_status\` — check current step context and which flow is active
274
- 2. \`flow_read_skill\` — read the current step's SKILL.md for specific instructions
275
- 3. Follow the skill's instructions as the primary guide, applying Planner methodology on top
274
+ 2. \`flow_read_instruction\` — read the current step's README.md for specific instructions
275
+ 3. Follow the step's instructions as the primary guide, applying Planner methodology on top
276
276
  4. Read the flow's README.md for overall context on how the flow works
277
277
  5. Produce required artifacts (as specified by the flow step's \`produces\` field)
278
278
  6. When complete, report status to Orchestrator: \`DONE\` | \`DONE_WITH_CONCERNS\` | \`NEEDS_CONTEXT\` | \`BLOCKED\`
@@ -20,6 +20,7 @@ export const PLUGINS = {
20
20
  description: 'Brainstorming & design exploration workflow',
21
21
  source: 'scaffold/general/skills/brainstorming/SKILL.md',
22
22
  required: true,
23
+ sidecars: ['scaffold/general/skills/brainstorming/spec-document-reviewer-prompt.md'],
23
24
  },
24
25
 
25
26
  'multi-agents-development': {
@@ -35,4 +36,95 @@ export const PLUGINS = {
35
36
  'scaffold/general/skills/multi-agents-development/parallel-dispatch-example.md',
36
37
  ],
37
38
  },
39
+
40
+ 'adr-skill': {
41
+ description:
42
+ 'Architecture Decision Records — create, maintain, and review ADRs for significant technical decisions',
43
+ source: 'scaffold/general/skills/adr-skill/SKILL.md',
44
+ required: true,
45
+ sidecars: [
46
+ 'scaffold/general/skills/adr-skill/assets/templates/adr-madr.md',
47
+ 'scaffold/general/skills/adr-skill/assets/templates/adr-readme.md',
48
+ 'scaffold/general/skills/adr-skill/assets/templates/adr-simple.md',
49
+ 'scaffold/general/skills/adr-skill/references/adr-conventions.md',
50
+ 'scaffold/general/skills/adr-skill/references/examples.md',
51
+ 'scaffold/general/skills/adr-skill/references/review-checklist.md',
52
+ 'scaffold/general/skills/adr-skill/references/template-variants.md',
53
+ 'scaffold/general/skills/adr-skill/scripts/bootstrap_adr.js',
54
+ 'scaffold/general/skills/adr-skill/scripts/new_adr.js',
55
+ 'scaffold/general/skills/adr-skill/scripts/set_adr_status.js',
56
+ ],
57
+ },
58
+
59
+ 'c4-architecture': {
60
+ description:
61
+ 'C4 model architecture diagrams using Mermaid — system context, container, component, and deployment views',
62
+ source: 'scaffold/general/skills/c4-architecture/SKILL.md',
63
+ required: true,
64
+ sidecars: [
65
+ 'scaffold/general/skills/c4-architecture/references/advanced-patterns.md',
66
+ 'scaffold/general/skills/c4-architecture/references/c4-syntax.md',
67
+ 'scaffold/general/skills/c4-architecture/references/common-mistakes.md',
68
+ ],
69
+ },
70
+
71
+ 'frontend-design': {
72
+ description:
73
+ 'Frontend design system — visual design thinking, typography, color, layout, motion, accessibility, and anti-pattern detection',
74
+ source: 'scaffold/general/skills/frontend-design/SKILL.md',
75
+ required: false,
76
+ },
77
+
78
+ 'lesson-learned': {
79
+ description: 'Extract engineering lessons from recent code changes via git history analysis',
80
+ source: 'scaffold/general/skills/lesson-learned/SKILL.md',
81
+ required: true,
82
+ sidecars: [
83
+ 'scaffold/general/skills/lesson-learned/references/anti-patterns.md',
84
+ 'scaffold/general/skills/lesson-learned/references/se-principles.md',
85
+ ],
86
+ },
87
+
88
+ present: {
89
+ description:
90
+ 'Rich interactive dashboards, charts, tables, timelines, and data visualizations via the present MCP tool',
91
+ source: 'scaffold/general/skills/present/SKILL.md',
92
+ required: true,
93
+ },
94
+
95
+ react: {
96
+ description:
97
+ 'React development patterns — component architecture, React 19 APIs, Server Components, TypeScript integration',
98
+ source: 'scaffold/general/skills/react/SKILL.md',
99
+ required: false,
100
+ },
101
+
102
+ 'requirements-clarity': {
103
+ description:
104
+ 'Clarify ambiguous requirements through focused dialogue — score 0-100 until ≥90 before implementation',
105
+ source: 'scaffold/general/skills/requirements-clarity/SKILL.md',
106
+ required: true,
107
+ },
108
+
109
+ 'session-handoff': {
110
+ description:
111
+ 'Comprehensive handoff documents for seamless AI agent session transfers and context preservation',
112
+ source: 'scaffold/general/skills/session-handoff/SKILL.md',
113
+ required: true,
114
+ sidecars: [
115
+ 'scaffold/general/skills/session-handoff/references/handoff-template.md',
116
+ 'scaffold/general/skills/session-handoff/references/resume-checklist.md',
117
+ 'scaffold/general/skills/session-handoff/scripts/check_staleness.js',
118
+ 'scaffold/general/skills/session-handoff/scripts/create_handoff.js',
119
+ 'scaffold/general/skills/session-handoff/scripts/list_handoffs.js',
120
+ 'scaffold/general/skills/session-handoff/scripts/validate_handoff.js',
121
+ ],
122
+ },
123
+
124
+ typescript: {
125
+ description:
126
+ 'TypeScript development patterns — type system, compiler config, advanced types, async patterns, module organization',
127
+ source: 'scaffold/general/skills/typescript/SKILL.md',
128
+ required: false,
129
+ },
38
130
  };
@@ -68,7 +68,7 @@ You may be invoked in two modes:
68
68
 
69
69
  \`\`\`
70
70
  flow_status({}) # Check/resume active flow FIRST
71
- # If flow active → flow_read_skill({ step }) → follow skill instructions
71
+ # If flow active → flow_read_instruction({ step }) → follow step instructions
72
72
  status({}) # Check AI Kit health + onboard state
73
73
  # If onboard not run → onboard({ path: "." }) # First-time codebase analysis
74
74
  flow_list({}) # See available flows
@@ -81,7 +81,7 @@ search({ query: "SESSION CHECKPOINT", origin: "curated" }) # Resume prior wo
81
81
 
82
82
  | Category | Tools | Purpose |
83
83
  |----------|-------|---------|
84
- | Flows | \`flow_list\`, \`flow_info\`, \`flow_start\`, \`flow_step\`, \`flow_status\`, \`flow_read_skill\`, \`flow_reset\` | Structured multi-step workflows |
84
+ | Flows | \`flow_list\`, \`flow_info\`, \`flow_start\`, \`flow_step\`, \`flow_status\`, \`flow_read_instruction\`, \`flow_reset\` | Structured multi-step workflows |
85
85
 
86
86
  ---
87
87
 
@@ -6,16 +6,16 @@ Full development flow for **new features, API design, and architecture changes**
6
6
 
7
7
  | # | Step | Skill | Produces | Requires | Agents |
8
8
  |---|------|-------|----------|----------|--------|
9
- | 1 | **Design Gate** | `skills/design/SKILL.md` | `design-decisions.md` | — | Researcher-Alpha/Beta/Gamma/Delta |
10
- | 2 | **Specification** | `skills/spec/SKILL.md` | `spec.md` | `design-decisions.md` | Researcher-Alpha |
11
- | 3 | **Planning** | `skills/plan/SKILL.md` | `plan.md` | `spec.md` | Planner, Explorer |
12
- | 4 | **Task Breakdown** | `skills/task/SKILL.md` | `tasks.md` | `plan.md` | Planner, Architect-Reviewer-Alpha |
13
- | 5 | **Execution** | `skills/execute/SKILL.md` | `progress.md` | `tasks.md` | Orchestrator, Implementer, Frontend, Refactor |
14
- | 6 | **Verification** | `skills/verify/SKILL.md` | `verify-report.md` | `progress.md` | Code-Reviewer-Alpha/Beta, Architect-Reviewer-Alpha/Beta, Security |
9
+ | 1 | **Design Gate** | `steps/design/README.md` | `design-decisions.md` | — | Researcher-Alpha/Beta/Gamma/Delta |
10
+ | 2 | **Specification** | `steps/spec/README.md` | `spec.md` | `design-decisions.md` | Researcher-Alpha |
11
+ | 3 | **Planning** | `steps/plan/README.md` | `plan.md` | `spec.md` | Planner, Explorer |
12
+ | 4 | **Task Breakdown** | `steps/task/README.md` | `tasks.md` | `plan.md` | Planner, Architect-Reviewer-Alpha |
13
+ | 5 | **Execution** | `steps/execute/README.md` | `progress.md` | `tasks.md` | Orchestrator, Implementer, Frontend, Refactor |
14
+ | 6 | **Verification** | `steps/verify/README.md` | `verify-report.md` | `progress.md` | Code-Reviewer-Alpha/Beta, Architect-Reviewer-Alpha/Beta, Security |
15
15
 
16
16
  ## How It Works
17
17
 
18
- Each step has a **SKILL.md** file that contains the detailed instructions for the agent(s) executing that step. The Orchestrator reads the SKILL.md via `flow_read_skill` and delegates work accordingly.
18
+ Each step has a **README.md** file that contains the detailed instructions for the agent(s) executing that step. The Orchestrator reads the README.md via `flow_read_instruction` and delegates work accordingly.
19
19
 
20
20
  ### Step 1: Design Gate
21
21
  - Full brainstorming session for new features and architectural changes
@@ -23,7 +23,7 @@ Each step has a **SKILL.md** file that contains the detailed instructions for th
23
23
  - Parallel 4-researcher decision protocol for non-trivial technical decisions
24
24
  - ADR generation for critical-tier tasks
25
25
  - **Mandatory user stop** before proceeding — design decisions must be approved
26
- - Read `skills/design/SKILL.md` for the full protocol
26
+ - Read `steps/design/README.md` for the full protocol
27
27
 
28
28
  ### Step 2: Specification
29
29
  - Elicit requirements from the user, clarify scope
@@ -58,8 +58,8 @@ Each step has a **SKILL.md** file that contains the detailed instructions for th
58
58
 
59
59
  When the Orchestrator activates a step:
60
60
 
61
- 1. **Read the skill first** — `flow_read_skill` returns the SKILL.md for the current step
62
- 2. **Follow skill instructions** — the SKILL.md is the primary guide for what to do
61
+ 1. **Read the instruction first** — `flow_read_instruction` returns the README.md for the current step
62
+ 2. **Follow step instructions** — the README.md is the primary guide for what to do
63
63
  3. **Delegate to listed agents** — each step lists which agents are appropriate
64
64
  4. **Produce the required artifact** — the step's `produces` field specifies what file to create in the artifacts directory
65
65
  5. **Check dependencies** — the step's `requires` field lists artifacts from previous steps that must exist
@@ -6,7 +6,7 @@
6
6
  {
7
7
  "id": "design",
8
8
  "name": "Design Gate",
9
- "skill": "skills/design/SKILL.md",
9
+ "instruction": "steps/design/README.md",
10
10
  "produces": ["design-decisions.md"],
11
11
  "requires": [],
12
12
  "agents": ["Researcher-Alpha", "Researcher-Beta", "Researcher-Gamma", "Researcher-Delta"],
@@ -15,7 +15,7 @@
15
15
  {
16
16
  "id": "spec",
17
17
  "name": "Specification",
18
- "skill": "skills/spec/SKILL.md",
18
+ "instruction": "steps/spec/README.md",
19
19
  "produces": ["spec.md"],
20
20
  "requires": ["design-decisions.md"],
21
21
  "agents": ["Researcher-Alpha"],
@@ -24,7 +24,7 @@
24
24
  {
25
25
  "id": "plan",
26
26
  "name": "Planning",
27
- "skill": "skills/plan/SKILL.md",
27
+ "instruction": "steps/plan/README.md",
28
28
  "produces": ["plan.md"],
29
29
  "requires": ["spec.md"],
30
30
  "agents": ["Planner", "Explorer"],
@@ -33,7 +33,7 @@
33
33
  {
34
34
  "id": "task",
35
35
  "name": "Task Breakdown",
36
- "skill": "skills/task/SKILL.md",
36
+ "instruction": "steps/task/README.md",
37
37
  "produces": ["tasks.md"],
38
38
  "requires": ["plan.md"],
39
39
  "agents": ["Planner", "Architect-Reviewer-Alpha"],
@@ -42,7 +42,7 @@
42
42
  {
43
43
  "id": "execute",
44
44
  "name": "Execution",
45
- "skill": "skills/execute/SKILL.md",
45
+ "instruction": "steps/execute/README.md",
46
46
  "produces": ["progress.md"],
47
47
  "requires": ["tasks.md"],
48
48
  "agents": ["Orchestrator", "Implementer", "Frontend", "Refactor"],
@@ -51,7 +51,7 @@
51
51
  {
52
52
  "id": "verify",
53
53
  "name": "Verification",
54
- "skill": "skills/verify/SKILL.md",
54
+ "instruction": "steps/verify/README.md",
55
55
  "produces": ["verify-report.md"],
56
56
  "requires": ["progress.md"],
57
57
  "agents": [
@@ -132,3 +132,33 @@ After user approves:
132
132
  - `Researcher-Beta` — Trade-offs and edge cases
133
133
  - `Researcher-Gamma` — Cross-domain patterns
134
134
  - `Researcher-Delta` — Feasibility and performance
135
+
136
+ ## Foundation Integration
137
+
138
+ Load these skills BEFORE executing this step:
139
+
140
+ | Skill | Purpose | When |
141
+ |-------|---------|------|
142
+ | `aikit` | Core MCP tools — search, analyze, remember, validate | Always (auto-loaded) |
143
+ | `present` | Rich rendering for any structured output — assessments, reports, comparisons, reviews, status boards, tables, charts, and all artifact content | Use for ANY output that benefits from rich rendering, not just dashboards |
144
+ | `multi-agents-development` | Dispatch templates, task decomposition, review pipeline patterns | Before dispatching any subagent |
145
+ | `brainstorming` | Structured ideation for design/creative decisions | Before any design choice or new feature exploration |
146
+ | `c4-architecture` | C4 model diagrams showing system structure changes | When visualizing proposed architecture |
147
+ | `adr-skill` | Architecture Decision Records for non-trivial decisions | Critical tier — document architecture decisions |
148
+
149
+ ### Presentation Rules
150
+ - Use `present` for **any output** that benefits from rich rendering — not limited to dashboards
151
+ - Assessments, reports, comparisons, reviews, status boards → `present({ format: "html" })`
152
+ - Tables, charts, progress tracking, code review findings → always present
153
+ - Artifact content and summaries → present with structured layout
154
+ - Only use plain text for brief confirmations and simple questions
155
+
156
+ ## Knowledge Capture
157
+
158
+ Before completing this step, persist important findings using `remember()`:
159
+
160
+ - **Design decisions**: Chosen approach and alternatives considered with trade-offs
161
+ - **Architecture patterns**: New patterns introduced or existing patterns that must be followed
162
+ - **Constraints discovered**: Technical limitations, compatibility requirements, or performance boundaries
163
+
164
+ **Every step produces knowledge worth preserving.** If you discovered something that would help a future session, call `remember()` now.
@@ -5,6 +5,14 @@ description: Implement all tasks from the task breakdown, dispatching agents in
5
5
 
6
6
  # Execution
7
7
 
8
+ ## Prerequisites Check
9
+
10
+ Before starting this step, verify:
11
+ - [ ] Task breakdown approved with valid task graph
12
+ - [ ] `.spec/<slug>/tasks.md` exists with defined batches and dependencies
13
+
14
+ If prerequisites are NOT met → **backtrack to the task step** (run `flow_step({ action: 'redo' })` on the previous step)
15
+
8
16
  ## Purpose
9
17
 
10
18
  Execute all tasks from the breakdown, dispatching implementation agents in batches for maximum parallelism while maintaining correctness.
@@ -86,6 +94,7 @@ Load these skills BEFORE executing this step:
86
94
  | `present` | Rich rendering for any structured output — assessments, reports, comparisons, reviews, status boards, tables, charts, and all artifact content | Use for ANY output that benefits from rich rendering, not just dashboards |
87
95
  | `multi-agents-development` | Dispatch templates, task decomposition, review pipeline patterns | Before dispatching any subagent |
88
96
  | `brainstorming` | Structured ideation for design/creative decisions | Before any design choice or new feature exploration |
97
+ | `session-handoff` | Context preservation for long-running execution phases | When execution spans multiple sessions or the context window is filling up |
89
98
 
90
99
  ### Presentation Rules
91
100
  - Use `present` for **any output** that benefits from rich rendering — not limited to dashboards
@@ -122,3 +131,13 @@ Follow the `multi-agents-development` skill patterns for dispatch:
122
131
  - [ ] `test_run({})` passes
123
132
  - [ ] No blocked items remaining
124
133
  - [ ] `.spec/<slug>/progress.md` written
134
+
135
+ ## Knowledge Capture
136
+
137
+ Before completing this step, persist important findings using `remember()`:
138
+
139
+ - **Implementation decisions**: Why specific approaches were chosen over alternatives
140
+ - **Patterns established**: New conventions or patterns that future code should follow
141
+ - **Gotchas encountered**: Edge cases, workarounds, or non-obvious behaviors discovered during execution
142
+
143
+ **Every step produces knowledge worth preserving.** If you discovered something that would help a future session, call `remember()` now.