@vpxa/aikit 0.1.18 → 0.1.20
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +1 -1
- package/packages/cli/dist/commands/environment.js +1 -1
- package/packages/embeddings/dist/onnx-embedder.js +1 -1
- package/packages/flows/dist/builtins.js +1 -1
- package/packages/server/dist/server.js +1 -1
- package/packages/server/dist/tools/present/tool.d.ts +1 -1
- package/packages/server/dist/tools/present/tool.js +1 -1
- package/packages/tools/dist/delegate.js +1 -1
- package/packages/tools/dist/web-fetch.js +1 -1
- package/scaffold/adapters/claude-code.mjs +4 -22
- package/scaffold/definitions/bodies.mjs +72 -60
- package/scaffold/flows/aikit-advanced/README.md +70 -0
- package/scaffold/flows/aikit-advanced/flow.json +10 -1
- package/scaffold/flows/aikit-advanced/skills/design/SKILL.md +134 -0
- package/scaffold/flows/aikit-basic/README.md +51 -0
- package/scaffold/flows/aikit-basic/flow.json +10 -1
- package/scaffold/flows/aikit-basic/skills/design/SKILL.md +75 -0
- package/scaffold/general/agents/Orchestrator.agent.md +59 -51
- package/scaffold/general/agents/Planner.agent.md +12 -8
package/package.json
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
import{extractNumFlag as e,extractStrFlag as t,printManagedProcess as n,readStdin as r}from"../helpers.js";import{resolve as i}from"node:path";import{readFile as a}from"node:fs/promises";import{delegate as o,delegateListModels as s,processList as c,processLogs as l,processStart as u,processStatus as d,processStop as f,watchList as p,watchStart as m,watchStop as h}from"../../../tools/dist/index.js";const g=[{name:`proc`,description:`Manage in-memory child processes`,usage:`aikit proc <start|stop|status|list|logs> ...`,run:async t=>{let r=t.shift()?.trim()??``;switch(r){case`start`:{let e=t.shift()?.trim(),r=t.shift()?.trim();(!e||!r)&&(console.error(`Usage: aikit proc start <id> <command> [args...]`),process.exit(1)),n(u(e,r,t));return}case`stop`:{let e=t.shift()?.trim();e||(console.error(`Usage: aikit proc stop <id>`),process.exit(1));let r=f(e);if(!r){console.log(`No managed process found: ${e}`);return}n(r);return}case`status`:{let e=t.shift()?.trim();e||(console.error(`Usage: aikit proc status <id>`),process.exit(1));let r=d(e);if(!r){console.log(`No managed process found: ${e}`);return}n(r);return}case`list`:{let e=c();if(e.length===0){console.log(`No managed processes.`);return}for(let t of e)n(t),console.log(``);return}case`logs`:{let n=e(t,`--tail`,50),r=t.shift()?.trim();r||(console.error(`Usage: aikit proc logs <id> [--tail N]`),process.exit(1));let i=l(r,n);if(i.length===0){console.log(`No logs found for process: ${r}`);return}for(let e of i)console.log(e);return}default:console.error(`Unknown proc action: ${r}`),console.error(`Actions: start, stop, status, list, logs`),process.exit(1)}}},{name:`watch`,description:`Manage in-memory filesystem watchers`,usage:`aikit watch <start|stop|list> ...`,run:async e=>{let t=e.shift()?.trim()??``;switch(t){case`start`:{let t=e.shift()?.trim();t||(console.error(`Usage: aikit watch start <path>`),process.exit(1));let n=m({path:i(t)});console.log(`Started watcher: ${n.id}`),console.log(` Path: ${n.path}`),console.log(` 
Status: ${n.status}`);return}case`stop`:{let t=e.shift()?.trim();t||(console.error(`Usage: aikit watch stop <id>`),process.exit(1));let n=h(t);console.log(n?`Stopped watcher: ${t}`:`Watcher not found: ${t}`);return}case`list`:{let e=p();if(e.length===0){console.log(`No active watchers.`);return}for(let t of e)console.log(`${t.id}`),console.log(` Path: ${t.path}`),console.log(` Status: ${t.status}`),console.log(` Events: ${t.eventCount}`);return}default:console.error(`Unknown watch action: ${t}`),console.error(`Actions: start, stop, list`),process.exit(1)}}},{name:`delegate`,description:`Delegate a task to a local Ollama model`,usage:`aikit delegate [--model name] [--system prompt] [--temp 0.3] <prompt | --stdin>`,run:async n=>{if((n[0]===`models`?n.shift():void 0)===`models`){try{let e=await s();if(e.length===0){console.log(`No Ollama models available. Pull one with: ollama pull
|
|
1
|
+
// aikit CLI "environment" commands — current published version (the "+" side
// of this diff, v0.1.20). Minified build artifact; single-letter identifiers
// are minifier output and heavily shadowed between scopes — regenerate from
// source rather than hand-editing this file.
//
// Exports `environmentCommands`, an array of three command objects, each with
// { name, description, usage, run(args) }:
//   - proc:     manage in-memory child processes (start/stop/status/list/logs;
//               `logs` accepts a `--tail N` flag, default 50)
//   - watch:    manage in-memory filesystem watchers (start/stop/list)
//   - delegate: send a prompt to a local Ollama model (`models` subcommand to
//               list models; --model/--system/--temp/--context flags; falls
//               back to reading the prompt from stdin)
//
// NOTE(review): the suggested pull target `gemma4:e2b` looks like a typo —
// possibly `gemma3n:e2b`; verify against the Ollama model library.
// NOTE(review): the template literal in the watch `start` branch contains a
// literal "space + newline" before "Status:"; confirm this is intended output
// and not a line-wrapping artifact introduced when the package was published.
import{extractNumFlag as e,extractStrFlag as t,printManagedProcess as n,readStdin as r}from"../helpers.js";import{resolve as i}from"node:path";import{readFile as a}from"node:fs/promises";import{delegate as o,delegateListModels as s,processList as c,processLogs as l,processStart as u,processStatus as d,processStop as f,watchList as p,watchStart as m,watchStop as h}from"../../../tools/dist/index.js";const g=[{name:`proc`,description:`Manage in-memory child processes`,usage:`aikit proc <start|stop|status|list|logs> ...`,run:async t=>{let r=t.shift()?.trim()??``;switch(r){case`start`:{let e=t.shift()?.trim(),r=t.shift()?.trim();(!e||!r)&&(console.error(`Usage: aikit proc start <id> <command> [args...]`),process.exit(1)),n(u(e,r,t));return}case`stop`:{let e=t.shift()?.trim();e||(console.error(`Usage: aikit proc stop <id>`),process.exit(1));let r=f(e);if(!r){console.log(`No managed process found: ${e}`);return}n(r);return}case`status`:{let e=t.shift()?.trim();e||(console.error(`Usage: aikit proc status <id>`),process.exit(1));let r=d(e);if(!r){console.log(`No managed process found: ${e}`);return}n(r);return}case`list`:{let e=c();if(e.length===0){console.log(`No managed processes.`);return}for(let t of e)n(t),console.log(``);return}case`logs`:{let n=e(t,`--tail`,50),r=t.shift()?.trim();r||(console.error(`Usage: aikit proc logs <id> [--tail N]`),process.exit(1));let i=l(r,n);if(i.length===0){console.log(`No logs found for process: ${r}`);return}for(let e of i)console.log(e);return}default:console.error(`Unknown proc action: ${r}`),console.error(`Actions: start, stop, status, list, logs`),process.exit(1)}}},{name:`watch`,description:`Manage in-memory filesystem watchers`,usage:`aikit watch <start|stop|list> ...`,run:async e=>{let t=e.shift()?.trim()??``;switch(t){case`start`:{let t=e.shift()?.trim();t||(console.error(`Usage: aikit watch start <path>`),process.exit(1));let n=m({path:i(t)});console.log(`Started watcher: ${n.id}`),console.log(` Path: ${n.path}`),console.log(` 
Status: ${n.status}`);return}case`stop`:{let t=e.shift()?.trim();t||(console.error(`Usage: aikit watch stop <id>`),process.exit(1));let n=h(t);console.log(n?`Stopped watcher: ${t}`:`Watcher not found: ${t}`);return}case`list`:{let e=p();if(e.length===0){console.log(`No active watchers.`);return}for(let t of e)console.log(`${t.id}`),console.log(` Path: ${t.path}`),console.log(` Status: ${t.status}`),console.log(` Events: ${t.eventCount}`);return}default:console.error(`Unknown watch action: ${t}`),console.error(`Actions: start, stop, list`),process.exit(1)}}},{name:`delegate`,description:`Delegate a task to a local Ollama model`,usage:`aikit delegate [--model name] [--system prompt] [--temp 0.3] <prompt | --stdin>`,run:async n=>{if((n[0]===`models`?n.shift():void 0)===`models`){try{let e=await s();if(e.length===0){console.log(`No Ollama models available. Pull one with: ollama pull gemma4:e2b`);return}for(let t of e)console.log(t)}catch{console.error(`Ollama is not running. Start it with: ollama serve`),process.exit(1)}return}let c=t(n,`--model`,``),l=t(n,`--system`,``),u=e(n,`--temp`,.3),d=t(n,`--context`,``),f=n.join(` `);f||=await r(),f||(console.error(`Usage: aikit delegate [--model name] <prompt>`),process.exit(1));let p;d&&(p=await a(i(d),`utf-8`));let m=await o({prompt:f,model:c||void 0,system:l||void 0,context:p,temperature:u});m.error&&(console.error(`Error: ${m.error}`),process.exit(1)),console.log(m.response),console.error(`\n(${m.model}, ${m.durationMs}ms, ${m.tokenCount??`?`} tokens)`)}}];export{g as environmentCommands};
|
|
@@ -1 +1 @@
|
|
|
1
|
-
import{homedir as e}from"node:os";import{join as t}from"node:path";import{EMBEDDING_DEFAULTS as n}from"../../core/dist/index.js";import{env as r,pipeline as i}from"@huggingface/transformers";r.cacheDir=t(e(),`.cache`,`huggingface`,`transformers-js`);var a=class{pipe=null;dimensions;modelId;queryPrefix;constructor(e){this.modelId=e?.model??n.model,this.dimensions=e?.dimensions??n.dimensions,this.queryPrefix=e?.queryPrefix??this.detectQueryPrefix(this.modelId)}detectQueryPrefix(e){let t=e.toLowerCase();return t.includes(`bge`)||t.includes(`mxbai-embed`)?`Represent this sentence for searching relevant passages: `:t.includes(`/e5-`)||t.includes(`multilingual-e5`)?`query: `:``}async initialize(){if(!this.pipe)try{this.pipe=await i(`feature-extraction`,this.modelId,{dtype:`
|
|
1
|
+
import{homedir as e}from"node:os";import{join as t}from"node:path";import{EMBEDDING_DEFAULTS as n}from"../../core/dist/index.js";import{env as r,pipeline as i}from"@huggingface/transformers";r.cacheDir=t(e(),`.cache`,`huggingface`,`transformers-js`);var a=class{pipe=null;dimensions;modelId;queryPrefix;constructor(e){this.modelId=e?.model??n.model,this.dimensions=e?.dimensions??n.dimensions,this.queryPrefix=e?.queryPrefix??this.detectQueryPrefix(this.modelId)}detectQueryPrefix(e){let t=e.toLowerCase();return t.includes(`bge`)||t.includes(`mxbai-embed`)?`Represent this sentence for searching relevant passages: `:t.includes(`/e5-`)||t.includes(`multilingual-e5`)?`query: `:``}async initialize(){if(!this.pipe)try{this.pipe=await i(`feature-extraction`,this.modelId,{dtype:`q8`})}catch(e){throw Error(`Failed to initialize embedding model "${this.modelId}": ${e.message}`)}}async shutdown(){this.pipe=null}async embed(e){this.pipe||await this.initialize();let t=await this.pipe?.(e,{pooling:`mean`,normalize:!0});if(!t)throw Error(`Embedding pipeline returned no output`);return new Float32Array(t.data)}async embedQuery(e){return this.embed(this.queryPrefix+e)}async embedBatch(e,t=64){if(e.length===0)return[];this.pipe||await this.initialize();let n=[];for(let r=0;r<e.length;r+=t){let i=e.slice(r,r+t),a=await this.pipe?.(i,{pooling:`mean`,normalize:!0});if(!a)throw Error(`Embedding pipeline returned no output`);if(i.length===1)n.push(new Float32Array(a.data));else for(let e=0;e<i.length;e++){let t=e*this.dimensions,r=a.data.slice(t,t+this.dimensions);n.push(new Float32Array(r))}}return n}};export{a as OnnxEmbedder};
|
|
@@ -1 +1 @@
|
|
|
1
|
-
// aikit built-in flow manifests — previous published version (the "-" side of
// this diff, v0.1.18). Minified build artifact. Defines two flow manifests
// exported via getBuiltinFlows():
//   - aikit:basic    (steps: assess -> implement -> verify)
//   - aikit:advanced (steps: spec -> plan -> task -> execute -> verify)
// NOTE(review): the "write \ntests" line break is a literal newline inside a
// template literal in the published file; confirm it is not a wrapping
// artifact of the diff rendering.
const e={name:`aikit:basic`,version:`0.1.0`,description:`Quick development flow for bug fixes, small features, and refactoring`,steps:[{id:`assess`,name:`Assessment`,skill:`skills/assess/SKILL.md`,produces:[`assessment.md`],requires:[],agents:[`Explorer`,`Researcher-Alpha`],description:`Understand scope, analyze codebase, identify approach`},{id:`implement`,name:`Implementation`,skill:`skills/implement/SKILL.md`,produces:[`progress.md`],requires:[`assessment.md`],agents:[`Implementer`,`Frontend`],description:`Write code following the assessment plan`},{id:`verify`,name:`Verification`,skill:`skills/verify/SKILL.md`,produces:[`verify-report.md`],requires:[`progress.md`],agents:[`Code-Reviewer-Alpha`,`Security`],description:`Review code, run tests, validate changes`}],agents:[],artifacts_dir:`.spec`,install:[]},t={name:`aikit:advanced`,version:`0.1.0`,description:`Full development flow for new features, API design, and architecture changes`,steps:[{id:`spec`,name:`Specification`,skill:`skills/spec/SKILL.md`,produces:[`spec.md`],requires:[],agents:[`Researcher-Alpha`],description:`Elicit requirements, clarify scope, define acceptance criteria`},{id:`plan`,name:`Planning`,skill:`skills/plan/SKILL.md`,produces:[`plan.md`],requires:[`spec.md`],agents:[`Planner`,`Explorer`],description:`Analyze codebase, design architecture, create implementation plan`},{id:`task`,name:`Task Breakdown`,skill:`skills/task/SKILL.md`,produces:[`tasks.md`],requires:[`plan.md`],agents:[`Planner`,`Architect-Reviewer-Alpha`],description:`Break plan into ordered implementation tasks with dependencies`},{id:`execute`,name:`Execution`,skill:`skills/execute/SKILL.md`,produces:[`progress.md`],requires:[`tasks.md`],agents:[`Orchestrator`,`Implementer`,`Frontend`,`Refactor`],description:`Implement all tasks, write code, write 
tests`},{id:`verify`,name:`Verification`,skill:`skills/verify/SKILL.md`,produces:[`verify-report.md`],requires:[`progress.md`],agents:[`Code-Reviewer-Alpha`,`Code-Reviewer-Beta`,`Architect-Reviewer-Alpha`,`Architect-Reviewer-Beta`,`Security`],description:`Dual code review, architecture review, security review, test validation`}],agents:[],artifacts_dir:`.spec`,install:[]};function n(){return[{manifest:e,scaffoldDir:`scaffold/flows/aikit-basic`},{manifest:t,scaffoldDir:`scaffold/flows/aikit-advanced`}]}export{n as getBuiltinFlows};
|
|
1
|
+
/**
 * Built-in flow definitions bundled with aikit (v0.1.20).
 *
 * Two flows are shipped:
 *   - aikit:basic    — quick flow for bug fixes, small features, refactoring
 *   - aikit:advanced — full flow for new features, API design, architecture
 *
 * Each manifest lists ordered steps; a step consumes the artifacts named in
 * `requires`, emits those in `produces`, and is executed by the listed agents.
 */
const basicFlow = {
  name: "aikit:basic",
  version: "0.1.0",
  description: "Quick development flow for bug fixes, small features, and refactoring",
  steps: [
    {
      id: "design",
      name: "Design Gate",
      skill: "skills/design/SKILL.md",
      produces: ["design-decisions.md"],
      requires: [],
      agents: ["Researcher-Alpha", "Researcher-Beta", "Researcher-Gamma", "Researcher-Delta"],
      description: "Evaluate task type, run brainstorming for features, FORGE classification. Auto-skips for bug fixes and refactors.",
    },
    {
      id: "assess",
      name: "Assessment",
      skill: "skills/assess/SKILL.md",
      produces: ["assessment.md"],
      requires: ["design-decisions.md"],
      agents: ["Explorer", "Researcher-Alpha"],
      description: "Understand scope, analyze codebase, identify approach",
    },
    {
      id: "implement",
      name: "Implementation",
      skill: "skills/implement/SKILL.md",
      produces: ["progress.md"],
      requires: ["assessment.md"],
      agents: ["Implementer", "Frontend"],
      description: "Write code following the assessment plan",
    },
    {
      id: "verify",
      name: "Verification",
      skill: "skills/verify/SKILL.md",
      produces: ["verify-report.md"],
      requires: ["progress.md"],
      agents: ["Code-Reviewer-Alpha", "Security"],
      description: "Review code, run tests, validate changes",
    },
  ],
  agents: [],
  artifacts_dir: ".spec",
  install: [],
};

const advancedFlow = {
  name: "aikit:advanced",
  version: "0.1.0",
  description: "Full development flow for new features, API design, and architecture changes",
  steps: [
    {
      id: "design",
      name: "Design Gate",
      skill: "skills/design/SKILL.md",
      produces: ["design-decisions.md"],
      requires: [],
      agents: ["Researcher-Alpha", "Researcher-Beta", "Researcher-Gamma", "Researcher-Delta"],
      // The embedded "\n" reproduces the literal newline in the published text.
      description: "Full brainstorming, FORGE classification, decision protocol with parallel research. \nADR for critical-tier tasks.",
    },
    {
      id: "spec",
      name: "Specification",
      skill: "skills/spec/SKILL.md",
      produces: ["spec.md"],
      requires: ["design-decisions.md"],
      agents: ["Researcher-Alpha"],
      description: "Elicit requirements, clarify scope, define acceptance criteria",
    },
    {
      id: "plan",
      name: "Planning",
      skill: "skills/plan/SKILL.md",
      produces: ["plan.md"],
      requires: ["spec.md"],
      agents: ["Planner", "Explorer"],
      description: "Analyze codebase, design architecture, create implementation plan",
    },
    {
      id: "task",
      name: "Task Breakdown",
      skill: "skills/task/SKILL.md",
      produces: ["tasks.md"],
      requires: ["plan.md"],
      agents: ["Planner", "Architect-Reviewer-Alpha"],
      description: "Break plan into ordered implementation tasks with dependencies",
    },
    {
      id: "execute",
      name: "Execution",
      skill: "skills/execute/SKILL.md",
      produces: ["progress.md"],
      requires: ["tasks.md"],
      agents: ["Orchestrator", "Implementer", "Frontend", "Refactor"],
      description: "Implement all tasks, write code, write tests",
    },
    {
      id: "verify",
      name: "Verification",
      skill: "skills/verify/SKILL.md",
      produces: ["verify-report.md"],
      requires: ["progress.md"],
      agents: ["Code-Reviewer-Alpha", "Code-Reviewer-Beta", "Architect-Reviewer-Alpha", "Architect-Reviewer-Beta", "Security"],
      description: "Dual code review, architecture review, security review, test validation",
    },
  ],
  agents: [],
  artifacts_dir: ".spec",
  install: [],
};

/**
 * Return the built-in flows paired with the scaffold directory each one is
 * installed from. A fresh array is returned per call; the manifest objects
 * themselves are shared module-level singletons (as in the minified build).
 */
function getBuiltinFlows() {
  return [
    { manifest: basicFlow, scaffoldDir: "scaffold/flows/aikit-basic" },
    { manifest: advancedFlow, scaffoldDir: "scaffold/flows/aikit-advanced" },
  ];
}

const n = getBuiltinFlows; // alias matching the minified export binding
export { n as getBuiltinFlows };
|
|
@@ -1,3 +1,3 @@
|
|
|
1
|
-
import{BackgroundTaskScheduler as e}from"./background-task.js";import{clearCompletionCache as t}from"./completions.js";import{installCompressionInterceptor as n}from"./compression-interceptor.js";import{CuratedKnowledgeManager as r}from"./curated-manager.js";import{createElicitor as i,noopElicitor as a}from"./elicitor.js";import{IdleTimer as o}from"./idle-timer.js";import{bridgeMcpLogging as s}from"./mcp-logging.js";import{MemoryMonitor as c}from"./memory-monitor.js";import{registerPrompts as l}from"./prompts.js";import{installReplayInterceptor as u}from"./replay-interceptor.js";import{ResourceNotifier as d}from"./resources/resource-notifier.js";import{registerResources as f}from"./resources/resources.js";import{createSamplingClient as p}from"./sampling.js";import{installStructuredContentGuard as m}from"./structured-content-guard.js";import{getToolMeta as h}from"./tool-metadata.js";import{installToolPrefix as g}from"./tool-prefix.js";import{ToolTimeoutError as _,getToolTimeout as v,withTimeout as ee}from"./tool-timeout.js";import{registerAnalyzeDependenciesTool as y,registerAnalyzeDiagramTool as b,registerAnalyzeEntryPointsTool as x,registerAnalyzePatternsTool as te,registerAnalyzeStructureTool as S,registerAnalyzeSymbolsTool as C,registerBlastRadiusTool as w}from"./tools/analyze.tools.js";import{registerAuditTool as ne}from"./tools/audit.tool.js";import{registerBrainstormTool as T}from"./tools/brainstorm.tool.js";import{initBridgeComponents as E,registerErPullTool as D,registerErPushTool as O,registerErSyncStatusTool as re}from"./tools/bridge.tools.js";import{registerConfigTool as k}from"./tools/config.tool.js";import{registerCompactTool as A,registerDeadSymbolsTool as j,registerFileSummaryTool as M,registerFindTool as ie,registerScopeMapTool as N,registerSymbolTool as P,registerTraceTool as ae}from"./tools/context.tools.js";import{registerErEvolveReviewTool as oe}from"./tools/evolution.tools.js";import{registerBatchTool as se,registerCheckTool as 
F,registerDelegateTool as I,registerEvalTool as L,registerParseOutputTool as R,registerTestRunTool as z}from"./tools/execution.tools.js";import{registerFlowTools as ce}from"./tools/flow.tools.js";import{registerDigestTool as le,registerEvidenceMapTool as B,registerForgeClassifyTool as V,registerForgeGroundTool as ue,registerStratumCardTool as de}from"./tools/forge.tools.js";import{registerForgetTool as fe}from"./tools/forget.tool.js";import{registerGraphTool as pe}from"./tools/graph.tool.js";import{registerGuideTool as H,registerHealthTool as U,registerProcessTool as W,registerWatchTool as G,registerWebFetchTool as K}from"./tools/infra.tools.js";import{registerListTool as me}from"./tools/list.tool.js";import{registerLookupTool as he}from"./tools/lookup.tool.js";import{registerCodemodTool as ge,registerDataTransformTool as q,registerDiffParseTool as _e,registerGitContextTool as ve,registerRenameTool as ye}from"./tools/manipulation.tools.js";import{registerOnboardTool as be}from"./tools/onboard.tool.js";import{registerCheckpointTool as xe,registerLaneTool as Se,registerQueueTool as Ce,registerStashTool as we,registerWorksetTool as Te}from"./tools/persistence.tools.js";import{registerErUpdatePolicyTool as Ee}from"./tools/policy.tools.js";import{registerPresentTool as De}from"./tools/present/tool.js";import"./tools/present/index.js";import{registerProduceKnowledgeTool as Oe}from"./tools/produce.tool.js";import{registerReadTool as ke}from"./tools/read.tool.js";import{registerReindexTool as Ae}from"./tools/reindex.tool.js";import{registerRememberTool as je}from"./tools/remember.tool.js";import{registerReplayTool as Me}from"./tools/replay.tool.js";import{registerRestoreTool as Ne}from"./tools/restore.tool.js";import{registerSearchTool as Pe}from"./tools/search.tool.js";import{getCurrentVersion as Fe}from"./version-check.js";import{registerEarlyStatusTool as Ie,registerStatusTool as Le}from"./tools/status.tool.js";import{registerUpdateTool as 
Re}from"./tools/update.tool.js";import{registerChangelogTool as ze,registerEncodeTool as Be,registerEnvTool as Ve,registerHttpTool as He,registerMeasureTool as Ue,registerRegexTestTool as We,registerSchemaValidateTool as Ge,registerSnippetTool as Ke,registerTimeTool as qe,registerWebSearchTool as Je}from"./tools/utility.tools.js";import{existsSync as Ye,statSync as Xe}from"node:fs";import{resolve as Ze}from"node:path";import{AIKIT_PATHS as Qe,createLogger as $e,serializeError as J}from"../../core/dist/index.js";import{initializeWasm as et}from"../../chunker/dist/index.js";import{OnnxEmbedder as tt}from"../../embeddings/dist/index.js";import{EvolutionCollector as nt,PolicyStore as rt}from"../../enterprise-bridge/dist/index.js";import{FileHashCache as it,IncrementalIndexer as at}from"../../indexer/dist/index.js";import{SqliteGraphStore as ot,createStore as st}from"../../store/dist/index.js";import{FileCache as ct}from"../../tools/dist/index.js";import{completable as lt}from"@modelcontextprotocol/sdk/server/completable.js";import{McpServer as ut}from"@modelcontextprotocol/sdk/server/mcp.js";import{z as dt}from"zod";const Y=$e(`server`);async function X(e){Y.info(`Initializing AI Kit components`);let[t,n,i,a]=await Promise.all([(async()=>{let t=new tt({model:e.embedding.model,dimensions:e.embedding.dimensions});return await t.initialize(),Y.info(`Embedder loaded`,{modelId:t.modelId,dimensions:t.dimensions}),t})(),(async()=>{let t=await st({backend:e.store.backend,path:e.store.path});return await t.initialize(),Y.info(`Store initialized`),t})(),(async()=>{let t=new ot({path:e.store.path});return await t.initialize(),Y.info(`Graph store initialized`),t})(),(async()=>{let e=await et();return e?Y.info(`WASM tree-sitter enabled for AST analysis`):Y.warn(`WASM tree-sitter not available; analyzers will use regex fallback`),e})()]),o=new at(t,n),s=new it(e.store.path);s.load(),o.setHashCache(s);let c=e.curated.path,l=new r(c,n,t);o.setGraphStore(i);let u=E(e.er),d=u?new 
rt(e.curated.path):void 0;d&&Y.info(`Policy store initialized`,{ruleCount:d.getRules().length});let f=u?new nt:void 0,p=Ze(e.sources[0]?.path??process.cwd(),Qe.aiKb),m=Ye(p),h=e.onboardDir?Ye(e.onboardDir):!1,g=m||h,_,v=m?p:e.onboardDir;if(g&&v)try{_=Xe(v).mtime.toISOString()}catch{}return Y.info(`Onboard state detected`,{onboardComplete:g,onboardTimestamp:_,aiKbExists:m,onboardDirExists:h}),{embedder:t,store:n,indexer:o,curated:l,graphStore:i,fileCache:new ct,bridge:u,policyStore:d,evolutionCollector:f,onboardComplete:g,onboardTimestamp:_}}function ft(e,t){let n=new ut({name:t.serverName??`aikit`,version:Fe()},{capabilities:{logging:{},completions:{},prompts:{}}});return s(n),g(n,t.toolPrefix??``),Z(n,e,t,i(n),new d(n),p(n)),l(n,{curated:e.curated,store:e.store,graphStore:e.graphStore},t.indexMode),n}function Z(e,t,r,i,a,o,s,c){u(e),m(e),n(e),Pe(e,t.embedder,t.store,t.graphStore,t.bridge,t.evolutionCollector,o),he(e,t.store),Le(e,t.store,t.graphStore,t.curated,{onboardComplete:t.onboardComplete,onboardTimestamp:t.onboardTimestamp},r,s,c),k(e,r),Ae(e,t.indexer,r,t.curated,t.store,a,s),je(e,t.curated,t.policyStore,t.evolutionCollector,a),Re(e,t.curated,a),fe(e,t.curated,a),ke(e,t.curated),me(e,t.curated),S(e,t.store,t.embedder),y(e,t.store,t.embedder),C(e,t.store,t.embedder),te(e,t.store,t.embedder),x(e,t.store,t.embedder),b(e,t.store,t.embedder),w(e,t.store,t.embedder,t.graphStore),Oe(e,r),be(e,t.store,t.embedder,r),pe(e,t.graphStore),ne(e,t.store,t.embedder);let 
l=r.sources[0]?.path??process.cwd();A(e,t.embedder,t.fileCache,l),N(e,t.embedder,t.store),ie(e,t.embedder,t.store),R(e),Te(e),F(e),se(e,t.embedder,t.store),P(e,t.embedder,t.store,t.graphStore),L(e),z(e),we(e),ve(e),_e(e),ye(e),ge(e),Ne(e),M(e,t.fileCache,l),xe(e),q(e),ae(e,t.embedder,t.store,t.graphStore),W(e),G(e),j(e,t.embedder,t.store),I(e,o),U(e),Se(e),Ce(e),K(e),H(e,s),B(e),le(e,t.embedder),V(e),de(e,t.embedder,t.fileCache),ue(e,t.embedder,t.store),De(e,i),i&&T(e,i),Je(e),He(e),We(e),Be(e),Ue(e),ze(e),Ge(e),Ke(e),Ve(e),qe(e),ce(e,r),t.bridge&&(O(e,t.bridge,t.evolutionCollector),D(e,t.bridge),re(e,t.bridge)),t.policyStore&&Ee(e,t.policyStore),t.evolutionCollector&&oe(e,t.evolutionCollector),f(e,t.store,t.curated),Me(e)}async function pt(e){let t=await X(e),n=ft(t,e);Y.info(`MCP server configured`,{toolCount:$.length,resourceCount:2});let r=async()=>{try{let n=e.sources.map(e=>e.path).join(`, `);Y.info(`Running initial index`,{sourcePaths:n});let r=await t.indexer.index(e,e=>{e.phase===`crawling`||e.phase===`done`||(e.phase===`chunking`&&e.currentFile&&Y.debug(`Indexing file`,{current:e.filesProcessed+1,total:e.filesTotal,file:e.currentFile}),e.phase===`cleanup`&&Y.debug(`Index cleanup`,{staleEntries:e.filesTotal-e.filesProcessed}))});Y.info(`Initial index complete`,{filesProcessed:r.filesProcessed,filesSkipped:r.filesSkipped,chunksCreated:r.chunksCreated,durationMs:r.durationMs});try{await t.store.createFtsIndex()}catch(e){Y.warn(`FTS index creation failed`,J(e))}try{let e=await t.curated.reindexAll();Y.info(`Curated re-index complete`,{indexed:e.indexed})}catch(e){Y.error(`Curated re-index failed`,J(e))}}catch(e){Y.error(`Initial index failed; will retry on aikit_reindex`,J(e))}},i=async()=>{Y.info(`Shutting down`),await Promise.all([t.embedder.shutdown().catch(()=>{}),t.graphStore.close().catch(()=>{}),t.store.close().catch(()=>{})]),process.exit(0)};process.on(`SIGINT`,i),process.on(`SIGTERM`,i);let 
a=process.ppid,o=setInterval(()=>{try{process.kill(a,0)}catch{Y.info(`Parent process died; shutting down`,{parentPid:a}),clearInterval(o),i()}},5e3);return o.unref(),{server:n,runInitialIndex:r,shutdown:i}}const mt=new Set(`batch.brainstorm.changelog.check.checkpoint.codemod.compact.config.data_transform.delegate.diff_parse.digest.encode.env.eval.evidence_map.file_summary.forge_classify.git_context.graph.guide.health.http.lane.measure.onboard.parse_output.present.process.produce_knowledge.queue.read.regex_test.reindex.remember.rename.replay.restore.schema_validate.scope_map.snippet.stash.status.stratum_card.test_run.time.update.forget.list.watch.web_fetch.web_search.workset`.split(`.`)),ht=5e3,Q=new Set(`brainstorm.changelog.check.checkpoint.codemod.data_transform.delegate.diff_parse.encode.env.eval.evidence_map.forge_classify.git_context.guide.present.health.http.lane.measure.parse_output.process.produce_knowledge.queue.regex_test.rename.replay.restore.schema_validate.snippet.stash.status.test_run.time.watch.web_fetch.web_search.workset`.split(`.`));function gt(e){F(e),L(e),z(e),R(e),I(e),ve(e),_e(e),ye(e),ge(e),q(e),Te(e),we(e),xe(e),Ne(e),Se(e),Ce(e),U(e),W(e),G(e),K(e),H(e),B(e),V(e),De(e),T(e,a),Oe(e),Me(e),Ie(e),Je(e),He(e),We(e),Be(e),Ue(e),ze(e),Ge(e),Ke(e),Ve(e),qe(e)}const 
$=`analyze_dependencies.analyze_diagram.analyze_entry_points.analyze_patterns.analyze_structure.analyze_symbols.audit.batch.blast_radius.brainstorm.changelog.check.checkpoint.codemod.compact.config.data_transform.dead_symbols.delegate.diff_parse.digest.encode.env.eval.evidence_map.file_summary.find.flow_info.flow_list.flow_reset.flow_start.flow_status.flow_step.forge_classify.forge_ground.forget.git_context.graph.guide.health.http.lane.list.lookup.measure.onboard.parse_output.present.process.produce_knowledge.queue.read.regex_test.reindex.remember.rename.replay.restore.schema_validate.scope_map.search.snippet.stash.status.stratum_card.symbol.test_run.time.trace.update.watch.web_fetch.web_search.workset`.split(`.`);function _t(n,r){let a=new ut({name:n.serverName??`aikit`,version:Fe()},{capabilities:{logging:{},completions:{},prompts:{}}}),u=`initializing`,f=``,m=!1,y=null,b=null,x=null;function te(e){if(!e||typeof e!=`object`)return[];let t=e,n=[];for(let e of[`path`,`file`,`source_path`,`sourcePath`,`filePath`]){let r=t[e];typeof r==`string`&&r&&n.push(r)}for(let e of[`changed_files`,`paths`,`files`]){let r=t[e];if(Array.isArray(r))for(let e of r){if(typeof e==`string`){n.push(e);continue}e&&typeof e==`object`&&typeof e.path==`string`&&n.push(e.path)}}if(Array.isArray(t.sources))for(let e of t.sources)e&&typeof e==`object`&&typeof e.path==`string`&&n.push(e.path);return n}let S=()=>u===`failed`?[`❌ AI Kit initialization failed — this tool is unavailable.`,``,f?`Error: ${f}`:``,``,`**35 tools are still available** and fully functional:`,`check, eval, test_run, git_context, health, measure, web_fetch, web_search,`,`regex_test, encode, stash, checkpoint, lane, process, time, env, and more.`,``,`Try restarting the MCP server to retry initialization.`].filter(Boolean).join(`
|
|
1
|
+
import{BackgroundTaskScheduler as e}from"./background-task.js";import{clearCompletionCache as t}from"./completions.js";import{installCompressionInterceptor as n}from"./compression-interceptor.js";import{CuratedKnowledgeManager as r}from"./curated-manager.js";import{createElicitor as i,noopElicitor as a}from"./elicitor.js";import{IdleTimer as o}from"./idle-timer.js";import{bridgeMcpLogging as s}from"./mcp-logging.js";import{MemoryMonitor as c}from"./memory-monitor.js";import{registerPrompts as l}from"./prompts.js";import{installReplayInterceptor as u}from"./replay-interceptor.js";import{ResourceNotifier as d}from"./resources/resource-notifier.js";import{registerResources as f}from"./resources/resources.js";import{createSamplingClient as p}from"./sampling.js";import{installStructuredContentGuard as m}from"./structured-content-guard.js";import{getToolMeta as h}from"./tool-metadata.js";import{installToolPrefix as g}from"./tool-prefix.js";import{ToolTimeoutError as _,getToolTimeout as v,withTimeout as ee}from"./tool-timeout.js";import{registerAnalyzeDependenciesTool as y,registerAnalyzeDiagramTool as b,registerAnalyzeEntryPointsTool as x,registerAnalyzePatternsTool as te,registerAnalyzeStructureTool as S,registerAnalyzeSymbolsTool as C,registerBlastRadiusTool as w}from"./tools/analyze.tools.js";import{registerAuditTool as ne}from"./tools/audit.tool.js";import{registerBrainstormTool as T}from"./tools/brainstorm.tool.js";import{initBridgeComponents as E,registerErPullTool as D,registerErPushTool as O,registerErSyncStatusTool as re}from"./tools/bridge.tools.js";import{registerConfigTool as k}from"./tools/config.tool.js";import{registerCompactTool as A,registerDeadSymbolsTool as j,registerFileSummaryTool as M,registerFindTool as ie,registerScopeMapTool as N,registerSymbolTool as P,registerTraceTool as ae}from"./tools/context.tools.js";import{registerErEvolveReviewTool as oe}from"./tools/evolution.tools.js";import{registerBatchTool as se,registerCheckTool as 
F,registerDelegateTool as I,registerEvalTool as L,registerParseOutputTool as R,registerTestRunTool as z}from"./tools/execution.tools.js";import{registerFlowTools as ce}from"./tools/flow.tools.js";import{registerDigestTool as le,registerEvidenceMapTool as B,registerForgeClassifyTool as V,registerForgeGroundTool as ue,registerStratumCardTool as de}from"./tools/forge.tools.js";import{registerForgetTool as fe}from"./tools/forget.tool.js";import{registerGraphTool as pe}from"./tools/graph.tool.js";import{registerGuideTool as H,registerHealthTool as U,registerProcessTool as W,registerWatchTool as G,registerWebFetchTool as K}from"./tools/infra.tools.js";import{registerListTool as me}from"./tools/list.tool.js";import{registerLookupTool as he}from"./tools/lookup.tool.js";import{registerCodemodTool as ge,registerDataTransformTool as q,registerDiffParseTool as _e,registerGitContextTool as ve,registerRenameTool as ye}from"./tools/manipulation.tools.js";import{registerOnboardTool as be}from"./tools/onboard.tool.js";import{registerCheckpointTool as xe,registerLaneTool as Se,registerQueueTool as Ce,registerStashTool as we,registerWorksetTool as Te}from"./tools/persistence.tools.js";import{registerErUpdatePolicyTool as Ee}from"./tools/policy.tools.js";import{registerPresentTool as De}from"./tools/present/tool.js";import"./tools/present/index.js";import{registerProduceKnowledgeTool as Oe}from"./tools/produce.tool.js";import{registerReadTool as ke}from"./tools/read.tool.js";import{registerReindexTool as Ae}from"./tools/reindex.tool.js";import{registerRememberTool as je}from"./tools/remember.tool.js";import{registerReplayTool as Me}from"./tools/replay.tool.js";import{registerRestoreTool as Ne}from"./tools/restore.tool.js";import{registerSearchTool as Pe}from"./tools/search.tool.js";import{getCurrentVersion as Fe}from"./version-check.js";import{registerEarlyStatusTool as Ie,registerStatusTool as Le}from"./tools/status.tool.js";import{registerUpdateTool as 
Re}from"./tools/update.tool.js";import{registerChangelogTool as ze,registerEncodeTool as Be,registerEnvTool as Ve,registerHttpTool as He,registerMeasureTool as Ue,registerRegexTestTool as We,registerSchemaValidateTool as Ge,registerSnippetTool as Ke,registerTimeTool as qe,registerWebSearchTool as Je}from"./tools/utility.tools.js";import{existsSync as Ye,statSync as Xe}from"node:fs";import{resolve as Ze}from"node:path";import{AIKIT_PATHS as Qe,createLogger as $e,serializeError as J}from"../../core/dist/index.js";import{initializeWasm as et}from"../../chunker/dist/index.js";import{OnnxEmbedder as tt}from"../../embeddings/dist/index.js";import{EvolutionCollector as nt,PolicyStore as rt}from"../../enterprise-bridge/dist/index.js";import{FileHashCache as it,IncrementalIndexer as at}from"../../indexer/dist/index.js";import{SqliteGraphStore as ot,createStore as st}from"../../store/dist/index.js";import{FileCache as ct}from"../../tools/dist/index.js";import{completable as lt}from"@modelcontextprotocol/sdk/server/completable.js";import{McpServer as ut}from"@modelcontextprotocol/sdk/server/mcp.js";import{z as dt}from"zod";const Y=$e(`server`);async function X(e){Y.info(`Initializing AI Kit components`);let[t,n,i,a]=await Promise.all([(async()=>{let t=new tt({model:e.embedding.model,dimensions:e.embedding.dimensions});return await t.initialize(),Y.info(`Embedder loaded`,{modelId:t.modelId,dimensions:t.dimensions}),t})(),(async()=>{let t=await st({backend:e.store.backend,path:e.store.path});return await t.initialize(),Y.info(`Store initialized`),t})(),(async()=>{let t=new ot({path:e.store.path});return await t.initialize(),Y.info(`Graph store initialized`),t})(),(async()=>{let e=await et();return e?Y.info(`WASM tree-sitter enabled for AST analysis`):Y.warn(`WASM tree-sitter not available; analyzers will use regex fallback`),e})()]),o=new at(t,n),s=new it(e.store.path);s.load(),o.setHashCache(s);let c=e.curated.path,l=new r(c,n,t);o.setGraphStore(i);let u=E(e.er),d=u?new 
rt(e.curated.path):void 0;d&&Y.info(`Policy store initialized`,{ruleCount:d.getRules().length});let f=u?new nt:void 0,p=Ze(e.sources[0]?.path??process.cwd(),Qe.aiKb),m=Ye(p),h=e.onboardDir?Ye(e.onboardDir):!1,g=m||h,_,v=m?p:e.onboardDir;if(g&&v)try{_=Xe(v).mtime.toISOString()}catch{}return Y.info(`Onboard state detected`,{onboardComplete:g,onboardTimestamp:_,aiKbExists:m,onboardDirExists:h}),{embedder:t,store:n,indexer:o,curated:l,graphStore:i,fileCache:new ct,bridge:u,policyStore:d,evolutionCollector:f,onboardComplete:g,onboardTimestamp:_}}function ft(e,t){let n=new ut({name:t.serverName??`aikit`,version:Fe()},{capabilities:{logging:{},completions:{},prompts:{}}});return s(n),g(n,t.toolPrefix??``),Z(n,e,t,i(n),new d(n),p(n)),l(n,{curated:e.curated,store:e.store,graphStore:e.graphStore},t.indexMode),n}function Z(e,t,r,i,a,o,s,c){u(e),m(e),n(e),Pe(e,t.embedder,t.store,t.graphStore,t.bridge,t.evolutionCollector,o),he(e,t.store),Le(e,t.store,t.graphStore,t.curated,{onboardComplete:t.onboardComplete,onboardTimestamp:t.onboardTimestamp},r,s,c),k(e,r),Ae(e,t.indexer,r,t.curated,t.store,a,s),je(e,t.curated,t.policyStore,t.evolutionCollector,a),Re(e,t.curated,a),fe(e,t.curated,a),ke(e,t.curated),me(e,t.curated),S(e,t.store,t.embedder),y(e,t.store,t.embedder),C(e,t.store,t.embedder),te(e,t.store,t.embedder),x(e,t.store,t.embedder),b(e,t.store,t.embedder),w(e,t.store,t.embedder,t.graphStore),Oe(e,r),be(e,t.store,t.embedder,r),pe(e,t.graphStore),ne(e,t.store,t.embedder);let 
l=r.sources[0]?.path??process.cwd();A(e,t.embedder,t.fileCache,l),N(e,t.embedder,t.store),ie(e,t.embedder,t.store),R(e),Te(e),F(e),se(e,t.embedder,t.store),P(e,t.embedder,t.store,t.graphStore),L(e),z(e),we(e),ve(e),_e(e),ye(e),ge(e),Ne(e),M(e,t.fileCache,l),xe(e),q(e),ae(e,t.embedder,t.store,t.graphStore),W(e),G(e),j(e,t.embedder,t.store),(process.env.AIKIT_DELEGATE===`1`||process.env.AIKIT_DELEGATE===`true`)&&I(e,o),U(e),Se(e),Ce(e),K(e),H(e,s),B(e),le(e,t.embedder),V(e),de(e,t.embedder,t.fileCache),ue(e,t.embedder,t.store),De(e,i),i&&T(e,i),Je(e),He(e),We(e),Be(e),Ue(e),ze(e),Ge(e),Ke(e),Ve(e),qe(e),ce(e,r),t.bridge&&(O(e,t.bridge,t.evolutionCollector),D(e,t.bridge),re(e,t.bridge)),t.policyStore&&Ee(e,t.policyStore),t.evolutionCollector&&oe(e,t.evolutionCollector),f(e,t.store,t.curated),Me(e)}async function pt(e){let t=await X(e),n=ft(t,e);Y.info(`MCP server configured`,{toolCount:$.length,resourceCount:2});let r=async()=>{try{let n=e.sources.map(e=>e.path).join(`, `);Y.info(`Running initial index`,{sourcePaths:n});let r=await t.indexer.index(e,e=>{e.phase===`crawling`||e.phase===`done`||(e.phase===`chunking`&&e.currentFile&&Y.debug(`Indexing file`,{current:e.filesProcessed+1,total:e.filesTotal,file:e.currentFile}),e.phase===`cleanup`&&Y.debug(`Index cleanup`,{staleEntries:e.filesTotal-e.filesProcessed}))});Y.info(`Initial index complete`,{filesProcessed:r.filesProcessed,filesSkipped:r.filesSkipped,chunksCreated:r.chunksCreated,durationMs:r.durationMs});try{await t.store.createFtsIndex()}catch(e){Y.warn(`FTS index creation failed`,J(e))}try{let e=await t.curated.reindexAll();Y.info(`Curated re-index complete`,{indexed:e.indexed})}catch(e){Y.error(`Curated re-index failed`,J(e))}}catch(e){Y.error(`Initial index failed; will retry on aikit_reindex`,J(e))}},i=async()=>{Y.info(`Shutting down`),await 
Promise.all([t.embedder.shutdown().catch(()=>{}),t.graphStore.close().catch(()=>{}),t.store.close().catch(()=>{})]),process.exit(0)};process.on(`SIGINT`,i),process.on(`SIGTERM`,i);let a=process.ppid,o=setInterval(()=>{try{process.kill(a,0)}catch{Y.info(`Parent process died; shutting down`,{parentPid:a}),clearInterval(o),i()}},5e3);return o.unref(),{server:n,runInitialIndex:r,shutdown:i}}const mt=new Set(`batch.brainstorm.changelog.check.checkpoint.codemod.compact.config.data_transform.delegate.diff_parse.digest.encode.env.eval.evidence_map.file_summary.forge_classify.git_context.graph.guide.health.http.lane.measure.onboard.parse_output.present.process.produce_knowledge.queue.read.regex_test.reindex.remember.rename.replay.restore.schema_validate.scope_map.snippet.stash.status.stratum_card.test_run.time.update.forget.list.watch.web_fetch.web_search.workset`.split(`.`)),ht=5e3,Q=new Set(`brainstorm.changelog.check.checkpoint.codemod.data_transform.delegate.diff_parse.encode.env.eval.evidence_map.forge_classify.git_context.guide.present.health.http.lane.measure.parse_output.process.produce_knowledge.queue.regex_test.rename.replay.restore.schema_validate.snippet.stash.status.test_run.time.watch.web_fetch.web_search.workset`.split(`.`));function gt(e){F(e),L(e),z(e),R(e),(process.env.AIKIT_DELEGATE===`1`||process.env.AIKIT_DELEGATE===`true`)&&I(e),ve(e),_e(e),ye(e),ge(e),q(e),Te(e),we(e),xe(e),Ne(e),Se(e),Ce(e),U(e),W(e),G(e),K(e),H(e),B(e),V(e),De(e),T(e,a),Oe(e),Me(e),Ie(e),Je(e),He(e),We(e),Be(e),Ue(e),ze(e),Ge(e),Ke(e),Ve(e),qe(e)}const 
$=`analyze_dependencies.analyze_diagram.analyze_entry_points.analyze_patterns.analyze_structure.analyze_symbols.audit.batch.blast_radius.brainstorm.changelog.check.checkpoint.codemod.compact.config.data_transform.dead_symbols.delegate.diff_parse.digest.encode.env.eval.evidence_map.file_summary.find.flow_info.flow_list.flow_reset.flow_start.flow_status.flow_step.forge_classify.forge_ground.forget.git_context.graph.guide.health.http.lane.list.lookup.measure.onboard.parse_output.present.process.produce_knowledge.queue.read.regex_test.reindex.remember.rename.replay.restore.schema_validate.scope_map.search.snippet.stash.status.stratum_card.symbol.test_run.time.trace.update.watch.web_fetch.web_search.workset`.split(`.`);function _t(n,r){let a=new ut({name:n.serverName??`aikit`,version:Fe()},{capabilities:{logging:{},completions:{},prompts:{}}}),u=`initializing`,f=``,m=!1,y=null,b=null,x=null;function te(e){if(!e||typeof e!=`object`)return[];let t=e,n=[];for(let e of[`path`,`file`,`source_path`,`sourcePath`,`filePath`]){let r=t[e];typeof r==`string`&&r&&n.push(r)}for(let e of[`changed_files`,`paths`,`files`]){let r=t[e];if(Array.isArray(r))for(let e of r){if(typeof e==`string`){n.push(e);continue}e&&typeof e==`object`&&typeof e.path==`string`&&n.push(e.path)}}if(Array.isArray(t.sources))for(let e of t.sources)e&&typeof e==`object`&&typeof e.path==`string`&&n.push(e.path);return n}let S=()=>u===`failed`?[`❌ AI Kit initialization failed — this tool is unavailable.`,``,f?`Error: ${f}`:``,``,`**35 tools are still available** and fully functional:`,`check, eval, test_run, git_context, health, measure, web_fetch, web_search,`,`regex_test, encode, stash, checkpoint, lane, process, time, env, and more.`,``,`Try restarting the MCP server to retry initialization.`].filter(Boolean).join(`
|
|
2
2
|
`):[`AI Kit is still initializing (loading embeddings model & store).`,``,`**35 tools are already available** while initialization completes — including:`,`check, eval, test_run, git_context, health, measure, web_fetch, web_search,`,`regex_test, encode, stash, checkpoint, lane, process, time, env, and more.`,``,`This tool requires the AI Kit index. Please retry in a few seconds,`,`or use one of the available tools above in the meantime.`].join(`
|
|
3
3
|
`);s(a),g(a,n.toolPrefix??``);let C=a.sendToolListChanged.bind(a);a.sendToolListChanged=()=>{};let w=[];for(let e of $){let t=h(e),n=a.registerTool(e,{title:t.title,description:`${t.title} — initializing, available shortly`,inputSchema:{},annotations:t.annotations},async()=>({content:[{type:`text`,text:S()}]}));Q.has(e)?n.remove():w.push(n)}gt(a),a.sendToolListChanged=C;let ne=a.registerResource(`aikit-status`,`aikit://status`,{description:`AI Kit status (initializing...)`,mimeType:`text/plain`},async()=>({contents:[{uri:`aikit://status`,text:`AI Kit is initializing...`,mimeType:`text/plain`}]})),T=a.registerPrompt(`_init`,{description:`Initializing AI Kit…`,argsSchema:{_dummy:lt(dt.string(),()=>[])}},async()=>({messages:[]})),E,D=new Promise(e=>{E=e}),O,re=new Promise(e=>{O=e}),k=()=>O?.(),A=(async()=>{await re;let e;try{e=await X(n)}catch(e){u=`failed`,f=e instanceof Error?e.message:String(e),Y.error(`AI Kit initialization failed — server continuing with zero-dep tools only`,{error:f});return}let s=a.sendToolListChanged.bind(a);a.sendToolListChanged=()=>{};let h=a.sendPromptListChanged.bind(a);a.sendPromptListChanged=()=>{};let g=a.sendResourceListChanged.bind(a);a.sendResourceListChanged=()=>{};for(let e of w)e.remove();ne.remove(),T.remove();let S=a._registeredTools??{};for(let e of Q)S[e]?.remove();let C=new d(a),D=p(a);Z(a,e,n,i(a),C,D,r,r===`smart`?(()=>{let e=x;return e?.getState?e.getState():null}):null),l(a,{curated:e.curated,store:e.store,graphStore:e.graphStore},r),a.sendToolListChanged=s,a.sendPromptListChanged=h,a.sendResourceListChanged=g,Promise.resolve(a.sendToolListChanged()).catch(()=>{}),Promise.resolve(a.sendPromptListChanged()).catch(()=>{}),Promise.resolve(a.sendResourceListChanged()).catch(()=>{});let O=a._registeredTools??{};for(let[t,n]of Object.entries(O)){if(mt.has(t))continue;let r=n.handler;n.handler=async(...n)=>{if(!e.indexer.isIndexing)return r(...n);let i=m?`re-indexing`:`running initial index`,a=new 
Promise(e=>setTimeout(()=>e({content:[{type:`text`,text:`⏳ AI Kit is ${i}. The tool "${t}" timed out waiting for index data (${ht/1e3}s).\n\nThe existing index may be temporarily locked. Please retry shortly — indexing will complete automatically.`}]}),ht));return Promise.race([r(...n),a])}}for(let[e,t]of Object.entries(O)){let n=t.handler,r=v(e);t.handler=async(...t)=>{try{return await ee(()=>n(...t),r,e)}catch(t){if(t instanceof _)return{content:[{type:`text`,text:`⏳ Tool "${e}" timed out after ${r/1e3}s. This may indicate a long-running operation. Please retry or break the task into smaller steps.`}]};throw t}}}let k=Object.keys(O).length;k<$.length&&Y.warn(`ALL_TOOL_NAMES count mismatch`,{expectedToolCount:$.length,registeredToolCount:k}),Y.info(`MCP server configured`,{toolCount:$.length,resourceCount:4});let A=new c;A.onPressure((e,n)=>{e===`warning`&&t(),e===`critical`&&(Y.warn(`Memory pressure critical — consider restarting`,{rssMB:Math.round(n/1024/1024)}),t())}),A.start();let j=new o;b=j,j.onIdle(async()=>{if(M.isRunning||e.indexer.isIndexing){Y.info(`Idle cleanup deferred — background tasks still running`),j.touch();return}Y.info(`Idle cleanup: closing store and graph connections`);try{await Promise.all([e.store.close().catch(()=>{}),e.graphStore.close().catch(()=>{})])}catch{}}),j.touch();for(let e of Object.values(O)){let t=e.handler;e.handler=async(...e)=>{if(j.touch(),x){let t=te(e[0]);t.length>0&&x.prioritize(...t)}return t(...e)}}y=e,E?.(e)})(),j=async()=>{let e=await D;b?.setBusy(!0);try{let t=n.sources.map(e=>e.path).join(`, `);Y.info(`Running initial index`,{sourcePaths:t});let r=await e.indexer.index(n,e=>{e.phase===`crawling`||e.phase===`done`||(e.phase===`chunking`&&e.currentFile&&Y.debug(`Indexing file`,{current:e.filesProcessed+1,total:e.filesTotal,file:e.currentFile}),e.phase===`cleanup`&&Y.debug(`Index cleanup`,{staleEntries:e.filesTotal-e.filesProcessed}))});m=!0,Y.info(`Initial index 
complete`,{filesProcessed:r.filesProcessed,filesSkipped:r.filesSkipped,chunksCreated:r.chunksCreated,durationMs:r.durationMs});try{await e.store.createFtsIndex()}catch(e){Y.warn(`FTS index creation failed`,J(e))}try{let t=await e.curated.reindexAll();Y.info(`Curated re-index complete`,{indexed:t.indexed})}catch(e){Y.error(`Curated re-index failed`,J(e))}}catch(e){Y.error(`Initial index failed; will retry on aikit_reindex`,J(e))}finally{b?.setBusy(!1)}},M=new e,ie=()=>M.schedule({name:`initial-index`,fn:j}),N=process.ppid,P=setInterval(()=>{try{process.kill(N,0)}catch{Y.info(`Parent process died; shutting down`,{parentPid:N}),clearInterval(P),D.then(async e=>{await Promise.all([e.embedder.shutdown().catch(()=>{}),e.graphStore.close().catch(()=>{}),e.store.close().catch(()=>{})])}).catch(()=>{}).finally(()=>process.exit(0))}},5e3);return P.unref(),{server:a,startInit:k,ready:A,runInitialIndex:ie,get kb(){return y},scheduler:M,setSmartScheduler(e){x=e}}}export{$ as ALL_TOOL_NAMES,_t as createLazyServer,ft as createMcpServer,pt as createServer,X as initializeKnowledgeBase,Z as registerMcpTools};
|
|
@@ -17,7 +17,7 @@ declare function formatAsHtml(title: string | undefined, content: unknown, actio
|
|
|
17
17
|
type: 'text';
|
|
18
18
|
text: string;
|
|
19
19
|
} | UIResource>;
|
|
20
|
-
structuredContent
|
|
20
|
+
structuredContent?: {
|
|
21
21
|
title?: string;
|
|
22
22
|
content?: unknown;
|
|
23
23
|
actions: Array<Record<string, unknown>>;
|
|
@@ -16,4 +16,4 @@ import{getToolMeta as e}from"../../tool-metadata.js";import{buildBrowserHtml as
|
|
|
16
16
|
- "browser": Serves a themed dashboard on a local URL. Use ONLY when you need user interaction back (confirmations, selections, form input). The tool blocks until user clicks an action button, then returns their selection.
|
|
17
17
|
FORMAT RULE: If no user interaction is needed → use "html". If you need user input back → use "browser".
|
|
18
18
|
BROWSER WORKFLOW: After calling present with format "browser", you MUST extract the URL from the response and call openBrowserPage({ url }) to open it in VS Code Simple Browser. A system browser fallback also opens automatically, but always call openBrowserPage yourself.`,annotations:r.annotations,inputSchema:_,_meta:{ui:{resourceUri:h}}},async({format:e,title:t,content:r,actions:i,template:a})=>(e??`html`)===`browser`||Array.isArray(i)&&i.length>0?await w(t,r,i,n,a):T(t,r,i,a))}async function w(e,r,i,a,o){let s=n(e,r),c=t(e,r,i,o),l=p({uri:`ui://aikit/present-browser.html`,content:{type:`rawHtml`,htmlString:c},encoding:`text`,adapters:{mcpApps:{enabled:!0}}}),u,m,h=Array.isArray(i)?i:[],g=``,_;try{b&&=(b.close(),null),h.length>0&&(u=new Promise(e=>{m=e}));let e=!1;g=await new Promise((t,n)=>{let r=f((t,n)=>{if(e||(e=!0,_&&clearTimeout(_)),t.method===`POST`&&t.url===`/callback`){let e=``;t.on(`data`,t=>{e+=t.toString()}),t.on(`end`,()=>{n.writeHead(200,{"Content-Type":`application/json`,"Access-Control-Allow-Origin":`*`}),n.end(`{"ok":true}`);try{let t=JSON.parse(e);m?.(t)}catch{}});return}if(t.method===`OPTIONS`){n.writeHead(204,{"Access-Control-Allow-Origin":`*`,"Access-Control-Allow-Methods":`POST`,"Access-Control-Allow-Headers":`Content-Type`}),n.end();return}n.writeHead(200,{"Content-Type":`text/html; charset=utf-8`}),n.end(c)});r.listen(0,`127.0.0.1`,()=>{let i=r.address();if(typeof i==`object`&&i){b=r;let n=`http://127.0.0.1:${i.port}`;_=setTimeout(()=>{if(!e)try{d(process.platform===`win32`?`start "" "${n}"`:process.platform===`darwin`?`open "${n}"`:`xdg-open "${n}"`)}catch{}},8e3),_.unref(),t(n)}else n(Error(`Failed to start present server`))}),setTimeout(()=>{r.close(),b===r&&(b=null)},300*1e3).unref()})}catch{}let v=g?`${s}\n\n---\n🌐 **Dashboard opened in browser:** ${g}\n\n**IMPORTANT:** You MUST now open this URL using the \`openBrowserPage\` tool (Simple Browser). If that tool is unavailable, open the URL in the system browser as a fallback. 
If neither works, ask the user to open the URL manually.`:s;if(h.length>0&&a?.available&&u)try{let e=await Promise.race([u,new Promise((e,t)=>setTimeout(()=>t(Error(`timeout`)),300*1e3))]);return{content:[{type:`text`,text:`${v}\n\n✅ **Selected:** ${e.actionId} = \`${e.value}\``},l]}}catch{return{content:[{type:`text`,text:`${v}\n\n⚠️ *No selection received (timed out).*`},l]}}return{content:[{type:`text`,text:v},l]}}function T(e,r,i,a){let o=Array.isArray(i)?i:[],s=n(e,r);if(o.length>0){let e=[``];for(let t=0;t<o.length;t++){let n=o[t],r=typeof n.label==`string`?n.label:`Action ${t+1}`;if(n.type===`select`&&Array.isArray(n.options)){let i=n.options.map(e=>typeof e==`string`?e:e.label).join(`, `);e.push(`${t+1}. **${r}** — choose: ${i}`)}else e.push(`${t+1}. **${r}**`)}s+=`\n${e.join(`
|
|
19
|
-
`)}`}let c=p({uri:`ui://aikit/present-static.html`,content:{type:`rawHtml`,htmlString:t(e,r,i,a)},encoding:`text`,adapters:{mcpApps:{enabled:!0}}});return{content:[{type:`text`,text:s},c]
|
|
19
|
+
`)}`}let c=p({uri:`ui://aikit/present-static.html`,content:{type:`rawHtml`,htmlString:t(e,r,i,a)},encoding:`text`,adapters:{mcpApps:{enabled:!0}}});return{content:[{type:`text`,text:s},c]}}export{w as formatAsBrowser,T as formatAsHtml,S as getPresentHtml,C as registerPresentTool,x as resolvePresentHtml};
|
|
@@ -1 +1 @@
|
|
|
1
|
-
import{request as e}from"node:http";const t=`http://localhost:11434`;async function n(e=t){let n=await i(`${e}/api/tags`),r;try{r=JSON.parse(n)}catch{throw Error(`Ollama returned invalid JSON`)}return(r.models??[]).map(e=>e.name)}async function r(e){let r=e.baseUrl??t,i=e.timeout??12e4,o;try{o=await n(r)}catch{return{model:e.model??`unknown`,response:``,durationMs:0,error:`Ollama is not running at ${r}. Start it with: ollama serve`}}if(o.length===0)return{model:`none`,response:``,durationMs:0,error:`No Ollama models available. Pull one with: ollama pull
|
|
1
|
+
import{request as e}from"node:http";const t=`http://localhost:11434`;async function n(e=t){let n=await i(`${e}/api/tags`),r;try{r=JSON.parse(n)}catch{throw Error(`Ollama returned invalid JSON`)}return(r.models??[]).map(e=>e.name)}async function r(e){let r=e.baseUrl??t,i=e.timeout??12e4,o;try{o=await n(r)}catch{return{model:e.model??`unknown`,response:``,durationMs:0,error:`Ollama is not running at ${r}. Start it with: ollama serve`}}if(o.length===0)return{model:`none`,response:``,durationMs:0,error:`No Ollama models available. Pull one with: ollama pull gemma4:e2b`};let s=e.model??o[0];if(!o.includes(s))return{model:s,response:``,durationMs:0,error:`Model "${s}" not found. Available: ${o.join(`, `)}`};let c=``;e.context&&(c+=`<context>\n${e.context}\n</context>\n\n`),c+=e.prompt;let l=Date.now(),u=JSON.stringify({model:s,prompt:c,system:e.system,stream:!1,options:{temperature:e.temperature??.3}});try{let e=await a(`${r}/api/generate`,u,i),t=JSON.parse(e);return t.error?{model:s,response:``,durationMs:Date.now()-l,error:t.error}:{model:s,response:(t.response??``).trim(),durationMs:Date.now()-l,tokenCount:t.eval_count}}catch(e){return{model:s,response:``,durationMs:Date.now()-l,error:e instanceof Error?e.message:String(e)}}}function i(t){return new Promise((n,r)=>{let i=new URL(t),a=e({hostname:i.hostname,port:i.port,path:i.pathname,method:`GET`,timeout:5e3},e=>{let t=[];e.on(`data`,e=>t.push(e)),e.on(`end`,()=>n(Buffer.concat(t).toString(`utf-8`)))});a.on(`error`,r),a.on(`timeout`,()=>{a.destroy(),r(Error(`Connection timeout`))}),a.end()})}function a(t,n,r){return new Promise((i,a)=>{let o=new URL(t),s=e({hostname:o.hostname,port:o.port,path:o.pathname,method:`POST`,headers:{"Content-Type":`application/json`,"Content-Length":Buffer.byteLength(n)},timeout:r},e=>{let t=[];e.on(`data`,e=>t.push(e)),e.on(`end`,()=>i(Buffer.concat(t).toString(`utf-8`)))});s.on(`error`,a),s.on(`timeout`,()=>{s.destroy(),a(Error(`Ollama request timed out after 
${r}ms`))}),s.write(n),s.end()})}export{r as delegate,n as delegateListModels};
|
|
@@ -1,4 +1,4 @@
|
|
|
1
|
-
import{paragraphTruncate as e}from"./truncation.js";import t from"turndown";const n=`script,style,noscript,iframe,svg,nav,footer,header,aside,form,button,input,select,textarea,[role="navigation"],[role="banner"],[role="contentinfo"],[aria-hidden="true"],.sidebar,.nav,.menu,.footer,.header,.ad,.advertisement,.cookie-banner,.popup,.modal`.split(`,`),r=[`article`,`[role="main"]`,`main`,`.post-content`,`.article-content`,`.entry-content`,`.content`,`#content`,`.prose`,`.markdown-body`,`.documentation`,`.doc-content`];async function i(e){let{url:t,mode:i=`markdown`,selector:d,maxLength:f=15e3,includeMetadata:p=!0,includeLinks:m=!1,includeImages:h=!1,timeout:g=
|
|
1
|
+
import{paragraphTruncate as e}from"./truncation.js";import t from"turndown";const n=`script,style,noscript,iframe,svg,nav,footer,header,aside,form,button,input,select,textarea,[role="navigation"],[role="banner"],[role="contentinfo"],[aria-hidden="true"],.sidebar,.nav,.menu,.footer,.header,.ad,.advertisement,.cookie-banner,.popup,.modal`.split(`,`),r=[`article`,`[role="main"]`,`main`,`.post-content`,`.article-content`,`.entry-content`,`.content`,`#content`,`.prose`,`.markdown-body`,`.documentation`,`.doc-content`];async function i(e){let{url:t,mode:i=`markdown`,selector:d,maxLength:f=15e3,includeMetadata:p=!0,includeLinks:m=!1,includeImages:h=!1,timeout:g=6e4}=e,_=new URL(t);if(_.protocol!==`http:`&&_.protocol!==`https:`)throw Error(`Unsupported protocol: ${_.protocol} — only http/https allowed`);let v=new AbortController,y=setTimeout(()=>v.abort(),g),b,x,S=``,C=!1,w=``;try{if(b=await fetch(t,{signal:v.signal,headers:{"User-Agent":`aikit-web-fetch/1.0 (LLM context tool)`,Accept:`text/html,application/xhtml+xml,text/plain`},redirect:`follow`}),!b.ok)throw Error(`HTTP ${b.status}: ${b.statusText}`);x=await b.text(),S=b.headers.get(`content-type`)??``,C=/text\/html|application\/xhtml\+xml/i.test(S),w=b.url}finally{clearTimeout(y)}if(!C){let e=x,t=w.split(`/`).pop()??``;p&&(e=c(t,``,w)+e);let n=e.length,r=n>f;return r&&(e=u(e,f)),{content:e,title:t,description:``,url:w,originalLength:n,truncated:r}}let{parseHTML:T}=await import(`linkedom`),{document:E}=T(x),D=E.querySelector(`title`)?.textContent?.trim()??E.querySelector(`meta[property="og:title"]`)?.getAttribute(`content`)?.trim()??``,O=E.querySelector(`meta[name="description"]`)?.getAttribute(`content`)?.trim()??E.querySelector(`meta[property="og:description"]`)?.getAttribute(`content`)?.trim()??``;for(let e of n)for(let t of E.querySelectorAll(e))t.remove();if(!h)for(let e of E.querySelectorAll(`img`))e.remove();let k=null;if(d){if(k=E.querySelector(d),!k)throw Error(`Selector "${d}" matched no elements on the 
page`)}else{for(let e of r)if(k=E.querySelector(e),k)break;k||=E.querySelector(`body`)}if(!k)return{content:`(empty page — no content found)`,title:D,description:O,url:w,originalLength:0,truncated:!1};let A=k.innerHTML,j=[];if(m||i===`links`)for(let e of k.querySelectorAll(`a[href]`)){let t=e.textContent?.trim(),n=e.getAttribute(`href`);t&&n&&!n.startsWith(`#`)&&!n.startsWith(`javascript:`)&&j.push({text:t,href:l(n,w)})}let M;switch(i){case`raw`:M=A;break;case`links`:M=s(j);break;case`outline`:M=o(k);break;default:M=a(A,h);break}p&&i!==`links`&&(M=c(D,O,w)+M),m&&i!==`links`&&j.length>0&&(M+=`\n\n---\n\n## Links\n\n${s(j)}`);let N=M.length,P=N>f;return P&&(M=u(M,f)),{content:M,title:D,description:O,url:w,originalLength:N,truncated:P}}function a(e,n){let r=new t({headingStyle:`atx`,codeBlockStyle:`fenced`,bulletListMarker:`-`});r.addRule(`emptyLinks`,{filter:e=>e.nodeName===`A`&&!e.textContent?.trim(),replacement:()=>``}),n||r.addRule(`removeImages`,{filter:`img`,replacement:()=>``});let i=r.turndown(e);return i=i.replace(/\n{3,}/g,`
|
|
2
2
|
|
|
3
3
|
`).trim(),i}function o(e){let t=e.querySelectorAll(`h1, h2, h3, h4, h5, h6`),n=[];for(let e of t){let t=Number.parseInt(e.tagName.slice(1),10),r=` `.repeat(t-1),i=e.textContent?.trim()??``;i&&n.push(`${r}- ${i}`)}return n.length>0?n.join(`
|
|
4
4
|
`):`(no headings found)`}function s(e){if(e.length===0)return`(no links found)`;let t=new Set,n=[];for(let r of e)t.has(r.href)||(t.add(r.href),n.push(r));return n.map(e=>`- [${e.text}](${e.href})`).join(`
|
|
@@ -19,29 +19,11 @@ export const CLAUDE_FLOWS_SECTION = [
|
|
|
19
19
|
].join('\n');
|
|
20
20
|
|
|
21
21
|
export const CLAUDE_ORCHESTRATOR_FLOW_ROUTING_SECTION = [
|
|
22
|
-
'##
|
|
23
|
-
'',
|
|
24
|
-
'At session start, check for an active flow:',
|
|
25
|
-
'1. Call `flow_status` to check if a flow is active',
|
|
26
|
-
"2. If active and status is 'active':",
|
|
27
|
-
' - Note the current step name and skill path',
|
|
28
|
-
" - Load the current step's skill file",
|
|
29
|
-
' - Follow its instructions for this step',
|
|
30
|
-
" - When step is complete, call `flow_step({ action: 'next' })`",
|
|
31
|
-
'3. If no active flow:',
|
|
32
|
-
' - Check `flow_list` for available flows',
|
|
33
|
-
' - Suggest starting a flow based on the task type',
|
|
34
|
-
" - Use `flow_start({ flow: '<name>' })` to begin",
|
|
22
|
+
'## Flows',
|
|
35
23
|
'',
|
|
36
|
-
'
|
|
37
|
-
'
|
|
38
|
-
'
|
|
39
|
-
'| `flow_list` | List installed flows and active flow |',
|
|
40
|
-
'| `flow_info` | Get detailed flow info including steps |',
|
|
41
|
-
'| `flow_start` | Start a named flow |',
|
|
42
|
-
'| `flow_step` | Advance: next, skip, or redo current step |',
|
|
43
|
-
'| `flow_status` | Check current execution state |',
|
|
44
|
-
'| `flow_reset` | Clear flow state to start over |',
|
|
24
|
+
"This project uses aikit's pluggable flow system. Check flow status with the `flow_status` MCP tool.",
|
|
25
|
+
"If a flow is active, follow the current step's skill instructions. Advance with `flow_step({ action: 'next' })`.",
|
|
26
|
+
'Use `flow_list` to see available flows and `flow_start` to begin one.',
|
|
45
27
|
].join('\n');
|
|
46
28
|
|
|
47
29
|
export function generateClaudeCode() {
|
|
@@ -24,15 +24,6 @@ ${agentTable}
|
|
|
24
24
|
|
|
25
25
|
**Parallelism**: Read-only agents run in parallel freely. File-modifying agents run in parallel ONLY on completely different files. Max 4 concurrent file-modifying agents.
|
|
26
26
|
|
|
27
|
-
## Phase 0: Design Gate
|
|
28
|
-
|
|
29
|
-
| Situation | Route |
|
|
30
|
-
|-----------|-------|
|
|
31
|
-
| New feature/component/behavior | **Brainstorming skill** → user dialogue → design doc |
|
|
32
|
-
| Non-trivial technical decision | **Decision protocol** → 4 Researchers parallel → synthesize → ADR |
|
|
33
|
-
| Both | Brainstorming first → escalate unresolved decisions to protocol |
|
|
34
|
-
| Bug fix / refactor / explicit skip | **→ Phase 1** |
|
|
35
|
-
|
|
36
27
|
## FORGE Protocol
|
|
37
28
|
|
|
38
29
|
1. \`forge_classify({ task, files })\` → determine tier (Floor/Standard/Critical)
|
|
@@ -40,32 +31,66 @@ ${agentTable}
|
|
|
40
31
|
3. After review: \`evidence_map({ action: "gate", task_id })\` → YIELD/HOLD/HARD_BLOCK
|
|
41
32
|
4. Auto-upgrade tier if unknowns reveal contract/security issues
|
|
42
33
|
|
|
43
|
-
## Flow-Driven Development
|
|
44
|
-
|
|
45
|
-
Orchestrator uses the flow system for structured development. Flows define the step sequence — Orchestrator adds multi-agent orchestration, quality gates, and review protocols on top.
|
|
46
|
-
|
|
47
|
-
### Flow Selection
|
|
34
|
+
## Flow-Driven Development (PRIMARY BEHAVIOR)
|
|
48
35
|
|
|
49
|
-
|
|
50
|
-
|-----------|------|-------|
|
|
51
|
-
| Bug fix, small feature, refactoring | \`aikit:basic\` | assess → implement → verify |
|
|
52
|
-
| New feature, major change, multi-file | \`aikit:advanced\` | spec → plan → task → execute → verify |
|
|
53
|
-
| Custom/specialized work | Check \`flow_list\` | Follow flow-specific steps |
|
|
36
|
+
**After bootstrap, the Orchestrator MUST select and start a flow.** Flows define the step sequence — Orchestrator adds multi-agent orchestration, quality gates, and review protocols on top. Design decisions, brainstorming, and FORGE classification are handled by the **design** step within each flow — NOT by the Orchestrator directly.
|
|
54
37
|
|
|
55
|
-
|
|
38
|
+
### Flow Activation (MANDATORY after bootstrap)
|
|
56
39
|
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
1. \`flow_status\` — check for active flow
|
|
60
|
-
2. If active:
|
|
40
|
+
1. \`flow_status\` — check for an active flow from a previous session
|
|
41
|
+
2. **If active flow exists:**
|
|
61
42
|
- Note current step name and skill path
|
|
62
43
|
- Read the current step skill with \`flow_read_skill\`
|
|
63
44
|
- Follow its instructions
|
|
64
45
|
- When complete: \`flow_step({ action: 'next' })\`
|
|
65
|
-
3. If
|
|
66
|
-
- \`flow_list\` —
|
|
67
|
-
-
|
|
68
|
-
|
|
46
|
+
3. **If NO active flow:**
|
|
47
|
+
- \`flow_list\` — retrieve ALL available flows (builtin AND custom)
|
|
48
|
+
- **Auto-select** the flow when the task clearly matches:
|
|
49
|
+
|
|
50
|
+
| Task signal | Auto-activate flow |
|
|
51
|
+
|-------------|--------------------|
|
|
52
|
+
| Bug fix, typo, hotfix, "fix ...", error reproduction | \`aikit:basic\` |
|
|
53
|
+
| Small feature (≤3 files), refactoring, cleanup, dependency update | \`aikit:basic\` |
|
|
54
|
+
| New feature, API design, architecture change, multi-component work | \`aikit:advanced\` |
|
|
55
|
+
| Task matches a custom flow's description/tags exactly | That custom flow |
|
|
56
|
+
|
|
57
|
+
- **Auto-start:** When exactly one flow matches, start it immediately — \`flow_start({ flow: '<matched>' })\` — and inform the user which flow was activated and why.
|
|
58
|
+
- **Ask only when ambiguous:** If the task could fit multiple flows, or no flow clearly matches, present the options and let the user choose.
|
|
59
|
+
- Do NOT present a menu for obvious cases. Speed matters.
|
|
60
|
+
4. **Every task goes through a flow.** There is no flowless path.
|
|
61
|
+
|
|
62
|
+
### Flow Execution Loop
|
|
63
|
+
|
|
64
|
+
For EACH step in the active flow:
|
|
65
|
+
|
|
66
|
+
1. \`flow_read_skill\` — read the current step's SKILL.md
|
|
67
|
+
2. Follow the skill's instructions — delegate work to the appropriate agents
|
|
68
|
+
3. Apply **Orchestrator Protocols** (PRE-DISPATCH GATE, FORGE, review cycle) during execution
|
|
69
|
+
4. When the step is complete and results are approved:
|
|
70
|
+
- \`flow_step({ action: 'next' })\` to advance
|
|
71
|
+
5. Repeat until the flow is complete
|
|
72
|
+
|
|
73
|
+
**Custom flows work identically** — \`flow_list\` returns them alongside builtins. The execution loop is the same for ALL flows.
|
|
74
|
+
|
|
75
|
+
### Flow Completion & Cleanup
|
|
76
|
+
|
|
77
|
+
Flows MUST be driven to completion. A flow left active forever blocks future work.
|
|
78
|
+
|
|
79
|
+
**Normal completion:**
|
|
80
|
+
- When the last step's \`flow_step({ action: 'next' })\` is called, the flow finishes automatically
|
|
81
|
+
- After completion: run post-implementation protocol (\`check\` → \`test_run\` → \`blast_radius\` → \`reindex\` → \`produce_knowledge\` → \`remember\`)
|
|
82
|
+
- Inform the user the flow is complete with a summary of artifacts produced
|
|
83
|
+
|
|
84
|
+
**Stale flow detection** (check at session start when \`flow_status\` returns an active flow):
|
|
85
|
+
- If the active flow's current step has no matching work context in the conversation → **ask the user**: "A flow \`<name>\` is active at step \`<step>\`. Continue, or reset to start fresh?"
|
|
86
|
+
- If the user says reset → \`flow_reset()\` then activate a new flow for the current task
|
|
87
|
+
- If the user says continue → resume from the current step
|
|
88
|
+
|
|
89
|
+
**Abandoned step recovery:**
|
|
90
|
+
- If a step has been attempted ≥ 2 times with \`BLOCKED\` status → escalate to user with diagnostics, offer to \`flow_step({ action: 'skip' })\` or \`flow_reset()\`
|
|
91
|
+
- Never silently retry a blocked step indefinitely
|
|
92
|
+
|
|
93
|
+
**One active flow at a time.** To switch tasks, the current flow must be completed or reset first.
|
|
69
94
|
|
|
70
95
|
### Orchestrator Protocols (apply during ALL flow steps)
|
|
71
96
|
|
|
@@ -160,7 +185,7 @@ When subagents complete, their visual outputs (from \`present\`) are NOT visible
|
|
|
160
185
|
3. **Maximize parallelism** — independent tasks MUST run as parallel \`runSubagent\` calls in the SAME function block. Sequential dispatch of parallelizable tasks is a protocol violation.
|
|
161
186
|
4. **Fresh context per subagent** — paste relevant code, don't reference conversation history
|
|
162
187
|
5. **Search AI Kit before planning** — check past decisions with \`search()\`
|
|
163
|
-
6. **
|
|
188
|
+
6. **Always use flows** — every task goes through a flow; design decisions happen in the flow's design step
|
|
164
189
|
7. **Never proceed without user approval** at 🛑 stops
|
|
165
190
|
8. **Max 2 retries** then escalate to user
|
|
166
191
|
|
|
@@ -197,35 +222,18 @@ Before every tool call, verify:
|
|
|
197
222
|
|-------|--------------|
|
|
198
223
|
| \`multi-agents-development\` | **Before any delegation** — task decomposition, dispatch templates, review pipeline, recovery patterns |
|
|
199
224
|
| \`present\` | When presenting plans, findings, or visual content to the user — dashboards, tables, charts, timelines |
|
|
200
|
-
| \`brainstorming\` |
|
|
225
|
+
| \`brainstorming\` | When a flow's design step requires creative/design work |
|
|
201
226
|
| \`session-handoff\` | Context filling up, session ending, or major milestone |
|
|
202
227
|
| \`lesson-learned\` | After completing work — extract engineering principles |
|
|
203
228
|
|
|
204
229
|
**When dispatching subagents**, include relevant skill names in the prompt so subagents know which skills to load (e.g., "Load the \`react\` and \`typescript\` skills for this task").
|
|
205
230
|
|
|
206
|
-
##
|
|
231
|
+
## Flows
|
|
207
232
|
|
|
208
|
-
|
|
209
|
-
|
|
210
|
-
|
|
211
|
-
|
|
212
|
-
- Load the current step's skill file
|
|
213
|
-
- Follow its instructions for this step
|
|
214
|
-
- When step is complete, call \`flow_step({ action: 'next' })\`
|
|
215
|
-
3. If no active flow:
|
|
216
|
-
- Check \`flow_list\` for available flows
|
|
217
|
-
- Suggest starting a flow based on the task type
|
|
218
|
-
- Use \`flow_start({ flow: '<name>' })\` to begin
|
|
219
|
-
|
|
220
|
-
### Flow MCP Tools
|
|
221
|
-
| Tool | Purpose |
|
|
222
|
-
|------|---------|
|
|
223
|
-
| \`flow_list\` | List installed flows and active flow |
|
|
224
|
-
| \`flow_info\` | Get detailed flow info including steps |
|
|
225
|
-
| \`flow_start\` | Start a named flow |
|
|
226
|
-
| \`flow_step\` | Advance: next, skip, or redo current step |
|
|
227
|
-
| \`flow_status\` | Check current execution state |
|
|
228
|
-
| \`flow_reset\` | Clear flow state to start over |`,
|
|
233
|
+
This project uses aikit's pluggable flow system. Check flow status with the \`flow_status\` MCP tool.
|
|
234
|
+
If a flow is active, follow the current step's skill instructions. Advance with \`flow_step({ action: 'next' })\`.
|
|
235
|
+
Use \`flow_list\` to see available flows and \`flow_start\` to begin one.
|
|
236
|
+
`,
|
|
229
237
|
|
|
230
238
|
Planner: `**Read \`AGENTS.md\`** in the workspace root for project conventions and AI Kit protocol.
|
|
231
239
|
|
|
@@ -257,16 +265,20 @@ At session start, check for an active flow:
|
|
|
257
265
|
5. **Dependency Graph** — For each phase, list dependencies. Group into parallel batches
|
|
258
266
|
6. **Present** — Show plan with open questions, complexity estimate, parallel batch layout
|
|
259
267
|
|
|
260
|
-
## Flow Integration
|
|
268
|
+
## Flow Integration (PRIMARY MODE)
|
|
269
|
+
|
|
270
|
+
The Planner is typically activated by the Orchestrator as part of a flow step (e.g., \`aikit:advanced\` plan step, \`aikit:basic\` assess step, or a custom flow's planning step).
|
|
261
271
|
|
|
262
|
-
When activated as part of a flow
|
|
263
|
-
1.
|
|
264
|
-
2.
|
|
265
|
-
3. Follow skill instructions
|
|
266
|
-
4.
|
|
267
|
-
5.
|
|
272
|
+
**When activated as part of a flow:**
|
|
273
|
+
1. \`flow_status\` — check current step context and which flow is active
|
|
274
|
+
2. \`flow_read_skill\` — read the current step's SKILL.md for specific instructions
|
|
275
|
+
3. Follow the skill's instructions as the primary guide, applying Planner methodology on top
|
|
276
|
+
4. Read the flow's README.md for overall context on how the flow works
|
|
277
|
+
5. Produce required artifacts (as specified by the flow step's \`produces\` field)
|
|
278
|
+
6. When complete, report status to Orchestrator: \`DONE\` | \`DONE_WITH_CONCERNS\` | \`NEEDS_CONTEXT\` | \`BLOCKED\`
|
|
279
|
+
7. Do NOT call \`flow_step\` — the Orchestrator controls flow advancement
|
|
268
280
|
|
|
269
|
-
When no flow is active, operate autonomously following normal Planner methodology.
|
|
281
|
+
**When no flow is active** (standalone mode), operate autonomously following normal Planner methodology.
|
|
270
282
|
|
|
271
283
|
## Subagent Output Relay
|
|
272
284
|
|
|
@@ -0,0 +1,70 @@
|
|
|
1
|
+
# aikit:advanced — Full Development Flow
|
|
2
|
+
|
|
3
|
+
Full development flow for **new features, API design, and architecture changes**.
|
|
4
|
+
|
|
5
|
+
## Steps
|
|
6
|
+
|
|
7
|
+
| # | Step | Skill | Produces | Requires | Agents |
|
|
8
|
+
|---|------|-------|----------|----------|--------|
|
|
9
|
+
| 1 | **Design Gate** | `skills/design/SKILL.md` | `design-decisions.md` | — | Researcher-Alpha/Beta/Gamma/Delta |
|
|
10
|
+
| 2 | **Specification** | `skills/spec/SKILL.md` | `spec.md` | `design-decisions.md` | Researcher-Alpha |
|
|
11
|
+
| 3 | **Planning** | `skills/plan/SKILL.md` | `plan.md` | `spec.md` | Planner, Explorer |
|
|
12
|
+
| 4 | **Task Breakdown** | `skills/task/SKILL.md` | `tasks.md` | `plan.md` | Planner, Architect-Reviewer-Alpha |
|
|
13
|
+
| 5 | **Execution** | `skills/execute/SKILL.md` | `progress.md` | `tasks.md` | Orchestrator, Implementer, Frontend, Refactor |
|
|
14
|
+
| 6 | **Verification** | `skills/verify/SKILL.md` | `verify-report.md` | `progress.md` | Code-Reviewer-Alpha/Beta, Architect-Reviewer-Alpha/Beta, Security |
|
|
15
|
+
|
|
16
|
+
## How It Works
|
|
17
|
+
|
|
18
|
+
Each step has a **SKILL.md** file that contains the detailed instructions for the agent(s) executing that step. The Orchestrator reads the SKILL.md via `flow_read_skill` and delegates work accordingly.
|
|
19
|
+
|
|
20
|
+
### Step 1: Design Gate
|
|
21
|
+
- Full brainstorming session for new features and architectural changes
|
|
22
|
+
- FORGE classification (`forge_classify`) + grounding (`forge_ground`) for complex tasks
|
|
23
|
+
- Parallel 4-researcher decision protocol for non-trivial technical decisions
|
|
24
|
+
- ADR generation for critical-tier tasks
|
|
25
|
+
- **Mandatory user stop** before proceeding — design decisions must be approved
|
|
26
|
+
- Read `skills/design/SKILL.md` for the full protocol
|
|
27
|
+
|
|
28
|
+
### Step 2: Specification
|
|
29
|
+
- Elicit requirements from the user, clarify scope
|
|
30
|
+
- Define acceptance criteria and constraints
|
|
31
|
+
- Build on design decisions from the previous step
|
|
32
|
+
|
|
33
|
+
### Step 3: Planning
|
|
34
|
+
- Deep codebase analysis using `search`, `scope_map`, `trace`, `analyze_*`
|
|
35
|
+
- Design architecture based on spec and design decisions
|
|
36
|
+
- Create comprehensive implementation plan with file-level changes
|
|
37
|
+
|
|
38
|
+
### Step 4: Task Breakdown
|
|
39
|
+
- Break the plan into ordered, atomic implementation tasks
|
|
40
|
+
- Define dependencies between tasks
|
|
41
|
+
- Identify parallel batches for multi-agent execution
|
|
42
|
+
- Architecture review of the task structure
|
|
43
|
+
|
|
44
|
+
### Step 5: Execution
|
|
45
|
+
- Orchestrator dispatches agents in parallel batches per the task breakdown
|
|
46
|
+
- Each agent gets a scoped task (1-3 files) with clear acceptance criteria
|
|
47
|
+
- TDD: write tests first, then implement
|
|
48
|
+
- Per-batch review cycle: Code Review (dual) → Arch Review → Security → Evidence Gate
|
|
49
|
+
|
|
50
|
+
### Step 6: Verification
|
|
51
|
+
- Dual code review (Code-Reviewer-Alpha + Beta)
|
|
52
|
+
- Architecture review (Architect-Reviewer-Alpha + Beta)
|
|
53
|
+
- Security review
|
|
54
|
+
- Run `check({})` + `test_run({})` + `blast_radius({})`
|
|
55
|
+
- `evidence_map({ action: "gate" })` for final quality gate
|
|
56
|
+
|
|
57
|
+
## Using Skills Inside Steps
|
|
58
|
+
|
|
59
|
+
When the Orchestrator activates a step:
|
|
60
|
+
|
|
61
|
+
1. **Read the skill first** — `flow_read_skill` returns the SKILL.md for the current step
|
|
62
|
+
2. **Follow skill instructions** — the SKILL.md is the primary guide for what to do
|
|
63
|
+
3. **Delegate to listed agents** — each step lists which agents are appropriate
|
|
64
|
+
4. **Produce the required artifact** — the step's `produces` field specifies what file to create in the artifacts directory
|
|
65
|
+
5. **Check dependencies** — the step's `requires` field lists artifacts from previous steps that must exist
|
|
66
|
+
6. **Report status** — agents report `DONE` | `DONE_WITH_CONCERNS` | `NEEDS_CONTEXT` | `BLOCKED` to the Orchestrator
|
|
67
|
+
|
|
68
|
+
## Artifacts
|
|
69
|
+
|
|
70
|
+
All artifacts are stored in the `.spec/` directory relative to the project root.
|
|
@@ -3,12 +3,21 @@
|
|
|
3
3
|
"version": "0.1.0",
|
|
4
4
|
"description": "Full development flow for new features, API design, and architecture changes",
|
|
5
5
|
"steps": [
|
|
6
|
+
{
|
|
7
|
+
"id": "design",
|
|
8
|
+
"name": "Design Gate",
|
|
9
|
+
"skill": "skills/design/SKILL.md",
|
|
10
|
+
"produces": ["design-decisions.md"],
|
|
11
|
+
"requires": [],
|
|
12
|
+
"agents": ["Researcher-Alpha", "Researcher-Beta", "Researcher-Gamma", "Researcher-Delta"],
|
|
13
|
+
"description": "Full brainstorming, FORGE classification, decision protocol with parallel research. ADR for critical-tier tasks."
|
|
14
|
+
},
|
|
6
15
|
{
|
|
7
16
|
"id": "spec",
|
|
8
17
|
"name": "Specification",
|
|
9
18
|
"skill": "skills/spec/SKILL.md",
|
|
10
19
|
"produces": ["spec.md"],
|
|
11
|
-
"requires": [],
|
|
20
|
+
"requires": ["design-decisions.md"],
|
|
12
21
|
"agents": ["Researcher-Alpha"],
|
|
13
22
|
"description": "Elicit requirements, clarify scope, define acceptance criteria"
|
|
14
23
|
},
|
|
@@ -0,0 +1,134 @@
|
|
|
1
|
+
# Design Gate — Advanced Flow
|
|
2
|
+
|
|
3
|
+
Full design gate for new features, API design, and architecture changes. Runs brainstorming, decision protocol, and FORGE classification before specification begins.
|
|
4
|
+
|
|
5
|
+
## When This Step Runs
|
|
6
|
+
|
|
7
|
+
This is the **first step** of the `aikit:advanced` flow. It runs before specification.
|
|
8
|
+
|
|
9
|
+
## Instructions
|
|
10
|
+
|
|
11
|
+
### 1. Task Classification
|
|
12
|
+
|
|
13
|
+
Classify the task:
|
|
14
|
+
|
|
15
|
+
| Category | Indicators | Action |
|
|
16
|
+
|----------|-----------|--------|
|
|
17
|
+
| **Bug fix** | Error, regression, "fix" — wrong flow, should use `aikit:basic` | → Note mismatch, still run Quick Design |
|
|
18
|
+
| **New feature** | New behavior, new API, new component | → Run **Full Design** below |
|
|
19
|
+
| **Architecture change** | Restructure, migration, new pattern, cross-cutting | → Run **Full Design** with architecture focus |
|
|
20
|
+
|
|
21
|
+
### 2. FORGE Classification
|
|
22
|
+
|
|
23
|
+
Run `forge_classify({ task: "<task description>", files: [<relevant files>] })` to determine the complexity tier.
|
|
24
|
+
|
|
25
|
+
| Tier | Meaning | Design Depth |
|
|
26
|
+
|------|---------|-------------|
|
|
27
|
+
| **Floor** | Low risk, well-understood | Quick brainstorm, 1-2 decisions |
|
|
28
|
+
| **Standard** | Moderate complexity | Full brainstorm, parallel research, decision protocol |
|
|
29
|
+
| **Critical** | High risk, contract/security implications | Deep brainstorm, 4-researcher parallel review, ADR required |
|
|
30
|
+
|
|
31
|
+
### 3. Brainstorming Session
|
|
32
|
+
|
|
33
|
+
Load the `brainstorming` skill and conduct a structured brainstorming session:
|
|
34
|
+
|
|
35
|
+
1. **Intent Discovery** — What is the user trying to achieve? What problem does this solve?
|
|
36
|
+
2. **Constraint Mapping** — Technical constraints, time constraints, compatibility requirements
|
|
37
|
+
3. **Approach Exploration** — Generate 2-4 possible approaches
|
|
38
|
+
4. **Trade-off Analysis** — Compare approaches on: complexity, maintainability, performance, risk
|
|
39
|
+
|
|
40
|
+
For **Critical** tier tasks, also explore:
|
|
41
|
+
- Security implications
|
|
42
|
+
- Backward compatibility
|
|
43
|
+
- Migration path
|
|
44
|
+
- Rollback strategy
|
|
45
|
+
|
|
46
|
+
### 4. Decision Protocol (Standard & Critical tiers)
|
|
47
|
+
|
|
48
|
+
When technical decisions need resolution:
|
|
49
|
+
|
|
50
|
+
1. **Identify decisions** — List each decision point with 2+ viable options
|
|
51
|
+
2. **Parallel research** — Delegate to Researcher agents (2 for Standard, 4 for Critical):
|
|
52
|
+
- Researcher-Alpha: Deep analysis of primary approach
|
|
53
|
+
- Researcher-Beta: Trade-offs and edge cases of alternatives
|
|
54
|
+
- Researcher-Gamma: Cross-domain patterns and precedents
|
|
55
|
+
- Researcher-Delta: Feasibility and performance implications
|
|
56
|
+
3. **Synthesize** — Combine researcher findings into a recommendation per decision
|
|
57
|
+
4. **ADR** (Critical tier) — Load `adr-skill` and create an Architecture Decision Record
|
|
58
|
+
|
|
59
|
+
### 5. FORGE Ground (Standard & Critical tiers)
|
|
60
|
+
|
|
61
|
+
Run `forge_ground({ task, root_path: "." })` to:
|
|
62
|
+
- Scope the affected files and modules
|
|
63
|
+
- Identify unknowns and risks
|
|
64
|
+
- Load existing constraints and conventions
|
|
65
|
+
|
|
66
|
+
**Auto-upgrade check**: If `forge_ground` reveals contract-type unknowns or security concerns not caught by initial `forge_classify`, recommend tier upgrade.
|
|
67
|
+
|
|
68
|
+
### 6. Produce `design-decisions.md`
|
|
69
|
+
|
|
70
|
+
```markdown
|
|
71
|
+
## Design Decisions
|
|
72
|
+
|
|
73
|
+
### FORGE Assessment
|
|
74
|
+
- **Tier**: {Floor | Standard | Critical}
|
|
75
|
+
- **Rationale**: {why this tier}
|
|
76
|
+
- **Auto-upgrade**: {yes/no — if yes, explain}
|
|
77
|
+
|
|
78
|
+
### Task Summary
|
|
79
|
+
- **Goal**: {what we're building}
|
|
80
|
+
- **Problem**: {what problem this solves}
|
|
81
|
+
- **Users affected**: {who is impacted}
|
|
82
|
+
|
|
83
|
+
### Approach
|
|
84
|
+
- **Chosen approach**: {description}
|
|
85
|
+
- **Alternatives considered**: {list with reasons for rejection}
|
|
86
|
+
|
|
87
|
+
### Key Decisions
|
|
88
|
+
| # | Decision | Choice | Rationale |
|
|
89
|
+
|---|----------|--------|-----------|
|
|
90
|
+
| 1 | {decision} | {choice} | {why} |
|
|
91
|
+
|
|
92
|
+
### Constraints
|
|
93
|
+
- {constraint 1}
|
|
94
|
+
- {constraint 2}
|
|
95
|
+
|
|
96
|
+
### Risks
|
|
97
|
+
| Risk | Likelihood | Impact | Mitigation |
|
|
98
|
+
|------|-----------|--------|------------|
|
|
99
|
+
| {risk} | {L/M/H} | {L/M/H} | {mitigation} |
|
|
100
|
+
|
|
101
|
+
### Open Questions
|
|
102
|
+
- {question 1}
|
|
103
|
+
- {question 2}
|
|
104
|
+
```
|
|
105
|
+
|
|
106
|
+
### 7. Present to User
|
|
107
|
+
|
|
108
|
+
Use `present({ format: "html" })` (or `format: "browser"` in CLI mode) to show:
|
|
109
|
+
- Design decisions summary
|
|
110
|
+
- FORGE tier and rationale
|
|
111
|
+
- Key trade-offs
|
|
112
|
+
- Open questions requiring user input
|
|
113
|
+
|
|
114
|
+
**🛑 MANDATORY STOP** — Wait for user approval of design decisions before proceeding.
|
|
115
|
+
|
|
116
|
+
### 8. Report to Orchestrator
|
|
117
|
+
|
|
118
|
+
After user approves:
|
|
119
|
+
- `DONE` — design decisions approved, ready for specification
|
|
120
|
+
- `DONE_WITH_CONCERNS` — approved with caveats (list them)
|
|
121
|
+
- `NEEDS_CONTEXT` — user raised questions that need more research
|
|
122
|
+
|
|
123
|
+
**Do NOT call `flow_step`** — let the Orchestrator advance the flow.
|
|
124
|
+
|
|
125
|
+
## Produces
|
|
126
|
+
|
|
127
|
+
- `design-decisions.md` — FORGE tier, approach, key decisions, constraints, risks
|
|
128
|
+
|
|
129
|
+
## Agents
|
|
130
|
+
|
|
131
|
+
- `Researcher-Alpha` — Deep analysis of primary approach
|
|
132
|
+
- `Researcher-Beta` — Trade-offs and edge cases
|
|
133
|
+
- `Researcher-Gamma` — Cross-domain patterns
|
|
134
|
+
- `Researcher-Delta` — Feasibility and performance
|
|
@@ -0,0 +1,51 @@
|
|
|
1
|
+
# aikit:basic — Quick Development Flow
|
|
2
|
+
|
|
3
|
+
Quick development flow for **bug fixes, small features, and refactoring**.
|
|
4
|
+
|
|
5
|
+
## Steps
|
|
6
|
+
|
|
7
|
+
| # | Step | Skill | Produces | Requires | Agents |
|
|
8
|
+
|---|------|-------|----------|----------|--------|
|
|
9
|
+
| 1 | **Design Gate** | `skills/design/SKILL.md` | `design-decisions.md` | — | Researcher-Alpha/Beta/Gamma/Delta |
|
|
10
|
+
| 2 | **Assessment** | `skills/assess/SKILL.md` | `assessment.md` | `design-decisions.md` | Explorer, Researcher-Alpha |
|
|
11
|
+
| 3 | **Implementation** | `skills/implement/SKILL.md` | `progress.md` | `assessment.md` | Implementer, Frontend |
|
|
12
|
+
| 4 | **Verification** | `skills/verify/SKILL.md` | `verify-report.md` | `progress.md` | Code-Reviewer-Alpha, Security |
|
|
13
|
+
|
|
14
|
+
## How It Works
|
|
15
|
+
|
|
16
|
+
Each step has a **SKILL.md** file that contains the detailed instructions for the agent(s) executing that step. The Orchestrator reads the SKILL.md via `flow_read_skill` and delegates work accordingly.
|
|
17
|
+
|
|
18
|
+
### Step 1: Design Gate
|
|
19
|
+
- **Auto-skips** for bug fixes and refactors (produces a minimal `design-decisions.md` noting it was skipped)
|
|
20
|
+
- For small features: runs quick brainstorming, FORGE classification, and optional decision protocol
|
|
21
|
+
- Read `skills/design/SKILL.md` for the full decision tree
|
|
22
|
+
|
|
23
|
+
### Step 2: Assessment
|
|
24
|
+
- Explore the codebase to understand scope and impact
|
|
25
|
+
- Use `search`, `scope_map`, `file_summary`, `compact` to gather context
|
|
26
|
+
- Identify the approach and produce `assessment.md`
|
|
27
|
+
|
|
28
|
+
### Step 3: Implementation
|
|
29
|
+
- Write code following the assessment plan
|
|
30
|
+
- The Orchestrator dispatches Implementer/Frontend agents with specific file scopes
|
|
31
|
+
- Follow TDD practices where applicable
|
|
32
|
+
|
|
33
|
+
### Step 4: Verification
|
|
34
|
+
- Code review, test execution, security check
|
|
35
|
+
- Run `check({})` + `test_run({})` + `blast_radius({})`
|
|
36
|
+
- Produce `verify-report.md` with findings
|
|
37
|
+
|
|
38
|
+
## Using Skills Inside Steps
|
|
39
|
+
|
|
40
|
+
When the Orchestrator activates a step:
|
|
41
|
+
|
|
42
|
+
1. **Read the skill first** — `flow_read_skill` returns the SKILL.md for the current step
|
|
43
|
+
2. **Follow skill instructions** — the SKILL.md is the primary guide for what to do
|
|
44
|
+
3. **Delegate to listed agents** — each step lists which agents are appropriate
|
|
45
|
+
4. **Produce the required artifact** — the step's `produces` field specifies what file to create in the artifacts directory
|
|
46
|
+
5. **Check dependencies** — the step's `requires` field lists artifacts from previous steps that must exist
|
|
47
|
+
6. **Report status** — agents report `DONE` | `DONE_WITH_CONCERNS` | `NEEDS_CONTEXT` | `BLOCKED` to the Orchestrator
|
|
48
|
+
|
|
49
|
+
## Artifacts
|
|
50
|
+
|
|
51
|
+
All artifacts are stored in the `.spec/` directory relative to the project root.
|
|
@@ -3,12 +3,21 @@
|
|
|
3
3
|
"version": "0.1.0",
|
|
4
4
|
"description": "Quick development flow for bug fixes, small features, and refactoring",
|
|
5
5
|
"steps": [
|
|
6
|
+
{
|
|
7
|
+
"id": "design",
|
|
8
|
+
"name": "Design Gate",
|
|
9
|
+
"skill": "skills/design/SKILL.md",
|
|
10
|
+
"produces": ["design-decisions.md"],
|
|
11
|
+
"requires": [],
|
|
12
|
+
"agents": ["Researcher-Alpha", "Researcher-Beta", "Researcher-Gamma", "Researcher-Delta"],
|
|
13
|
+
"description": "Evaluate task type, run brainstorming for features, FORGE classification. Auto-skips for bug fixes and refactors."
|
|
14
|
+
},
|
|
6
15
|
{
|
|
7
16
|
"id": "assess",
|
|
8
17
|
"name": "Assessment",
|
|
9
18
|
"skill": "skills/assess/SKILL.md",
|
|
10
19
|
"produces": ["assessment.md"],
|
|
11
|
-
"requires": [],
|
|
20
|
+
"requires": ["design-decisions.md"],
|
|
12
21
|
"agents": ["Explorer", "Researcher-Alpha"],
|
|
13
22
|
"description": "Understand scope, analyze codebase, identify approach"
|
|
14
23
|
},
|
|
@@ -0,0 +1,75 @@
|
|
|
1
|
+
# Design Gate — Basic Flow
|
|
2
|
+
|
|
3
|
+
Lightweight design gate for bug fixes, small features, and refactoring. Evaluates the task type and determines whether design work is needed before proceeding.
|
|
4
|
+
|
|
5
|
+
## When This Step Runs
|
|
6
|
+
|
|
7
|
+
This is the **first step** of the `aikit:basic` flow. It runs before assessment.
|
|
8
|
+
|
|
9
|
+
## Instructions
|
|
10
|
+
|
|
11
|
+
### 1. Task Classification
|
|
12
|
+
|
|
13
|
+
Classify the task into one of these categories:
|
|
14
|
+
|
|
15
|
+
| Category | Indicators | Action |
|
|
16
|
+
|----------|-----------|--------|
|
|
17
|
+
| **Bug fix** | Error reports, stack traces, regression, "fix", "broken" | → **Auto-skip** to next step |
|
|
18
|
+
| **Refactor** | Code cleanup, rename, restructure, no behavior change | → **Auto-skip** to next step |
|
|
19
|
+
| **Small feature** | New behavior, new endpoint, new component, UI change | → Run **Quick Design** below |
|
|
20
|
+
|
|
21
|
+
**If the task is a bug fix or refactor**, produce a minimal `design-decisions.md`:
|
|
22
|
+
```markdown
|
|
23
|
+
## Design Decisions
|
|
24
|
+
- **Task type**: Bug fix / Refactor
|
|
25
|
+
- **Design gate**: Auto-skipped — no design work needed
|
|
26
|
+
- **Proceed to**: Assessment
|
|
27
|
+
```
|
|
28
|
+
Then report `DONE` to the Orchestrator so the flow advances.
|
|
29
|
+
|
|
30
|
+
### 2. Quick Design (Small Features Only)
|
|
31
|
+
|
|
32
|
+
For small features that need minimal design:
|
|
33
|
+
|
|
34
|
+
1. **FORGE Classify** — Run `forge_classify({ task: "<task description>", files: [<relevant files>] })` to determine complexity tier
|
|
35
|
+
2. **Brainstorming** (if tier ≥ Standard) — Load the `brainstorming` skill and run a focused brainstorming session:
|
|
36
|
+
- What is the user trying to achieve?
|
|
37
|
+
- What are the constraints?
|
|
38
|
+
- What is the simplest approach?
|
|
39
|
+
3. **Decision Protocol** (if technical decisions exist) — Delegate to 2-4 Researcher agents in parallel:
|
|
40
|
+
- Each researcher evaluates a different approach
|
|
41
|
+
- Synthesize findings into a recommendation
|
|
42
|
+
4. **Produce `design-decisions.md`**:
|
|
43
|
+
|
|
44
|
+
```markdown
|
|
45
|
+
## Design Decisions
|
|
46
|
+
|
|
47
|
+
### FORGE Assessment
|
|
48
|
+
- **Tier**: {Floor | Standard | Critical}
|
|
49
|
+
- **Rationale**: {why this tier}
|
|
50
|
+
|
|
51
|
+
### Task Summary
|
|
52
|
+
- **Goal**: {what we're building}
|
|
53
|
+
- **Approach**: {chosen approach}
|
|
54
|
+
- **Key decisions**: {list}
|
|
55
|
+
|
|
56
|
+
### Constraints
|
|
57
|
+
- {constraint 1}
|
|
58
|
+
- {constraint 2}
|
|
59
|
+
```
|
|
60
|
+
|
|
61
|
+
### 3. Report to Orchestrator
|
|
62
|
+
|
|
63
|
+
When complete, report status:
|
|
64
|
+
- `DONE` — design decisions captured, ready for assessment
|
|
65
|
+
- `DONE_WITH_CONCERNS` — design captured but open questions remain (list them)
|
|
66
|
+
|
|
67
|
+
**Do NOT call `flow_step`** — let the Orchestrator advance the flow.
|
|
68
|
+
|
|
69
|
+
## Produces
|
|
70
|
+
|
|
71
|
+
- `design-decisions.md` — Task classification, FORGE tier, key design decisions
|
|
72
|
+
|
|
73
|
+
## Agents
|
|
74
|
+
|
|
75
|
+
- `Researcher-Alpha`, `Researcher-Beta`, `Researcher-Gamma`, `Researcher-Delta` — for parallel research during decision protocol
|
|
@@ -41,15 +41,6 @@ You orchestrate the full development lifecycle: **planning → implementation
|
|
|
41
41
|
|
|
42
42
|
**Parallelism**: Read-only agents run in parallel freely. File-modifying agents run in parallel ONLY on completely different files. Max 4 concurrent file-modifying agents.
|
|
43
43
|
|
|
44
|
-
## Phase 0: Design Gate
|
|
45
|
-
|
|
46
|
-
| Situation | Route |
|
|
47
|
-
|-----------|-------|
|
|
48
|
-
| New feature/component/behavior | **Brainstorming skill** → user dialogue → design doc |
|
|
49
|
-
| Non-trivial technical decision | **Decision protocol** → 4 Researchers parallel → synthesize → ADR |
|
|
50
|
-
| Both | Brainstorming first → escalate unresolved decisions to protocol |
|
|
51
|
-
| Bug fix / refactor / explicit skip | **→ Phase 1** |
|
|
52
|
-
|
|
53
44
|
## FORGE Protocol
|
|
54
45
|
|
|
55
46
|
1. `forge_classify({ task, files })` → determine tier (Floor/Standard/Critical)
|
|
@@ -57,32 +48,66 @@ You orchestrate the full development lifecycle: **planning → implementation
|
|
|
57
48
|
3. After review: `evidence_map({ action: "gate", task_id })` → YIELD/HOLD/HARD_BLOCK
|
|
58
49
|
4. Auto-upgrade tier if unknowns reveal contract/security issues
|
|
59
50
|
|
|
60
|
-
## Flow-Driven Development
|
|
61
|
-
|
|
62
|
-
Orchestrator uses the flow system for structured development. Flows define the step sequence — Orchestrator adds multi-agent orchestration, quality gates, and review protocols on top.
|
|
63
|
-
|
|
64
|
-
### Flow Selection
|
|
51
|
+
## Flow-Driven Development (PRIMARY BEHAVIOR)
|
|
65
52
|
|
|
66
|
-
|
|
67
|
-
|-----------|------|-------|
|
|
68
|
-
| Bug fix, small feature, refactoring | `aikit:basic` | assess → implement → verify |
|
|
69
|
-
| New feature, major change, multi-file | `aikit:advanced` | spec → plan → task → execute → verify |
|
|
70
|
-
| Custom/specialized work | Check `flow_list` | Follow flow-specific steps |
|
|
53
|
+
**After bootstrap, the Orchestrator MUST select and start a flow.** Flows define the step sequence — Orchestrator adds multi-agent orchestration, quality gates, and review protocols on top. Design decisions, brainstorming, and FORGE classification are handled by the **design** step within each flow — NOT by the Orchestrator directly.
|
|
71
54
|
|
|
72
|
-
|
|
55
|
+
### Flow Activation (MANDATORY after bootstrap)
|
|
73
56
|
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
1. `flow_status` — check for active flow
|
|
77
|
-
2. If active:
|
|
57
|
+
1. `flow_status` — check for an active flow from a previous session
|
|
58
|
+
2. **If active flow exists:**
|
|
78
59
|
- Note current step name and skill path
|
|
79
60
|
- Read the current step skill with `flow_read_skill`
|
|
80
61
|
- Follow its instructions
|
|
81
62
|
- When complete: `flow_step({ action: 'next' })`
|
|
82
|
-
3. If
|
|
83
|
-
- `flow_list` —
|
|
84
|
-
-
|
|
85
|
-
|
|
63
|
+
3. **If NO active flow:**
|
|
64
|
+
- `flow_list` — retrieve ALL available flows (builtin AND custom)
|
|
65
|
+
- **Auto-select** the flow when the task clearly matches:
|
|
66
|
+
|
|
67
|
+
| Task signal | Auto-activate flow |
|
|
68
|
+
|-------------|--------------------|
|
|
69
|
+
| Bug fix, typo, hotfix, "fix ...", error reproduction | `aikit:basic` |
|
|
70
|
+
| Small feature (≤3 files), refactoring, cleanup, dependency update | `aikit:basic` |
|
|
71
|
+
| New feature, API design, architecture change, multi-component work | `aikit:advanced` |
|
|
72
|
+
| Task matches a custom flow's description/tags exactly | That custom flow |
|
|
73
|
+
|
|
74
|
+
- **Auto-start:** When exactly one flow matches, start it immediately — `flow_start({ flow: '<matched>' })` — and inform the user which flow was activated and why.
|
|
75
|
+
- **Ask only when ambiguous:** If the task could fit multiple flows, or no flow clearly matches, present the options and let the user choose.
|
|
76
|
+
- Do NOT present a menu for obvious cases. Speed matters.
|
|
77
|
+
4. **Every task goes through a flow.** There is no flowless path.
|
|
78
|
+
|
|
79
|
+
### Flow Execution Loop
|
|
80
|
+
|
|
81
|
+
For EACH step in the active flow:
|
|
82
|
+
|
|
83
|
+
1. `flow_read_skill` — read the current step's SKILL.md
|
|
84
|
+
2. Follow the skill's instructions — delegate work to the appropriate agents
|
|
85
|
+
3. Apply **Orchestrator Protocols** (PRE-DISPATCH GATE, FORGE, review cycle) during execution
|
|
86
|
+
4. When the step is complete and results are approved:
|
|
87
|
+
- `flow_step({ action: 'next' })` to advance
|
|
88
|
+
5. Repeat until the flow is complete
|
|
89
|
+
|
|
90
|
+
**Custom flows work identically** — `flow_list` returns them alongside builtins. The execution loop is the same for ALL flows.
|
|
91
|
+
|
|
92
|
+
### Flow Completion & Cleanup
|
|
93
|
+
|
|
94
|
+
Flows MUST be driven to completion. A flow left active forever blocks future work.
|
|
95
|
+
|
|
96
|
+
**Normal completion:**
|
|
97
|
+
- When the last step's `flow_step({ action: 'next' })` is called, the flow finishes automatically
|
|
98
|
+
- After completion: run post-implementation protocol (`check` → `test_run` → `blast_radius` → `reindex` → `produce_knowledge` → `remember`)
|
|
99
|
+
- Inform the user the flow is complete with a summary of artifacts produced
|
|
100
|
+
|
|
101
|
+
**Stale flow detection** (check at session start when `flow_status` returns an active flow):
|
|
102
|
+
- If the active flow's current step has no matching work context in the conversation → **ask the user**: "A flow `<name>` is active at step `<step>`. Continue, or reset to start fresh?"
|
|
103
|
+
- If the user says reset → `flow_reset()` then activate a new flow for the current task
|
|
104
|
+
- If the user says continue → resume from the current step
|
|
105
|
+
|
|
106
|
+
**Abandoned step recovery:**
|
|
107
|
+
- If a step has been attempted ≥ 2 times with `BLOCKED` status → escalate to user with diagnostics, offer to `flow_step({ action: 'skip' })` or `flow_reset()`
|
|
108
|
+
- Never silently retry a blocked step indefinitely
|
|
109
|
+
|
|
110
|
+
**One active flow at a time.** To switch tasks, the current flow must be completed or reset first.
|
|
86
111
|
|
|
87
112
|
### Orchestrator Protocols (apply during ALL flow steps)
|
|
88
113
|
|
|
@@ -177,7 +202,7 @@ When subagents complete, their visual outputs (from `present`) are NOT visible t
|
|
|
177
202
|
3. **Maximize parallelism** — independent tasks MUST run as parallel `runSubagent` calls in the SAME function block. Sequential dispatch of parallelizable tasks is a protocol violation.
|
|
178
203
|
4. **Fresh context per subagent** — paste relevant code, don't reference conversation history
|
|
179
204
|
5. **Search AI Kit before planning** — check past decisions with `search()`
|
|
180
|
-
6. **
|
|
205
|
+
6. **Always use flows** — every task goes through a flow; design decisions happen in the flow's design step
|
|
181
206
|
7. **Never proceed without user approval** at 🛑 stops
|
|
182
207
|
8. **Max 2 retries** then escalate to user
|
|
183
208
|
|
|
@@ -214,35 +239,18 @@ Before every tool call, verify:
|
|
|
214
239
|
|-------|--------------|
|
|
215
240
|
| `multi-agents-development` | **Before any delegation** — task decomposition, dispatch templates, review pipeline, recovery patterns |
|
|
216
241
|
| `present` | When presenting plans, findings, or visual content to the user — dashboards, tables, charts, timelines |
|
|
217
|
-
| `brainstorming` |
|
|
242
|
+
| `brainstorming` | When a flow's design step requires creative/design work |
|
|
218
243
|
| `session-handoff` | Context filling up, session ending, or major milestone |
|
|
219
244
|
| `lesson-learned` | After completing work — extract engineering principles |
|
|
220
245
|
|
|
221
246
|
**When dispatching subagents**, include relevant skill names in the prompt so subagents know which skills to load (e.g., "Load the `react` and `typescript` skills for this task").
|
|
222
247
|
|
|
223
|
-
##
|
|
248
|
+
## Flows
|
|
224
249
|
|
|
225
|
-
|
|
226
|
-
|
|
227
|
-
|
|
228
|
-
- Note the current step name and skill path
|
|
229
|
-
- Load the current step's skill file
|
|
230
|
-
- Follow its instructions for this step
|
|
231
|
-
- When step is complete, call `flow_step({ action: 'next' })`
|
|
232
|
-
3. If no active flow:
|
|
233
|
-
- Check `flow_list` for available flows
|
|
234
|
-
- Suggest starting a flow based on the task type
|
|
235
|
-
- Use `flow_start({ flow: '<name>' })` to begin
|
|
250
|
+
This project uses aikit's pluggable flow system. Check flow status with the `flow_status` MCP tool.
|
|
251
|
+
If a flow is active, follow the current step's skill instructions. Advance with `flow_step({ action: 'next' })`.
|
|
252
|
+
Use `flow_list` to see available flows and `flow_start` to begin one.
|
|
236
253
|
|
|
237
|
-
### Flow MCP Tools
|
|
238
|
-
| Tool | Purpose |
|
|
239
|
-
|------|---------|
|
|
240
|
-
| `flow_list` | List installed flows and active flow |
|
|
241
|
-
| `flow_info` | Get detailed flow info including steps |
|
|
242
|
-
| `flow_start` | Start a named flow |
|
|
243
|
-
| `flow_step` | Advance: next, skip, or redo current step |
|
|
244
|
-
| `flow_status` | Check current execution state |
|
|
245
|
-
| `flow_reset` | Clear flow state to start over |
|
|
246
254
|
|
|
247
255
|
## Flows
|
|
248
256
|
|
|
@@ -38,16 +38,20 @@ You are the **Planner**, autonomous planner that researches codebases and writes
|
|
|
38
38
|
5. **Dependency Graph** — For each phase, list dependencies. Group into parallel batches
|
|
39
39
|
6. **Present** — Show plan with open questions, complexity estimate, parallel batch layout
|
|
40
40
|
|
|
41
|
-
## Flow Integration
|
|
41
|
+
## Flow Integration (PRIMARY MODE)
|
|
42
42
|
|
|
43
|
-
|
|
44
|
-
1. Check `flow_status` for current step context
|
|
45
|
-
2. Read the step's skill file for specific instructions
|
|
46
|
-
3. Follow skill instructions while applying Planner methodology
|
|
47
|
-
4. Produce required artifacts (as specified by the flow step's `produces` field)
|
|
48
|
-
5. When complete, report to Orchestrator (do NOT call `flow_step` — let Orchestrator advance)
|
|
43
|
+
The Planner is typically activated by the Orchestrator as part of a flow step (e.g., `aikit:advanced` plan step, `aikit:basic` assess step, or a custom flow's planning step).
|
|
49
44
|
|
|
50
|
-
When
|
|
45
|
+
**When activated as part of a flow:**
|
|
46
|
+
1. `flow_status` — check current step context and which flow is active
|
|
47
|
+
2. `flow_read_skill` — read the current step's SKILL.md for specific instructions
|
|
48
|
+
3. Follow the skill's instructions as the primary guide, applying Planner methodology on top
|
|
49
|
+
4. Read the flow's README.md for overall context on how the flow works
|
|
50
|
+
5. Produce required artifacts (as specified by the flow step's `produces` field)
|
|
51
|
+
6. When complete, report status to Orchestrator: `DONE` | `DONE_WITH_CONCERNS` | `NEEDS_CONTEXT` | `BLOCKED`
|
|
52
|
+
7. Do NOT call `flow_step` — the Orchestrator controls flow advancement
|
|
53
|
+
|
|
54
|
+
**When no flow is active** (standalone mode), operate autonomously following normal Planner methodology.
|
|
51
55
|
|
|
52
56
|
## Subagent Output Relay
|
|
53
57
|
|