@trymesh/cli 0.3.22
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information in this diff is provided for informational purposes only; it reflects the changes between package versions as they appear in their respective public registries.
- package/assets/mesh-banner.svg +23 -0
- package/bin/mesh-daemon.cjs +25 -0
- package/bin/mesh.cjs +25 -0
- package/dist/agent-loop.js +1 -0
- package/dist/agent-os.js +1 -0
- package/dist/agents/critic.js +1 -0
- package/dist/agents/persona-loader.js +1 -0
- package/dist/agents/redteam.js +1 -0
- package/dist/audit/logger.js +1 -0
- package/dist/auth.js +1 -0
- package/dist/cache-manager.js +1 -0
- package/dist/command-filter.js +1 -0
- package/dist/command-safety.js +1 -0
- package/dist/company-brain.js +1 -0
- package/dist/composite-backend.js +1 -0
- package/dist/config.js +1 -0
- package/dist/context-artifacts.js +1 -0
- package/dist/context-assembler.js +1 -0
- package/dist/daemon-protocol.js +1 -0
- package/dist/daemon.js +2 -0
- package/dist/dashboard/assets/index-Co1IW0HE.css +1 -0
- package/dist/dashboard/assets/index-DIj6iOWd.js +1 -0
- package/dist/dashboard/index.html +13 -0
- package/dist/dashboard-server.js +1 -0
- package/dist/doctor.js +1 -0
- package/dist/index.js +2 -0
- package/dist/integrations/chatops/manager.js +1 -0
- package/dist/integrations/issues/github.js +1 -0
- package/dist/integrations/issues/jira.js +1 -0
- package/dist/integrations/issues/linear.js +1 -0
- package/dist/integrations/issues/manager.js +1 -0
- package/dist/integrations/issues/types.js +1 -0
- package/dist/integrations/telemetry/datadog.js +1 -0
- package/dist/integrations/telemetry/manager.js +1 -0
- package/dist/integrations/telemetry/otel.js +1 -0
- package/dist/integrations/telemetry/posthog.js +1 -0
- package/dist/integrations/telemetry/sentry.js +1 -0
- package/dist/issue-autopilot.js +1 -0
- package/dist/llm-client.js +1 -0
- package/dist/local-tools.js +1 -0
- package/dist/mcp-client.js +1 -0
- package/dist/mesh-brain.js +1 -0
- package/dist/mesh-core-adapter.js +1 -0
- package/dist/mesh-gateway.js +1 -0
- package/dist/mesh-portal.js +1 -0
- package/dist/model-catalog.js +1 -0
- package/dist/model-router.js +1 -0
- package/dist/moonshots/causal-autopsy.js +1 -0
- package/dist/moonshots/common.js +1 -0
- package/dist/moonshots/conversational-codebase.js +1 -0
- package/dist/moonshots/ephemeral-execution.js +1 -0
- package/dist/moonshots/fluid-mesh.js +1 -0
- package/dist/moonshots/hive-mind.js +1 -0
- package/dist/moonshots/live-wire.js +1 -0
- package/dist/moonshots/living-software.js +1 -0
- package/dist/moonshots/natural-language-source.js +1 -0
- package/dist/moonshots/precrime.js +1 -0
- package/dist/moonshots/probabilistic-codebase.js +1 -0
- package/dist/moonshots/proof-carrying-change.js +1 -0
- package/dist/moonshots/schrodingers-ast.js +1 -0
- package/dist/moonshots/semantic-git.js +1 -0
- package/dist/moonshots/semantic-sheriff.js +1 -0
- package/dist/moonshots/session-resurrection.js +1 -0
- package/dist/moonshots/shadow-deploy.js +1 -0
- package/dist/moonshots/spec-code.js +1 -0
- package/dist/moonshots/todo-resolver.js +1 -0
- package/dist/moonshots/tribunal.js +1 -0
- package/dist/nvidia-services.js +1 -0
- package/dist/production-readiness.js +1 -0
- package/dist/quality/property-tests.js +1 -0
- package/dist/quality/smt.js +1 -0
- package/dist/refactor/ts-compiler.js +1 -0
- package/dist/runtime/replay.js +1 -0
- package/dist/runtime-api.js +1 -0
- package/dist/runtime-observer.js +1 -0
- package/dist/security/self-defending.js +1 -0
- package/dist/session-capsule-store.js +1 -0
- package/dist/session-manager.js +1 -0
- package/dist/structured-logger.js +1 -0
- package/dist/support.js +1 -0
- package/dist/terminal-preview.js +1 -0
- package/dist/timeline/symptom-bisect.js +1 -0
- package/dist/timeline-manager.js +1 -0
- package/dist/tool-backend.js +1 -0
- package/dist/tool-schema.js +1 -0
- package/dist/voice-manager.js +1 -0
- package/dist/workspace-index.js +1 -0
- package/package.json +94 -0
- package/scripts/minify.js +114 -0
- package/scripts/postinstall.cjs +81 -0
- package/scripts/published-install-smoke.cjs +103 -0
- package/scripts/release-smoke.cjs +116 -0
- package/scripts/run-eval.ts +84 -0
- package/scripts/run-tests.cjs +54 -0
- package/scripts/update-brew.sh +57 -0
|
@@ -0,0 +1 @@
|
|
|
1
|
+
// ── Minified module-level helpers (annotations only; code byte-identical). ──
// R = node:path, x = node:fs/promises, q = child_process.spawnSync — all
// imported later in this bundle.
// e: true when a path is indexable source — not under node_modules/.git/
// dist/.mesh and carrying one of the listed source/doc extensions.
function e(e){return!(e.includes("node_modules")||e.includes(".git")||e.includes("dist")||e.includes(".mesh"))&&/\.(ts|tsx|js|jsx|mjs|cjs|json|md|py|go|rs|cpp|c|h|java)$/.test(e)}
// t: path of `t` relative to root `e`, normalized to forward slashes.
function t(e,t){return R.relative(e,t).split(R.sep).join("/")}
// i: resolve `t` inside workspace root `e`; throws when the resolved path
// escapes the root ("../" prefix or absolute after relativization).
function i(e,t){const i=R.resolve(e,t??"."),r=R.resolve(e),s=R.relative(r,i);if(s.startsWith("..")||R.isAbsolute(s))throw new Error(`Path escapes workspace root: ${t??"."}`);return i}
// r: async existence check via fs.access.
async function r(e){try{return await x.access(e),!0}catch{return!1}}
// s: true when executable `e` is on PATH (spawnSync `which` exits 0).
function s(e){try{return 0===q("which",[e],{stdio:"ignore"}).status}catch{return!1}}
// n: read and parse a JSON file; returns fallback `t` on any error.
async function n(e,t){try{return JSON.parse(await x.readFile(e,"utf8"))}catch{return t}}
// a: serialize `t` as JSON to path `e`, creating parent directories first.
async function a(e,t){await x.mkdir(R.dirname(e),{recursive:!0}),await x.writeFile(e,JSON.stringify(t),"utf8")}
// o: dedupe non-blank strings, capped at `t` (default 100).
function o(e,t=100){return Array.from(new Set(e.filter(e=>Boolean(e&&e.trim())))).slice(0,t)}
// c: first needle in `t` that appears (lowercased) inside any key of object
// `e`; falls back to `i`.
function c(e,t,i){return t.find(t=>Object.keys(e).some(e=>e.toLowerCase().includes(t)))??i}
// l: overlap score in [0,1] — fraction of query tokens of `e` (length > 2)
// found as substrings of `t`.
function l(e,t){const i=new Set(e.toLowerCase().split(/[^a-z0-9_.$/-]+/i).filter(e=>e.length>2));if(0===i.size)return 0;const r=t.toLowerCase();let s=0;for(const e of i)r.includes(e)&&(s+=1);return s/i.size}
// p: parse a failing-test count from runner output: TAP "# fail N", then
// "N failed", then 1 if /fail|failing/ matches at all, else 0.
function p(e){const t=e.match(/# fail\s+(\d+)/);if(t)return Number(t[1]);const i=e.match(/(\d+)\s+failed/);return i?Number(i[1]):/fail|failing/i.test(e)?1:0}
// m: parse a "bundle … N kb" size figure from tool output; 0 when absent.
function m(e){const t=e.match(/bundle.*?([+-]?\d+(?:\.\d+)?)\s*kb/i);return t?Number(t[1]):0}
// u: regex-scan source text for HTTP route registrations (app/router/server
// .get/.post/…) and fetch/axios calls; returns {method, route, line} records.
function u(e){const t=[],i=[/\b(?:app|router|server)\.(get|post|put|patch|delete|all)\(\s*["'`]([^"'`]+)["'`]/g,/\b(?:fetch|axios\.(?:get|post|put|patch|delete))\(\s*["'`]([^"'`]+)["'`]/g];for(const r of i)for(const i of e.matchAll(r)){const r=i[2]?i[1].toUpperCase():"FETCH",s=i[2]??i[1],n=e.slice(0,i.index??0).split(/\r?\n/g).length;t.push({method:r,route:s,line:n})}return t}
// d: regex-scan for declarations (function/class/const/type/zod schema),
// returning {name, kind, line} records.
function d(e){const t=[],i=[{kind:"function",regex:/\b(?:export\s+)?(?:async\s+)?function\s+([A-Za-z_$][\w$]*)/g},{kind:"class",regex:/\b(?:export\s+)?class\s+([A-Za-z_$][\w$]*)/g},{kind:"const",regex:/\b(?:export\s+)?const\s+([A-Za-z_$][\w$]*)\s*=/g},{kind:"type",regex:/\b(?:export\s+)?(?:type|interface)\s+([A-Za-z_$][\w$]*)/g},{kind:"zod",regex:/\b(?:export\s+)?const\s+([A-Za-z_$][\w$]*)\s*=\s*z\.(?:object|string|number|array|boolean|enum|nativeEnum|union|intersection|tuple|record|map|set|function|lazy|promise|any|unknown|never|void|undefined|null|nan)/g}];for(const r of i)for(const i of e.matchAll(r.regex))t.push({name:i[1],kind:r.kind,line:e.slice(0,i.index??0).split(/\r?\n/g).length});return t}
// h: extract named type shapes — interface bodies, type aliases, z.object
// literals — as whitespace-collapsed strings keyed by name.
function h(e){const t={},i=[{regex:/\b(?:export\s+)?interface\s+([A-Za-z_$][\w$]*)\s*(\{[\s\S]*?\})/g},{regex:/\b(?:export\s+)?type\s+([A-Za-z_$][\w$]*)\s*=\s*([^;]+);/g},{regex:/\b(?:export\s+)?const\s+([A-Za-z_$][\w$]*)\s*=\s*z\.object\s*\((\{[\s\S]*?\})\)/g}];for(const r of i)for(const i of e.matchAll(r.regex))t[i[1]]=i[2].replace(/\s+/g," ").trim();return t}
// g: collect import/require module specifiers from source text.
function g(e){const t=[],i=[/import\s+.*?\s+from\s+["'`]([^"'`]+)["'`]/g,/require\(\s*["'`]([^"'`]+)["'`]\)/g];for(const r of i)for(const i of e.matchAll(r))t.push(i[1]);return t}
// f: heuristic risk tags for file path `e` with content `t` — shell
// execution, destructive fs ops, secret-bearing code, auth/persistence
// boundaries, agent runtime core; deduped and capped at 10 via o().
function f(e,t){const i=[];return/\b(exec|spawn|execFile)\s*\(/.test(t)&&i.push("shell execution"),/\b(rm\s+-rf|fs\.rm|deleteFile|unlink)\b/.test(t)&&i.push("destructive file operation"),/\b(SECRET|TOKEN|PASSWORD|API_KEY)\b/.test(t)&&i.push("secret-bearing code"),/\b(auth|session|jwt|oauth)\b/i.test(`${e}\n${t}`)&&i.push("auth boundary"),/\b(sql|query|prisma|supabase|database|migration)\b/i.test(`${e}\n${t}`)&&i.push("data persistence boundary"),(e.includes("agent-loop")||e.includes("local-tools"))&&i.push("agent runtime core"),o(i,10)}
// y: tokenize text into up to 80 lowercase keywords of length >= 3.
function y(e){return o(e.toLowerCase().split(/[^a-z0-9_/-]+/g).map(e=>e.trim()).filter(e=>e.length>=3),80)}
// w: slugify to [a-z0-9._-], trim leading/trailing dashes, cap at 80 chars;
// "item" when the result is empty.
function w(e){return e.toLowerCase().replace(/[^a-z0-9._-]+/g,"-").replace(/^-+|-+$/g,"").slice(0,80)||"item"}
// (the `function` keyword below belongs to k(...), continued on the next line)
function
// k: sanitize to a safe tool-name identifier ([A-Za-z0-9_-]); "tool" if empty.
k(e){const t=e.replace(/[^a-zA-Z0-9_-]/g,"_");return t.length>0?t:"tool"}
// b: bucket a numeric score into a severity label (>=85 critical, >=60 high,
// >=35 medium, else low).
function b(e){return e>=85?"critical":e>=60?"high":e>=35?"medium":"low"}
// v: pick up to `i` (default 12) candidate paths from `t` that mention the
// base name of file `e` (test/spec and extension suffixes stripped) or its
// directory name.
function v(e,t,i=12){const r=function(e){return R.basename(e).replace(/\.(test|spec)\.(ts|tsx|js|jsx|mjs|cjs)$/i,"").replace(/\.(ts|tsx|js|jsx|mjs|cjs)$/i,"").toLowerCase()}(e);if(!r)return[];const s=R.dirname(e).toLowerCase();return t.filter(e=>{const t=e.toLowerCase();return t.includes(r)||"."!==s&&t.includes(s)}).slice(0,i)}
// S: breadth-first walk from directory `e` collecting up to `t` file paths
// under root `i`; honors default ignores (Ue, defined below) plus the root
// .gitignore via the `ignore` matcher (F); skips dot-entries except ".github".
async function S(e,t,i){const r=await async function(e){const t=F();t.add(Ue);try{const i=R.join(e,".gitignore");if(_(i)){const e=await x.readFile(i,"utf8");t.add(e)}}catch{}return t}(i),s=[e],n=[];for(;s.length>0&&n.length<t;){const e=s.shift();if(!e)break;const a=await x.readdir(e,{withFileTypes:!0}).catch(()=>[]);for(const o of a){if(n.length>=t)break;const a=R.join(e,o.name),c=R.relative(i,a).split(R.sep).join("/");o.name.startsWith(".")&&".github"!==o.name||(r.ignores(c)||(o.isDirectory()?s.push(a):o.isFile()&&n.push(a)))}}return n}
// Node stdlib + `ignore` package imports used by the helpers above.
import{promises as x,existsSync as _,watch as j}from"node:fs";import R from"node:path";import{exec as C,execFile as A,spawn as M,spawnSync as q}from"node:child_process";import{promisify as I}from"node:util";import $ from"node:os";import P from"node:crypto";import F from"ignore";
// Embedding singleton: instantiated at module load and immediately discarded
// (the `new` result is never bound). NOTE(review): presumably a minifier
// artifact of an unused local binding — confirm against pre-minified source.
new class{model=null;isDownloading=!1;
// getModel: lazily loads a feature-extraction pipeline. NOTE(review): uses
// `new Function('return import(...)')` — an eval-equivalent dynamic import
// that defeats bundler/static analysis; flagged, not changed here. Honors
// MESH_EMBEDDING_MODEL, falls back to Xenova/all-MiniLM-L6-v2, polls every
// 500 ms while another caller is downloading, and returns null on failure.
async getModel(e){if(this.model)return this.model;if(this.isDownloading){for(;this.isDownloading;)await new Promise(e=>setTimeout(e,500));return this.model}this.isDownloading=!0;try{const{pipeline:e}=await new Function('return import("@xenova/transformers")')().catch(()=>({pipeline:null}));if(!e)throw new Error("transformers_not_installed");const t=process.env.MESH_EMBEDDING_MODEL||"Xenova/nomic-embed-code";try{this.model=await e("feature-extraction",t)}catch{this.model=await e("feature-extraction","Xenova/all-MiniLM-L6-v2")}return this.isDownloading=!1,this.model}catch(e){return this.isDownloading=!1,null}}
// getEmbedding: mean-pooled, normalized embedding of `e` as a plain number
// array; null when the model cannot be loaded.
async getEmbedding(e){const t=await this.getModel();if(!t)return null;const i=await t(e,{pooling:"mean",normalize:!0});return Array.from(i.data)}
// cosineSimilarity: dot(e,t) / (|e|·|t|); yields NaN for zero vectors.
cosineSimilarity(e,t){let i=0,r=0,s=0;for(let n=0;n<e.length;n++)i+=e[n]*t[n],r+=e[n]*e[n],s+=t[n]*t[n];return i/(Math.sqrt(r)*Math.sqrt(s))}};
// E/T: promisified child_process.exec / execFile.
const E=I(C),T=I(A);
// Intra-package imports consumed by the LocalToolBackend class further below.
import{MeshCoreAdapter as D}from"./mesh-core-adapter.js";import{CacheManager as N}from"./cache-manager.js";import{BedrockLlmClient as L}from"./llm-client.js";import{analyzeImageWithNvidia as O,DEFAULT_NVIDIA_VISION_MODELS as B,resolveNvidiaApiKey as z}from"./nvidia-services.js";import{routeMeshTask as G}from"./model-router.js";import{ProductionReadinessEngine as U}from"./production-readiness.js";import{CompanyBrainEngine as V}from"./company-brain.js";import{IssueAutopilotEngine as W}from"./issue-autopilot.js";import{WorkspaceIndex as H}from"./workspace-index.js";import{TimelineManager as Z}from"./timeline-manager.js";import{RuntimeObserver as K}from"./runtime-observer.js";import{AgentOs as Q}from"./agent-os.js";import{captureFrontendPreview as J}from"./terminal-preview.js";import{openContextArtifact as Y}from"./context-artifacts.js";import{MeshBrainClient as X,normalizeDiffPattern as ee,normalizeErrorSignature as te}from"./mesh-brain.js";import{runDaemonCli as ie}from"./daemon.js";import{DAEMON_SOCKET_PATH as re}from"./daemon-protocol.js";import se from"node:net";import{IssuePipelineManager as ne}from"./integrations/issues/manager.js";import{ChatopsManager as ae}from"./integrations/chatops/manager.js";import{scoreSignal as oe,TelemetryManager as ce}from"./integrations/telemetry/manager.js";import{ReplayEngine as le}from"./runtime/replay.js";import{SymptomBisectEngine as pe}from"./timeline/symptom-bisect.js";import{PersonaLoader as me}from"./agents/persona-loader.js";import{runCritic as ue}from"./agents/critic.js";import{runRedTeam as de}from"./agents/redteam.js";import{TsCompilerRefactor as he}from"./refactor/ts-compiler.js";import{PropertyTestGenerator as
ge}from"./quality/property-tests.js";import{SmtEdgeCaseFinder as fe}from"./quality/smt.js";import{AuditLogger as ye}from"./audit/logger.js";import{assertCommandAllowed as we,parseAllowedCommand as ke}from"./command-safety.js";import{StructuredLogger as be}from"./structured-logger.js";import{ToolInputValidationError as ve,validateToolInput as Se}from"./tool-schema.js";import{GEMINI_FLASH_LITE_MODEL_ID as xe}from"./model-catalog.js";import{SelfDefendingCodeEngine as _e}from"./security/self-defending.js";import{PrecrimeEngine as je}from"./moonshots/precrime.js";import{ShadowDeployEngine as Re}from"./moonshots/shadow-deploy.js";import{SemanticGitEngine as Ce}from"./moonshots/semantic-git.js";import{ProbabilisticCodebaseEngine as Ae}from"./moonshots/probabilistic-codebase.js";import{SpecCodeEngine as Me}from"./moonshots/spec-code.js";import{ConversationalCodebaseEngine as qe}from"./moonshots/conversational-codebase.js";import{NaturalLanguageSourceEngine as Ie}from"./moonshots/natural-language-source.js";import{FluidMeshEngine as $e}from"./moonshots/fluid-mesh.js";import{LivingSoftwareEngine as Pe}from"./moonshots/living-software.js";import{ProofCarryingChangeEngine as Fe}from"./moonshots/proof-carrying-change.js";import{CausalAutopsyEngine as Ee}from"./moonshots/causal-autopsy.js";import{TodoResolverEngine as Te}from"./moonshots/todo-resolver.js";import{LiveWireEngine as De}from"./moonshots/live-wire.js";import{SchrodingersAstEngine as Ne}from"./moonshots/schrodingers-ast.js";import{HiveMindEngine as Le}from"./moonshots/hive-mind.js";import{EphemeralExecutionEngine as Oe}from"./moonshots/ephemeral-execution.js";import{TribunalEngine as Be}from"./moonshots/tribunal.js";import{SessionResurrectionEngine as ze}from"./moonshots/session-resurrection.js";import{SemanticSheriffEngine as Ge}from"./moonshots/semantic-sheriff.js";const Ue=[".git","node_modules","dist",".mesh"],Ve=function(e,t,i,r){const s=Number(e);return 
Number.isFinite(s)?Math.max(i,Math.min(r,Math.trunc(s))):t}(process.env.MESH_INDEX_PARALLELISM,12,1,128),We=["low","medium","high"],He=/^(1|true|yes)$/i.test(process.env.MESH_DISABLE_WATCHERS??""),Ze=/^(1|true|yes)$/i.test(process.env.MESH_ENABLE_BACKGROUND_RESOLVER??""),Ke=["src","tests","docs","scripts","packages","mesh-core/src","worker/src"];export class LocalToolBackend{workspaceRoot;config;meshCore=new D;cache;agentPlan="No plan defined yet.";watchers=[];recentChanges=[];sessionSymbolIndex=new Map;projectLexicon={};entangledWorkspaces=[];changeStack=[];speculativeFixes=new Map;workspaceIndex;timelines;runtimeObserver;agentOs;meshBrain;issuePipeline;chatops;telemetry;replayEngine;symptomBisect;personaLoader;tsRefactor;propertyTests;smtFinder;audit;logger;startupTasks=[];selfDefense;precrime;shadowDeploy;semanticGit;probabilisticCodebase;specCode;conversationalCodebase;naturalLanguageSource;fluidMesh;livingSoftware;proofCarryingChange;causalAutopsy;todoResolver;liveWire;schrodingersAst;hiveMind;ephemeralExecution;tribunal;sessionResurrection;semanticSheriff;productionReadiness;companyBrain;issueAutopilot;constructor(e,t){this.workspaceRoot=e,this.config=t,this.cache=new N(t??{agent:{workspaceRoot:e,maxSteps:8,mode:"local",enableCloudCache:!0,themeColor:"cyan",voice:{configured:!1,language:"auto",speed:260,voice:"auto",microphone:"default"}},bedrock:{endpointBase:"",modelId:"",fallbackModelIds:[],temperature:0,maxTokens:0},mcp:{args:[]},supabase:{},telemetry:{contribute:!1}}),this.workspaceIndex=new H(e,this.meshCore,this.cache,{apiKey:t?.bedrock?.bearerToken,baseUrl:t?.bedrock?.endpointBase,googleApiKey:t?.bedrock?.googleApiKey}),this.startLiveSyncWatcher(),this.timelines=new Z(e),this.runtimeObserver=new K(e),this.agentOs=new Q(e,this.timelines),this.meshBrain=new X({workspaceRoot:e,telemetryContribute:Boolean(t?.telemetry?.contribute),endpoint:t?.telemetry?.meshBrainEndpoint}),this.issuePipeline=new ne(e,{intentCompile:async 
e=>this.intentCompile({intent:e}),impactMap:async e=>this.impactMap({symbol:e})}),this.chatops=new ae(e,{intentCompile:async e=>this.intentCompile({intent:e}),predictiveRepair:async()=>this.predictiveRepair({action:"analyze"})}),this.telemetry=new ce(e),this.replayEngine=new le(e,this.timelines),this.symptomBisect=new pe(e,this.timelines),this.personaLoader=new me(e),this.tsRefactor=new he(e),this.propertyTests=new ge(e),this.smtFinder=new fe(e),this.audit=new ye(e),this.logger=new be(e),this.selfDefense=new _e(e),this.precrime=new je(e),this.shadowDeploy=new Re(e,this.timelines),this.semanticGit=new Ce(e,this.timelines),this.probabilisticCodebase=new Ae(e),this.specCode=new Me(e),this.conversationalCodebase=new qe(e),this.naturalLanguageSource=new Ie(e),this.fluidMesh=new $e(e),this.livingSoftware=new Pe(e,(e,t)=>this.callTool(e,t)),this.proofCarryingChange=new Fe(e),this.causalAutopsy=new Ee(e),this.todoResolver=new Te(e,(e,t)=>this.callTool(e,t)),this.liveWire=new De(e),this.schrodingersAst=new Ne(e),this.hiveMind=new Le(e),this.ephemeralExecution=new Oe(e,(e,t)=>this.callTool(e,t));const i=t?async(e,i,r=0,s)=>{const n="haiku"===s?xe:t.bedrock.modelId,a=new L({endpointBase:t.bedrock.endpointBase,modelId:n,bearerToken:t.bedrock.bearerToken,temperature:r,maxTokens:1500}),o=await a.converse([{role:"user",content:[{text:i}]}],[],e);return"text"===o.kind?o.text:""}:void 0;this.tribunal=new Be(e,i),this.sessionResurrection=new ze(e),this.semanticSheriff=new Ge(e),this.productionReadiness=new U(e,(e,t)=>this.callTool(e,t)),this.companyBrain=new V(e,(e,t)=>this.callTool(e,t)),this.issueAutopilot=new W(e,{callTool:(e,t)=>this.callTool(e,t),callLlm:t?async({system:e,user:i,temperature:r=.1,maxTokens:s=8192})=>{const n=new L({endpointBase:t.bedrock.endpointBase,modelId:t.bedrock.modelId,fallbackModelIds:t.bedrock.fallbackModelIds,bearerToken:t.bedrock.bearerToken,temperature:r,maxTokens:s}),a=await 
n.converse([{role:"user",content:[{text:i}]}],[],e);return"text"===a.kind?a.text:""}:void 0}),this.startupTasks.push(this.bootstrapRepoDnaMemory(),this.agentOs.ensureDefaultDefinitions(),this.runtimeObserver.writeDefaultRunbooks()),Promise.allSettled(this.startupTasks),this.startWatcher()}startWatcher(){if(He||this.watchers.length>0)return;const i=[this.workspaceRoot,...Ke.map(e=>R.join(this.workspaceRoot,e))].filter((e,t,i)=>_(e)&&i.indexOf(e)===t);for(const r of i)try{const i=j(r,{recursive:!1},async(i,s)=>{if(!s)return;const n=R.resolve(r,s),a=t(this.workspaceRoot,n);a.startsWith("..")||R.isAbsolute(a)||!e(a)||await this.handleWatchedFile(n,a)});i.on("error",()=>this.closeWatcher(i)),i.unref?.(),this.watchers.push(i)}catch{}}closeWatcher(e){try{e.close()}catch{}this.watchers=this.watchers.filter(t=>t!==e)}async handleWatchedFile(e,t){try{const i=await x.stat(e);if(!i.isFile())return;const r=Math.floor(i.mtimeMs);try{const{stdout:e}=await E(`git diff -U1 "${t}"`,{cwd:this.workspaceRoot});e&&(this.recentChanges.unshift({file:t,diff:e.slice(0,1e3),time:(new Date).toISOString()}),this.recentChanges.length>5&&this.recentChanges.pop())}catch{}setTimeout(()=>{this.refreshWatchedFile(e,t,r).catch(()=>{})},1e3)}catch{}}async refreshWatchedFile(e,t,i){const r=await x.stat(e).catch(()=>null);if(!r?.isFile()||Math.floor(r.mtimeMs)!==i)return;if(await this.workspaceIndex.partialUpdate([t]).catch(()=>{}),!Ze||!this.meshCore.isAvailable)return;const s=await x.readFile(e,"utf8").catch(()=>null);if(!s)return;const n=await this.meshCore.summarizeAllTiers(t,s);if(await Promise.all(We.map(e=>this.cache.setCapsule(t,e,n[e],i))),t.match(/\.(ts|js|tsx)$/)){const e=await this.getDiagnostics();e.ok||await this.recordPredictiveRepairSignal(t,e)}}async listTools(){return[{name:"workspace.list_files",description:"List files in the local workspace.",inputSchema:{type:"object",properties:{path:{type:"string"},limit:{type:"number"}}}},{name:"workspace.read_file",description:"Read the Mesh 
capsule (optimized summary) of a file. Use this for general understanding.",inputSchema:{type:"object",required:["path"],properties:{path:{type:"string"},tier:{type:"string",enum:["low","medium","high"],default:"medium"}}}},{name:"workspace.read_slice",description:"Read a semantic context slice of a specific function/class from a file. PERFECT for minimizing context usage on large files while retaining all necessary imports and the exact AST block.",inputSchema:{type:"object",required:["path","symbol"],properties:{path:{type:"string"},symbol:{type:"string",description:"The exact name of the function, class, or method you want to read."}}}},{name:"workspace.open_artifact",description:"Open a specific locally stored tool-result artifact by id. ONLY call this when the user explicitly asks to see artifact details, or when a prior tool call returned truncated data and you need a specific field. NEVER call this automatically after another tool call — the tool result is already in context.",inputSchema:{type:"object",required:["id"],properties:{id:{type:"string"},query:{type:"string",description:"Optional search terms to extract matching lines from the artifact."},maxChars:{type:"number",description:"Maximum characters to return, default 4000."}}}},{name:"workspace.read_file_raw",description:"Read the actual raw source code of a file. ONLY use this when you need to edit the file or see exact implementation details that are missing from the capsule.",inputSchema:{type:"object",required:["path"],properties:{path:{type:"string"}}}},{name:"workspace.semantic_search",description:"Semantic (RAG) search over the codebase using vector embeddings. Returns the most relevant files and code snippets for a natural-language query. Use this first before grep_capsules for conceptual questions like 'where is auth handled?' 
or 'how does the agent loop work?'.",inputSchema:{type:"object",required:["query"],properties:{query:{type:"string",description:"Natural language description of what you're looking for"},limit:{type:"number",description:"Max results (default 8, max 20)"},mode:{type:"string",enum:["architecture","symbol","route","test"],description:"Search mode (default: architecture)"}}}},{name:"workspace.search_files",description:"Search file paths by substring.",inputSchema:{type:"object",required:["query"],properties:{query:{type:"string"},limit:{type:"number"},path:{type:"string"}}}},{name:"workspace.grep_content",description:"Search raw file contents. Expensive, use only if grep_capsules fails to find what you need.",inputSchema:{type:"object",required:["query"],properties:{query:{type:"string"},limit:{type:"number"},path:{type:"string"}}}},{name:"workspace.grep_capsules",description:"Search in cached capsules (summaries). Very fast and efficient for high-level searching.",inputSchema:{type:"object",required:["query"],properties:{query:{type:"string"},limit:{type:"number"}}}},{name:"workspace.write_file",description:"Write content to a file, creating or overwriting it. 
Automatically creates parent directories.",requiresApproval:!0,inputSchema:{type:"object",required:["path","content"],properties:{path:{type:"string"},content:{type:"string"}}}},{name:"workspace.run_command",description:"Run a shell command in the workspace and return its stdout and stderr.",requiresApproval:!0,inputSchema:{type:"object",required:["command"],properties:{command:{type:"string"}}}},{name:"workspace.get_index_status",description:"Get current indexing progress and cache coverage"},{name:"workspace.index_status",description:"Get persistent local code-intelligence index status, storage path, and stale-file count.",inputSchema:{type:"object",properties:{}}},{name:"workspace.read_multiple_files",description:"Read multiple files from the workspace in a single call.",inputSchema:{type:"object",required:["paths"],properties:{paths:{type:"array",items:{type:"string"}}}}},{name:"workspace.read_file_lines",description:"Read specific lines from a file.",inputSchema:{type:"object",required:["path","startLine","endLine"],properties:{path:{type:"string"},startLine:{type:"number"},endLine:{type:"number"}}}},{name:"workspace.list_directory",description:"List contents of a specific directory (not recursive).",inputSchema:{type:"object",properties:{path:{type:"string"}}}},{name:"workspace.get_file_info",description:"Get detailed information about a file (size, mtime, etc).",inputSchema:{type:"object",required:["path"],properties:{path:{type:"string"}}}},{name:"workspace.move_file",description:"Move or rename a file.",inputSchema:{type:"object",required:["sourcePath","destinationPath"],properties:{sourcePath:{type:"string"},destinationPath:{type:"string"}}}},{name:"workspace.delete_file",description:"Delete a file from the workspace.",inputSchema:{type:"object",required:["path"],properties:{path:{type:"string"}}}},{name:"workspace.patch_file",description:"Replace a specific block of text in a 
file.",inputSchema:{type:"object",required:["path","search","replace"],properties:{path:{type:"string"},search:{type:"string"},replace:{type:"string"}}}},{name:"workspace.patch_surgical",description:"Apply a surgical search-and-replace block. More robust than patch_file as it uses context-aware matching.",requiresApproval:!0,inputSchema:{type:"object",required:["path","searchBlock","replaceBlock"],properties:{path:{type:"string"},searchBlock:{type:"string",description:"The exact block of code to find (including indentation)."},replaceBlock:{type:"string",description:"The new block of code to put in its place."}}}},{name:"workspace.list_symbols",description:"List functions, classes, and variables in a file using Mesh Intelligence.",inputSchema:{type:"object",required:["path"],properties:{path:{type:"string"}}}},{name:"workspace.expand_symbol",description:"Get the raw source code for a specific symbol (function/class) from a file. Use this to see implementation details without reading the whole file.",inputSchema:{type:"object",required:["path","symbolName"],properties:{path:{type:"string"},symbolName:{type:"string"}}}},{name:"workspace.get_file_graph",description:"Get the import/export dependency graph for a specific file.",inputSchema:{type:"object",required:["path"],properties:{path:{type:"string"}}}},{name:"workspace.read_dir_overview",description:"Get a high-level overview of all files in a directory using ultra-low tier capsules. Efficient for understanding a module's public API.",inputSchema:{type:"object",required:["path"],properties:{path:{type:"string"}}}},{name:"workspace.git_diff",description:"Show uncommitted changes in the workspace.",inputSchema:{type:"object",properties:{path:{type:"string",description:"Optional path to limit diff to."}}}},{name:"web.read_docs",description:"Fetch and read documentation from a URL. 
Extracts the main text content.",inputSchema:{type:"object",required:["url"],properties:{url:{type:"string"}}}},{name:"workspace.run_in_shadow",description:"Run a command in a temporary, isolated copy of the workspace (Shadow Workspace) to safely test changes without affecting the main directory.",requiresApproval:!0,inputSchema:{type:"object",required:["command"],properties:{command:{type:"string"}}}},{name:"workspace.get_diagnostics",description:"Run project linters and type checkers (like tsc) to find errors.",inputSchema:{type:"object",properties:{}}},{name:"workspace.find_references",description:"Find all usages of a specific symbol across the workspace (Lightweight LSP alternative).",inputSchema:{type:"object",required:["symbol"],properties:{symbol:{type:"string"}}}},{name:"workspace.ask_codebase",description:"Query the persistent local code-intelligence index. Returns cited matches with file, symbol, line range, confidence, and match rationale.",inputSchema:{type:"object",required:["query"],properties:{query:{type:"string",description:"Natural language query about the codebase."},mode:{type:"string",enum:["architecture","bug","edit-impact","test-impact","ownership","recent-change","runtime-path"],default:"architecture"},limit:{type:"number",default:8}}}},{name:"workspace.explain_symbol",description:"Explain an indexed symbol with definition location, callers, dependencies, exports, and linked tests.",inputSchema:{type:"object",required:["symbol"],properties:{symbol:{type:"string"}}}},{name:"workspace.impact_map",description:"Map edit/test/runtime impact for a path, symbol, or unified diff using the persistent index graph.",inputSchema:{type:"object",properties:{path:{type:"string"},symbol:{type:"string"},diff:{type:"string"}}}},{name:"workspace.expand_execution_path",description:"Dependency-Slicing: Returns the body of a specific function AND the signatures of all other functions it 
calls.",inputSchema:{type:"object",required:["path","symbolName"],properties:{path:{type:"string"},symbolName:{type:"string"}}}},{name:"workspace.generate_lexicon",description:"Void-Protocol: Create a session dictionary of project terms. Enables using #ID in patches for max token saving.",inputSchema:{type:"object",properties:{paths:{type:"array",items:{type:"string"},description:"Files to scan for dictionary terms."}}}},{name:"workspace.ghost_verify",description:"Ghost Branch Lifecycle: Run tests/build on a proposed change in a parallel timeline. Ensures NO REGRESSIONS before applying to main workspace.",requiresApproval:!0,inputSchema:{type:"object",required:["patch","testCommand"],properties:{patch:{type:"string",description:"The alien_patch or surgical patch to verify."},testCommand:{type:"string",description:"Command to run in ghost branch (e.g. 'npm test')."}}}},{name:"workspace.timeline_create",description:"Create an isolated speculative timeline using a git worktree when possible, falling back to an isolated checkout copy.",inputSchema:{type:"object",properties:{name:{type:"string"},baseRef:{type:"string",default:"HEAD"}}}},{name:"workspace.timeline_apply_patch",description:"Apply a unified diff patch inside an isolated timeline without touching the main workspace.",inputSchema:{type:"object",required:["timelineId","patch"],properties:{timelineId:{type:"string"},patch:{type:"string"}}}},{name:"workspace.timeline_run",description:"Run a verification command inside an isolated timeline and persist stdout/stderr artifacts.",requiresApproval:!0,inputSchema:{type:"object",required:["timelineId","command"],properties:{timelineId:{type:"string"},command:{type:"string"},timeoutMs:{type:"number"}}}},{name:"workspace.timeline_compare",description:"Compare one or more timelines by diff stat, changed files, execution time, last command, and verification 
verdict.",inputSchema:{type:"object",required:["timelineIds"],properties:{timelineIds:{type:"array",items:{type:"string"}}}}},{name:"workspace.timeline_promote",description:"Promote a passing timeline diff into the main workspace.",requiresApproval:!0,inputSchema:{type:"object",required:["timelineId"],properties:{timelineId:{type:"string"}}}},{name:"workspace.timeline_list",description:"List recent speculative timelines for this workspace.",inputSchema:{type:"object",properties:{}}},{name:"workspace.symptom_bisect",description:"Autonomous git bisect by symptom description. Validates a verification command across commit history and returns likely introducing commit.",inputSchema:{type:"object",required:["symptom"],properties:{symptom:{type:"string"},verificationCommand:{type:"string"},searchDepth:{type:"number",default:50}}}},{name:"workspace.what_if",description:"Counterfactual mode: evaluate a migration/refactor in an isolated timeline and return a What-If report without applying changes by default.",inputSchema:{type:"object",required:["hypothesis"],properties:{hypothesis:{type:"string"},verificationCommand:{type:"string"},promote:{type:"boolean",default:!1}}}},{name:"workspace.self_defend",description:"Moonshot 05: continuously harden code. 
Scans/probes ReDoS-class vulnerabilities, writes a security ledger, and returns verified findings.",inputSchema:{type:"object",properties:{action:{type:"string",enum:["scan","probe","harden","daemon_tick","status"],default:"scan"},path:{type:"string"},maxFiles:{type:"number",default:500},confirm:{type:"boolean",default:!1},verificationCommand:{type:"string"},timeoutMs:{type:"number"}}}},{name:"workspace.precrime",description:"Moonshot 08: predict likely bugs before they happen from diffs, repo structure, coverage hints, and production telemetry.",inputSchema:{type:"object",properties:{action:{type:"string",enum:["analyze","gate","record_outcome","status"],default:"analyze"},maxFiles:{type:"number",default:250},path:{type:"string"},file:{type:"string"},incident:{type:"boolean"},outcome:{type:"string",enum:["incident","clean"]},severity:{type:"string",enum:["critical","high","medium","low"]},tags:{type:"array",items:{type:"string"}},verificationCommand:{type:"string"},notes:{type:"string"}}}},{name:"workspace.end_staging",description:"Moonshot 06: shadow-deploy verification ledger. Runs checks in a timeline and reports promotion gates before human review.",requiresApproval:!0,inputSchema:{type:"object",properties:{action:{type:"string",enum:["shadow","status"],default:"shadow"},command:{type:"string",default:"npm test"},verificationCommand:{type:"string"},timeoutMs:{type:"number"}}}},{name:"workspace.todo_resolver",description:"Moonshot: Autonomous Technical Debt Resolver. Scans for TODO/FIXME markers and resolves them via timeline and fix racing.",inputSchema:{type:"object",properties:{action:{type:"string",enum:["scan","resolve"],default:"scan"},file:{type:"string",description:"Required for resolve action. Target file."},text:{type:"string",description:"Required for resolve action. TODO text."},maxFiles:{type:"number"}}}},{name:"workspace.live_wire",description:"Endgame: Mesh Live-Wire. 
Hot-swap AST in a running Node V8 process without downtime.",inputSchema:{type:"object",properties:{action:{type:"string",enum:["attach","status"],default:"attach"},target:{type:"string",description:"Port (e.g. 9229) or PID of the target process."},scriptName:{type:"string",description:"Name of the script/file to patch in V8 memory."},newFunctionBody:{type:"string",description:"The new code payload to inject."}}}},{name:"workspace.schrodingers_ast",description:"Endgame: Schrödinger's AST. Generates a QuantumRouter to run multiple AST variants in superposition and measure their performance.",inputSchema:{type:"object",properties:{action:{type:"string",enum:["superpose","status"],default:"superpose"},file:{type:"string",description:"Target file path."},functionName:{type:"string",description:"Target function to wrap in superposition."},variants:{type:"array",items:{type:"string"},description:"Array of function body strings."}}}},{name:"workspace.hive_mind",description:"Endgame: The Hive Mind. Broadcast uncommitted AST intents via P2P (simulated) to prevent merge conflicts before they happen.",inputSchema:{type:"object",properties:{action:{type:"string",enum:["share_thoughts","status"],default:"share_thoughts"}}}},{name:"workspace.ephemeral_execution",description:"Endgame: Ephemeral Execution (Zero-Source). Starts a JIT server that hallucinates routing logic per request without storing source code.",inputSchema:{type:"object",properties:{action:{type:"string",enum:["start","status"],default:"start"},port:{type:"number",description:"Port for the ephemeral server."},specPath:{type:"string",description:"Path to OpenAPI/GraphQL spec."}}}},{name:"workspace.semantic_git",description:"Moonshot 02: semantic Git. 
Plans, verifies, and optionally promotes conflict resolutions in isolated timelines based on symbol-level merge semantics.",inputSchema:{type:"object",properties:{action:{type:"string",enum:["analyze","plan","resolve","verify","status"],default:"analyze"},path:{type:"string"},verificationCommand:{type:"string"},timeoutMs:{type:"number"},promote:{type:"boolean",default:!1}}}},{name:"workspace.probabilistic_codebase",description:"Moonshot 04: plan safe probabilistic variants and routing guardrails for routes, hotspots, and pure functions.",inputSchema:{type:"object",properties:{action:{type:"string",enum:["plan","status"],default:"plan"},intent:{type:"string"}}}},{name:"workspace.conversational_codebase",description:"Moonshot 03: symbol-level memory and answers so the codebase can explain its own state, history, and conventions.",inputSchema:{type:"object",properties:{action:{type:"string",enum:["ask","record","map","status"],default:"ask"},query:{type:"string"},symbol:{type:"string"},note:{type:"string"}}}},{name:"workspace.spec_code",description:"Moonshot 01: bidirectional spec-code system. 
Synthesizes behavior contracts from code/tests/routes, accepts human specs, detects drift, locks contracts, and emits materialization patch plans.",inputSchema:{type:"object",properties:{action:{type:"string",enum:["synthesize","check","assert","lock","unlock","materialize","status"],default:"synthesize"},id:{type:"string"},subject:{type:"string"},behavior:{type:"string"},file:{type:"string"}}}},{name:"workspace.natural_language_source",description:"Moonshot 09: compile constrained natural-language intent into an implementation IR, patch plan, and verification plan.",inputSchema:{type:"object",properties:{action:{type:"string",enum:["compile","status"],default:"compile"},intent:{type:"string"},source:{type:"string"}}}},{name:"workspace.fluid_mesh",description:"Moonshot 07: map repository capabilities as portable units independent of file/repo boundaries.",inputSchema:{type:"object",properties:{action:{type:"string",enum:["map","status"],default:"map"}}}},{name:"workspace.living_software",description:"Moonshot 10: synthesize all moonshot ledgers into a living-software pulse with health scores and next interventions. 
Includes 'drive_coverage' action for autonomous test generation.",inputSchema:{type:"object",properties:{action:{type:"string",enum:["pulse","status","drive_coverage"],default:"pulse"}}}},{name:"workspace.proof_carrying_change",description:"Generate a promotion-grade proof bundle for a change: intent, touched capabilities, affected contracts, risk model, verification, rollback, and assumptions.",inputSchema:{type:"object",properties:{action:{type:"string",enum:["generate","verify","status"],default:"generate"},intent:{type:"string"},verificationCommand:{type:"string"},timeoutMs:{type:"number"}}}},{name:"workspace.causal_autopsy",description:"Reconstruct a failure's causal chain from symptom text, runtime evidence, diffs, config/dependency deltas, proofs, and Mesh ledgers.",inputSchema:{type:"object",properties:{action:{type:"string",enum:["investigate","status"],default:"investigate"},symptom:{type:"string"},runId:{type:"string"},failingCommand:{type:"string"},timeoutMs:{type:"number"}}}},{name:"workspace.tribunal",description:"Moonshot: Cross-Model Tribunal. Routes a hard engineering problem to three expert AI panelists (Correctness, Performance, Resilience), runs a structured debate where each critiques the others, and synthesizes the dominant solution. Produces a decision artifact with full debate trail.",inputSchema:{type:"object",properties:{action:{type:"string",enum:["convene","status"],default:"convene"},problem:{type:"string",description:"The engineering problem or decision to adjudicate."},context:{type:"string",description:"Optional codebase context or constraints to include."}}}},{name:"workspace.session_resurrection",description:"Moonshot: Cognitive Session Resurrection. Captures your current intent, open questions, failed approaches, insights, and next actions as a persistent snapshot. 
Reconstructs your full mental state at the start of any future session so you never start cold again.",inputSchema:{type:"object",properties:{action:{type:"string",enum:["capture","resurrect","checkpoint","status","clear"],default:"resurrect"},intent:{type:"string",description:"Required for capture: what are you trying to accomplish?"},filesInFocus:{type:"array",items:{type:"string"},description:"Files actively being worked on."},openQuestions:{type:"array",items:{type:"string"},description:"Unresolved questions blocking progress."},failedApproaches:{type:"array",items:{type:"object"},description:"Array of {approach, reason} objects."},insights:{type:"array",items:{type:"string"},description:"Key discoveries from this session."},nextActions:{type:"array",items:{type:"string"},description:"Ordered list of highest-leverage next steps."},note:{type:"string",description:"Free-form note to attach to the snapshot."}}}},{name:"workspace.semantic_sheriff",description:"Moonshot: Semantic Contract Sheriff. Fingerprints every module's semantic meaning (exports, purpose, behavioral patterns). Detects when refactoring silently changes what a module MEANS even when tests pass. 
Lock critical contracts to trigger critical-severity alerts on drift.",inputSchema:{type:"object",properties:{action:{type:"string",enum:["scan","verify","lock","unlock","drift","status","clear"],default:"verify"},file:{type:"string",description:"Target file for lock/unlock/verify actions."},maxFiles:{type:"number",default:400,description:"Max files to scan."},force:{type:"boolean",default:!1,description:"Force full re-verify of all contracts (not just changed files)."}}}},{name:"agent.assemble_team",description:"Classify a task and auto-assemble specialist personas from .mesh/personas for coordinated execution.",inputSchema:{type:"object",required:["task"],properties:{task:{type:"string"}}}},{name:"workspace.alien_patch",description:"Mesh-Alien-OS: Apply a high-density symbolic patch using session IDs and opcodes. MAX TOKENS SAVED. Example: !1 > { r: true }",requiresApproval:!0,inputSchema:{type:"object",required:["patch"],properties:{patch:{type:"string",description:"Symbolic patch string. Format: ![ID] > { [ALIENT_CODE] }"}}}},{name:"workspace.session_index_symbols",description:"Generate short session IDs for file symbols to enable Mesh-Alien-OS patching.",inputSchema:{type:"object",required:["paths"],properties:{paths:{type:"array",items:{type:"string"}}}}},{name:"workspace.rename_symbol",description:"AST-Native Micro-Edit: Safely renames a specific function/variable in a file without rewriting blocks of code. 
Auto-validates syntax.",requiresApproval:!0,inputSchema:{type:"object",required:["path","oldName","newName"],properties:{path:{type:"string"},oldName:{type:"string",description:"Exact current name."},newName:{type:"string",description:"The new name to apply."}}}},{name:"workspace.extract_function",description:"Compiler-backed refactor: extract a selected line range into a named function using ts-morph.",requiresApproval:!0,inputSchema:{type:"object",required:["path","functionName","startLine","endLine"],properties:{path:{type:"string"},functionName:{type:"string"},startLine:{type:"number"},endLine:{type:"number"}}}},{name:"workspace.inline_symbol",description:"Compiler-backed refactor: inline a variable symbol and remove its declaration where safe.",requiresApproval:!0,inputSchema:{type:"object",required:["path","symbolName"],properties:{path:{type:"string"},symbolName:{type:"string"}}}},{name:"workspace.move_to_module",description:"Compiler-backed refactor: move an exported function/variable into another module.",requiresApproval:!0,inputSchema:{type:"object",required:["fromPath","toPath","symbolName"],properties:{fromPath:{type:"string"},toPath:{type:"string"},symbolName:{type:"string"}}}},{name:"workspace.generate_properties",description:"Generate property-based tests (fast-check) for modified functions or the full workspace.",requiresApproval:!0,inputSchema:{type:"object",properties:{path:{type:"string"},functionName:{type:"string"},all:{type:"boolean",default:!1}}}},{name:"workspace.find_edge_cases",description:"SMT-inspired edge-case discovery for tagged functions; generates counterexample tests.",requiresApproval:!0,inputSchema:{type:"object",required:["path"],properties:{path:{type:"string"},functionName:{type:"string"}}}},{name:"workspace.audit",description:"Enterprise audit trail utilities: replay and verify cryptographic hash-chain 
integrity.",inputSchema:{type:"object",properties:{action:{type:"string",enum:["replay","verify"],default:"verify"},limit:{type:"number",default:200}}}},{name:"workspace.semantic_undo",description:"Non-Linear Chrono-Untangling: Revert a specific past concept or feature implementation without breaking more recent changes. Uses AST graph theory to safely de-merge old logic.",requiresApproval:!0,inputSchema:{type:"object",required:["concept"],properties:{concept:{type:"string",description:"The feature or concept name to remove (e.g. 'Auth System')."}}}},{name:"workspace.finalize_task",description:"Semantic Git & PR Automator: Creates a branch, generates a semantic commit message from the current agent plan, and prepares a PR.",requiresApproval:!0,inputSchema:{type:"object",required:["branchName","commitMessage"],properties:{branchName:{type:"string"},commitMessage:{type:"string",description:"A detailed semantic commit message explaining the WHY."}}}},{name:"web.inspect_ui",description:"Multi-modal Sight: Takes a screenshot of a local or remote URL using Playwright and returns a base64 string for visual UI/UX debugging. Requires `playwright` to be installed locally (`npm i -g playwright && npx playwright install chromium`). For a zero-dependency alternative use `frontend.preview` (built-in Chrome CDP).",inputSchema:{type:"object",required:["url"],properties:{url:{type:"string"}}}},{name:"frontend.preview",description:"Render a real frontend screenshot in the terminal using Chrome DevTools Protocol directly, then Kitty/iTerm2/Sixel terminal graphics when available. 
Does not use Playwright.",requiresApproval:!0,inputSchema:{type:"object",required:["url"],properties:{url:{type:"string"},width:{type:"number",default:1280},height:{type:"number",default:800},waitMs:{type:"number",default:1200},render:{type:"boolean",default:!0},protocol:{type:"string",enum:["auto","kitty","iterm2","sixel","none"],default:"auto"},outputPath:{type:"string"}}}},{name:"workspace.query_ast",description:"Tree-sitter Query Engine: Search the codebase for structural patterns using ast-grep (sg) syntax. Uses a locally installed `ast-grep` binary if available (recommended: `brew install ast-grep`), otherwise falls back to `npx @ast-grep/cli` (requires network on first run).",inputSchema:{type:"object",required:["pattern"],properties:{pattern:{type:"string",description:"The AST pattern to search for. E.g., 'function $A() { $$$ }'"}}}},{name:"workspace.get_recent_changes",description:"Time-Travel AST Diffing: Get recent background modifications (git diffs) tracked by the workspace watcher. Use this to catch up on changes without re-reading files.",inputSchema:{type:"object",properties:{}}},{name:"workspace.run_with_telemetry",description:"Runtime Telemetry Injection: Runs a Node.js command and attaches a V8 inspector. If it crashes, it dumps the exact memory state (local variables) at the exact moment of the crash.",requiresApproval:!0,inputSchema:{type:"object",required:["command"],properties:{command:{type:"string",description:"Node command to run (e.g. 'node src/index.js' or 'npm run dev')."}}}},{name:"runtime.start",description:"Start a runtime observer for a command or .mesh/runbooks/<profile>.json profile. 
Captures stdout/stderr as replayable run artifacts.",requiresApproval:!0,inputSchema:{type:"object",properties:{command:{type:"string"},profile:{type:"string"},timeoutMs:{type:"number"}}}},{name:"runtime.capture_failure",description:"Capture failure details, stack frames, and log tails for a runtime observer run.",inputSchema:{type:"object",required:["runId"],properties:{runId:{type:"string"}}}},{name:"runtime.capture_deep_autopsy",description:"Failure Autopsy: Reconstructs the causal chain of a crash using inspector-backed stack frames and scope values when available, with log fallback otherwise.",inputSchema:{type:"object",required:["runId"],properties:{runId:{type:"string"}}}},{name:"runtime.trace_request",description:"Map a URL, test name, or stack frame to likely runtime-path index queries.",inputSchema:{type:"object",properties:{url:{type:"string"},testName:{type:"string"},stackFrame:{type:"string"}}}},{name:"runtime.explain_failure",description:"Explain a captured runtime/test failure and list likely source files.",inputSchema:{type:"object",required:["runId"],properties:{runId:{type:"string"}}}},{name:"runtime.fix_failure",description:"Turn a captured failure into a timeline-first fix task with recommended verification tools.",inputSchema:{type:"object",required:["runId"],properties:{runId:{type:"string"}}}},{name:"runtime.replay_trace",description:"Production incident replay from OpenTelemetry trace IDs or Sentry event IDs, with optional commit-range divergence analysis.",inputSchema:{type:"object",properties:{traceId:{type:"string"},sentryEventId:{type:"string"},commitRange:{type:"string",description:"Optional git commit range in form start..end."}}}},{name:"workspace.validate_patch",description:"Pre-Cognitive Ghost Execution: Test a surgical patch in memory without actually saving it. 
Useful to check if your code compiles before committing to it.",inputSchema:{type:"object",required:["path","searchBlock","replaceBlock"],properties:{path:{type:"string"},searchBlock:{type:"string"},replaceBlock:{type:"string"}}}},{name:"workspace.trace_symbol",description:"Symbolic Trace Routing: Recursively trace the data flow and usages of a specific symbol backwards to find its origin and call sites.",inputSchema:{type:"object",required:["path","symbol"],properties:{path:{type:"string"},symbol:{type:"string"}}}},{name:"agent.race_fixes",description:"Multiverse Fix Racing: Generates multiple candidate fixes in parallel timelines, verifies them, compares telemetry, and ranks them by stability and quality.",inputSchema:{type:"object",required:["task","verificationCommand"],properties:{task:{type:"string",description:"The problem to fix (e.g. 'Fix the linter error in src/auth.ts')."},verificationCommand:{type:"string",description:"The command to run to verify the fix (e.g. 'npm test')."},candidates:{type:"number",description:"Number of parallel timelines to spawn (default: 3).",default:3}}}},{name:"agent.spawn_swarm",description:"Neural Swarm Orchestration: Spawns multiple sub-agents to work on different parts of a task in parallel. 
Max efficiency for multi-file features.",inputSchema:{type:"object",required:["subTasks"],properties:{subTasks:{type:"array",items:{type:"object",required:["id","prompt"],properties:{id:{type:"string",description:"Unique ID for this sub-task."},prompt:{type:"string",description:"Instruction for this specific sub-agent."}}}}}}},{name:"agent.spawn",description:"Create a role-scoped worker record and isolated timeline from .mesh/agents/<role>.md.",inputSchema:{type:"object",required:["role","task"],properties:{role:{type:"string"},task:{type:"string"},workspaceScope:{type:"array",items:{type:"string"}},writeScope:{type:"array",items:{type:"string"}}}}},{name:"agent.status",description:"List worker records or inspect one worker by id.",inputSchema:{type:"object",properties:{id:{type:"string"}}}},{name:"agent.review",description:"Review a timeline diff with deterministic safety and verification heuristics.",inputSchema:{type:"object",required:["timelineId"],properties:{timelineId:{type:"string"}}}},{name:"agent.merge_verified",description:"Promote a timeline only after it has a passing verification verdict.",requiresApproval:!0,inputSchema:{type:"object",required:["timelineId"],properties:{timelineId:{type:"string"}}}},{name:"agent.invoke_sub_agent",description:"Hierarchical MoE: Dispatch a lightweight sub-agent to research a topic or summarize files autonomously. Keeps the main context clean.",inputSchema:{type:"object",required:["prompt"],properties:{prompt:{type:"string",description:"Detailed instruction for the sub-agent. E.g., 'Read files in /src and summarize how routing works.'"}}}},{name:"agent.plan",description:"Update or read the agent's internal scratchpad/plan. 
Use this to keep track of multi-step tasks.",inputSchema:{type:"object",properties:{action:{type:"string",enum:["read","write"],default:"read"},plan:{type:"string",description:"The plan to write (only for action='write')."}}}},{name:"workspace.get_env_info",description:"Get information about the local environment (OS, Node version, common tools).",inputSchema:{type:"object",properties:{}}},{name:"workspace.grep_ripgrep",description:"Fast content search using ripgrep (if available).",inputSchema:{type:"object",required:["query"],properties:{query:{type:"string"},path:{type:"string"},includePattern:{type:"string"}}}},{name:"workspace.git_status",description:"Get current git status (branch, changed files)."},{name:"workspace.check_sync",description:"Verify cloud (L2) synchronization status"},{name:"workspace.index_everything",description:"Explicitly trigger full workspace indexing (generate all capsules)"},{name:"workspace.digital_twin",description:"Build or read the Codebase Digital Twin: symbols, routes, tests, deploy/config files, env names, risk hotspots, and git state.",inputSchema:{type:"object",properties:{action:{type:"string",enum:["build","read","status"],default:"build"}}}},{name:"workspace.predictive_repair",description:"Predictive Repair Daemon: analyze diagnostics, recent diffs, and learned risk memory to prepare verifiable repair candidates.",inputSchema:{type:"object",properties:{action:{type:"string",enum:["analyze","status","clear"],default:"analyze"},verificationCommand:{type:"string",description:"Optional verification command to attach to prepared repairs."}}}},{name:"workspace.engineering_memory",description:"Read, record, or learn repository-specific engineering rules from accepted/rejected work and risky 
modules.",inputSchema:{type:"object",properties:{action:{type:"string",enum:["read","record","learn"],default:"read"},outcome:{type:"string",enum:["accepted","rejected","neutral"]},note:{type:"string"},rule:{type:"string"},files:{type:"array",items:{type:"string"}}}}},{name:"workspace.brain",description:"Mesh Brain network-effect interface: query global fix patterns, read contribution stats, or opt out.",inputSchema:{type:"object",properties:{action:{type:"string",enum:["stats","query","opt_out"],default:"stats"},error:{type:"string",description:"Error text or signature to query globally learned patterns."},limit:{type:"number",default:5}}}},{name:"workspace.company_brain",description:"Company Codebase Brain: build, query, record, ingest, or export durable repo intelligence with citations, decisions, risks, ownership, and verification memory.",inputSchema:{type:"object",properties:{action:{type:"string",enum:["build","query","status","record","ingest","export"],default:"status"},query:{type:"string",description:"Question or search query for action=query."},question:{type:"string",description:"Alias for query."},title:{type:"string",description:"Decision/rule/lesson title for action=record."},body:{type:"string",description:"Decision/rule/lesson body for action=record or action=ingest."},rule:{type:"string",description:"Rule to persist into Company Brain and Engineering Memory."},note:{type:"string",description:"Short note to persist."},kind:{type:"string",enum:["decision","rule","lesson","risk","owner","pattern","runtime","autopilot"],default:"decision"},source:{type:"string"},files:{type:"array",items:{type:"string"}},limit:{type:"number",default:8},maxFiles:{type:"number",default:1200},path:{type:"string",description:"Export path for action=export."}}}},{name:"workspace.daemon",description:"Daemon mode controls: start/stop/status/digest for the background Mesh 
service.",inputSchema:{type:"object",properties:{action:{type:"string",enum:["start","status","digest","stop"],default:"status"}}}},{name:"workspace.issue_pipeline",description:"Issue-to-PR pipeline for GitHub, Linear, and Jira. Scans tagged issues and drafts PR payloads with repo-grounded intent and impact.",inputSchema:{type:"object",properties:{action:{type:"string",enum:["scan","status"],default:"scan"},provider:{type:"string",enum:["github","linear","jira"]},issueId:{type:"string"}}}},{name:"workspace.issue_autopilot",description:"Production Issue-to-PR Autopilot: convert a GitHub/Linear/Jira/manual issue into a verified timeline patch, proof bundle, and optional PR branch/PR.",inputSchema:{type:"object",properties:{action:{type:"string",enum:["status","plan","run","pr","create_pr","submit_pr"],default:"status"},issueUrl:{type:"string",description:"GitHub issue URL or external issue URL."},url:{type:"string",description:"Alias for issueUrl."},provider:{type:"string",enum:["github","linear","jira","manual"]},issueId:{type:"string"},title:{type:"string",description:"Manual issue title."},body:{type:"string",description:"Manual issue body."},description:{type:"string",description:"Alias for body."},labels:{type:"array",items:{type:"string"}},verificationCommand:{type:"string",description:"Command run in the isolated timeline."},baseRef:{type:"string"},baseBranch:{type:"string"},branchName:{type:"string"},prTitle:{type:"string"},patch:{type:"string",description:"Optional raw git patch to apply instead of LLM generation."},maxAttempts:{type:"number",default:2},timeoutMs:{type:"number",default:24e4},submitPr:{type:"boolean",default:!1},push:{type:"boolean",default:!0},maxBrainFiles:{type:"number",default:1200}}}},{name:"workspace.chatops",description:"Slack/Discord co-engineer integration for investigation threads, progress updates, and approval-driven draft PR 
handoff.",inputSchema:{type:"object",properties:{action:{type:"string",enum:["investigate","approve","status"],default:"investigate"},platform:{type:"string",enum:["slack","discord"],default:"slack"},channel:{type:"string",default:"general"},message:{type:"string"}}}},{name:"workspace.production_status",description:"Production awareness status fed by telemetry connectors (Sentry, Datadog, PostHog, OTel).",inputSchema:{type:"object",properties:{action:{type:"string",enum:["refresh","status"],default:"status"}}}},{name:"workspace.model_route",description:"Route a task to the best Mesh model roles: chat, sidecar, retrieval, vision, safety, and required verification gates.",inputSchema:{type:"object",required:["task"],properties:{task:{type:"string"}}}},{name:"workspace.production_readiness",description:"Production readiness gate across model orchestration, RAG, timelines, runtime learning, visual checks, memory, and PR review.",inputSchema:{type:"object",properties:{action:{type:"string",enum:["audit","gate","review","status"],default:"audit"},intent:{type:"string"},verificationCommand:{type:"string"},url:{type:"string",description:"Optional local app URL for live visual readiness checks."}}}},{name:"workspace.intent_compile",description:"Intent Compiler: turn product intent into a repo-grounded implementation contract with likely files, risks, tests, rollout, and verification steps.",inputSchema:{type:"object",required:["intent"],properties:{intent:{type:"string"},verificationCommand:{type:"string"}}}},{name:"workspace.cockpit_snapshot",description:"Live Architecture Cockpit snapshot: digital twin, timeline, runtime, repair, memory, risk, and coverage state for dashboards.",inputSchema:{type:"object",properties:{}}},{name:"workspace.causal_intelligence",description:"Causal Software Intelligence: build, query, or inspect a causal graph linking files, risks, tests, repairs, memory rules, and git change 
pressure.",inputSchema:{type:"object",properties:{action:{type:"string",enum:["build","read","status","query"],default:"build"},query:{type:"string",description:"Question to answer against the causal graph when action='query'."}}}},
{name:"workspace.discovery_lab",description:"Autonomous Discovery Lab: discover high-impact improvements from the causal graph, diagnostics, repair queue, and repo memory.",inputSchema:{type:"object",properties:{action:{type:"string",enum:["run","status","clear"],default:"run"},verificationCommand:{type:"string",description:"Optional verification command attached to generated experiments."},maxDiscoveries:{type:"number",default:8}}}},
{name:"workspace.reality_fork",description:"Reality Fork Engine: turn an intent into multiple scored project realities and optionally materialize them as isolated timelines.",inputSchema:{type:"object",properties:{action:{type:"string",enum:["plan","fork","status","clear"],default:"plan"},intent:{type:"string",description:"Goal to explore across multiple alternative project realities."},forks:{type:"number",default:4},verificationCommand:{type:"string"},runVerification:{type:"boolean",default:!1}}}},
{name:"workspace.ghost_engineer",description:"Ghost Engineer Replay: learn the local engineer's repo-specific working style, predict their implementation path, detect divergence, and materialize a style-conformant autopilot timeline.",inputSchema:{type:"object",properties:{action:{type:"string",enum:["learn","profile","status","predict","divergence","patch","clear"],default:"profile"},goal:{type:"string",description:"Implementation goal for predict or patch actions."},plan:{type:"string",description:"Plan text to compare against the learned engineer profile."},verificationCommand:{type:"string"}}}}]}
/* callTool(e, t, i): public entry point for every tool invocation.
   e = tool name, t = raw args object, i = call options (only i?.onProgress is read, by executeTool's handlers).
   Flow:
   1. Look up the tool's descriptor by name from listTools() (r may be undefined for unknown names).
   2. Se(e, t, schema) validates/normalizes the args against the descriptor's inputSchema and
      returns {args, warnings}; t is replaced by the normalized args. Warnings are logged
      best-effort; a rejection is logged (ve carries an .issues array — presumably the
      schema-validation error type; TODO confirm) and rethrown to the caller.
      NOTE: the `const i` inside the try deliberately shadows the outer options parameter i.
   3. An audit record with {pending:!0} is appended BEFORE execution, then the result (or
      {error: message}) is appended after — so even crashed calls leave an audit trail.
   Every logger/audit write is fire-and-forget with .catch(()=>{}) so telemetry failures can
   never break a tool call. */
async callTool(e,t,i){const r=(await this.listTools()).find(t=>t.name===e);
try{const i=Se(e,t,r?.inputSchema);
t=i.args,i.warnings.length>0&&this.logger.write("warn","tool.input_schema_warnings",{tool:e,warnings:i.warnings}).catch(()=>{})}catch(t){throw this.logger.write("error","tool.input_schema_rejected",{tool:e,issues:t instanceof ve?t.issues:[t.message]}).catch(()=>{}),t}
this.logger.write("info","tool.call",{tool:e,requiresApproval:!0===r?.requiresApproval}).catch(()=>{}),this.audit.append(e,t,{pending:!0}).catch(()=>{});
try{const r=await this.executeTool(e,t,i);
return this.audit.append(e,t,r).catch(()=>{}),r}catch(i){throw this.audit.append(e,t,{error:i.message}).catch(()=>{}),i}}
/* executeTool(e, t, i): flat name->handler dispatch for every tool listed by listTools().
   t is the (already schema-normalized) args object; i?.onProgress is forwarded only to
   long-running handlers. Inline coercions such as String(t.x??"") and
   `"string"==typeof t.x?t.x:void 0` defensively narrow untyped args before they reach
   typed subsystems. Unknown tool names throw. */
async executeTool(e,t,i){switch(e){
/* -- file reading, search, and index tools -- */
case"workspace.list_files":return this.listFiles(t);
case"workspace.read_file":return this.readFile(t);
case"workspace.read_slice":return this.readSlice(t);
case"workspace.open_artifact":return Y(this.workspaceRoot,t);
case"workspace.semantic_search":return this.semanticSearch(t);
case"workspace.search_files":return this.searchFiles(t);
case"workspace.check_sync":return this.checkSync();
case"workspace.grep_content":return this.grepContent(t);
case"workspace.index_everything":return this.indexEverything();
case"workspace.write_file":return this.writeFile(t);
case"workspace.run_command":return this.runCommand(t,i?.onProgress);
case"workspace.read_file_raw":return this.readFileRaw(t);
case"workspace.grep_capsules":return this.grepCapsules(t);
/* two names route to the same handler (index_status is the legacy alias — TODO confirm) */
case"workspace.get_index_status":case"workspace.index_status":return this.getIndexStatus();
case"workspace.read_multiple_files":return this.readMultipleFiles(t);
case"workspace.read_file_lines":return this.readFileLines(t);
case"workspace.list_directory":return this.listDirectory(t);
case"workspace.get_file_info":return this.getFileInfo(t);
case"workspace.move_file":return this.moveFile(t);
case"workspace.delete_file":return this.deleteFile(t);
case"workspace.patch_file":return this.patchFile(t);
case"workspace.patch_surgical":return this.patchSurgical(t);
/* -- symbol / AST navigation tools -- */
case"workspace.list_symbols":return this.listSymbols(t);
case"workspace.expand_symbol":return this.expandSymbol(t);
case"workspace.get_file_graph":return this.getFileGraph(t);
case"workspace.read_dir_overview":return this.readDirOverview(t);
case"web.read_docs":return this.readDocs(t);
case"workspace.run_in_shadow":return this.runInShadow(t,i?.onProgress);
case"workspace.get_diagnostics":return this.getDiagnostics(i?.onProgress);
case"workspace.find_references":return this.findReferences(t);
case"workspace.ask_codebase":return this.askCodebase(t,i?.onProgress);
case"workspace.explain_symbol":return this.explainSymbol(t);
case"workspace.impact_map":return this.impactMap(t);
case"workspace.expand_execution_path":return this.expandExecutionPath(t);
/* -- compiler-backed refactors -- */
case"workspace.rename_symbol":return this.renameSymbol(t);
case"workspace.extract_function":return this.extractFunction(t);
case"workspace.inline_symbol":return this.inlineSymbol(t);
case"workspace.move_to_module":return this.moveToModule(t);
case"workspace.generate_properties":return this.propertyTests.generate({path:"string"==typeof t.path?t.path:void 0,functionName:"string"==typeof t.functionName?t.functionName:void 0,all:Boolean(t.all)});
case"workspace.find_edge_cases":return this.smtFinder.find({path:String(t.path??""),functionName:"string"==typeof t.functionName?t.functionName:void 0});
case"workspace.audit":return this.auditTool(t);
case"workspace.semantic_undo":return this.semanticUndo(t,i?.onProgress);
case"workspace.undo":return this.performUndo();
case"workspace.alien_patch":return this.alienPatch(t);
case"workspace.session_index_symbols":return this.sessionIndexSymbols(t);
case"workspace.generate_lexicon":return this.generateLexicon(t);
case"workspace.ghost_verify":return this.ghostVerify(t,i?.onProgress);
/* -- timeline (isolated-worktree) tools -- */
case"workspace.timeline_create":return this.timelines.create(t);
case"workspace.timeline_apply_patch":return this.timelines.applyPatch({timelineId:String(t.timelineId??""),patch:String(t.patch??"")});
/* we() is called for its side effect before running the command — presumably a
   command-safety gate that throws on disallowed commands; TODO confirm */
case"workspace.timeline_run":return we(String(t.command??"")),this.timelines.run({timelineId:String(t.timelineId??""),command:String(t.command??""),timeoutMs:"number"==typeof t.timeoutMs?t.timeoutMs:void 0});
case"workspace.timeline_compare":return this.timelines.compare({timelineIds:Array.isArray(t.timelineIds)?t.timelineIds.map(String):[]});
case"workspace.timeline_promote":return this.timelinePromoteWithBrain({timelineId:String(t.timelineId??"")});
case"workspace.timeline_list":return this.timelines.list();
case"workspace.symptom_bisect":return this.symptomBisect.run({symptom:String(t.symptom??""),verificationCommand:"string"==typeof t.verificationCommand?t.verificationCommand:void 0,searchDepth:"number"==typeof t.searchDepth?t.searchDepth:void 0});
case"workspace.what_if":return this.whatIf(t,i?.onProgress);
/* -- "moonshot" subsystems: each delegates wholesale to its own .run(args) -- */
case"workspace.self_defend":return this.selfDefense.run(t);
case"workspace.precrime":return this.precrime.run(t);
case"workspace.end_staging":return this.shadowDeploy.run(t);
case"workspace.semantic_git":return this.semanticGit.run(t);
case"workspace.probabilistic_codebase":return this.probabilisticCodebase.run(t);
case"workspace.conversational_codebase":return this.conversationalCodebase.run(t);
case"workspace.spec_code":return this.specCode.run(t);
case"workspace.natural_language_source":return this.naturalLanguageSource.run(t);
case"workspace.fluid_mesh":return this.fluidMesh.run(t);
case"workspace.living_software":return this.livingSoftware.run(t);
case"workspace.proof_carrying_change":return this.proofCarryingChange.run(t);
case"workspace.causal_autopsy":return this.causalAutopsy.run(t);
case"workspace.todo_resolver":return this.todoResolver.run(t);
case"workspace.live_wire":return this.liveWire.run(t);
case"workspace.schrodingers_ast":return this.schrodingersAst.run(t);
case"workspace.hive_mind":return this.hiveMind.run(t);
case"workspace.tribunal":return this.tribunal.run(t);
case"workspace.session_resurrection":return this.sessionResurrection.run(t);
case"workspace.semantic_sheriff":return this.semanticSheriff.run(t);
case"agent.assemble_team":return this.personaLoader.assembleTeam(String(t.task??""));
case"workspace.finalize_task":return this.finalizeTask(t,i?.onProgress);
case"web.inspect_ui":return this.inspectUi(t,i?.onProgress);
case"frontend.preview":return this.previewFrontend(t,i?.onProgress);
case"workspace.query_ast":return this.queryAst(t);
/* returns a literal result object (not a handler call): the watcher-tracked diff list,
   or a human-readable placeholder string when nothing was tracked */
case"workspace.get_recent_changes":return{ok:!0,changes:this.recentChanges.length>0?this.recentChanges:"No recent changes detected in background."};
case"workspace.run_with_telemetry":return this.runWithTelemetry(t,i?.onProgress);
/* -- runtime observer / replay tools -- */
case"runtime.start":return this.runtimeObserver.start(t);
case"runtime.capture_failure":return this.runtimeObserver.captureFailure(t);
case"runtime.capture_deep_autopsy":return this.runtimeObserver.captureDeepAutopsy(t);
case"runtime.trace_request":return this.runtimeObserver.traceRequest(t);
case"runtime.explain_failure":return this.runtimeObserver.explainFailure(t);
case"runtime.fix_failure":return this.runtimeObserver.fixFailure(t);
case"runtime.replay_trace":return this.replayEngine.replayTrace({traceId:"string"==typeof t.traceId?t.traceId:void 0,sentryEventId:"string"==typeof t.sentryEventId?t.sentryEventId:void 0,commitRange:"string"==typeof t.commitRange?t.commitRange:void 0});
case"workspace.validate_patch":return this.validatePatch(t);
case"workspace.trace_symbol":return this.traceSymbol(t);
/* -- multi-agent orchestration -- */
case"agent.invoke_sub_agent":return this.invokeSubAgent(t,i?.onProgress);
case"agent.race_fixes":return this.raceFixes(t,i?.onProgress);
case"agent.spawn_swarm":return this.spawnSwarm(t,i?.onProgress);
case"agent.spawn":return this.agentOs.spawn(t);
case"agent.status":return this.agentOs.status(t);
case"agent.review":return this.agentOs.review(t);
case"agent.merge_verified":return this.agentOs.mergeVerified(t);
case"workspace.git_diff":return this.getGitDiff(t);
case"agent.plan":return this.handleAgentPlan(t);
case"workspace.get_env_info":return this.getEnvInfo();
case"workspace.grep_ripgrep":return this.grepRipgrep(t);
case"workspace.git_status":return this.getGitStatus();
/* -- intelligence / memory / integration subsystems -- */
case"workspace.digital_twin":return this.digitalTwin(t);
case"workspace.predictive_repair":return this.predictiveRepair(t,i?.onProgress);
case"workspace.engineering_memory":return this.engineeringMemory(t);
case"workspace.brain":return this.meshBrainTool(t);
case"workspace.company_brain":return this.companyBrain.run(t);
case"workspace.daemon":return this.daemonControl(t);
case"workspace.issue_pipeline":return this.issuePipeline.run({action:"string"==typeof t.action?t.action:"scan",provider:"string"==typeof t.provider?t.provider:void 0,issueId:"string"==typeof t.issueId?t.issueId:void 0});
case"workspace.issue_autopilot":return this.issueAutopilot.run(t);
case"workspace.chatops":return this.chatops.run({action:"string"==typeof t.action?t.action:"investigate",platform:"string"==typeof t.platform?t.platform:"slack",channel:"string"==typeof t.channel?t.channel:"general",message:"string"==typeof t.message?t.message:void 0});
case"workspace.production_status":return this.productionStatus(t);
/* G() maps a task description to a model-route object (free-function import from the bundle) */
case"workspace.model_route":return{ok:!0,route:G(String(t.task??""))};
case"workspace.production_readiness":return this.productionReadiness.run(t);
case"workspace.intent_compile":return this.intentCompile(t);
case"workspace.cockpit_snapshot":return this.cockpitSnapshot();
case"workspace.causal_intelligence":return this.causalIntelligence(t,i?.onProgress);
case"workspace.discovery_lab":return this.discoveryLab(t,i?.onProgress);
case"workspace.reality_fork":return this.realityFork(t,i?.onProgress);
case"workspace.ghost_engineer":return this.ghostEngineer(t,i?.onProgress);
default:throw new Error(`Unknown local tool: ${e}`)}}
/* close(): orderly shutdown. Iterates a SPREAD COPY of this.watchers because
   closeWatcher presumably removes entries from the set while we iterate — TODO confirm.
   Then waits for any still-running startup tasks to settle, flushes the cache and closes
   the timeline store (both best-effort: failures swallowed via .catch(()=>{})), and
   finally closes the runtime observer and mesh core — those two are awaited without a
   catch, so their failures propagate to the caller. */
async close(){for(const e of[...this.watchers])this.closeWatcher(e);
return await Promise.allSettled(this.startupTasks),await this.cache.flushCache().catch(()=>{}),await this.timelines.close().catch(()=>{}),await this.runtimeObserver.close(),await this.meshCore.close(),Promise.resolve()}
async listFiles(e){const
r="string"==typeof e.path?e.path:".",s=Math.max(1,Math.min(Number(e.limit)||200,2e3)),n=i(this.workspaceRoot,r),a=await S(n,s,this.workspaceRoot);return{ok:!0,workspaceRoot:this.workspaceRoot,requestedPath:r,count:a.length,files:a.map(e=>t(this.workspaceRoot,e)).sort((e,t)=>e.localeCompare(t))}}async readSlice(e){const r=String(e.path??"").trim(),s=String(e.symbol??"").trim();if(!r||!s)throw new Error("workspace.read_slice requires 'path' and 'symbol'");const n=i(this.workspaceRoot,r);if(!_(n))throw new Error(`File not found: ${r}`);const a=await this.workspaceIndex.getSemanticSlice(r,s);if(!a.ok||!a.slice)throw new Error(a.error||`Failed to read semantic slice for ${s} in ${r}`);return{ok:!0,path:t(this.workspaceRoot,n),symbol:s,content:a.slice}}async readFile(e){const s=String(e.path??"").trim(),n=(a=String(e.tier??"medium").trim(),We.includes(a)?a:"medium");var a;if(!s)throw new Error("workspace.read_file requires 'path'");const o=i(this.workspaceRoot,s);if(!await r(o))throw new Error(`File not found: ${s}`);const c=await x.stat(o);if(!c.isFile())throw new Error(`Not a file: ${s}`);const l=Math.floor(c.mtimeMs),p=t(this.workspaceRoot,o);let m=await this.cache.getCapsule(p,n,l),u="",d="";if(m||(u=await x.readFile(o,"utf8"),d=P.createHash("sha1").update(u).digest("hex"),m=await this.cache.getCapsule(p,n,l,d)),m)return{ok:!0,path:p,bytes:c.size,tier:n,capsule:m.content,source:"cache"};let h=u.slice(0,12e3);if(this.meshCore.isAvailable){h=(await this.meshCore.summarizeSelectedTiers(p,u,[n]))[n]||h,await this.cache.setCapsule(p,n,h,l,d);const e=We.filter(e=>e!==n);this.meshCore.summarizeSelectedTiers(p,u,e).then(t=>Promise.all(e.map(e=>this.cache.setCapsule(p,e,t[e]||"",l,d)))).catch(()=>{})}else await this.cache.setCapsule(p,n,h,l,d),Promise.all(We.filter(e=>e!==n).map(e=>this.cache.setCapsule(p,e,u.slice(0,12e3),l,d))).catch(()=>{});return{ok:!0,path:p,bytes:c.size,tier:n,capsule:h,source:"generated",note:"This is a Mesh-optimized capsule. 
Use read_file_raw if you need the full source code."}}async readFileRaw(e){const r=String(e.path??"").trim();if(!r)throw new Error("read_file_raw requires path");const s=i(this.workspaceRoot,r),n=await x.readFile(s,"utf8");return{ok:!0,path:t(this.workspaceRoot,s),content:n,note:"Raw source code loaded. Use sparingly to save tokens."}}async grepCapsules(e){const i=String(e.query??"").trim().toLowerCase(),r=Math.max(1,Math.min(Number(e.limit)||50,200)),s=await S(this.workspaceRoot,1e4,this.workspaceRoot),n=[];for(const e of s){if(n.length>=r)break;const s=t(this.workspaceRoot,e),a=await x.stat(e).catch(()=>null);if(!a?.isFile())continue;const o=await this.cache.getCapsule(s,"medium",Math.floor(a.mtimeMs));o&&o.content.toLowerCase().includes(i)&&n.push({path:s,snippet:o.content.slice(0,300)})}return{ok:!0,query:i,count:n.length,matches:n}}async readMultipleFiles(e){const t=Array.isArray(e.paths)?e.paths:[],i=await Promise.all(t.slice(0,15).map(async e=>{try{return await this.readFile({path:e})}catch(t){return{ok:!1,path:e,error:t.message}}}));return{ok:!0,count:i.length,results:i}}async semanticSearch(e){const t=String(e.query??"").trim();if(!t)throw new Error("workspace.semantic_search requires 'query'");const i=Math.max(1,Math.min(Number(e.limit)||8,20)),r=e.mode||"architecture";try{const e=await this.workspaceIndex.search(t,r,i);return e.results&&0!==e.results.length?{ok:!0,query:t,count:e.resultsFound,results:e.results.slice(0,i).map(e=>({file:e.file,score:Math.round(100*e.score)/100,purpose:e.purpose,signals:e.matchedSignals.slice(0,6),citations:e.citations.slice(0,3).map(e=>({symbol:e.symbol,lines:e.lines}))})),topMatches:e.topMatches.slice(0,i)}:{ok:!0,query:t,count:0,results:[],hint:"No indexed results — run workspace.index_everything first or embeddings may not be enabled."}}catch(e){return{ok:!1,error:String(e),hint:"Semantic search unavailable — index may not be built yet."}}}async searchFiles(e){const r=String(e.query??"").trim().toLowerCase();if(!r)throw 
new Error("workspace.search_files requires 'query'");const s="string"==typeof e.path?e.path:".",n=Math.max(1,Math.min(Number(e.limit)||100,1e3)),a=i(this.workspaceRoot,s),o=(await S(a,4e3,this.workspaceRoot)).map(e=>t(this.workspaceRoot,e)).filter(e=>e.toLowerCase().includes(r)).slice(0,n);return{ok:!0,query:r,count:o.length,matches:o}}async readFileLines(e){const r=String(e.path??"").trim(),s=Math.max(1,Number(e.startLine)||1),n=Number(e.endLine);if(!r||!n)throw new Error("workspace.read_file_lines requires path and endLine");const a=i(this.workspaceRoot,r),o=(await x.readFile(a,"utf8")).split(/\r?\n/g),c=o.slice(s-1,n);return{ok:!0,path:t(this.workspaceRoot,a),startLine:s,endLine:n,totalLines:o.length,content:c.join("\n")}}async listDirectory(e){const t="string"==typeof e.path?e.path:".",r=i(this.workspaceRoot,t);return{ok:!0,path:t,entries:(await x.readdir(r,{withFileTypes:!0})).map(e=>({name:e.name,type:e.isDirectory()?"directory":"file"})).sort((e,t)=>e.type!==t.type?"directory"===e.type?-1:1:e.name.localeCompare(t.name))}}async getFileInfo(e){const r=String(e.path??"").trim(),s=i(this.workspaceRoot,r),n=await x.stat(s);return{ok:!0,path:t(this.workspaceRoot,s),size:n.size,mtime:n.mtime.toISOString(),isDirectory:n.isDirectory(),isFile:n.isFile()}}async moveFile(e){const r=String(e.sourcePath??"").trim(),s=String(e.destinationPath??"").trim();if(!r||!s)throw new Error("move_file requires sourcePath and destinationPath");const n=i(this.workspaceRoot,r),a=i(this.workspaceRoot,s);await x.rename(n,a);const o=t(this.workspaceRoot,n),c=t(this.workspaceRoot,a);return await this.cache.deleteCapsule(o,"low"),await this.cache.deleteCapsule(o,"medium"),await this.cache.deleteCapsule(o,"high"),this.workspaceIndex.partialUpdate([c]).catch(()=>{}),{ok:!0,source:r,destination:s}}async deleteFile(e){const r=String(e.path??"").trim(),s=i(this.workspaceRoot,r);await x.unlink(s);const n=t(this.workspaceRoot,s);return await this.cache.deleteCapsule(n,"low"),await 
this.cache.deleteCapsule(n,"medium"),await this.cache.deleteCapsule(n,"high"),{ok:!0,path:n}}async patchFile(e){const r=String(e.path??"").trim(),s=String(e.search??""),n=String(e.replace??"");if(!r||!s)throw new Error("patch_file requires path and search string");const a=i(this.workspaceRoot,r);await this.saveBackup(r);const o=await x.readFile(a,"utf8");if(!o.includes(s))throw new Error(`Search string not found in ${r}`);const c=o.replace(s,n);await x.writeFile(a,c,"utf8");const l=t(this.workspaceRoot,a);return await this.cache.deleteCapsule(l,"low"),await this.cache.deleteCapsule(l,"medium"),await this.cache.deleteCapsule(l,"high"),this.workspaceIndex.partialUpdate([l]).catch(()=>{}),{ok:!0,path:l,patched:!0}}async getGitStatus(){try{const{stdout:e}=await E("git status --short",{cwd:this.workspaceRoot}),{stdout:t}=await E("git branch --show-current",{cwd:this.workspaceRoot});return{ok:!0,branch:t.trim(),status:e.trim()}}catch(e){return{ok:!1,error:"Not a git repository or git not installed"}}}async readDocs(e){const t=String(e.url??"").trim();if(!t)throw new Error("web.read_docs requires url");try{const e=await fetch(t);if(!e.ok)throw new Error(`HTTP ${e.status} ${e.statusText}`);const i=await e.text();return{ok:!0,url:t,content:i.replace(/<script\b[^<]*(?:(?!<\/script>)<[^<]*)*<\/script>/gi,"").replace(/<style\b[^<]*(?:(?!<\/style>)<[^<]*)*<\/style>/gi,"").replace(/<[^>]+>/g," ").replace(/\s{2,}/g," ").trim().slice(0,15e3),note:"Content converted from HTML. 
Formatting may be imperfect."}}catch(e){return{ok:!1,error:e.message}}}async runInShadow(e,t){const i=String(e.command??"").trim();if(!i)throw new Error("workspace.run_in_shadow requires 'command'");const r=ke(i),s=R.join($.tmpdir(),`mesh-shadow-${Date.now()}`);t?.(`[Shadow Workspace] Creating at ${s}...\n`);try{await E(`rsync -a --exclude node_modules --exclude .git --exclude dist ${this.workspaceRoot}/ ${s}/`),t?.(`[Shadow Workspace] Running: ${i}\n`);const e=6e4,n=await new Promise(i=>{const n=M(r.command,r.args,{cwd:s});let a="",o="",c=!1;const l=setTimeout(()=>{c=!0,n.kill("SIGTERM")},e);n.stdout.on("data",e=>{const i=e.toString();a+=i,t?.(i)}),n.stderr.on("data",e=>{const i=e.toString();o+=i,t?.(i)}),n.on("close",t=>{clearTimeout(l),i({ok:0===t&&!c,exitCode:c?124:t??0,stdout:a,stderr:c?`[TIMEOUT after ${e/1e3}s]\n${o}`:o})})});return await x.rm(s,{recursive:!0,force:!0}),{...n,note:"Executed safely in shadow workspace."}}catch(e){return await x.rm(s,{recursive:!0,force:!0}).catch(()=>{}),{ok:!1,error:e.message}}}async getDiagnostics(e){e?.("Running diagnostics (tsc --noEmit)...\n");const t=R.join(this.workspaceRoot,"tsconfig.json");if(!await r(t))return{ok:!0,output:"No tsconfig.json found; TypeScript diagnostics skipped."};try{const{stdout:e,stderr:t}=await T("npm",["exec","--","tsc","--noEmit"],{cwd:this.workspaceRoot});return{ok:!0,output:e||t||"No issues found."}}catch(e){return{ok:!1,hasErrors:!0,output:e.stdout||e.stderr||e.message}}}async findReferences(e){const t=String(e.symbol??"").trim();if(!t)throw new Error("workspace.find_references requires symbol");return this.grepRipgrep({query:`\\b${t}\\b`})}async expandExecutionPath(e){const r=String(e.path??"").trim(),s=String(e.symbolName??"").trim();if(!r||!s)throw new Error("expand_execution_path requires path and symbolName");const n=i(this.workspaceRoot,r),a=await x.readFile(n,"utf8"),o=t(this.workspaceRoot,n),c=await this.meshCore.getDetailedRecord(o,a);if(!c)throw new Error("Could not analyze file 
with MeshCore.");const l=c.symbols.find(e=>e.name===s||e.kind&&`${e.kind} ${e.name}`===s);if(!l)throw new Error(`Symbol '${s}' not found in ${r}`);const p=a.split(/\r?\n/g),m=p.slice(l.lineStart-1,l.lineEnd).join("\n"),u=c.callSites.filter(e=>e.lineStart>=l.lineStart&&e.lineStart<=l.lineEnd),d=[];for(const e of u){const t=c.symbols.find(t=>t.name===e.callee);if(t){const i=p[t.lineStart-1]?.trim()||"";d.push(`${e.callee} at L${t.lineStart}: ${i}`)}}return{ok:!0,path:o,symbol:l.name,body:m,internalDependencies:d.length>0?d:["No internal dependencies found."]}}async sessionIndexSymbols(e){const t=e.paths||[];let r=this.sessionSymbolIndex.size+1;const s={};for(const e of t){const t=i(this.workspaceRoot,e),n=await x.readFile(t,"utf8"),a=await this.meshCore.extractSymbols(e,n);for(const t of a){const i=r++;this.sessionSymbolIndex.set(i,{path:e,name:t.name}),s[`${e}#${t.name}`]=i}}return{ok:!0,sessionIndex:s,note:"Use these IDs with workspace.alien_patch for max token savings."}}async generateLexicon(e){const t=e.paths||[];let r=Object.keys(this.projectLexicon).length+1;for(const e of t){const t=i(this.workspaceRoot,e),s=await x.readFile(t,"utf8"),n=await this.meshCore.getDetailedRecord(e,s);if(n)for(const e of n.symbols)Object.values(this.projectLexicon).includes(e.name)||(this.projectLexicon[r++]=e.name)}return{ok:!0,lexicon:this.projectLexicon,note:"Use #ID in alien_patch to reference these terms."}}startLiveSyncWatcher(){}async saveBackup(e){try{const t=i(this.workspaceRoot,e);if(_(t)){const i=await x.readFile(t,"utf8");this.changeStack.push({path:e,content:i}),this.changeStack.length>50&&this.changeStack.shift()}}catch{}}async performUndo(){const e=this.changeStack.pop();if(!e)return{ok:!1,error:"No changes to undo."};const r=i(this.workspaceRoot,e.path);await x.writeFile(r,e.content,"utf8");const s=t(this.workspaceRoot,r);return await this.cache.deleteCapsule(s,"low"),await this.cache.deleteCapsule(s,"medium"),await 
this.cache.deleteCapsule(s,"high"),{ok:!0,path:e.path,message:`Restored ${e.path} to previous state.`}}async ghostVerify(e,t){const i=String(e.patch??""),r=String(e.testCommand??"");if(!i.trim()||!r.trim())throw new Error("workspace.ghost_verify requires patch and testCommand");try{const e=await this.timelines.create({name:"ghost-verify"}),s=e.timeline.root;if(t?.(`[GBL] Spawned replayable timeline ${e.timeline.id} at ${s}\n`),/^(diff --git|---\s|\+\+\+\s)/m.test(i.trim())){const t=await this.timelines.applyPatch({timelineId:e.timeline.id,patch:i});if(!t.ok)return t}else{t?.("[GBL] Applying symbolic patch to ghost timeline...\n");const e=this.config?{...this.config,agent:{...this.config.agent,workspaceRoot:s}}:void 0,r=new LocalToolBackend(s,e);r.sessionSymbolIndex=new Map(this.sessionSymbolIndex),r.projectLexicon={...this.projectLexicon},await r.alienPatch({patch:i})}t?.(`[GBL] Running verification: ${r}\n`);const n=await this.timelines.run({timelineId:e.timeline.id,command:r,timeoutMs:12e4});return{ok:n.ok,timelineId:e.timeline.id,message:n.ok?"Verification PASSED in ghost timeline.":"Verification FAILED in ghost timeline. Patch rejected.",output:n.stdout||n.stderr}}catch(e){return{ok:!1,message:"Verification FAILED in ghost timeline. Patch rejected.",error:e.stdout||e.stderr||e.message}}}async alienPatch(e){const r=String(e.patch??"").trim().match(/^!(\d+)\s*>\s*\{([\s\S]*)\}$/);if(!r)throw new Error("Invalid alien_patch format. Use !ID > { CODE }");const s=parseInt(r[1],10),n=r[2].trim(),a=this.sessionSymbolIndex.get(s);if(!a)throw new Error(`Session ID !${s} not found. 
Run workspace.session_index_symbols first.`);const o=this.meshCore.expandAlienCode(n,this.projectLexicon),c=i(this.workspaceRoot,a.path),l=await x.readFile(c,"utf8"),p=await this.meshCore.getDetailedRecord(a.path,l);if(!p)throw new Error("Could not analyze file for alien patch.");const m=p.symbols.find(e=>e.name===a.name);if(!m)throw new Error(`Symbol ${a.name} no longer found in ${a.path}`);const u=l.split(/\r?\n/g),d=l,h=[...u],g=m.lineEnd-m.lineStart+1;h.splice(m.lineStart-1,g,o);const f=h.join("\n");await x.writeFile(c,f,"utf8");try{c.endsWith(".js")||c.endsWith(".cjs")||c.endsWith(".mjs")?await E(`node --check ${c}`):(c.endsWith(".ts")||c.endsWith(".tsx"))&&await E(`npx tsc --noEmit --skipLibCheck --target esnext --moduleResolution node ${c}`)}catch(e){return await x.writeFile(c,d,"utf8"),{ok:!1,error:"Alien patch introduced a syntax error and was automatically rolled back.",expandedCode:o,compilerOutput:e.stdout||e.stderr||e.message}}const y=t(this.workspaceRoot,c);await this.cache.deleteCapsule(y,"low"),await this.cache.deleteCapsule(y,"medium"),await this.cache.deleteCapsule(y,"high");const w=[];for(const e of this.entangledWorkspaces)try{const{stdout:t}=await E(`npx --yes @ast-grep/cli run --pattern "${a.name}"`,{cwd:e}).catch(()=>({stdout:""})),i=t.split("\n").find(e=>e.includes(a.name));if(i){const t=i.split(":")[0],r=R.join(e,t),s=await x.readFile(r,"utf8");(s.includes(`interface ${a.name}`)||s.includes(`type ${a.name}`))&&w.push(`Quantum-synced AST to entangled repo at ${t}`)}}catch{}return{ok:!0,path:y,id:s,symbol:a.name,patched:!0,quantumSync:w.length>0?w:void 0}}async semanticUndo(e,t){const i=String(e.concept??"").trim();if(!i)throw new Error("semantic_undo requires a concept name");t?.(`[Chrono-Untangling] Searching commit history for concept: "${i}"...\\n`);try{const{stdout:e}=await E(`git log --grep="${i}" --format="%H" -n 1`,{cwd:this.workspaceRoot}),r=e.trim();return r?(t?.(`[Chrono-Untangling] Reverting commit ${r.slice(0,7)} without 
affecting newer AST nodes...\\n`),await E(`git revert -n ${r}`,{cwd:this.workspaceRoot}),{ok:!0,message:`Concept "${i}" has been surgically removed. Workspace state is now in 'uncommitted revert'. Please verify and commit.`}):{ok:!1,error:`No recent commit found matching concept "${i}".`}}catch(e){return{ok:!1,error:e.message}}}async renameSymbol(e){const r=String(e.path??"").trim(),s=String(e.oldName??"").trim(),n=String(e.newName??"").trim();if(!r||!s||!n)throw new Error("rename_symbol requires path, oldName, and newName");const a=i(this.workspaceRoot,r);await this.saveBackup(r);const o=await x.readFile(a,"utf8");try{if(0===(await this.tsRefactor.renameSymbol(r,s,n)).changed)throw new Error(`Symbol '${s}' not found in ${r}`);await this.verifyTypecheckOrRollback(a,o)}catch(e){return await x.writeFile(a,o,"utf8"),{ok:!1,error:"Compiler-backed rename failed and was automatically rolled back.",compilerOutput:e.stdout||e.stderr||e.message}}const c=t(this.workspaceRoot,a);return await this.cache.deleteCapsule(c,"low"),await this.cache.deleteCapsule(c,"medium"),await this.cache.deleteCapsule(c,"high"),{ok:!0,path:c,oldName:s,newName:n,patched:!0}}async extractFunction(e){const t=String(e.path??"").trim(),r=String(e.functionName??"").trim(),s=Number(e.startLine),n=Number(e.endLine);if(!(t&&r&&Number.isFinite(s)&&Number.isFinite(n)))throw new Error("extract_function requires path, functionName, startLine, and endLine");const a=i(this.workspaceRoot,t),o=await x.readFile(a,"utf8");try{return await this.tsRefactor.extractFunction(t,r,s,n),await this.verifyTypecheckOrRollback(a,o),{ok:!0,path:t,functionName:r,startLine:s,endLine:n}}catch(e){return await x.writeFile(a,o,"utf8"),{ok:!1,error:e.message}}}async inlineSymbol(e){const t=String(e.path??"").trim(),r=String(e.symbolName??"").trim();if(!t||!r)throw new Error("inline_symbol requires path and symbolName");const s=i(this.workspaceRoot,t),n=await x.readFile(s,"utf8");try{const e=await 
this.tsRefactor.inlineSymbol(t,r);return await this.verifyTypecheckOrRollback(s,n),{ok:!0,path:t,symbolName:r,inlined:e.inlined}}catch(e){return await x.writeFile(s,n,"utf8"),{ok:!1,error:e.message}}}async moveToModule(e){const t=String(e.fromPath??"").trim(),r=String(e.toPath??"").trim(),s=String(e.symbolName??"").trim();if(!t||!r||!s)throw new Error("move_to_module requires fromPath, toPath, symbolName");const n=i(this.workspaceRoot,t),a=i(this.workspaceRoot,r),o=await x.readFile(n,"utf8"),c=await x.readFile(a,"utf8").catch(()=>"");try{if(!(await this.tsRefactor.moveToModule(t,r,s)).moved)throw new Error(`Unable to move symbol '${s}'`);return await this.verifyTypecheckOrRollback(n,o,a,c),{ok:!0,fromPath:t,toPath:r,symbolName:s}}catch(e){return await x.writeFile(n,o,"utf8"),await x.writeFile(a,c,"utf8"),{ok:!1,error:e.message}}}async auditTool(e){const t=String(e.action??"verify");if("replay"===t){const i=Math.max(1,Math.min(Number(e.limit)||200,2e3));return{ok:!0,action:t,entries:await this.audit.replay(i)}}return{action:"verify",...await this.audit.verify()}}async verifyTypecheckOrRollback(e,t,i,r){try{await E("npm run typecheck",{cwd:this.workspaceRoot,timeout:24e4})}catch(s){throw await x.writeFile(e,t,"utf8"),i&&void 0!==r&&await x.writeFile(i,r,"utf8"),s}}async finalizeTask(e,t){const i=String(e.branchName??"").trim(),r=String(e.commitMessage??"").trim();if(!i||!r)throw new Error("finalize_task requires branchName and commitMessage");t?.(`[Semantic Git] Preparing branch ${i}...\n`);try{await E(`git checkout -b ${i}`,{cwd:this.workspaceRoot}),await E("git add .",{cwd:this.workspaceRoot});const e=`${r}\n\n[Mesh Semantic Trace]\nBased on plan: ${this.agentPlan.slice(0,200)}...`;return await new Promise((t,i)=>{const r=M("git",["commit","-F","-"],{cwd:this.workspaceRoot});r.stdin.write(e),r.stdin.end(),r.on("close",e=>0===e?t(!0):i(new Error(`Git commit failed with code ${e}`)))}),{ok:!0,branch:i,note:"Branch created, changes staged, and commit applied 
successfully. Ready for PR."}}catch(e){return{ok:!1,error:e.message}}}async inspectUi(e,t){const i=String(e.url??"").trim();if(!i)throw new Error("web.inspect_ui requires a URL");const r=s("playwright");r||t?.("[Multi-modal Sight] Playwright not detected locally; attempting npx fallback...\n"),t?.(`[Multi-modal Sight] Capturing UI from ${i} via Playwright...\n`);const n=R.join($.tmpdir(),`mesh_ui_${Date.now()}.png`);try{const e=r?`npx --no-install playwright screenshot --wait-for-timeout=1000 ${i} ${n}`:`npx --yes playwright screenshot --wait-for-timeout=1000 ${i} ${n}`;await E(e);const t=(await x.readFile(n)).toString("base64");return await x.unlink(n).catch(()=>{}),{ok:!0,url:i,base64Image:t,instruction:"Pass this base64 string to a vision model to evaluate the layout.",visionAnalysis:await this.runVisionAnalysis(t,i)}}catch(e){const t=e?.stderr?.toString?.()||e.message||"";return{ok:!1,error:/command not found|ENOENT|could not determine executable|is not installed|executable not found|browserType\.launch/i.test(t)?"Playwright is not installed (or its browser binaries are missing). 
Install with `npm i -g playwright && npx playwright install chromium`, or use `frontend.preview` instead (built-in Chrome CDP, zero extra deps).":"Failed to capture screenshot via Playwright.",tip:"frontend.preview uses the Chrome DevTools Protocol directly and needs no Playwright install.",details:t}}}async previewFrontend(e,t){const i=String(e.url??"").trim();if(!i)throw new Error("frontend.preview requires url");const r=await J({url:i,width:"number"==typeof e.width?e.width:Number(e.width||1280),height:"number"==typeof e.height?e.height:Number(e.height||800),waitMs:"number"==typeof e.waitMs?e.waitMs:Number(e.waitMs||1200),render:!1!==e.render,protocol:"string"==typeof e.protocol?e.protocol:"auto",outputPath:"string"==typeof e.outputPath?e.outputPath:void 0,onProgress:t}),s=await x.readFile(r.screenshotPath);return{...r,visionAnalysis:await this.runVisionAnalysis(s.toString("base64"),i)}}async runVisionAnalysis(e,t){if(z(this.config?.bedrock.bearerToken))try{return await O(e,[`Inspect this Mesh UI screenshot from ${t}.`,"Return a compact engineering review with:","1. primary layout or rendering defects,","2. text overflow or clipping,","3. missing hierarchy or affordance issues,","4. 
likely next fix."].join("\n"),process.env.MESH_VISION_MODEL||B[0],this.config?.bedrock.bearerToken)}catch(e){return`vision unavailable: ${e.message}`}}async queryAst(e){const t=String(e.pattern??"").trim();if(!t)throw new Error("query_ast requires an AST pattern");const i=s("ast-grep")?"ast-grep":s("sg")?"sg":null;try{const e=i||"npm",r=i?["run","--pattern",t]:["exec","--yes","--","@ast-grep/cli","run","--pattern",t],{stdout:s}=await T(e,r,{cwd:this.workspaceRoot}),n=s.split("\n").filter(Boolean);return{ok:!0,pattern:t,matchesFound:n.length,preview:n.slice(0,15).join("\n"),backend:i??"npx"}}catch(e){if(e.stdout)return{ok:!0,pattern:t,matchesFound:0,preview:e.stdout,backend:i??"npx"};const r=e.stderr?.toString?.()||e.message||"";return!i&&/command not found|ENOENT|is not installed|could not determine/i.test(r)?{ok:!1,error:"ast-grep is not installed and the npx fallback could not fetch it. Install locally with `brew install ast-grep` (macOS), `cargo install ast-grep --locked`, or `npm i -g @ast-grep/cli`.",hint:"Once installed, Mesh will auto-detect the `ast-grep` binary and skip the slow npx fetch."}:{ok:!1,error:"ast-grep failed to execute.",details:r}}}async runWithTelemetry(e,t){const i=String(e.command??"").trim();if(!i)throw new Error("workspace.run_with_telemetry requires 'command'");const r=R.join($.tmpdir(),`mesh-telemetry-${Date.now()}.cjs`);await x.writeFile(r,"\nconst inspector = require('node:inspector');\nconst session = new inspector.Session();\nsession.connect();\nsession.post('Debugger.enable');\nsession.post('Debugger.setPauseOnExceptions', { state: 'uncaught' });\n\nsession.on('Debugger.paused', async (message) => {\n console.error('\\n[Mesh Telemetry] 🚨 UNCAUGHT EXCEPTION DETECTED 🚨');\n console.error('[Mesh Telemetry] Freezing V8 Engine and dumping memory state...\\n');\n\n const callFrames = message.params.callFrames.slice(0, 3);\n\n const getProps = (objectId) => new Promise(resolve => {\n session.post('Runtime.getProperties', { objectId, 
ownProperties: true }, (err, res) => {\n resolve(err ? [] : res.result);\n });\n });\n\n for (let i = 0; i < callFrames.length; i++) {\n const frame = callFrames[i];\n console.error(`\\n► Frame ${i}: ${frame.functionName || '<anonymous>'} (${frame.url}:${frame.location.lineNumber + 1})`);\n\n for (const scope of frame.scopeChain) {\n if (scope.type === 'local' || scope.type === 'closure') {\n const props = await getProps(scope.object.objectId);\n if (props && props.length > 0) {\n console.error(` ➤ Scope: ${scope.type}`);\n for (const p of props) {\n if (p.name === 'exports' || p.name === 'require' || p.name === 'module' || p.name === '__filename' || p.name === '__dirname') continue;\n let val = p.value ? p.value.value : (p.value && p.value.description ? p.value.description : 'undefined');\n if (typeof val === 'string' && val.length > 200) val = val.slice(0, 200) + '...';\n console.error(` let ${p.name} = ${val};`);\n }\n }\n }\n }\n }\n\n console.error('\\n[Mesh Telemetry] Dump complete. Exiting.');\n process.exit(1);\n});\n","utf8"),t?.(`[Telemetry] Running: ${i}\n`);const s=ke(i),n=await new Promise(e=>{const i={...process.env,NODE_OPTIONS:`${process.env.NODE_OPTIONS||""} --require "${r}"`},n=M(s.command,s.args,{cwd:this.workspaceRoot,env:i});let a="",o="",c=!1;const l=setTimeout(()=>{c=!0,n.kill("SIGTERM")},6e4);n.stdout.on("data",e=>{const i=e.toString();a+=i,t?.(i)}),n.stderr.on("data",e=>{const i=e.toString();o+=i,t?.(i)}),n.on("close",t=>{clearTimeout(l),e({ok:0===t&&!c,exitCode:c?124:t??0,stdout:a.length>5e3?a.slice(0,2e3)+"\n... [Truncated] ...\n"+a.slice(-3e3):a,stderr:o.length>8e3?o.slice(0,2e3)+"\n... 
[Truncated] ...\n"+o.slice(-6e3):o})})});return await x.unlink(r).catch(()=>{}),n}async validatePatch(e){const t=String(e.path??"").trim(),r=String(e.searchBlock??""),s=String(e.replaceBlock??"");if(!t||!r)throw new Error("validate_patch requires path and searchBlock");const n=i(this.workspaceRoot,t),a=await x.readFile(n,"utf8"),o=a;let c=a;if(a.includes(r))c=a.replace(r,s);else{const e=r.split("\n").map(e=>e.trim()),t=a.split("\n");let i=-1;for(let r=0;r<=t.length-e.length;r++){let s=!0;for(let i=0;i<e.length;i++)if(t[r+i].trim()!==e[i]){s=!1;break}if(s){i=r;break}}if(-1===i)return{ok:!1,error:"Search block not found. Cannot validate."};const n=[...t];n.splice(i,e.length,s),c=n.join("\n")}await x.writeFile(n,c,"utf8");let l={ok:!0,message:"Patch is syntactically valid."};try{n.endsWith(".js")||n.endsWith(".cjs")||n.endsWith(".mjs")?await E(`node --check ${n}`):(n.endsWith(".ts")||n.endsWith(".tsx"))&&await E(`npx tsc --noEmit --skipLibCheck --target esnext --moduleResolution node ${n}`)}catch(e){l={ok:!1,message:"Patch introduced a syntax error.",error:e.stdout||e.stderr||e.message}}finally{await x.writeFile(n,o,"utf8")}return l}async traceSymbol(e){const t=String(e.symbol??"").trim();if(!t)throw new Error("trace_symbol requires symbol");const r=await this.findReferences({symbol:t});if(!r.ok||!r.matches)return{ok:!1,error:"Could not find references."};const s=[],n=new Set;for(const e of r.matches.slice(0,3)){n.add(e.path);const t=i(this.workspaceRoot,e.path),r=await x.readFile(t,"utf8").catch(()=>"");if(!r)continue;const a=await this.meshCore.getDetailedRecord(e.path,r);if(!a)continue;const o=a.symbols.find(t=>e.line>=t.lineStart&&e.line<=t.lineEnd);if(o){const t=r.split("\n").slice(o.lineStart-1,o.lineEnd).join("\n");s.push({file:e.path,line:e.line,contextSymbol:o.name,snippet:t.slice(0,800)})}}return{ok:!0,symbol:t,totalReferences:r.totalFound,traceContext:s,note:"Symbol traced across caller environments to map data flow."}}async spawnSwarm(e,t){const 
i=e.subTasks||[];if(0===i.length)throw new Error("spawn_swarm requires at least one sub-task.");t?.(`[Swarm] Orchestrating ${i.length} parallel sub-agents...\n`);return{ok:!0,swarmResults:await Promise.all(i.map(async e=>{t?.(`[Swarm-${e.id}] Launching sub-agent...\n`);try{const t=await this.invokeSubAgent({prompt:e.prompt});return{id:e.id,status:"success",summary:t.summary}}catch(t){return{id:e.id,status:"error",error:t.message}}})),note:"Sub-tasks completed in parallel. Review summaries for next steps."}}async invokeSubAgent(e,t){const i=String(e.prompt??"").trim();if(!i)throw new Error("invoke_sub_agent requires a prompt");if(!this.config)throw new Error("Agent configuration not available for sub-agent.");t?.(`[Sub-Agent] Starting research task: "${i}"\n`);const r=new L({endpointBase:this.config.bedrock.endpointBase,modelId:xe,bearerToken:this.config.bedrock.bearerToken,temperature:.1,maxTokens:4096}),s=(await this.listTools()).filter(e=>["workspace.list_files","workspace.read_file","workspace.grep_capsules","workspace.list_symbols"].includes(e.name)).map(e=>({name:e.name,description:e.description??"",inputSchema:e.inputSchema??{type:"object",properties:{}}})),n=new Map,a=s.map(e=>{let t=k(e.name),i=2;for(;n.has(t);)t=`${k(e.name)}_${i}`,i+=1;return n.set(t,e),{name:t,description:e.description,inputSchema:e.inputSchema}}),o=[{role:"user",content:[{text:i}]}];let c=0;for(;c<15;){const e=await r.converse(o,a,"You are a fast research sub-agent. 
Gather data and summarize.",xe);if("text"===e.kind)return{ok:!0,summary:e.text};const t=[];e.text&&t.push({text:e.text});for(const i of e.toolUses)t.push({toolUse:i});o.push({role:"assistant",content:t});const i=await Promise.all(e.toolUses.map(async e=>{const t=n.get(e.name);if(!t)return{toolUseId:e.toolUseId,status:"error",content:[{text:`Unknown tool '${e.name}'.`}]};try{const i=await this.callTool(t.name,e.input);return{toolUseId:e.toolUseId,status:"success",content:[{text:JSON.stringify(i)}]}}catch(t){return{toolUseId:e.toolUseId,status:"error",content:[{text:t.message}]}}}));o.push({role:"user",content:i.map(e=>({toolResult:e}))}),c++}return{ok:!1,error:"Sub-agent reached max iterations without a final summary."}}async raceFixes(e,t){const i=String(e.task??"").trim(),r=String(e.verificationCommand??"").trim(),s=Number(e.candidates),n=!/(multi|across|refactor|runtime|crash|exception|architecture|migration|several|multiple)/i.test(i)?1:3,a=Math.max(1,Math.min(Number.isFinite(s)?s:n,5));if(!i||!r)throw new Error("race_fixes requires task and verificationCommand");if(!this.config)throw new Error("Agent configuration not available.");t?.(`[Multiverse] Racing ${a} candidates for task: "${i}"\n`);const o=new L({endpointBase:this.config.bedrock.endpointBase,modelId:this.config.bedrock.modelId,fallbackModelIds:this.config.bedrock.fallbackModelIds,bearerToken:this.config.bedrock.bearerToken,temperature:.7,maxTokens:4096}),c=await this.workspaceIndex.search(i,"bug",Math.max(2,a)).catch(()=>null),l=c?.results?.length?c.results.slice(0,5).map(e=>[`- ${e.file}`,` purpose: ${e.purpose}`,e.citations.length>0?` signals: ${e.citations.map(e=>e.whyMatched.join(", ")).join(" | ")}`:null].filter(Boolean).join("\n")).join("\n"):"No high-confidence codebase matches were found.",p=["Minimal Intervention: Fix the error with as few changes as possible.","Complete Refactoring: Clean up the code and improve architecture while fixing.","Robust Error-Handling: Add defensive checks and 
try-catch blocks.","Performance Focus: Optimize for speed and memory.","Standard idiomatic approach: Use common patterns."],m=new AbortController,u=await Promise.all(Array.from({length:a}).map(async(e,s)=>{const n=p[s%p.length];t?.(`[Candidate ${s+1}] Generating fix with strategy: ${n.split(":")[0]}...\n`);const a=`You are an expert engineer.\nTask: ${i}\nStrategy: ${n}\nRelevant codebase context:\n${l}\n\nGenerate a standard git patch (diff) to solve the problem.\nRespond ONLY with the raw diff content. No markdown code fences, no preamble.`;try{if(m.signal.aborted)return{id:s,status:"aborted",error:"Another candidate passed verification.",score:-1e3};const e=await o.converse([{role:"user",content:[{text:a}]}],[],"Respond only with a raw git patch.",void 0,m.signal);if("text"!==e.kind)return{id:s,status:"error",error:"LLM failed to generate text."};const i=e.text.trim();if(!i)return{id:s,status:"error",error:"LLM returned empty patch."};const c=(await this.timelines.create({name:`race-${s}-${Date.now().toString(36)}`})).timeline.id;t?.(`[Candidate ${s+1}] Applying patch to ${c}...\n`);if(!(await this.timelines.applyPatch({timelineId:c,patch:i})).ok)return{id:s,timelineId:c,strategy:n.split(":")[0],status:"rejected",error:"Patch rejected.",score:-1e3,metrics:{changedLines:0,warningCount:0,durationMs:0}};if(t?.(`[Candidate ${s+1}] Running verification: ${r}...\n`),m.signal.aborted)return{id:s,timelineId:c,strategy:n.split(":")[0],status:"aborted",error:"Another candidate passed verification before this run started.",score:-1e3,metrics:{changedLines:0,warningCount:0,durationMs:0}};const l=await this.timelines.run({timelineId:c,command:r}),p=(await this.timelines.compare({timelineIds:[c]})).comparisons[0],u=Number(p?.changedLineCount??0),d=`${l.stdout}\n${l.stderr}`.split(/\r?\n/g).map(e=>e.trim()).filter(e=>/\bwarning\b/i.test(e)).length,h=Number(p?.commandDurationMs??l.commandRecord.durationMs??0),g=function(e){return 
e.passed?1e3-3*e.changedLines-12*e.warningCount-Math.round(e.durationMs/250):-1e3-e.changedLines-25*e.warningCount-Math.round(e.durationMs/500)}({passed:l.ok,changedLines:u,warningCount:d,durationMs:h}),f={id:s,timelineId:c,strategy:n.split(":")[0],status:l.ok?"passed":"failed",exitCode:l.exitCode,verdict:l.ok?"pass":"fail",score:g,metrics:{changedFiles:Array.isArray(p?.changedFiles)?p?.changedFiles:[],changedLines:u,warningCount:d,durationMs:h},comparison:p};return l.ok&&!m.signal.aborted&&m.abort(),f}catch(e){return m.signal.aborted?{id:s,timelineId:void 0,strategy:n.split(":")[0],status:"aborted",error:"Another candidate passed verification.",score:-1e3,metrics:{changedLines:0,warningCount:0,durationMs:0}}:{id:s,timelineId:void 0,strategy:n.split(":")[0],status:"error",error:e.message,score:-1e3,metrics:{changedLines:0,warningCount:0,durationMs:0}}}})),d=[...u].sort((e,t)=>Number(t.score??-1/0)-Number(e.score??-1/0)),h=d.find(e=>"pass"===e.verdict)??d[0]??null;return{ok:!0,task:i,verificationCommand:r,context:c?.topMatches??[],winnerTimelineId:h?.timelineId??null,winnerStrategy:h?.strategy??null,results:d,note:"Multiverse racing complete. 
Review the ranked candidates and promote the winner with workspace.timeline_promote."}}async askCodebase(e,t){const i=String(e.query??"").trim();if(!i)throw new Error("ask_codebase requires a query");const r=String(e.mode??"architecture").trim(),s=["architecture","bug","edit-impact","test-impact","ownership","recent-change","runtime-path"].includes(r)?r:"architecture",n=Math.max(1,Math.min(Number(e.limit)||8,25));t?.(`[Index] Querying persistent code index (${s}) for "${i}"...\n`);const a=function(e,t){const i=e.replace(/\s+/g," ").trim(),r=[i];"bug"===t&&r.push(`${i} stack error failure root cause`),"test-impact"===t&&r.push(`${i} tests specs verification coverage`),"edit-impact"===t&&r.push(`${i} callers dependencies exports affected files`),"runtime-path"===t&&r.push(`${i} route handler request runtime server`),"ownership"===t&&r.push(`${i} owner module boundary integration`);const s=Array.from(i.matchAll(/[A-Z][A-Za-z0-9_]{2,}|[a-zA-Z0-9_-]+\.[tj]sx?/g)).map(e=>e[0]).slice(0,8).join(" ");return s&&r.push(s),o(r,4)}(i,s),c=await Promise.all(a.map(e=>this.workspaceIndex.search(e,s,Math.min(25,2*n)))),p=c[0],m=await this.telemetry.topSignals(50).catch(()=>[]),u=new Map(m.map(e=>[e.file,e])),d=new Map;for(let e=0;e<c.length;e+=1){const t=a[e];for(const r of c[e].results??[]){const s=d.get(r.file),n=0===e?1:.72,a=l(i,`${r.file}\n${r.purpose}\n${r.matchedSignals?.join(" ")??""}`),c=Number(r.score??0)*n+4*a;s?(s.score=Math.max(s.score,c)+.35,s.matchedVariants=o([...s.matchedVariants??[],t],6),s.citations=[...s.citations??[],...r.citations??[]].slice(0,8),s.matchedSignals=o([...s.matchedSignals??[],...r.matchedSignals??[]],12)):d.set(r.file,{...r,score:c,baseScore:r.score,matchedVariants:[t],citations:Array.isArray(r.citations)?r.citations:[]})}}const h=Array.from(d.values()).map(e=>{const 
t=u.get(e.file),i=t?oe(t):0,r=Number(e.score??0)+Math.min(8,i/250);return{...e,score:Number(r.toFixed(3)),productionBoost:i}}).sort((e,t)=>(t.score??0)-(e.score??0)).slice(0,n);return{...p,query:i,queryVariants:a,results:h,topMatches:h.map(e=>({path:e.file,score:e.score,snippet:`[Fonte: ${e.file}]\n${e.purpose}`})),resultsFound:Math.max(p.resultsFound??0,d.size),productionSignals:m.slice(0,10)}}async explainSymbol(e){const t=String(e.symbol??"").trim();if(!t)throw new Error("workspace.explain_symbol requires symbol");return this.workspaceIndex.explainSymbol(t)}async impactMap(e){const t=await this.workspaceIndex.impactMap({path:"string"==typeof e.path?e.path:void 0,symbol:"string"==typeof e.symbol?e.symbol:void 0,diff:"string"==typeof e.diff?e.diff:void 0}),i=await this.telemetry.topSignals(100).catch(()=>[]),r=new Map(i.map(e=>[e.file,e])),s=(t.ranked??t.impact??[]).map(e=>{const t=e.path??e.file,i=r.get(t);return{...e,revenueImpactDaily:i?.revenueImpactDaily??0,requestVolume:i?.requestVolume??0,errorRate:i?.errorRate??0}});return{...t,ranked:s}}async getGitDiff(e){try{const r="string"==typeof e.path?e.path:"",s=r?i(this.workspaceRoot,r):this.workspaceRoot,n=r?t(this.workspaceRoot,s):"",{stdout:a}=await E(`git diff ${n}`,{cwd:this.workspaceRoot});return{ok:!0,diff:a||"No changes."}}catch(e){return{ok:!1,error:e.message}}}async patchSurgical(e){const r=String(e.path??"").trim(),s=String(e.searchBlock??""),n=String(e.replaceBlock??"");if(!r||!s)throw new Error("patch_surgical requires path and searchBlock");const a=i(this.workspaceRoot,r);await this.saveBackup(r);const o=await x.readFile(a,"utf8"),c=o;let l=o;if(o.includes(s))l=o.replace(s,n);else{const e=s.split("\n").map(e=>e.trim()),t=o.split("\n");let i=-1;for(let r=0;r<=t.length-e.length;r++){let s=!0;for(let i=0;i<e.length;i++)if(t[r+i].trim()!==e[i]){s=!1;break}if(s){i=r;break}}if(-1===i)throw new Error(`Could not find search block in ${r}. 
Ensure indentation and content match exactly.`);const a=[...t];a.splice(i,e.length,n),l=a.join("\n")}await x.writeFile(a,l,"utf8");try{a.endsWith(".js")||a.endsWith(".cjs")||a.endsWith(".mjs")?await E(`node --check ${a}`):(a.endsWith(".ts")||a.endsWith(".tsx"))&&await E(`npx tsc --noEmit --skipLibCheck --target esnext --moduleResolution node ${a}`)}catch(e){return await x.writeFile(a,c,"utf8"),{ok:!1,error:"Patch introduced a syntax error and was automatically rolled back.",compilerOutput:e.stdout||e.stderr||e.message,note:"Fix your code and try again."}}const p=t(this.workspaceRoot,a);return await this.cache.deleteCapsule(p,"low"),await this.cache.deleteCapsule(p,"medium"),await this.cache.deleteCapsule(p,"high"),{ok:!0,path:p,patched:!0}}async listSymbols(e){const r=String(e.path??"").trim();if(!r)throw new Error("list_symbols requires path");const s=i(this.workspaceRoot,r),n=await x.readFile(s,"utf8"),a=t(this.workspaceRoot,s),o=await this.meshCore.extractSymbols(a,n);return{ok:!0,path:a,count:o.length,symbols:o.map(e=>({name:e.name,kind:e.kind,location:`L${e.lineStart}-L${e.lineEnd}`}))}}async expandSymbol(e){const r=String(e.path??"").trim(),s=String(e.symbolName??"").trim();if(!r||!s)throw new Error("expand_symbol requires path and symbolName");const n=i(this.workspaceRoot,r),a=await x.readFile(n,"utf8"),o=t(this.workspaceRoot,n),c=await this.meshCore.getDetailedRecord(o,a);if(!c)throw new Error("Could not analyze file with MeshCore.");const l=c.symbols.find(e=>e.name===s||e.kind&&`${e.kind} ${e.name}`===s);if(!l){const e=c.symbols.find(e=>e.name.toLowerCase().includes(s.toLowerCase()));if(!e)throw new Error(`Symbol '${s}' not found in ${r}`);const t=a.split(/\r?\n/g).slice(e.lineStart-1,e.lineEnd).join("\n");return{ok:!0,path:o,symbol:e.name,kind:e.kind,snippet:t}}const p=a.split(/\r?\n/g).slice(l.lineStart-1,l.lineEnd).join("\n");return{ok:!0,path:o,symbol:l.name,kind:l.kind,range:{start:l.lineStart,end:l.lineEnd},snippet:p}}async getFileGraph(e){const 
r=String(e.path??"").trim();if(!r)throw new Error("get_file_graph requires path");const s=i(this.workspaceRoot,r),n=await x.readFile(s,"utf8"),a=t(this.workspaceRoot,s),o=await this.meshCore.getDetailedRecord(a,n);if(!o)throw new Error("Could not analyze file with MeshCore.");return{ok:!0,path:a,fileType:o.fileType,dependencies:o.dependencies,note:"Dependencies are resolved relative to the workspace root."}}async readDirOverview(e){const r=String(e.path??".").trim(),s=i(this.workspaceRoot,r),n=await x.readdir(s,{withFileTypes:!0}),a=[];for(const e of n)if(e.isFile()&&(e.name.endsWith(".ts")||e.name.endsWith(".js")||e.name.endsWith(".tsx"))){const i=R.join(s,e.name),r=t(this.workspaceRoot,i),n=await x.stat(i),o=await this.cache.getCapsule(r,"low",Math.floor(n.mtimeMs));if(o)a.push({path:r,overview:o.content});else{const e=await x.readFile(i,"utf8"),t=await this.meshCore.summarizeAllTiers(r,e);await this.cache.setCapsule(r,"low",t.low,Math.floor(n.mtimeMs)),a.push({path:r,overview:t.low})}}return{ok:!0,path:r,fileCount:a.length,overviews:a}}handleAgentPlan(e){return"write"===("write"===e.action?"write":"read")?(this.agentPlan=String(e.plan??""),Promise.resolve({ok:!0,message:"Plan updated."})):Promise.resolve({ok:!0,plan:this.agentPlan})}async getEnvInfo(){const e={os:process.platform,arch:process.arch,nodeVersion:process.version,cwd:this.workspaceRoot},t=["git","node","npm","rg","python3","gcc"];for(const i of t)try{const{stdout:t}=await E(`${i} --version`);e[i]=t.split("\n")[0].trim()}catch{e[i]="not found"}return{ok:!0,...e}}async grepRipgrep(e){const t=String(e.query??"").trim();if(!t)throw new Error("grep_ripgrep requires query");const r="string"==typeof e.path?e.path:".",s=i(this.workspaceRoot,r),n="string"==typeof e.includePattern?`--glob "${e.includePattern}"`:"";try{const{stdout:e}=await E(`rg --vimgrep --max-columns 200 ${n} "${t}" 
.`,{cwd:s}),i=e.split("\n").filter(Boolean),a=i.slice(0,100).map(e=>{const[t,i,s,...n]=e.split(":");return{path:R.join(r,t),line:Number(i),column:Number(s),snippet:n.join(":").trim()}});return{ok:!0,query:t,count:a.length,totalFound:i.length,matches:a}}catch(e){return 127===e.code?{ok:!1,error:"ripgrep (rg) not found on system. Use grep_content instead."}:{ok:!0,query:t,count:0,matches:[],note:"No matches or ripgrep error."}}}async grepContent(e){const r=String(e.query??"").trim();if(!r)throw new Error("workspace.grep_content requires 'query'");const s="string"==typeof e.path?e.path:".",n=Math.max(1,Math.min(Number(e.limit)||50,300)),a=i(this.workspaceRoot,s),o=await S(a,1200,this.workspaceRoot),c=[],l=r.toLowerCase();for(const e of o){if(c.length>=n)break;let i="";try{i=await x.readFile(e,"utf8")}catch{continue}if(i.length>5e5)continue;const r=i.split(/\r?\n/g);for(let i=0;i<r.length;i+=1){const s=r[i];if(s.toLowerCase().includes(l)&&(c.push({path:t(this.workspaceRoot,e),line:i+1,snippet:s.slice(0,220)}),c.length>=n))break}}return{ok:!0,query:r,count:c.length,matches:c}}async writeFile(e){const r=String(e.path??"").trim();if(!r)throw new Error("workspace.write_file requires 'path'");const s="string"==typeof e.content?e.content:String(e.content??""),n=i(this.workspaceRoot,r);await this.saveBackup(r),await x.mkdir(R.dirname(n),{recursive:!0}),await x.writeFile(n,s,"utf8");const a=t(this.workspaceRoot,n);return this.workspaceIndex.partialUpdate([a]).catch(()=>{}),{ok:!0,path:a,bytesWritten:Buffer.byteLength(s,"utf8")}}async runCommand(e,t){const r=String(e.command??"").trim();if(!r)throw new Error("workspace.run_command requires 'command'");const s=ke(r),n=await new Promise(e=>{const i=M(s.command,s.args,{cwd:this.workspaceRoot});let r="",n="",a=!1;const o=setTimeout(()=>{a=!0,i.kill("SIGTERM")},3e4);i.stdout.on("data",e=>{const i=e.toString();r+=i,t?.(i)}),i.stderr.on("data",e=>{const 
i=e.toString();n+=i,t?.(i)}),i.on("close",t=>{clearTimeout(o),e({ok:0===t&&!a,exitCode:a?124:t??0,stdout:a?`[TIMEOUT after 30s]\n${r}`:r,stderr:n})}),i.on("error",t=>{clearTimeout(o),e({ok:!1,exitCode:1,stdout:"",stderr:String(t)})})}),{filterCommandOutput:a}=await import("./command-filter.js"),o=await a(r,n.stdout,n.stderr,n.exitCode,this.workspaceRoot),c={ok:n.ok,command:r,exitCode:n.exitCode,stdout:o.stdout,stderr:o.stderr,_meshFilter:o.strategy};if(o.teePath&&(c._teePath=o.teePath),!n.ok){const e=n.stderr+"\n"+n.stdout,t=/([a-zA-Z0-9._\-/]+\.(?:ts|tsx|js|jsx|py|go|c|cpp|rs|java|rb|php)):(\d+)/g,r=Array.from(e.matchAll(t)).slice(0,3);if(r.length>0){const e=[];for(const t of r){const r=t[1],s=parseInt(t[2],10);try{const t=i(this.workspaceRoot,r),n=(await x.readFile(t,"utf8")).split(/\r?\n/g),a=Math.max(0,s-3),o=Math.min(n.length,s+2),c=n.slice(a,o).map((e,t)=>`${a+t+1} | ${e}`).join("\n");e.push(`Context for ${r}:${s}:\n${c}`)}catch{}}e.length>0&&(c.contextualSnippets=e,c.stdout=`${c.stdout}\n\n--- Mesh Context ---\n${e.join("\n\n")}`)}}return c}async checkSync(){return{ok:!0,...await this.cache.getSyncStatus()}}async getIndexStatus(){return this.workspaceIndex.status()}meshArtifactPath(...e){return R.join(this.workspaceRoot,".mesh",...e)}async digitalTwin(e={}){const r=String(e.action??"build"),s=this.meshArtifactPath("digital-twin.json");if("read"===r||"status"===r){const e=await n(s,null);return e?"status"===r?{ok:!0,path:s,builtAt:e.builtAt,files:e.files?.total??0,symbols:e.symbols?.length??0,routes:e.routes?.length??0,riskHotspots:e.riskHotspots?.length??0}:{ok:!0,path:s,twin:e}:{ok:!1,status:"missing",path:s,message:"Digital Twin has not been built yet."}}const o=(await 
S(this.workspaceRoot,1e4,this.workspaceRoot)).map(e=>t(this.workspaceRoot,e)),c=o.filter(e=>/\.(ts|tsx|js|jsx|mjs|cjs)$/.test(e)),l=o.filter(e=>/(^|\/)(test|tests|__tests__)\/|(\.test|\.spec)\.(ts|tsx|js|jsx|mjs|cjs)$/.test(e)),p=o.filter(e=>/(^|\/)(package\.json|tsconfig\.json|vite\.config|next\.config|tailwind\.config|Dockerfile|docker-compose|cloudbuild|render\.yaml|vercel\.json|netlify\.toml|\.env\.example|\.github\/workflows)/.test(e)),m=o.filter(e=>/(^|\/)\.env(\.example|\.local|\.development|\.production)?$/.test(e)),h=[],g=[],y=[];for(const e of c.slice(0,1500)){const t=i(this.workspaceRoot,e),r=await x.readFile(t,"utf8").catch(()=>"");if(!r)continue;for(const t of d(r).slice(0,40))h.push({file:e,...t});for(const t of u(r))g.push({file:e,...t});const s=f(e,r);s.length>0&&y.push({file:e,risks:s,score:s.length})}const w=await n(R.join(this.workspaceRoot,"package.json"),null),k=await this.collectEnvNames(m),b=await this.getGitStatus(),v=await this.workspaceIndex.status(),_={routes:g.map(e=>({f:e.file,m:e.method,r:e.route,l:e.line})),symbols:h.map(e=>({f:e.file,n:e.name,k:e.kind,l:e.line}))},j={schemaVersion:1,builtAt:(new Date).toISOString(),workspaceRoot:this.workspaceRoot,workspaceName:R.basename(this.workspaceRoot),index:v,files:{total:o.length,source:c.length,tests:l.length,config:p.length},package:w?{name:w.name,version:w.version,scripts:w.scripts??{},dependencies:Object.keys(w.dependencies??{}),devDependencies:Object.keys(w.devDependencies??{})}:null,env:k,deploy:{configs:p.filter(e=>/Dockerfile|docker-compose|cloudbuild|render\.yaml|vercel\.json|netlify\.toml|\.github\/workflows/.test(e)),scripts:Object.entries(w?.scripts??{}).filter(([e,t])=>/deploy|publish|release|build|start/i.test(`${e} ${t}`)).map(([e,t])=>({name:e,command:t}))},tests:l.slice(0,250),routes:_.routes.slice(0,500),symbols:_.symbols.slice(0,2e3),riskHotspots:y.sort((e,t)=>t.score-e.score).slice(0,100),git:b};return await a(s,j),{ok:!0,path:s,twin:j}}async collectEnvNames(e){const 
t={};for(const r of e){const e=await x.readFile(i(this.workspaceRoot,r),"utf8").catch(()=>"");t[r]=o(e.split(/\r?\n/g).map(e=>e.trim()).filter(e=>e&&!e.startsWith("#")).map(e=>e.split("=")[0]?.trim()),200)}return t}async timelinePromoteWithBrain(e){const t=await this.timelines.readRecord(e.timelineId),i=await this.timelines.compare({timelineIds:[e.timelineId]}),r=i.comparisons?.[0]??{},s=ue({diffPreview:String(r.diffPreview??""),verificationOk:"pass"===t.verdict}),n=de({diffPreview:String(r.diffPreview??"")});if(!s.ok||!n.ok)return{ok:!1,timelineId:e.timelineId,message:"Adversarial checks blocked promotion. Resolve findings or override explicitly.",critic:s,redTeam:n};const a=await this.timelines.promote({timelineId:e.timelineId});if(!a.ok)return a;try{const e=t.commands.at(-1),i=await x.readFile(e?.stderrPath??"","utf8").catch(()=>""),o=te(i||e?.command||"timeline-promote"),c=ee(String(r.diffPreview??"")),l=await this.meshBrain.contribute({f:this.workspaceFingerprint(),e:o,d:c,v:{r:t.verdict??"unknown",c:e?.command,x:e?.exitCode,t:/\btsc\b/.test(e?.command??"")?e?.ok?"pass":"fail":"unknown",l:/\blint\b/.test(e?.command??"")?e?.ok?"pass":"fail":"unknown"}});return{...a,meshBrain:l,critic:s,redTeam:n}}catch(e){return{...a,meshBrain:{ok:!1,contributed:!1,reason:e.message},critic:s,redTeam:n}}}async meshBrainTool(e={}){const t=String(e.action??"stats");if("opt_out"===t)return await this.meshBrain.optOut(),{ok:!0,action:t,message:"Mesh Brain contribution disabled for this workspace."};if("query"===t){const i=String(e.error??"").trim();if(!i)throw new Error("workspace.brain query requires error");const r=await this.meshBrain.query({errorSignature:te(i),limit:Math.max(1,Math.min(Number(e.limit)||5,20))});return{ok:!0,action:t,patterns:r.patterns,source:r.source}}return{ok:!0,action:"stats",...await this.meshBrain.status()}}async daemonControl(e={}){const t=String(e.action??"status").trim().toLowerCase();if("start"===t){return{ok:0===await 
ie(["start"]),action:t}}if(!["status","digest","stop"].includes(t))throw new Error("workspace.daemon action must be start|status|digest|stop");return await this.callDaemonSocket({action:t})}async productionStatus(e={}){const t=String(e.action??"status"),i="refresh"===t?await this.telemetry.refresh():await this.telemetry.status(),r=[...i.signals].sort((e,t)=>oe(t)-oe(e)).slice(0,10);return{ok:!0,action:t,updatedAt:i.updatedAt,totalSignals:i.signals.length,topErrors:r.map(e=>({file:e.file,route:e.route,errorRate:e.errorRate,requestVolume:e.requestVolume,p99Ms:e.p99Ms,revenueImpactDaily:e.revenueImpactDaily,score:oe(e)}))}}async whatIf(e,t){const i=String(e.hypothesis??"").trim();if(!i)throw new Error("workspace.what_if requires hypothesis");if(!this.config)throw new Error("Agent configuration not available.");const r=String(e.verificationCommand??"npm run typecheck && npm test"),s=!0===e.promote;t?.(`[What-If] Creating counterfactual timeline for: ${i}\n`);const n=await this.timelines.create({name:`what-if-${Date.now().toString(36)}`}),a=new L({endpointBase:this.config.bedrock.endpointBase,modelId:this.config.bedrock.modelId,fallbackModelIds:this.config.bedrock.fallbackModelIds,bearerToken:this.config.bedrock.bearerToken,temperature:.1,maxTokens:4096}),o=await a.converse([{role:"user",content:[{text:`Hypothesis: ${i}\nGenerate a git patch implementing this migration. 
Return only raw diff.`}]}],[],"Return only raw git patch.");if("text"!==o.kind)return{ok:!1,hypothesis:i,message:"LLM did not return patch text."};const c=o.text.trim(),l=await this.timelines.applyPatch({timelineId:n.timeline.id,patch:c});if(!l.ok)return{ok:!1,hypothesis:i,timelineId:n.timeline.id,message:l.message,stderr:l.stderr};const u=await this.timelines.run({timelineId:n.timeline.id,command:r,timeoutMs:24e4}),d=(await this.timelines.compare({timelineIds:[n.timeline.id]})).comparisons[0],h={ok:!0,hypothesis:i,timelineId:n.timeline.id,verificationCommand:r,verdict:u.ok?"pass":"fail",changedFiles:d?.changedFiles??[],changedLineCount:d?.changedLineCount??0,testsBrokenEstimate:u.ok?0:p(`${u.stdout}\n${u.stderr}`),typeErrorsEstimate:(g=`${u.stdout}\n${u.stderr}`,g.split(/\r?\n/g).filter(e=>/\berror TS\d+:/i.test(e)).length),bundleSizeDeltaKb:m(`${u.stdout}\n${u.stderr}`),note:s?"Use workspace.timeline_promote to materialize this counterfactual.":"Counterfactual evaluated in isolated timeline; no main workspace changes applied."};var g;if(s&&u.ok){const e=await this.timelines.promote({timelineId:n.timeline.id});return{...h,promoted:e.ok}}return h}async callDaemonSocket(e){return new Promise(t=>{const i=se.createConnection(re,()=>{i.write(JSON.stringify(e)),i.end()});let r="";i.on("data",e=>{r+=e.toString()}),i.on("error",i=>{t({ok:!1,action:e.action,message:`daemon unavailable: ${i.message}`})}),i.on("end",()=>{if(r.trim())try{t(JSON.parse(r))}catch{t({ok:!1,action:e.action,message:"daemon response parse failed"})}else t({ok:!1,action:e.action,message:"daemon returned empty response"})})})}workspaceFingerprint(){return P.createHash("sha256").update(R.resolve(this.workspaceRoot)).digest("hex").slice(0,24)}async bootstrapRepoDnaMemory(){try{const e=await this.engineeringMemory({action:"read"}),t=Array.isArray(e?.memory?.rules)?e.memory.rules:[];if(t.some(e=>e.includes("[dna-cohort]")))return;const i=await 
this.digitalTwin({action:"build"}),r=this.computeRepoDna(i?.twin??{}),s=function(e,t){if(!Array.isArray(e)||0===e.length)return[];const i=new Map;for(const t of e)for(const e of t.rules??[]){const t=String(e).trim();t&&i.set(t,(i.get(t)??0)+1)}const r=Math.max(1,Math.ceil(e.length*t));return Array.from(i.entries()).filter(([,e])=>e>=r).sort((e,t)=>t[1]-e[1]).map(([e])=>e).slice(0,25)}((await this.meshBrain.queryDnaCohort({dna:r,threshold:.85})).cohort??[],.3);if(0===s.length)return;const n=o([...s.map(e=>`[dna-cohort] ${e}`),...t],200),c=this.meshArtifactPath("engineering-memory.json"),l=e?.memory??{};l.rules=n,l.updatedAt=(new Date).toISOString(),l.events=Array.isArray(l.events)?l.events:[],l.events.unshift({id:`dna-${Date.now().toString(36)}`,at:l.updatedAt,outcome:"neutral",note:"Preloaded rules from Mesh Brain DNA cohort.",source:"dna-cohort",dna:r,files:[]}),l.events=l.events.slice(0,100),await a(c,{schemaVersion:1,reviewerPreferences:[],acceptedPatterns:[],rejectedPatterns:[],riskModules:[],...l})}catch{}}computeRepoDna(e){const t=e?.package??{},i={...t.dependencies??{},...t.devDependencies??{}},r=t.scripts??{},s=Array.isArray(e?.deploy?.configs)?e.deploy.configs.join(" 
").toLowerCase():"",n=_(R.join(this.workspaceRoot,"pnpm-lock.yaml"))?"pnpm":_(R.join(this.workspaceRoot,"yarn.lock"))?"yarn":"npm";return{framework:c(i,["next","react","vue","svelte","express","fastify","nestjs"],"unknown"),frameworkVersion:i.next||i.react||i.vue||i.svelte||i.express||"unknown",orm:c(i,["prisma","typeorm","sequelize","drizzle-orm","mongoose"],"none"),testRunner:c({...i,...r},["vitest","jest","mocha","playwright","cypress"],"node:test"),deployTarget:/vercel/.test(s)?"vercel":/render/.test(s)?"render":/docker/.test(s)?"docker":"unknown",monorepoTool:c(i,["turbo","nx","lerna","pnpm-workspace"],"none"),cssStrategy:c(i,["tailwindcss","styled-components","sass","emotion"],"plain-css"),language:Array.isArray(e?.files?.topExtensions)&&e.files.topExtensions.some(e=>".ts"===e||".tsx"===e)?"typescript":"javascript",packageManager:n}}async predictiveRepair(e={},t){const i=String(e.action??"analyze"),r=this.meshArtifactPath("predictive-repair.json"),s=await n(r,{schemaVersion:1,updatedAt:null,queue:[],history:[]});if("status"===i)return{ok:!0,path:r,...s};if("clear"===i){const e={schemaVersion:1,updatedAt:(new Date).toISOString(),queue:[],history:s.history??[]};return await a(r,e),{ok:!0,path:r,...e}}t?.("[Predictive Repair] Running diagnostics and loading repo memory...\n");const c=await this.getDiagnostics().catch(e=>({ok:!1,output:e.message})),l=await this.engineeringMemory({action:"read"}).catch(()=>({memory:null})),p=await this.digitalTwin({action:"build"}).catch(()=>null),m=String(c.output??c.stderr??c.stdout??""),u=te(m),d=await this.meshBrain.query({errorSignature:u,limit:Math.max(1,Math.min(Number(e.limit)||5,5))}).catch(()=>({ok:!1,patterns:[],source:"local-fallback"})),h=await this.telemetry.topSignals(50).catch(()=>[]),g=new Map(h.map(e=>[e.file,oe(e)])),f=o(Array.from(m.matchAll(/([A-Za-z0-9_./-]+\.(?:ts|tsx|js|jsx|mjs|cjs))(?::(\d+))?/g)).map(e=>e[1]),25).sort((e,t)=>(g.get(t)??0)-(g.get(e)??0)),y=await this.readDirtyFilesForMemory(),w=new 
Set((p?.twin?.riskHotspots??[]).map(e=>e.file)),k=f.length>0||!c.ok?[{id:`repair-${Date.now().toString(36)}`,createdAt:(new Date).toISOString(),status:"prepared",source:"diagnostics",verificationCommand:String(e.verificationCommand??this.defaultVerificationCommand(p?.twin)),summary:c.ok?"Diagnostics are currently clean; no repair candidate required.":"Diagnostics failed; prepare a timeline-first repair.",files:f.length>0?f:y.slice(0,10),prioritizedByImpact:f.filter(e=>g.has(e)).slice(0,10),riskFiles:f.filter(e=>w.has(e)),diagnostics:m.slice(0,8e3),errorSignature:u,globalPatterns:(d.patterns??[]).slice(0,5).map(e=>({score:e.score,successRate:e.successRate,usageCount:e.usageCount,fixSummary:e.fixSummary})),recommendedTool:"agent.race_fixes"}]:[],b={schemaVersion:1,updatedAt:(new Date).toISOString(),diagnosticsOk:Boolean(c.ok),memoryDigest:l?.memory?.rules?.slice?.(0,10)??[],brainPatternSource:d.source,brainPatterns:(d.patterns??[]).slice(0,5),queue:k,history:[...s.history??[],...k.length>0?k:[]].slice(-50)};return await a(r,b),{ok:!0,path:r,...b}}async recordPredictiveRepairSignal(e,t){const i=this.meshArtifactPath("predictive-repair.json"),r=await n(i,{schemaVersion:1,updatedAt:null,queue:[],history:[]}),s=String(t?.output??t?.stderr??t?.stdout??""),c={id:`repair-${Date.now().toString(36)}`,createdAt:(new Date).toISOString(),status:"prepared",source:"watcher",summary:`Diagnostics changed after editing ${e}.`,files:o([e],10),diagnostics:s.slice(0,8e3),recommendedTool:"agent.race_fixes"},l=[c,...r.queue??[]].slice(0,25),p={...r,updatedAt:c.createdAt,diagnosticsOk:!1,queue:l,history:[c,...r.history??[]].slice(0,50)};await a(i,p)}async engineeringMemory(e={}){const t=String(e.action??"read"),i=this.meshArtifactPath("engineering-memory.json"),r=await n(i,{schemaVersion:1,updatedAt:null,rules:[],riskModules:[],reviewerPreferences:[],acceptedPatterns:[],rejectedPatterns:[],events:[]});if("record"===t){const t={id:`mem-${Date.now().toString(36)}`,at:(new 
Date).toISOString(),outcome:String(e.outcome??"neutral"),note:String(e.note??"").trim(),rule:String(e.rule??"").trim(),files:Array.isArray(e.files)?e.files.map(String):[]};return r.events.unshift(t),t.rule&&(r.rules=o([t.rule,...r.rules],100)),"accepted"===t.outcome&&t.note&&(r.acceptedPatterns=o([t.note,...r.acceptedPatterns],100)),"rejected"===t.outcome&&t.note&&(r.rejectedPatterns=o([t.note,...r.rejectedPatterns],100)),r.riskModules=o([...t.files,...r.riskModules],100),r.updatedAt=t.at,await a(i,r),{ok:!0,path:i,memory:r}}if("learn"===t){const e=await this.digitalTwin({action:"build"}).catch(()=>null),t=await this.readDirtyFilesForMemory(),s=["Prefer timeline verification for changes touching agent runtime, shell execution, auth, secrets, or persistence.","Use the Digital Twin risk hotspots before broad refactors.","Keep docs status aligned with implemented tool surfaces."];return r.rules=o([...s,...r.rules],100),r.riskModules=o([...t,...(e?.twin?.riskHotspots??[]).map(e=>e.file),...r.riskModules],100),r.updatedAt=(new Date).toISOString(),r.events.unshift({id:`learn-${Date.now().toString(36)}`,at:r.updatedAt,outcome:"neutral",note:"Learned repository heuristics from Digital Twin, dirty files, and risk hotspots.",files:t}),r.events=r.events.slice(0,100),await a(i,r),{ok:!0,path:i,memory:r}}return{ok:!0,path:i,memory:r}}async intentCompile(e){const t=String(e.intent??"").trim();if(!t)throw new Error("workspace.intent_compile requires intent");const i=await this.digitalTwin({action:"build"}),r=await this.engineeringMemory({action:"read"}),s=await this.workspaceIndex.search(t,"edit-impact",8).catch(()=>({results:[],topMatches:[]})),n=o([...(s.results??[]).map(e=>e.file),...(i.twin?.riskHotspots??[]).slice(0,5).map(e=>e.file)],20),c=String(e.verificationCommand??this.defaultVerificationCommand(i.twin)),l={schemaVersion:1,compiledAt:(new Date).toISOString(),intent:t,likelyFiles:n,phases:["Confirm behavior and public surface from the Digital Twin.","Implement the 
smallest vertical change that satisfies the intent.","Update tests/docs for changed behavior.","Verify in a timeline or with the declared verification command.","Record accepted/rejected lessons in Engineering Memory."],interfaces:this.inferIntentInterfaces(t,i.twin),tests:this.inferIntentTests(t,i.twin),risks:this.inferIntentRisks(t,i.twin,n),rollout:{verificationCommand:c,rollback:"Use workspace.semantic_undo or promote only verified timelines.",monitoring:"Check cockpit snapshot after implementation for diagnostics, risk hotspots, and repair queue."},memoryRules:r.memory?.rules?.slice?.(0,8)??[],topMatches:s.topMatches??[]},p=this.meshArtifactPath("intent-compiler","latest.json");return await a(p,l),{ok:!0,path:p,contract:l}}async cockpitSnapshot(){const[e,t,i,r,s,n,a,o,c,l]=await Promise.all([this.workspaceIndex.status().catch(e=>({ok:!1,error:e.message})),this.getGitStatus(),this.digitalTwin({action:"status"}).catch(e=>({ok:!1,error:e.message})),this.predictiveRepair({action:"status"}).catch(e=>({ok:!1,error:e.message})),this.engineeringMemory({action:"read"}).catch(e=>({ok:!1,error:e.message})),this.timelines.list().catch(e=>({ok:!1,timelines:[],error:e.message})),this.causalIntelligence({action:"status"}).catch(e=>({ok:!1,error:e.message})),this.discoveryLab({action:"status"}).catch(e=>({ok:!1,error:e.message})),this.realityFork({action:"status"}).catch(e=>({ok:!1,error:e.message})),this.ghostEngineer({action:"status"}).catch(e=>({ok:!1,error:e.message}))]),p=await this.listRuntimeRuns();return{ok:!0,generatedAt:(new Date).toISOString(),index:e,git:t,digitalTwin:i,predictiveRepair:r,engineeringMemory:s,causalIntelligence:a,discoveryLab:o,realityFork:c,ghostEngineer:l,timelines:n,runtimeRuns:p,health:this.scoreCockpitHealth({index:e,repair:r,timelines:n,runtimeRuns:p})}}async causalIntelligence(e={},t){const i=String(e.action??"build"),r=this.meshArtifactPath("causal-intelligence.json"),s=await n(r,null);if("status"===i)return 
s?{ok:!0,path:r,builtAt:s.builtAt,nodes:s.nodes?.length??0,edges:s.edges?.length??0,insights:s.insights?.length??0,topSeverity:s.insights?.[0]?.severity??"none"}:{ok:!1,status:"missing",path:r};if("read"===i)return s?{ok:!0,path:r,graph:s}:{ok:!1,status:"missing",path:r};if("query"===i){const i=String(e.query??"").trim();if(!i)throw new Error("workspace.causal_intelligence query action requires query");const n=s??await this.buildCausalGraph(t);return s||await a(r,n),{ok:!0,path:r,query:i,...this.answerCausalQuery(i,n)}}const o=await this.buildCausalGraph(t);return await a(r,o),{ok:!0,path:r,graph:o}}async buildCausalGraph(e){e?.("[Causal Intelligence] Building causal graph from twin, memory, repairs, and git pressure...\n");const[t,i,r,s,n]=await Promise.all([this.digitalTwin({action:"build"}).catch(e=>({ok:!1,error:e.message})),this.engineeringMemory({action:"read"}).catch(()=>({memory:null})),this.predictiveRepair({action:"status"}).catch(()=>({queue:[]})),this.readDirtyFilesForMemory(),this.readGitChurn()]),a=t.twin??{},c=i.memory??{},l=r,p=Array.isArray(a.tests)?a.tests.map(String):[],m=Array.isArray(a.riskHotspots)?a.riskHotspots:[],u=Array.isArray(a.routes)?a.routes:[],d=Array.isArray(a.symbols)?a.symbols:[],h=Array.isArray(l.queue)?l.queue:[],g=o(h.flatMap(e=>Array.isArray(e.files)?e.files.map(String):[]),250),f=o([...d.map(e=>String(e.file??"")),...u.map(e=>String(e.file??"")),...m.map(e=>String(e.file??"")),...p,...a.deploy?.configs??[],...s,...g,...c.riskModules??[]],1500),y=new Map,k=new Map,b=e=>{const t=String(e.id??"");t&&y.set(t,{...y.get(t)??{},...e})},S=e=>{const t=String(e.from??""),i=String(e.to??""),r=String(e.type??"related_to");t&&i&&k.set(`${t}->${r}->${i}`,{...e,from:t,to:i,type:r})};b({id:"workspace",type:"workspace",label:R.basename(this.workspaceRoot),weight:100});for(const e of f){const 
t=v(e,p);b({id:`file:${e}`,type:"file",label:e,file:e,weight:10+Math.min(30,3*Number(n[e]??0)),metadata:{dirty:s.includes(e),churn:n[e]??0,tests:t}}),S({from:"workspace",to:`file:${e}`,type:"contains",weight:1});for(const i of t)b({id:`file:${i}`,type:"test",label:i,file:i,weight:8}),S({from:`file:${e}`,to:`file:${i}`,type:"covered_by",weight:.72,evidence:"fuzzy test filename match"})}for(const e of d.slice(0,1200)){const t=String(e.file??""),i=String(e.name??"");if(!t||!i)continue;const r=`symbol:${t}:${i}`;b({id:r,type:"symbol",label:i,file:t,line:e.line,weight:5}),S({from:`file:${t}`,to:r,type:"defines",weight:.6})}for(const e of u.slice(0,300)){const t=String(e.file??""),i=String(e.route??"");if(!t||!i)continue;const r=`route:${t}:${e.method??"ANY"}:${i}`;b({id:r,type:"route",label:`${e.method??"ANY"} ${i}`,file:t,line:e.line,weight:12}),S({from:`file:${t}`,to:r,type:"exposes_runtime_path",weight:.85})}for(const e of m){const t=String(e.file??""),i=Array.isArray(e.risks)?e.risks.map(String):[];for(const e of i){const i=`risk:${w(e)}`;b({id:i,type:"risk",label:e,weight:20}),S({from:`file:${t}`,to:i,type:"has_risk",weight:.9,evidence:e})}}for(const[e,t]of(c.rules??[]).slice(0,80).entries()){const i=`rule:${e}:${w(t)}`;b({id:i,type:"rule",label:t,weight:15}),S({from:"workspace",to:i,type:"governed_by",weight:.7});for(const e of(c.riskModules??[]).slice(0,80))S({from:`file:${e}`,to:i,type:"governed_by",weight:.55})}for(const e of h.slice(0,30)){const t=`repair:${e.id??w(String(e.summary??"repair"))}`;b({id:t,type:"repair",label:String(e.summary??"Prepared repair"),weight:35,metadata:e});for(const i of Array.isArray(e.files)?e.files.map(String):[])S({from:t,to:`file:${i}`,type:"targets",weight:.95,evidence:"predictive repair queue"})}for(const[e,t]of Object.entries(n)){if(!f.includes(e))continue;const i=`change:${e}`;b({id:i,type:"change_pressure",label:`${e} churn 
${t}`,file:e,weight:Number(t)}),S({from:i,to:`file:${e}`,type:"changed_recently",weight:Math.min(1,Number(t)/10),evidence:`${t} recent commits`})}const x=this.generateCausalInsights({allKnownFiles:f,twin:a,memory:c,repairQueue:h,dirtyFiles:s,churn:n,tests:p});return{schemaVersion:1,builtAt:(new Date).toISOString(),workspaceRoot:this.workspaceRoot,workspaceName:R.basename(this.workspaceRoot),nodes:Array.from(y.values()),edges:Array.from(k.values()),insights:x,stats:{nodes:y.size,edges:k.size,files:f.length,routes:u.length,symbols:d.length,riskHotspots:m.length,repairItems:h.length,memoryRules:(c.rules??[]).length},sources:{digitalTwinBuiltAt:a.builtAt??null,engineeringMemoryUpdatedAt:c.updatedAt??null,predictiveRepairUpdatedAt:l.updatedAt??null}}}generateCausalInsights(e){const t=new Map((e.twin.riskHotspots??[]).map(e=>[String(e.file),Array.isArray(e.risks)?e.risks.map(String):[]])),i=new Map;for(const t of e.twin.routes??[]){const e=String(t.file??"");i.has(e)||i.set(e,[]),i.get(e)?.push(t)}const r=new Map;for(const t of e.twin.symbols??[]){const e=String(t.file??"");r.set(e,(r.get(e)??0)+1)}const s=new Set(e.repairQueue.flatMap(e=>Array.isArray(e.files)?e.files.map(String):[])),n=e.allKnownFiles.map(n=>{const a=t.get(n)??[],c=v(n,e.tests),l=e.churn[n]??0,p=i.get(n)?.length??0,m=r.get(n)??0,u=e.dirtyFiles.includes(n),d=s.has(n);let h=0;h+=Math.min(40,18*a.length),h+=a.length>0&&0===c.length?24:0,h+=Math.min(24,4*l),h+=u?15:0,h+=d?30:0,h+=Math.min(16,4*p),h+=Math.min(10,Math.floor(m/10)),/src\/(local-tools|agent-loop|runtime-observer|timeline-manager)\.ts$/.test(n)&&(h+=10);const g=o([...a.map(e=>`risk: ${e}`),0===c.length&&a.length>0?"no related test file found":void 0,l>0?`${l} recent commits touched this file`:void 0,u?"currently dirty in git status":void 0,d?"present in predictive repair queue":void 0,p>0?`${p} runtime route(s)`:void 0,m>0?`${m} indexed symbol(s)`:void 
0],12);return{id:`causal-${w(n)}`,type:d?"operational-repair":a.length>0&&0===c.length?"risk-without-proof":"change-pressure",title:`${n} is a causal pressure point`,severity:b(h),score:h,confidence:Math.min(.95,.42+.08*g.length),summary:`Changes here are likely to propagate because the file combines ${g.slice(0,3).join(", ")||"repository centrality signals"}.`,evidence:g,likelyFiles:o([n,...c],12),recommendedAction:d?"Run workspace.predictive_repair, then race fixes in timelines before promotion.":a.length>0&&0===c.length?"Add focused tests or a runtime proof before broad edits touch this file.":"Use workspace.impact_map before changing this area and verify through a timeline."}}).filter(e=>e.score>=20).sort((e,t)=>t.score-e.score).slice(0,20);return(e.memory.rules??[]).length>0&&n.push({id:"causal-engineering-memory",type:"policy-memory",title:"Engineering Memory is enforcing local repo rules",severity:"medium",score:42,confidence:.82,summary:"Accepted and rejected work has become an active policy layer for future changes.",evidence:(e.memory.rules??[]).slice(0,6),likelyFiles:(e.memory.riskModules??[]).slice(0,10),recommendedAction:"Load Engineering Memory before planning large changes and record outcomes after verification."}),n.sort((e,t)=>Number(t.score)-Number(e.score))}answerCausalQuery(e,t){const i=y(e),r=e=>{const t=JSON.stringify(e??"").toLowerCase();return i.reduce((e,i)=>e+(t.includes(i)?1:0),0)},s=[...t.insights??[]].map(e=>({...e,queryScore:r(e)+Number(e.score??0)/100})).filter(e=>e.queryScore>0).sort((e,t)=>t.queryScore-e.queryScore).slice(0,5),n=[...t.nodes??[]].map(e=>({...e,queryScore:r(e)+Number(e.weight??0)/100})).filter(e=>e.queryScore>0).sort((e,t)=>t.queryScore-e.queryScore).slice(0,12),a=o(s.map(e=>String(e.recommendedAction??"")),5);return{answer:s.length>0?`Top causal signal: ${s[0].title}. ${s[0].summary}`:"No direct causal match found. 
Build the Digital Twin and Causal Intelligence graph again after more runtime or git evidence exists.",topInsights:s,evidenceNodes:n,recommendations:a,graphStats:t.stats??{}}}async discoveryLab(e={},t){const i=String(e.action??"run"),r=this.meshArtifactPath("discovery-lab.json"),s=await n(r,{schemaVersion:1,ranAt:null,discoveries:[],history:[]});if("status"===i)return{ok:!0,path:r,...s};if("clear"===i){const e={schemaVersion:1,ranAt:(new Date).toISOString(),discoveries:[],history:s.history??[]};return await a(r,e),{ok:!0,path:r,...e}}const o=Math.max(1,Math.min(Number(e.maxDiscoveries)||8,20));t?.("[Discovery Lab] Running causal scan and repair analysis...\n");const c=String(e.verificationCommand??""),[l,p]=await Promise.all([this.causalIntelligence({action:"build"},t),this.predictiveRepair({action:"analyze",verificationCommand:c||void 0},t).catch(e=>({ok:!1,queue:[],error:e.message}))]),m=l.graph,u=c||this.defaultVerificationCommand((await this.digitalTwin({action:"read"}).catch(()=>({twin:null}))).twin),d=this.generateLabDiscoveries(m,p,u).slice(0,o),h={schemaVersion:1,ranAt:(new Date).toISOString(),summary:{discoveries:d.length,critical:d.filter(e=>"critical"===e.severity).length,high:d.filter(e=>"high"===e.severity).length,verificationCommand:u},discoveries:d,history:[...d,...s.history??[]].slice(0,100)};return await a(r,h),{ok:!0,path:r,...h}}generateLabDiscoveries(e,t,i){return[...(Array.isArray(t?.queue)?t.queue:[]).map((e,t)=>({id:`lab-repair-${t}-${w(String(e.id??e.summary??"repair"))}`,type:"prepared-repair",severity:"high",score:90-t,confidence:.86,hypothesis:String(e.summary??"Diagnostics can be converted into a verified repair."),evidence:o(["Predictive Repair produced a prepared queue item.",...Array.isArray(e.files)?e.files.map(e=>`file: ${e}`):[],String(e.diagnostics??"").slice(0,500)],8),experiment:{steps:["Inspect the referenced files and diagnostics.","Generate 3 candidate fixes with agent.race_fixes.","Promote only a passing 
timeline."],verificationCommand:e.verificationCommand??i,recommendedTool:"agent.race_fixes",rollback:"Do not promote a failed timeline."},files:Array.isArray(e.files)?e.files:[]})),...(e.insights??[]).map((e,t)=>({id:`lab-causal-${t}-${w(String(e.id??e.title??"insight"))}`,type:"causal-opportunity",severity:e.severity,score:Number(e.score??0),confidence:e.confidence??.7,hypothesis:`Reducing "${e.title}" will lower future change risk or increase delivery speed.`,evidence:e.evidence??[],experiment:{steps:["Run workspace.impact_map on the top likely file.","Add the smallest proof: focused test, runtime assertion, or architecture guard.","Verify and record the outcome in Engineering Memory."],verificationCommand:i,recommendedTool:"workspace.reality_fork",rollback:"Keep the proof in a timeline until verification passes."},files:e.likelyFiles??[]}))].sort((e,t)=>Number(t.score??0)-Number(e.score??0)).slice(0,20)}async realityFork(e={},t){const i=String(e.action??"plan"),r=this.meshArtifactPath("reality-forks","latest.json"),s=await n(r,null);if("status"===i)return s?{ok:!0,path:r,plannedAt:s.plannedAt,intent:s.intent,proposals:s.proposals?.length??0,materialized:(s.proposals??[]).filter(e=>e.timelineId).length,recommendation:s.recommendation?.id??null}:{ok:!1,status:"missing",path:r};if("clear"===i){const e={schemaVersion:1,plannedAt:(new Date).toISOString(),intent:null,proposals:[]};return await a(r,e),{ok:!0,path:r,...e}}const o=String(e.intent??"").trim();if(!o)throw new Error("workspace.reality_fork requires intent for plan or fork actions");const c=Math.max(2,Math.min(Number(e.forks)||4,6));t?.(`[Reality Fork] Planning ${c} alternate realities for: ${o}\n`);const[l,p,m]=await Promise.all([this.intentCompile({intent:o,verificationCommand:e.verificationCommand}),this.causalIntelligence({action:"build"},t),this.engineeringMemory({action:"read"})]),u=l.contract,d=p.graph,h=m.memory??{},g=String(e.verificationCommand??u.rollout?.verificationCommand??"npm 
test"),f=this.buildRealityProposals({intent:o,contract:u,graph:d,memory:h,forks:c,verificationCommand:g});if("fork"===i){const t=Boolean(e.runVerification);for(const e of f){const i=await this.timelines.create({name:`reality-${e.id}`}),r=R.join(i.timeline.root,".mesh","reality-forks");if(await x.mkdir(r,{recursive:!0}),await x.writeFile(R.join(r,`${e.id}.json`),JSON.stringify({schemaVersion:1,materializedAt:(new Date).toISOString(),intent:o,proposal:e},null,2),"utf8"),e.timelineId=i.timeline.id,e.timelineRoot=i.timeline.root,t){const t=await this.timelines.run({timelineId:i.timeline.id,command:g});e.verification={ok:t.ok,exitCode:t.exitCode,durationMs:t.commandRecord.durationMs}}}}const y={schemaVersion:1,plannedAt:(new Date).toISOString(),intent:o,action:i,verificationCommand:g,recommendation:f[0]??null,proposals:f,causalEvidence:(d.insights??[]).slice(0,5).map(e=>({id:e.id,title:e.title,severity:e.severity,likelyFiles:e.likelyFiles}))};return await a(r,y),{ok:!0,path:r,...y}}buildRealityProposals(e){const t=y(e.intent),i=[...e.graph.insights??[]].map(e=>({...e,tokenScore:t.reduce((t,i)=>t+(JSON.stringify(e).toLowerCase().includes(i)?1:0),0)})).sort((e,t)=>t.tokenScore+Number(t.score??0)/100-(e.tokenScore+Number(e.score??0)/100)),r=o(i.flatMap(e=>Array.isArray(e.likelyFiles)?e.likelyFiles:[]),20),s=o([...e.contract.likelyFiles??[],...r],30),n=(e.memory.rules??[]).slice(0,8);return[{id:"minimal-proof",strategy:"Minimal Proof Reality",thesis:"Ship the smallest vertical slice that satisfies the intent while preserving existing boundaries.",focus:s.slice(0,6),bonus:20},{id:"causal-risk-collapse",strategy:"Causal Risk Collapse Reality",thesis:"Attack the files with the strongest causal risk signals first so the change reduces future fragility.",focus:r.slice(0,8),bonus:28},{id:"test-first-proof",strategy:"Test-First Reality",thesis:"Create executable proof before implementation and use it to constrain the 
patch.",focus:o([...e.contract.tests??[],...s.slice(0,5)],10),bonus:24},{id:"runtime-shadow",strategy:"Runtime Shadow Reality",thesis:"Instrument the runtime path, capture failure evidence, and then patch the observed behavior.",focus:s.filter(e=>/runtime|server|api|route|src\//i.test(e)).slice(0,8),bonus:18},{id:"architecture-law",strategy:"Architecture Law Reality",thesis:"Convert the intent into explicit constraints so future edits cannot drift across the same boundary.",focus:o([...r.slice(0,5),...s.slice(0,5)],10),bonus:16},{id:"product-slice",strategy:"Product Slice Reality",thesis:"Deliver a complete user-visible slice and defer internal cleanup unless proof requires it.",focus:s.slice(0,10),bonus:14}].slice(0,e.forks).map((t,r)=>{const a=o(t.focus.length>0?t.focus:s.slice(0,6),12),c=i.filter(e=>(e.likelyFiles??[]).some(e=>a.includes(e))).slice(0,4),l=c.reduce((e,t)=>e+Number(t.score??0),0),p=6*Math.max(0,a.length-4),m=Math.round(t.bonus+l/8+Math.max(0,30-p));return{id:`${r+1}-${t.id}`,strategy:t.strategy,thesis:t.thesis,score:m,confidence:Math.min(.95,.55+.09*c.length+.015*a.length),targetFiles:a,constraints:o([...(e.contract.risks??[]).slice(0,6),...n,...c.map(e=>String(e.recommendedAction??""))],14),implementationContract:{phases:e.contract.phases??[],interfaces:e.contract.interfaces??[],tests:e.contract.tests??[],verificationCommand:e.verificationCommand,rollback:e.contract.rollout?.rollback??"Promote only passing timelines."},expectedEffects:{riskReduced:c.map(e=>e.title),blastRadius:a.length<=4?"narrow":a.length<=9?"moderate":"wide",proofRequired:e.contract.tests??["Run verification command before promotion."]},promoteWhen:["Verification command passes.","Causal graph no longer reports a higher severity for the touched files.","Engineering Memory records the accepted or rejected outcome."]}}).sort((e,t)=>t.score-e.score)}async ghostEngineer(e={},t){const i=String(e.action??"profile"),r=this.meshArtifactPath("ghost-engineer","profile.json"),s=await 
n(r,null);if("status"===i)return s?{ok:!0,path:r,learnedAt:s.learnedAt,commitsAnalyzed:s.evidence?.commitsAnalyzed??0,dirtyFiles:s.evidence?.dirtyFiles?.length??0,firstReadFiles:s.habits?.firstReadFiles?.length??0,confidence:s.confidence??0}:{ok:!1,status:"missing",path:r};if("clear"===i){const e={schemaVersion:1,clearedAt:(new Date).toISOString(),profile:null};return await a(r,e),{ok:!0,path:r,...e}}if("learn"===i){const e=await this.learnGhostEngineerProfile(t);return await a(r,e),{ok:!0,path:r,profile:e}}const o=s&&null!==s.profile?s:await this.learnGhostEngineerProfile(t);if(s&&null!==s.profile||await a(r,o),"profile"===i)return{ok:!0,path:r,profile:o};if("predict"===i){const t=String(e.goal??"").trim();if(!t)throw new Error("workspace.ghost_engineer predict action requires goal");const i=await this.predictGhostEngineerPath(t,o,String(e.verificationCommand??"")),s=this.meshArtifactPath("ghost-engineer","predictions",`${Date.now().toString(36)}-${w(t)}.json`);return await a(s,i),{ok:!0,profilePath:r,path:s,prediction:i}}if("divergence"===i){const t=String(e.plan??"").trim();if(!t)throw new Error("workspace.ghost_engineer divergence action requires plan");return{ok:!0,profilePath:r,divergence:this.evaluateGhostDivergence(t,o)}}if("patch"===i){const t=String(e.goal??"").trim();if(!t)throw new Error("workspace.ghost_engineer patch action requires goal");const i=await this.predictGhostEngineerPath(t,o,String(e.verificationCommand??"")),s=await this.timelines.create({name:`ghost-${w(t)}`}),n=R.join(s.timeline.root,".mesh","ghost-engineer");await x.mkdir(n,{recursive:!0});const c={schemaVersion:1,materializedAt:(new Date).toISOString(),goal:t,profileDigest:i.profileDigest,autopilotPatch:i.autopilotPatch,predictedApproach:i.predictedApproach,divergence:i.divergence,promotionGates:["Implement the patch inside this timeline only.",`Run ${i.predictedApproach.verificationCommand}.`,"Compare timeline telemetry before promotion.","Record accepted/rejected outcome in 
Engineering Memory."]},l=R.join(n,`autopilot-${w(t)}.json`);await x.writeFile(l,JSON.stringify(c,null,2),"utf8");const p=this.meshArtifactPath("ghost-engineer","predictions",`${Date.now().toString(36)}-${w(t)}-autopilot.json`);return await a(p,{...c,timelineId:s.timeline.id,timelineRoot:s.timeline.root}),{ok:!0,profilePath:r,path:p,timelineId:s.timeline.id,timelineRoot:s.timeline.root,autopilot:c}}throw new Error(`Unknown workspace.ghost_engineer action: ${i}`)}async learnGhostEngineerProfile(e){e?.("[Ghost Engineer] Learning local engineering style from git, memory, causal graph, and twin...\n");const[t,i,r,s,n]=await Promise.all([this.digitalTwin({action:"build"}).catch(()=>({twin:null})),this.engineeringMemory({action:"read"}).catch(()=>({memory:null})),this.causalIntelligence({action:"build"}).catch(()=>({graph:null})),this.readDirtyFilesForMemory(),this.readGitWorkSamples()]),a=t.twin??{},c=i.memory??{},l=r.graph??{},p=(a.package,n.flatMap(e=>e.files)),m=this.rankStrings([...p,...s,...c.riskModules??[]],40),u=this.rankStrings(p.map(e=>this.classifyEngineerSurface(e)),20),d=n.filter(e=>e.files.some(e=>/(\.test|\.spec|^tests\/|\/tests\/)/i.test(e))).length,h=n.filter(e=>e.files.some(e=>/\.(md|mdx)$/i.test(e))).length,g=n.filter(e=>e.files.some(e=>/package\.json|tsconfig|config|\.ya?ml|\.toml/i.test(e))).length,f=n.length>0?Number((n.reduce((e,t)=>e+t.files.length,0)/n.length).toFixed(2)):0,y=Array.isArray(a.symbols)?a.symbols:[],w=this.inferGhostNamingStyle(y),k=this.defaultVerificationCommand(a),b=Math.min(.95,.35+Math.min(.3,n.length/160)+Math.min(.2,(c.events?.length??0)/80)+Math.min(.1,m.length/80)),v=o([...(l.insights??[]).flatMap(e=>Array.isArray(e.likelyFiles)?e.likelyFiles:[]),...(a.riskHotspots??[]).map(e=>e.file)],30);return{schemaVersion:1,learnedAt:(new 
Date).toISOString(),workspaceRoot:this.workspaceRoot,workspaceName:R.basename(this.workspaceRoot),confidence:b,evidence:{commitsAnalyzed:n.length,dirtyFiles:s,memoryEvents:c.events?.length??0,memoryRules:(c.rules??[]).slice(0,20),causalInsights:(l.insights??[]).slice(0,8).map(e=>({title:e.title,severity:e.severity,likelyFiles:e.likelyFiles}))},habits:{firstReadFiles:o([...m.map(e=>e.value),...v,...(a.tests??[]).slice(0,8)],20),frequentSurfaces:u,averageFilesPerCommit:f,testsTogetherWithCodeRate:n.length>0?Number((d/n.length).toFixed(2)):0,docsTogetherWithCodeRate:n.length>0?Number((h/n.length).toFixed(2)):0,configChangeRate:n.length>0?Number((g/n.length).toFixed(2)):0,preferredVerificationCommand:k,naming:w,patchShape:f<=2?"surgical":f<=6?"vertical-slice":"campaign"},behaviorModel:{readingOrder:["Read the closest existing implementation pattern.","Read the public tool/command surface before changing behavior.","Read linked tests or create a focused proof if none exists.","Check Causal Intelligence for risk hotspots before broad edits."],implementationSequence:["Schema or public surface first.","Backend implementation second.","CLI/dashboard/docs wiring third.","Focused test coverage fourth.","Build/test verification before promotion."],avoidances:["Avoid new dependencies unless the repo already uses the pattern.","Avoid broad rewrites when a vertical slice proves the behavior.","Avoid touching high-risk runtime or shell execution files without timeline verification."],approvalCriteria:[`Verification passes: ${k}.`,"Docs match actual public surface.","Risky changes have tests or timeline telemetry.","Engineering Memory captures accepted or rejected lessons."],divergenceRules:[{id:"missing-verification",severity:"high",description:"Plan does not mention tests, build, or timeline verification."},{id:"new-dependency",severity:"medium",description:"Plan adds dependency/framework without explicit 
justification."},{id:"wide-blast-radius",severity:"medium",description:"Plan touches many files compared with learned patch shape."},{id:"docs-missing",severity:"medium",description:"Public tool/CLI change lacks docs update."},{id:"risk-without-proof",severity:"high",description:"High-risk file touched without proof-first workflow."}]},recentWork:n.slice(0,20)}}async predictGhostEngineerPath(e,t,i=""){const[r,s]=await Promise.all([this.intentCompile({intent:e,verificationCommand:i||void 0}),this.causalIntelligence({action:"query",query:e}).catch(()=>({topInsights:[],recommendations:[]}))]),n=r.contract,a=i||n.rollout?.verificationCommand||t.habits?.preferredVerificationCommand||"npm test",c=o([...n.likelyFiles??[],...(s.topInsights??[]).flatMap(e=>Array.isArray(e.likelyFiles)?e.likelyFiles:[]),...(t.habits?.firstReadFiles??[]).slice(0,6)],24),l=o([...c.slice(0,8),...(t.habits?.firstReadFiles??[]).slice(0,8)],12),p=o([...t.behaviorModel?.readingOrder??[],...n.phases??[],...t.behaviorModel?.implementationSequence??[]],14),m=[`Start by reading ${l.slice(0,4).join(", ")||"the closest matching files"}.`,"Make the smallest repo-native vertical slice that satisfies the goal.","Mirror existing naming and tool/command patterns before introducing new abstraction.",`Run ${a} before promotion.`,"Record the outcome in Engineering Memory."],u=this.evaluateGhostDivergence(m.join("\n"),t,c);return{schemaVersion:1,predictedAt:(new Date).toISOString(),goal:e,profileDigest:{learnedAt:t.learnedAt,confidence:t.confidence,patchShape:t.habits?.patchShape,preferredVerificationCommand:t.habits?.preferredVerificationCommand,frequentSurfaces:t.habits?.frequentSurfaces?.slice?.(0,6)??[]},prediction:`You would likely make a ${t.habits?.patchShape??"vertical-slice"} change: read existing surfaces first, patch the smallest compatible path, add proof, update docs if public behavior changes, then 
verify.`,predictedApproach:{firstReads:l,likelyFiles:c,implementationSteps:p,tests:n.tests??[],docs:/cli|tool|feature|command|docs?|readme/i.test(e)?["Update MESH_FEATURES.md or the nearest public docs after code lands."]:[],verificationCommand:a,rollback:n.rollout?.rollback??"Use timelines or semantic undo for rollback."},divergence:u,autopilotPatch:{mode:"timeline-first",styleConstraints:[...t.behaviorModel?.avoidances??[],...(t.evidence?.memoryRules??[]).slice(0,6)],suggestedPatchOrder:m,promotionGates:t.behaviorModel?.approvalCriteria??[]},causalSignals:s.topInsights??[],contract:n}}evaluateGhostDivergence(e,t,i=[]){const r=e.toLowerCase(),s=[],n=[],a=(e,t,i,r)=>{s.push({id:e,severity:t,message:i,evidence:r})};/\b(test|build|verify|verification|timeline|npm test|npm run build)\b/.test(r)?n.push("mentions verification"):a("missing-verification","high","Plan does not mention verification.","Learned profile expects tests/build/timeline verification before promotion."),/\b(add|install|dependency|framework|package)\b/.test(r)&&!/\bjustify|because|existing pattern|already uses\b/.test(r)&&a("new-dependency","medium","Plan may introduce a dependency without justification.","Profile avoids new dependencies unless local patterns justify them.");const c=o(Array.from(e.matchAll(/([A-Za-z0-9_./-]+\.(?:ts|tsx|js|jsx|mjs|cjs|md|json|css|html|yml|yaml))/g)).map(e=>e[1]),100),l=o([...c,...i],100),p=Number(t.habits?.averageFilesPerCommit??0);p>0&&l.length>Math.max(8,3*p)&&a("wide-blast-radius","medium","Plan touches a wider surface than the learned patch shape.",`${l.length} files vs typical ${p}.`);const m=/\b(cli|slash|command|tool|dashboard|feature|public|docs?|readme)\b/.test(r)||l.some(e=>/agent-loop|local-tools|MESH_FEATURES|MOONSHOT|README/i.test(e));m&&!/\b(doc|docs|readme|features|moonshot)\b/.test(r)?a("docs-missing","medium","Public surface change lacks a docs step.","Profile and repo pattern keep docs aligned with tool surfaces."):m&&n.push("keeps docs aligned with 
public surface");const u=new Set((t.evidence?.causalInsights??[]).flatMap(e=>Array.isArray(e.likelyFiles)?e.likelyFiles:[]));l.some(e=>u.has(e))&&!/\b(test|timeline|verify|proof|race|autopsy)\b/.test(r)&&a("risk-without-proof","high","High-risk file appears without proof-first workflow.","Causal profile marks the file as risky."),/\b(existing|pattern|smallest|surgical|vertical|focused)\b/.test(r)&&n.push("uses local pattern and focused change language");const d=s.reduce((e,t)=>e+("high"===t.severity?30:15),0),h=Math.max(0,Math.min(100,100-d+5*n.length));return{alignmentScore:h,verdict:h>=85?"aligned":h>=65?"watch":"divergent",warnings:s,matchedPreferences:n,profileConfidence:t.confidence??0}}async readGitWorkSamples(e=80){try{const t=Math.max(1,Math.min(e,300)),{stdout:i}=await E(`git log --name-only --format=@@@%h%x09%ad%x09%s --date=short --max-count=${t}`,{cwd:this.workspaceRoot,maxBuffer:2097152}),r=[];let s=null;for(const e of i.split(/\r?\n/g)){if(e.startsWith("@@@")){s&&r.push(s);const[t,i,...n]=e.slice(3).split("\t");s={commit:t||"unknown",date:i||"",subject:n.join("\t"),files:[]};continue}const t=e.trim();s&&t&&!t.includes(" ")&&s.files.push(t)}return s&&r.push(s),r.map(e=>({...e,files:o(e.files,80)}))}catch{return[]}}rankStrings(e,t=20){const i=new Map;for(const t of e){const e=t.trim();e&&i.set(e,(i.get(e)??0)+1)}return Array.from(i.entries()).map(([e,t])=>({value:e,count:t})).sort((e,t)=>t.count-e.count||e.value.localeCompare(t.value)).slice(0,t)}classifyEngineerSurface(e){return/(\.test|\.spec|^tests\/|\/tests\/)/i.test(e)?"tests":/\.(md|mdx)$/i.test(e)?"docs":/agent-loop|local-tools|tool|command|cli/i.test(e)?"cli-tooling":/runtime|observer|server|api|route/i.test(e)?"runtime-api":/dashboard|canvas|ui|frontend|tsx|jsx|css|html/i.test(e)?"frontend":/package\.json|tsconfig|config|\.ya?ml|\.toml|Dockerfile/i.test(e)?"config":"source"}inferGhostNamingStyle(e){const 
t=e.map(e=>String(e.name??"")).filter(Boolean).slice(0,1e3),i=t.filter(e=>/^[a-z][A-Za-z0-9]*$/.test(e)).length,r=t.filter(e=>/^[A-Z][A-Za-z0-9]*$/.test(e)).length,s=t.filter(e=>/^[a-z0-9_]+$/.test(e)&&e.includes("_")).length,n=[{style:"lowerCamel",count:i},{style:"PascalCase",count:r},{style:"snake_case",count:s}].sort((e,t)=>t.count-e.count)[0];return{dominant:n?.style??"unknown",samples:t.slice(0,20),counts:{lowerCamel:i,PascalCase:r,snake_case:s}}}defaultVerificationCommand(e){const t=e?.package?.scripts??{};return t.test?"npm test":t.build?"npm run build":t.lint?"npm run lint":"npm test"}inferIntentInterfaces(e,t){const i=e.toLowerCase(),r=[];return/api|endpoint|route|server|backend/.test(i)&&r.push("API route/controller surface"),/ui|frontend|screen|dashboard|page|button|form/.test(i)&&r.push("Frontend/browser interaction surface"),/db|database|schema|migration|persist|store/.test(i)&&r.push("Persistence/schema surface"),/cli|command|slash|tool/.test(i)&&r.push("CLI/tool surface"),0===r.length&&t?.routes?.length>0&&r.push("Existing route and tool surface"),r}inferIntentTests(e,t){const i=["Run declared verification command before promotion."];return/runtime|crash|error|debug|failure/i.test(e)&&i.push("Add a runtime failure fixture or stack-trace regression test."),/ui|frontend|dashboard|browser/i.test(e)&&i.push("Add a DOM/payload or dashboard snapshot test."),/api|route|backend/i.test(e)&&i.push("Add route matching and request/response behavior tests."),0===(t?.tests??[]).length&&i.push("Create first focused test if no matching test exists."),i}inferIntentRisks(e,t,i){const r=new Map((t?.riskHotspots??[]).map(e=>[e.file,e.risks])),s=i.flatMap(e=>r.get(e)?.map(t=>`${e}: ${t}`)??[]);return/auth|secret|token|payment|delete|migration/i.test(e)&&s.push("Intent contains high-risk domain terms; require timeline verification."),o(s,20)}async readDirtyFilesForMemory(){try{const{stdout:e}=await E("git status --short",{cwd:this.workspaceRoot});return 
o(e.split(/\r?\n/g).map(e=>e.slice(3).trim()).filter(Boolean),100)}catch{return[]}}async readGitChurn(e=200){try{const t=Math.max(1,Math.min(e,1e3)),{stdout:i}=await E(`git log --name-only --format= --max-count=${t}`,{cwd:this.workspaceRoot,maxBuffer:1048576}),r={};for(const e of i.split(/\r?\n/g)){const t=e.trim();t&&!t.includes(" ")&&(r[t]=(r[t]??0)+1)}return r}catch{return{}}}async listRuntimeRuns(){const e=this.runtimeObserver.basePath,t=await x.readdir(e,{withFileTypes:!0}).catch(()=>[]),i=[];for(const r of t){if(!r.isDirectory())continue;const t=await n(R.join(e,r.name,"run.json"),null);t&&i.push(t)}return i.sort((e,t)=>String(t.startedAt??"").localeCompare(String(e.startedAt??""))).slice(0,20)}scoreCockpitHealth(e){const t=e.repair?.queue?.length??0,i=(e.timelines?.timelines??[]).filter(e=>"fail"===e.verdict).length,r=(e.runtimeRuns??[]).filter(e=>"failed"===e.status).length,s=Math.max(0,100-15*t-10*i-10*r);return{score:s,status:s>=90?"healthy":s>=70?"watch":"attention",signals:{repairQueue:t,failedTimelines:i,failedRuns:r}}}async*indexEverything(){const e=await S(this.workspaceRoot,1e4,this.workspaceRoot),i=e.length;let r=0;const s=async(e,i)=>{const r=t(this.workspaceRoot,e),s=await x.stat(e),n=Math.floor(s.mtimeMs);let a="",o="";if(i||(a=await x.readFile(e,"utf8"),o=P.createHash("sha1").update(a).digest("hex"),i=await this.cache.getCapsule(r,"medium",n,o)),!i)if(this.meshCore.isAvailable){const e=await this.meshCore.summarizeAllTiers(r,a);await Promise.all(We.map(t=>this.cache.setCapsule(r,t,e[t]||"",n,o)))}else await Promise.all(We.map(e=>this.cache.setCapsule(r,e,a.slice(0,12e3),n,o)));return r};for(let n=0;n<i;n+=Ve){const a=e.slice(n,n+Ve),o=[];for(const e of a){const i=t(this.workspaceRoot,e),r=await x.stat(e);o.push({filePath:i,tier:"medium",mtimeMs:Math.floor(r.mtimeMs)})}const c=await this.cache.getCapsuleBatch(o),l=await Promise.all(a.map(e=>{const i=t(this.workspaceRoot,e);return s(e,c.get(`${i}\0medium`))}));for(let 
e=0;e<l.length;e++)r++,yield{current:r,total:i,path:l[e]}}await this.updateIntelligence(),await this.workspaceIndex.rebuild()}async updateIntelligence(){const e=R.join(this.workspaceRoot,".mesh");if(!await x.access(e).then(()=>!0).catch(()=>!1))return;const r=(await S(this.workspaceRoot,2e3,this.workspaceRoot)).filter(e=>e.endsWith(".ts")||e.endsWith(".tsx")||e.endsWith(".js")).map(e=>t(this.workspaceRoot,e)),s=[];for(const e of r.slice(0,100))try{const t=i(this.workspaceRoot,e),r=g(await x.readFile(t,"utf8"));r.length>0&&s.push(`${e} -> ${r.join(", ")}`)}catch{}await x.writeFile(R.join(e,"dependency_graph.md"),s.join("\n"));const n={CORE:[],API:[],INFRA:[],MCP:[]};for(const e of r)e.includes("agent-loop")||e.includes("agent-os")||e.includes("mesh-core")?n.CORE.push(e):e.includes("api")||e.includes("gateway")||e.includes("server")||e.includes("portal")?n.API.push(e):e.includes("index")||e.includes("cache")||e.includes("storage")||e.includes("db")||e.includes("assembler")?n.INFRA.push(e):e.includes("mcp")&&n.MCP.push(e);const o=Object.entries(n).filter(([e,t])=>t.length>0).map(([e,t])=>`${e}: ${t.slice(0,20).join(", ")}`);await x.writeFile(R.join(e,"architecture.md"),o.join("\n"));const c={};for(const e of r.slice(0,150))try{const t=i(this.workspaceRoot,e),r=h(await x.readFile(t,"utf8"));Object.assign(c,r)}catch{}await a(R.join(e,"schemas.json"),c);const l={add_mcp_tool:["src/mcp-client.ts","src/local-tools.ts","src/agent-loop.ts"],fix_auth:["src/auth.ts","src/mesh-gateway.ts"],update_index:["src/workspace-index.ts","src/cache-manager.ts"],refactor_brain:["src/mesh-brain.ts","src/company-brain.ts"],debug_runtime:["src/runtime-api.ts","src/agent-os.ts"],test_setup:["scripts/run-tests.cjs","tests/"]};r.some(e=>e.includes("moonshots"))&&(l.moonshot_dev=["src/moonshots/common.ts","src/agent-loop.ts"]),await a(R.join(e,"ops.json"),l)}}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
/*
 * Minified MCP (Model Context Protocol) stdio client.
 * - e(): builds the environment for the spawned MCP server. By default only a
 *   small allowlist (HOME, PATH, shell/locale/temp vars, ...) plus any names
 *   listed in MESH_MCP_ENV_ALLOWLIST is forwarded; setting
 *   MESH_MCP_INHERIT_ENV to 1|true|yes forwards the whole parent environment.
 * - McpClient: spawns the server command, speaks JSON-RPC 2.0 framed with
 *   Content-Length headers over stdin/stdout, and tracks in-flight requests
 *   in `pending` keyed by numeric id. When the child exits, every pending
 *   request is rejected with the exit code/signal. stderr lines from the
 *   child are mirrored to the parent's stderr with a [mesh-mcp] prefix.
 * NOTE(review): onData() throws on an unparseable header block and buffers
 *   partial messages; handleMessage() dispatches only numeric-id responses,
 *   so server-initiated requests/notifications are silently ignored.
 * NOTE(review): callTool() accepts a third parameter `s` that is unused —
 *   presumably a reserved options/timeout slot; confirm against callers.
 */
function e(){if(/^(1|true|yes)$/i.test(process.env.MESH_MCP_INHERIT_ENV??""))return Object.fromEntries(Object.entries(process.env).filter(e=>"string"==typeof e[1]));const e=new Set(["HOME","LANG","LC_ALL","LOGNAME","NODE_PATH","PATH","SHELL","TMPDIR","TEMP","TMP","USER","USERNAME","SystemRoot","ComSpec"]);for(const t of(process.env.MESH_MCP_ENV_ALLOWLIST??"").split(",")){const s=t.trim();s&&e.add(s)}const t={};for(const s of e){const e=process.env[s];"string"==typeof e&&(t[s]=e)}return t}import{spawn as t}from"node:child_process";import{once as s}from"node:events";export class McpClient{process;nextId=1;pending=new Map;readBuffer=Buffer.alloc(0);constructor(s,r){this.process=t(s,r,{stdio:["pipe","pipe","pipe"],env:e()}),this.process.stdout.on("data",e=>this.onData(e)),this.process.stderr.on("data",e=>{const t=e.toString("utf8").trim();t&&process.stderr.write(`[mesh-mcp] ${t}\n`)}),this.process.on("exit",(e,t)=>{const s=`MCP process exited (code=${e}, signal=${t})`;for(const[,e]of this.pending)e.reject(new Error(s));this.pending.clear()})}async initialize(){await this.sendRequest("initialize",{protocolVersion:"2024-11-05",clientInfo:{name:"mesh-agent-cli",version:"0.1.0"},capabilities:{}}),this.sendNotification("notifications/initialized",{})}async listTools(){return(await this.sendRequest("tools/list",{})).tools??[]}async callTool(e,t,s){return this.sendRequest("tools/call",{name:e,arguments:t})}async close(){this.process.stdin.end(),this.process.killed||this.process.kill(),await s(this.process,"exit").catch(()=>{})}sendNotification(e,t){const s={jsonrpc:"2.0",method:e,params:t};this.writeMessage(s)}sendRequest(e,t){const s=this.nextId++,r={jsonrpc:"2.0",id:s,method:e,params:t};return new Promise((e,t)=>{this.pending.set(s,{resolve:e,reject:t}),this.writeMessage(r)})}writeMessage(e){const t=Buffer.from(JSON.stringify(e),"utf8"),s=Buffer.from(`Content-Length: 
${t.length}\r\n\r\n`,"utf8");this.process.stdin.write(Buffer.concat([s,t]))}onData(e){for(this.readBuffer=Buffer.concat([this.readBuffer,e]);;){const e=this.readBuffer.indexOf("\r\n\r\n");if(-1===e)return;const t=this.readBuffer.slice(0,e).toString("utf8"),s=t.match(/Content-Length:\s*(\d+)/i);if(!s)throw new Error(`Invalid MCP header: ${t}`);const r=e+4+Number(s[1]);if(this.readBuffer.length<r)return;const i=this.readBuffer.slice(e+4,r).toString("utf8");this.readBuffer=this.readBuffer.slice(r);const n=JSON.parse(i);this.handleMessage(n)}}handleMessage(e){if("number"!=typeof e.id)return;const t=this.pending.get(e.id);t&&(this.pending.delete(e.id),e.error?t.reject(new Error(`MCP error ${e.error.code}: ${e.error.message}`)):t.resolve(e.result))}}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
/*
 * Minified "mesh brain" client: shared memory of error-signature → fix
 * patterns, persisted locally and optionally synced to an HTTP endpoint.
 * - t(a, b): token-overlap similarity of two whitespace-tokenized strings,
 *   |A ∩ B| / sqrt(|A| * |B|); 0 when either side is empty.
 * - normalizeErrorSignature / normalizeDiffPattern: redact identifiers
 *   ($ID / $V), numbers ($N) and quoted literals ($STR) so signatures match
 *   across projects without leaking source content; outputs are size-capped.
 * - MeshBrainClient: state lives in .mesh/brain.json (contributions newest
 *   first, capped at 250). query() and queryDnaCohort() try the configured
 *   endpoint first and fall back to local scoring / built-in heuristics.
 *   contribute() checks the telemetry opt-out in both persisted state and the
 *   in-memory flag before saving locally and (if configured) POSTing remotely.
 * NOTE(review): contribution records use short keys (e = error signature,
 *   d = diff pattern, v = verification, a = ISO timestamp) — presumably a
 *   size optimization; confirm against the server-side schema.
 */
function t(t,e){const r=new Set(t.split(/\s+/g).filter(Boolean)),n=new Set(e.split(/\s+/g).filter(Boolean));if(0===r.size||0===n.size)return 0;let i=0;for(const t of r)n.has(t)&&(i+=1);return i/Math.sqrt(r.size*n.size)}import{promises as e}from"node:fs";import r from"node:path";export function normalizeErrorSignature(t){return t.slice(0,8e3).replace(/[A-Za-z_$][\w$]*/g,"$ID").replace(/\d+/g,"$N").replace(/(["'`]).*?\1/g,"$STR").replace(/\s+/g," ").trim().slice(0,3e3)||"unknown-error"}export function normalizeDiffPattern(t){return t.split(/\r?\n/g).filter(t=>t.startsWith("+")||t.startsWith("-")).map(t=>t.replace(/[A-Za-z_$][\w$]*/g,"$V").replace(/\d+/g,"$N").replace(/(["'`]).*?\1/g,"$STR")).join("\n").slice(0,6e3)||"empty-diff"}export class MeshBrainClient{telemetryPath;endpoint;telemetryContribute;constructor(t){this.telemetryPath=r.join(t.workspaceRoot,".mesh","brain.json"),this.endpoint=t.endpoint?.trim()||void 0,this.telemetryContribute=t.telemetryContribute}async status(){const t=await this.readState();return{telemetryContribute:t.telemetryContribute,endpoint:t.endpoint,contributions:t.contributions.length,lastContributionAt:t.contributions[0]?.a??null}}async optOut(){const t=await this.readState();t.telemetryContribute=!1,this.telemetryContribute=!1,await this.writeState(t)}async query(e){const r=Math.max(1,Math.min(e.limit??5,20));if(this.endpoint)try{const t=await fetch(`${this.endpoint.replace(/\/$/,"")}/brain/query`,{method:"POST",headers:{"content-type":"application/json"},body:JSON.stringify({errorSignature:e.errorSignature,limit:r})});if(t.ok){return{ok:!0,patterns:((await t.json()).patterns??[]).slice(0,r),source:"remote"}}}catch{}return{ok:!0,patterns:(await this.readState()).contributions.map((r,n)=>({id:`local-${n+1}`,score:t(r.e,e.errorSignature),errorSignature:r.e,diffPattern:r.d,fixSummary:"Historical local timeline 
promotion",successRate:"pass"===r.v.r?1:0,usageCount:1,verification:r.v})).filter(t=>t.score>.15).sort((t,e)=>e.score-t.score).slice(0,r),source:"local-fallback"}}async queryDnaCohort(t){if(this.endpoint)try{const e=await fetch(`${this.endpoint.replace(/\/$/,"")}/brain/dna/query`,{method:"POST",headers:{"content-type":"application/json"},body:JSON.stringify({dna:t.dna,threshold:t.threshold??.85})});if(e.ok){return{ok:!0,source:"remote",cohort:(await e.json()).cohort??[]}}}catch{}const e=function(t){const e=[];return"next"!==t.framework&&"react"!==t.framework||(e.push("Use zod for boundary validation and schema-based parsing."),e.push("Keep tests next to source files for component-heavy modules.")),"typescript"===t.language&&e.push("Prefer strict type guards instead of widening with any."),"vitest"!==t.testRunner&&"jest"!==t.testRunner||e.push("Mock external HTTP/DB boundaries in unit tests; reserve integration tests for behavior contracts."),"tailwindcss"===t.cssStrategy&&e.push("Favor utility composition and avoid bespoke CSS unless shared design tokens are required."),e}(t.dna);return{ok:!0,source:"local-fallback",cohort:e.length>0?[{similarity:.9,rules:e}]:[]}}async contribute(t){const e=await this.readState();if(!e.telemetryContribute||!this.telemetryContribute)return{ok:!0,contributed:!1,reason:"telemetry_opted_out"};const r={...t,a:(new Date).toISOString()};if(e.contributions.unshift(r),e.contributions=e.contributions.slice(0,250),await this.writeState(e),!this.endpoint)return{ok:!0,contributed:!0,reason:"saved_locally_no_endpoint"};try{const t=await fetch(`${this.endpoint.replace(/\/$/,"")}/brain/contribute`,{method:"POST",headers:{"content-type":"application/json"},body:JSON.stringify(r)});return t.ok?{ok:!0,contributed:!0}:{ok:!1,contributed:!0,reason:`remote_error_${t.status}`}}catch{return{ok:!1,contributed:!0,reason:"remote_unreachable"}}}async readState(){try{const t=await 
e.readFile(this.telemetryPath,"utf8"),r=JSON.parse(t);return{telemetryContribute:r.telemetryContribute??this.telemetryContribute,endpoint:r.endpoint||this.endpoint,contributions:Array.isArray(r.contributions)?r.contributions:[]}}catch{return{telemetryContribute:this.telemetryContribute,endpoint:this.endpoint,contributions:[]}}}async writeState(t){await e.mkdir(r.dirname(this.telemetryPath),{recursive:!0}),await e.writeFile(this.telemetryPath,JSON.stringify(t),"utf8")}}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
/*
 * Minified adapter over the optional @trymesh/native addon.
 * - e(ext): maps a file extension to a language id ("unknown" fallback).
 * - The top-level await import wires parse_file / compress_text /
 *   estimate_tokens from the native addon; if loading fails, pure-JS no-op
 *   fallbacks are installed (empty parse result, identity compression,
 *   ~4 bytes per token estimate) and nativeAccelerated reports false.
 * - MeshCoreAdapter: symbol/call-site/dependency extraction via parse_file,
 *   tiered capsule summaries (low/medium/high, capped at 6k/12k/24k chars
 *   using the "dense"/"compact"/"verbose" compression modes), and
 *   expandAlienCode(), which expands "#r:"-style shorthand tokens — the
 *   caller-supplied map first, then the built-in table.
 * NOTE(review): isAvailable is hard-coded to true even when the native addon
 *   failed to load — presumably intentional because the JS fallbacks keep the
 *   interface functional; confirm callers don't rely on it meaning "native".
 */
function e(e){return{ts:"typescript",tsx:"tsx",js:"javascript",jsx:"javascript",mjs:"javascript",cjs:"javascript",py:"python",go:"go",rs:"rust",cpp:"cpp",cc:"cpp",cs:"csharp",java:"java",html:"html",css:"css",json:"json",md:"markdown"}[e]??"unknown"}import t from"node:path";let s,a,n,r=!1;try{const e=await import("@trymesh/native");s=e.parse_file,a=e.compress_text,n=e.estimate_tokens,r=e.nativeAvailable??!1}catch{s=()=>({symbols:[],callSites:[],dependencies:[],fileType:null,parseOk:!1}),a=e=>e,n=e=>Math.ceil(Buffer.byteLength(e,"utf8")/4),r=!1}const i="dense",c="compact",l="verbose";export class MeshCoreAdapter{get isAvailable(){return!0}get nativeAccelerated(){return r}async getDetailedRecord(e,a){try{const n=t.extname(e).slice(1),r=s(a,n);return{path:e,symbols:r.symbols.map(e=>({name:e.name,kind:e.kind,lineStart:e.lineStart,lineEnd:e.lineEnd})),callSites:r.callSites.map(e=>({callee:e.callee,lineStart:e.lineStart})),dependencies:r.dependencies,fileType:r.fileType??"unknown",parseOk:r.parseOk}}catch{return null}}async extractSymbols(e,t){const s=await this.getDetailedRecord(e,t);return s?.symbols??[]}summarizeFile(e,t){return this.safeSummarizeFile(e,t)}async summarizeAllTiers(e,t){return this.summarizeSelectedTiers(e,t,["low","medium","high"])}async summarizeSelectedTiers(e,s,n){const r=String(s??""),o=t.extname(e).slice(1),m=new Set(n),p={low:"",medium:"",high:""};return m.has("low")&&(p.low=a(r,o,i).slice(0,6e3)),m.has("medium")&&(p.medium=a(r,o,c).slice(0,12e3)),m.has("high")&&(p.high=a(r,o,l).slice(0,24e3)),p}expandAlienCode(e,t={}){const s={"r:":"return ","a:":"await ","c:":"const ","l:":"let ","s:":"async ","f:":"function ","e:":"export ","i:":"if ","p:":"Promise","cn:":"console.log","th:":"throw new Error","=>":" => "};let a=e;for(const[e,s]of Object.entries(t)){const t=new RegExp(`#${e}\\b`,"g");a=a.replace(t,s)}for(const[e,t]of Object.entries(s)){const s=new RegExp(e.replace(/[.*+?^${}()|[\]\\]/g,"\\$&"),"g");a=a.replace(s,t)}return a}async close(){}async 
safeSummarizeFile(s,r){const i=String(r??""),l=t.extname(s).slice(1);try{const t=n(i),s=l.toLowerCase(),r={ext:s,language:e(s)};return{meshCoreAvailable:!0,tokensEstimate:t,fileType:r,capsulePreview:a(i,l,c).slice(0,3e3),capsuleTier:"medium"}}catch(e){return{meshCoreAvailable:!0,tokensEstimate:Math.max(1,Math.ceil(Buffer.byteLength(i,"utf8")/4)),fileType:null,warning:`mesh-native summarize failed: ${e.message}`}}}}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
import zlib from "node:zlib";
import { promisify } from "node:util";
import crypto from "node:crypto";

// Hashes of (path + payload) combinations already emitted into the LLM
// context, used by buildLlmSafeMeshContext() to skip duplicate file reads.
const seenReadFilePayloads = new Set();
const brotliCompressAsync = promisify(zlib.brotliCompress);
const brotliDecompressAsync = promisify(zlib.brotliDecompress);

/**
 * Normalize a payload before compression: JSON payloads are re-serialized
 * canonically (parse + stringify); plain text gets runs of spaces/tabs
 * collapsed and 3+ consecutive newlines reduced to a blank line.
 * @param {unknown} raw - Payload; coerced to a string (nullish -> "").
 * @returns {{normalized: string, type: "json"|"text"}}
 */
function normalizePayload(raw) {
  const text = String(raw ?? "");
  try {
    const parsed = JSON.parse(text);
    return { normalized: JSON.stringify(parsed), type: "json" };
  } catch {
    return {
      normalized: text.replace(/[ \t]+/g, " ").replace(/\n{3,}/g, "\n\n"),
      type: "text",
    };
  }
}

/**
 * Brotli-compress a payload (text mode, max quality) after normalization.
 * @param {string} payload - Raw payload text.
 * @returns {Promise<{buffer: Buffer, originalSize: number, compressedSize: number, ratio: number, type: string}>}
 *   Note: originalSize/ratio are measured against the RAW payload, not the
 *   normalized form.
 */
export async function compressMeshPayload(payload) {
  const { normalized, type } = normalizePayload(payload);
  const buffer = await brotliCompressAsync(Buffer.from(normalized, "utf8"), {
    params: {
      [zlib.constants.BROTLI_PARAM_MODE]: zlib.constants.BROTLI_MODE_TEXT,
      [zlib.constants.BROTLI_PARAM_QUALITY]: 11,
    },
  });
  const originalSize = Buffer.byteLength(payload, "utf8");
  const compressedSize = buffer.length;
  return {
    buffer,
    originalSize,
    compressedSize,
    ratio: compressedSize > 0 ? originalSize / compressedSize : 1,
    type,
  };
}

/**
 * Inverse of compressMeshPayload: Brotli-decompress to a UTF-8 string.
 * @param {Buffer} buffer - Brotli-compressed bytes.
 * @returns {Promise<string>}
 */
export async function decompressMeshPayload(buffer) {
  return (await brotliDecompressAsync(buffer)).toString("utf8");
}

/**
 * Render a tool-call result into a bounded, LLM-safe context string.
 * Large workspace.read_file payloads seen before (same path + content hash)
 * are replaced by a differential-sync note; oversized results are reduced to
 * a head/tail preview. Compression statistics are always appended.
 * @param {string} toolName - Name of the invoked tool.
 * @param {object} args - Tool arguments (JSON-stringified into the output).
 * @param {unknown} result - Tool result (JSON-stringified).
 * @param {number} [maxChars=4000] - Max characters of result text to inline.
 * @returns {Promise<string>} Multi-line context block.
 */
export async function buildLlmSafeMeshContext(toolName, args, result, maxChars = 4000) {
  let serialized = JSON.stringify(result);
  if (toolName === "workspace.read_file" && serialized.length > 500) {
    const filePath = String(args.path ?? "unknown");
    const digest = crypto.createHash("md5").update(filePath + ":" + serialized).digest("hex");
    if (seenReadFilePayloads.has(digest)) {
      serialized = `{"note": "[DIFFERENTIAL SYNC] File ${filePath} content is already in context. Omitted."}`;
    } else {
      seenReadFilePayloads.add(digest);
    }
  }
  const compressed = await compressMeshPayload(serialized);
  const roundTripped = await decompressMeshPayload(compressed.buffer);
  const statsLine = `MeshCompression: original=${compressed.originalSize}B compressed=${compressed.compressedSize}B ratio=${compressed.ratio.toFixed(2)}x type=${compressed.type}`;
  if (roundTripped.length <= maxChars) {
    return [
      `Tool called: ${toolName}`,
      `Arguments: ${JSON.stringify(args)}`,
      `Result: ${roundTripped}`,
      statsLine,
    ].join("\n");
  }
  // Too large to inline: keep a generous head and a short tail of the result.
  const head = roundTripped.slice(0, Math.floor(0.6 * maxChars));
  const tail = roundTripped.slice(-Math.floor(0.25 * maxChars));
  return [
    `Tool called: ${toolName}`,
    `Arguments: ${JSON.stringify(args)}`,
    "Result: [mesh-compressed preview]",
    `Head: ${head}`,
    `Tail: ${tail}`,
    statsLine,
    "ResultNote: full tool payload omitted from LLM context due to size",
  ].join("\n");
}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
/*
 * Minified Chrome DevTools Protocol "portal": launches Chrome with a fresh
 * temp profile on a free remote-debugging port, then drives it via a
 * hand-rolled WebSocket client (class c below).
 * - start(url, onEmit): finds a free port, locates a Chrome binary
 *   (MESH_CHROME_PATH, well-known macOS app paths, then `which`), waits up to
 *   ~10s (50 x 200ms) for the CDP HTTP endpoint, connects to a page target
 *   and enables Runtime/Page/DOM plus a `meshEmit` Runtime binding that
 *   forwards page events to the supplied callback.
 * - applyGhostStyles(): whitelists CSS property names ([a-zA-Z0-9-] only) and
 *   strips quote/backslash/newline/angle characters from values before
 *   injecting them through window.__mesh_apply_ghost.
 * - captureElementScreenshot(): measures the ".mesh-highlight" element in the
 *   page and screenshots its bounding rect via Page.captureScreenshot.
 * - stop(): closes the socket, SIGTERMs Chrome, and best-effort removes the
 *   temp profile directory.
 * NOTE(review): class c implements only a minimal WS subset — single
 *   unfragmented masked client frames, no masking handled on receive, and
 *   decodeFrame() returns null for payloads over 125 bytes with no
 *   partial-frame buffering, so large CDP responses appear to be dropped
 *   (handleData also swallows all errors); confirm this is acceptable for the
 *   commands used here.
 */
import{promises as e,existsSync as t}from"node:fs";import n from"node:net";import i from"node:os";import o from"node:path";import r from"node:crypto";import{spawn as s,spawnSync as a}from"node:child_process";export class MeshPortal{workspaceRoot;chromeProcess=null;cdpClient=null;userDataDir=null;port=0;constructor(e){this.workspaceRoot=e}async active(){return null!==this.cdpClient}async start(t,n){if(this.cdpClient)return;this.port=await this.findFreePort(),this.userDataDir=await e.mkdtemp(o.join(i.tmpdir(),"mesh-portal-"));const r=await this.findChromeExecutable(),a=["--disable-gpu","--no-first-run","--no-default-browser-check",`--remote-debugging-port=${this.port}`,`--user-data-dir=${this.userDataDir}`,t];this.chromeProcess=s(r,a,{stdio:["ignore","pipe","pipe"]}),await this.waitForCdp();const l=await this.createPageTarget();this.cdpClient=await c.connect(l.webSocketDebuggerUrl),await this.cdpClient.send("Runtime.enable"),await this.cdpClient.send("Page.enable"),await this.cdpClient.send("DOM.enable"),await this.cdpClient.send("Runtime.addBinding",{name:"meshEmit"}),n&&this.cdpClient.onBindingCalled(e=>{n({name:"meshEmit",payload:e.payload,executionContextId:e.executionContextId})})}async evaluate(e){return this.cdpClient?.send("Runtime.evaluate",{expression:e,awaitPromise:!0})}async applyGhostStyles(e){const t={};for(const[n,i]of Object.entries(e))/^[a-zA-Z0-9-]+$/.test(n)&&(t[n]=i.replace(/[\\"'`\r\n<>]/g,""));const n=JSON.stringify(t);await this.evaluate(`window.__mesh_apply_ghost(${n})`)}async captureElementScreenshot(){if(!this.cdpClient)throw new Error("Portal not active");const e=await this.evaluate('\n (function() {\n const el = document.querySelector(".mesh-highlight");\n if (!el) return null;\n const rect = el.getBoundingClientRect();\n return { x: rect.x, y: rect.y, width: rect.width, height: rect.height, deviceScaleFactor: window.devicePixelRatio };\n })()\n 
');if(!e||!e.result?.value)return"";const{x:t,y:n,width:i,height:o,deviceScaleFactor:r}=e.result.value;return(await this.cdpClient.send("Page.captureScreenshot",{format:"png",clip:{x:t,y:n,width:i,height:o,scale:1},fromSurface:!0})).data}async stop(){this.cdpClient?.close(),this.cdpClient=null,this.chromeProcess&&(this.chromeProcess.kill("SIGTERM"),this.chromeProcess=null),this.userDataDir&&(await e.rm(this.userDataDir,{recursive:!0,force:!0}).catch(()=>{}),this.userDataDir=null)}async findFreePort(){return new Promise(e=>{const t=n.createServer();t.listen(0,"127.0.0.1",()=>{const n=t.address().port;t.close(()=>e(n))})})}async findChromeExecutable(){const e=[process.env.MESH_CHROME_PATH,"/Applications/Google Chrome.app/Contents/MacOS/Google Chrome","/Applications/Chromium.app/Contents/MacOS/Chromium","google-chrome","chromium"].filter(Boolean);for(const n of e){if(t(n))return n;try{const{stdout:e}=a("which",[n],{encoding:"utf8"});if(e.trim())return e.trim()}catch{}}throw new Error("Chrome not found. 
Please install Google Chrome or set MESH_CHROME_PATH.")}async waitForCdp(){for(let e=0;e<50;e++){try{if((await fetch(`http://127.0.0.1:${this.port}/json/version`)).ok)return}catch{}await new Promise(e=>setTimeout(e,200))}throw new Error("Timed out waiting for Chrome CDP.")}async createPageTarget(){const e=await fetch(`http://127.0.0.1:${this.port}/json/list`),t=await e.json();return t.find(e=>"page"===e.type)||t[0]}}class c{socket;nextId=1;pending=new Map;onBindingHandler=null;constructor(e){this.socket=e,this.socket.on("data",e=>this.handleData(e))}static async connect(e){const t=new URL(e),i=n.createConnection(Number(t.port),t.hostname);await new Promise(e=>i.once("connect",e));const o=r.randomBytes(16).toString("base64");return i.write([`GET ${t.pathname} HTTP/1.1`,`Host: ${t.host}`,"Upgrade: websocket","Connection: Upgrade",`Sec-WebSocket-Key: ${o}`,"Sec-WebSocket-Version: 13","\r\n"].join("\r\n")),await new Promise(e=>i.once("data",e)),new c(i)}send(e,t={}){const n=this.nextId++,i=JSON.stringify({id:n,method:e,params:t});return this.socket.write(this.encodeFrame(i)),new Promise((e,t)=>this.pending.set(n,{resolve:e,reject:t}))}onBindingCalled(e){this.onBindingHandler=e}close(){this.socket.destroy()}handleData(e){try{const t=this.decodeFrame(e);if(!t)return;const n=JSON.parse(t);if(n.id&&this.pending.has(n.id)){const{resolve:e}=this.pending.get(n.id);this.pending.delete(n.id),e(n.result)}else"Runtime.bindingCalled"===n.method&&this.onBindingHandler?.(n.params)}catch{}}encodeFrame(e){const t=Buffer.from(e),n=Buffer.alloc(t.length+6);n[0]=129,n[1]=128|t.length;const i=r.randomBytes(4);i.copy(n,2);for(let e=0;e<t.length;e++)n[e+6]=t[e]^i[e%4];return n}decodeFrame(e){if(e.length<2)return null;const t=127&e[1];return t>125?null:e.slice(2,2+t).toString()}}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
/*
 * Model identifiers and catalog metadata for the CLI.
 * - DEFAULT_MODEL_ID is the primary chat model; DEFAULT_FALLBACK_MODEL_IDS
 *   are tried in order when it is unavailable.
 * - Each MODEL_CATALOG entry carries a display label, canonical value, CLI
 *   aliases (accepted on the command line), a short note, a provider tag,
 *   and per-1k-token pricing (inputPer1k / outputPer1k in USD).
 * NOTE(review): several nvidia-hosted rows carry 0/0 pricing — presumably
 *   "no published price here" rather than literally free; confirm before any
 *   cost accounting relies on these numbers.
 */
export const DEFAULT_MODEL_ID="google/gemini-2.5-flash";export const DEFAULT_FALLBACK_MODEL_IDS=["google/gemini-2.5-flash-lite","qwen/qwen3-coder-480b-a35b-instruct"];export const GEMINI_PRO_MODEL_ID="google/gemini-2.5-pro";export const GEMINI_FLASH_MODEL_ID=DEFAULT_MODEL_ID;export const GEMINI_FLASH_LITE_MODEL_ID="google/gemini-2.5-flash-lite";export const MODEL_CATALOG=[{label:"Gemini 2.5 Flash",value:"google/gemini-2.5-flash",aliases:["gemini-flash","gemini-2.5-flash","flash","default"],note:"default — fast, multimodal, 1M context",provider:"google",pricing:{inputPer1k:15e-5,outputPer1k:6e-4}},{label:"Gemini 2.5 Pro",value:"google/gemini-2.5-pro",aliases:["gemini-pro","gemini-2.5-pro","pro"],note:"most powerful stable",provider:"google",pricing:{inputPer1k:.00125,outputPer1k:.01}},{label:"Gemini 2.5 Flash Lite",value:"google/gemini-2.5-flash-lite",aliases:["gemini-lite","gemini-2.5-flash-lite","flash-lite","lite"],note:"cheapest, high throughput",provider:"google",pricing:{inputPer1k:75e-6,outputPer1k:3e-4}},{label:"Gemini 3 Flash Preview",value:"google/gemini-3-flash-preview",aliases:["gemini-3-flash","3-flash","gemini3flash"],note:"google next-gen fast preview",provider:"google",pricing:{inputPer1k:75e-6,outputPer1k:3e-4}},{label:"Gemini 3 Pro Preview",value:"google/gemini-3-pro-preview",aliases:["gemini-3-pro","3-pro","gemini3pro"],note:"google next-gen powerful preview",provider:"google",pricing:{inputPer1k:.0035,outputPer1k:.0105}},{label:"Gemini 3.1 Pro Preview",value:"google/gemini-3.1-pro-preview",aliases:["gemini-3.1-pro","3.1-pro","gemini31pro"],note:"google latest preview",provider:"google",pricing:{inputPer1k:.0035,outputPer1k:.0105}},{label:"Grok 4.20 Reasoning",value:"xai/grok-4.20-reasoning",aliases:["grok-reasoning","grok-4.2-reasoning"],note:"gcp xai preview reasoning",provider:"google",pricing:{inputPer1k:.005,outputPer1k:.015}},{label:"Grok 4.20",value:"xai/grok-4.20-non-reasoning",aliases:["grok","grok-4.2"],note:"gcp xai preview 
fast",provider:"google",pricing:{inputPer1k:.002,outputPer1k:.006}},{label:"Qwen 3 Coder 480B",value:"qwen/qwen3-coder-480b-a35b-instruct",aliases:["qwen3coder","qwen3-coder","qwen-coder-480b"],note:"nvidia code specialist",provider:"nvidia",pricing:{inputPer1k:0,outputPer1k:0}},{label:"Kimi K2.6",value:"moonshotai/kimi-k2.6",aliases:["kimi","kimi-k2.6","k2.6"],note:"nvidia deep reasoning",provider:"nvidia",pricing:{inputPer1k:0,outputPer1k:0}},{label:"Mistral Large 3 675B",value:"mistralai/mistral-large-3-675b-instruct-2512",aliases:["mistral-large","mistral-3","mistral675b"],note:"nvidia strong coding",provider:"nvidia",pricing:{inputPer1k:0,outputPer1k:0}},{label:"DeepSeek V4 Pro",value:"deepseek-ai/deepseek-v4-pro",aliases:["deepseek","deepseek-v4","deepseekv4"],note:"nvidia strong generalist",provider:"nvidia",pricing:{inputPer1k:0,outputPer1k:0}},{label:"Llama 4 Maverick 17B",value:"meta/llama-4-maverick-17b-128e-instruct",aliases:["llama4","llama-4-maverick","maverick"],note:"nvidia fast fallback",provider:"nvidia",pricing:{inputPer1k:0,outputPer1k:0}},{label:"NVIDIA Nemotron Ultra 253B",value:"nvidia/llama-3.1-nemotron-ultra-253b-v1",aliases:["nemotron","nemotron-ultra","nemotron253b"],note:"nvidia flagship",provider:"nvidia",pricing:{inputPer1k:0,outputPer1k:0}},{label:"GLM 5.1",value:"z-ai/glm-5.1",aliases:["glm5.1","glm-5.1"],note:"nvidia agentic excellence",provider:"nvidia",pricing:{inputPer1k:.0014,outputPer1k:.0044}}];
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
/*
 * Minified task router: classifies a prompt into a task type and selects
 * models plus verification gates.
 * - e(taskType, text): confidence heuristic — .86 when the type's keyword
 *   regex matches the prompt, otherwise .62. Assumes taskType is one of the
 *   table's keys; routeMeshTask() only produces those, so no guard is needed.
 * - routeMeshTask(prompt): lowercases the prompt, derives the task type via
 *   ordered regex checks (vision → security → debug → review → retrieval →
 *   planning → summarization → code, defaulting to planning), then assembles:
 *   required gates (retrieval_rerank/proof_bundle always, plus
 *   timeline/precrime for code-like work, vision regression for vision,
 *   safety+PII for security-adjacent text, diff/test-gap for review),
 *   rationale strings, and the primary/fallback chat models — the NVIDIA
 *   chat list for code/debug/review, otherwise the default Gemini model.
 *   `o` holds small sidecar model candidates; o[0] is always the sidecar.
 * NOTE(review): the imported binding `i` (DEFAULT_NVIDIA_CHAT_MODELS) is
 *   shadowed by function e's second parameter — correct but easy to misread.
 */
function e(e,i){return{code:/(implement|fix|patch|refactor|code|test|typecheck)/i,debug:/(crash|exception|stack|debug|runtime|sentry|trace)/i,review:/(review|pr|diff|regression|audit)/i,retrieval:/(rag|retrieval|search|symbol|reference|architecture)/i,vision:/(screenshot|preview|visual|ui|frontend|layout|inspect)/i,security:/(security|secret|token|pii|auth|vulnerability|sqli|redos)/i,runtime:/(runtime|trace|telemetry|production|sentry|otel)/i,planning:/(plan|roadmap|architecture|design|adr)/i,summarization:/(summarize|summary|compress|distill|capsule)/i}[e].test(i)?.86:.62}import{DEFAULT_NVIDIA_CHAT_MODELS as i,DEFAULT_NVIDIA_EMBEDDING_MODELS as r,DEFAULT_NVIDIA_PII_MODELS as t,DEFAULT_NVIDIA_SAFETY_MODELS as s,DEFAULT_NVIDIA_VISION_MODELS as a}from"./nvidia-services.js";import{DEFAULT_MODEL_ID as n}from"./model-catalog.js";const o=["microsoft/phi-4-mini-instruct","nvidia/nemotron-mini-4b-instruct","google/gemma-3-4b-it"];export function routeMeshTask(c){const d=c.toLowerCase(),u=function(e){return/(screenshot|preview|visual|ui|frontend|css|layout|browser|inspect)/i.test(e)?"vision":/(security|secret|token|pii|auth|vulnerability|xss|sqli|redos|guard|risk|risiko)/i.test(e)?"security":/(crash|exception|stack|runtime|trace|sentry|otel|datadog|hologram|debug)/i.test(e)?"debug":/(review|pr|pull request|diff|regression|test gap|audit)/i.test(e)?"review":/(search|rag|retrieval|find|where|symbol|reference|architecture|explain)/i.test(e)?"retrieval":/(plan|roadmap|design|architecture decision|adr)/i.test(e)?"planning":/(summarize|summary|compress|distill|capsule)/i.test(e)?"summarization":/(code|implement|fix|patch|refactor|build|test|typecheck)/i.test(e)?"code":"planning"}(d),l=function(e,i){const r=new 
Set(["retrieval_rerank","proof_bundle"]);return("code"===e||"debug"===e||/patch|fix|refactor|implement/i.test(i))&&(r.add("timeline_verification"),r.add("precrime_gate")),"vision"===e&&r.add("vision_regression_check"),("security"===e||/(auth|secret|token|pii|runtime|shell|exec)/i.test(i))&&(r.add("safety_guard"),r.add("pii_scan")),"review"===e&&(r.add("diff_review"),r.add("test_gap_check")),Array.from(r)}(u,d),m=function(e){const i=["Use code-specific embeddings for repository context before raw file reads."];return"vision"===e?["Use a vision model for screenshot evidence.",...i]:"security"===e?["Run safety and PII specialists before persisting or promoting risky artifacts.",...i]:"debug"===e?["Pair runtime evidence with timeline-first fixes.",...i]:"review"===e?["Require proof, test-gap, and risk checks before merge.",...i]:"code"===e?["Use the strongest code model for candidate changes and verify in timelines.",...i]:i}(u),p="code"===u||"debug"===u||"review"===u?i[0]:n;return{taskType:u,confidence:e(u,d),primaryChatModel:p,chatFallbacks:"code"===u||"debug"===u||"review"===u?i.filter(e=>e!==p):[n],sidecarModel:o[0],retrievalModels:[...r],visionModels:[...a],safetyModels:[...s],piiModels:[...t],requiredGates:l,rationale:m}}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
/*
 * Minified causal-autopsy engine: builds an evidence-backed explanation of a
 * failure ("investigate") or returns the last saved report ("status").
 * Minified helper map:
 * - e(symptom, evidence, suspects): causal-chain steps (symptom, runtime
 *   evidence, likely source, environment deltas, missing proof) with
 *   per-step confidence values.
 * - t(evidence, suspects): "missing invariants" recommendations (regression
 *   test, runtime evidence capture, spec-code contracts, proof bundle,
 *   shadow deploy), deduped via Set.
 * - o(_, suspects): next-action strings; its first parameter is unused.
 * - n(msg): empty runtime-evidence record; s(text): first line that looks
 *   like an error; r(text, root): stack frames via
 *   /\bat (file):(line):(col)/ capped at 25; i(file, root): normalizes a
 *   frame path relative to the workspace and drops node: internals.
 * - a(cwd, args): best-effort git invocation returning {ok, stdout};
 *   c(path): de-quotes and posix-ifies git-reported paths; u(text, max):
 *   keeps only the TAIL of long output (default 8000 chars).
 * Evidence sources combined by collectEvidence(): an optional failing
 * command (re-run through parseAllowedCommand with a clamped timeout), a
 * persisted runtime run under MESH_STATE_DIR/runtime/<sha256(workspace)>,
 * git status/diff/log, the .mesh ledger files (proof, self-defense,
 * precrime, shadow-deploy, spec-code, fluid-mesh, living-software), and
 * symptom keyword matches across up to 1000 workspace files.
 * Suspect scoring (capped at 12, confidence = min(.98, score/100)):
 * stack frame 70, workspace diff 28, symptom match 12+6/token, precrime
 * probability*45, self-defense 45/25, spec drift 22, config delta 18,
 * dependency delta 20; node_modules paths are excluded.
 * The final report is persisted to .mesh/causal-autopsy/last-autopsy.json.
 * NOTE(review): nextActions is computed as o(0, suspects) — harmless since
 *   o ignores its first argument, but the 0 instead of the symptom string is
 *   confusing; confirm it was not meant to pass the symptom.
 */
function e(e,t,o){const n=[];return n.push({step:"symptom",statement:e||t.runtime.errorSummary||"No explicit symptom supplied.",confidence:e?.9:.45}),"none"!==t.runtime.kind&&n.push({step:"runtime-evidence",statement:t.runtime.errorSummary,confidence:t.runtime.stackFrames.length>0?.9:.55,frames:t.runtime.stackFrames.slice(0,5)}),o[0]&&n.push({step:"likely-source",statement:`${o[0].file} is the strongest causal suspect.`,confidence:o[0].confidence,reasons:o[0].reasons}),((t.configDeltas??[]).length>0||(t.dependencyDeltas??[]).length>0)&&n.push({step:"environment-delta",statement:"Config or dependency changes may have altered behavior outside direct source edits.",confidence:.65,configDeltas:t.configDeltas,dependencyDeltas:t.dependencyDeltas}),t.ledgers.proof||n.push({step:"missing-proof",statement:"No proof-carrying change bundle exists for this incident.",confidence:.7}),n}function t(e,t){const o=[],n=t[0]?.file;return n&&o.push(`Add a focused regression that fails before the fix and covers ${n}.`),0===(e.runtime.stackFrames??[]).length&&o.push("Capture runtime stack evidence or a failing command before patching."),e.ledgers.specCode||o.push("Synthesize spec-code contracts so behavior drift is visible."),e.ledgers.proof||o.push("Generate a proof-carrying change bundle before promotion."),!0!==e.ledgers.shadowDeploy?.ok&&o.push("Run a shadow deploy or timeline verification command for the suspected path."),Array.from(new Set(o))}function o(e,t){const o=t[0]?.file;return[o?`Inspect ${o} first; it has the strongest combined evidence.`:"Collect a failing command or runtime runId to improve causal confidence.","Use a timeline for the candidate fix and run the failing command there.","Regenerate workspace.proof_carrying_change after the fix so review has intent, tests, risk, rollback, and ledger evidence."]}function n(e="No runtime run or failing command supplied."){return{kind:"none",errorSummary:e,stackFrames:[],causalChain:[]}}function s(e){return 
e.split(/\r?\n/g).map(e=>e.trim()).filter(Boolean).find(e=>/\b(?:error|exception|failed|assertion)\b/i.test(e))??"No explicit error found in captured output."}function r(e,t){const o=[],n=/\bat\s+(?:[^\s(]+\s+\()?(.+?):(\d+):(\d+)\)?/g;for(const s of e.matchAll(n)){const e=i(s[1],t);e&&o.push({file:e,line:Number(s[2]),column:Number(s[3]),raw:s[0]})}return o.slice(0,25)}function i(e,t){if(!e||e.startsWith("node:")||e.includes("node:internal"))return"";const o=e.replace(/^file:\/\//,"");if(d.isAbsolute(o)){const e=d.relative(t,o).split(d.sep).join("/");return e.startsWith("..")?o:e}return c(o)}async function a(e,t){try{const{stdout:o}=await b("git",t,{cwd:e,maxBuffer:1048576});return{ok:!0,stdout:o}}catch(e){return{ok:!1,stdout:String(e.stdout??"")}}}function c(e){return e.replace(/^"|"$/g,"").replace(/\\040/g," ").split(d.sep).join("/")}function u(e,t=8e3){return e.length>t?e.slice(-t):e}import{promises as m}from"node:fs";import d from"node:path";import l from"node:os";import f from"node:crypto";import{execFile as p}from"node:child_process";import{promisify as h}from"node:util";import{parseAllowedCommand as g}from"../command-safety.js";import{clampNumber as y,collectWorkspaceFiles as w,readJson as k,writeJson as v}from"./common.js";const b=h(p);export class CausalAutopsyEngine{workspaceRoot;constructor(e){this.workspaceRoot=e}async run(n={}){const s=String(n.action??"investigate").trim().toLowerCase();if("status"===s)return this.status();if("investigate"!==s)throw new Error("workspace.causal_autopsy action must be investigate|status");const r=String(n.symptom??"").trim(),i=String(n.runId??"").trim(),a=String(n.failingCommand??"").trim(),u=y(n.timeoutMs,12e4,1e3,6e5),m=await this.collectEvidence({symptom:r,runId:i,failingCommand:a,timeoutMs:u}),d=function(e){const t=new Map,o=(e,o,n)=>{const s=c(e);if(!s||s.includes("node_modules"))return;const 
r=t.get(s)??{file:s,score:0,confidence:0,reasons:[]};r.score+=o,r.reasons.includes(n)||r.reasons.push(n),t.set(s,r)};for(const t of e.runtime.stackFrames??[])o(t.file,70,"Appears in runtime stack evidence.");for(const t of e.changedFiles??[])o(t,28,"Changed in the current workspace diff.");for(const t of e.symptomMatches??[])o(t.file,12+6*t.matches.length,`Matches symptom token(s): ${t.matches.join(", ")}.`);for(const t of e.ledgers.precrime?.predictions??[])o(t.file,Math.ceil(45*Number(t.probability??0)),"Precrime flagged this file as likely failure surface.");for(const t of e.ledgers.selfDefense?.findings??[])o(t.file,"confirmed"===t.status?45:25,`Self-defense finding: ${t.status}.`);for(const t of e.ledgers.specCode?.drift??[])t.contract?.file&&o(t.contract.file,22,`Spec-code drift: ${t.kind}.`);for(const t of e.configDeltas??[])o(t,18,"Configuration delta can change runtime behavior.");for(const t of e.dependencyDeltas??[])o(t,20,"Dependency delta can change behavior outside source diff.");return Array.from(t.values()).map(e=>({...e,confidence:Math.min(.98,Number((e.score/100).toFixed(2)))})).sort((e,t)=>t.score-e.score).slice(0,12)}(m),l=function(e,t,o){const n=[{id:"symptom",type:"symptom",label:e||t.runtime.errorSummary||"unspecified symptom"}],s=[];"none"!==t.runtime.kind&&(n.push({id:"runtime",type:"runtime",label:t.runtime.errorSummary}),s.push({from:"runtime",to:"symptom",type:"observes",confidence:t.runtime.stackFrames.length>0?.9:.55}));for(const e of o.slice(0,6)){const o=`file:${e.file}`;n.push({id:o,type:"file",label:e.file,score:e.score,reasons:e.reasons}),s.push({from:o,to:"none"!==t.runtime.kind?"runtime":"symptom",type:"may_cause",confidence:e.confidence})}return t.ledgers.proof&&(n.push({id:"proof",type:"proof",label:`proof:${t.ledgers.proof.proofId??"current"}`,verdict:t.ledgers.proof.verdict}),s.push({from:"proof",to:"symptom",type:"constrains_debugging",confidence:.7})),{nodes:n,edges:s}}(r,m,d),f={ok:!0,action:s,generatedAt:(new 
Date).toISOString(),incident:{symptom:r||m.runtime.errorSummary||"No explicit symptom supplied.",runId:i||void 0,failingCommand:a||void 0},runtimeEvidence:m.runtime,causalChain:e(r,m,d),suspects:d,graph:l,configDeltas:m.configDeltas,dependencyDeltas:m.dependencyDeltas,missingInvariants:t(m,d),nextActions:o(0,d),autopsyPath:".mesh/causal-autopsy/last-autopsy.json"};return await v(this.ledgerPath(),f),f}async status(){return k(this.ledgerPath(),{ok:!0,action:"status",message:"No causal autopsy exists yet. Run action=investigate."})}async collectEvidence(e){const t=e.failingCommand?await this.runFailingCommand(e.failingCommand,e.timeoutMs):e.runId?await this.readRuntimeRun(e.runId):n(),o=await this.gitEvidence(),s=await this.ledgerEvidence(),r=await this.symptomMatches(e.symptom),i=o.changedFiles;return{runtime:t,git:o,ledgers:s,symptomMatches:r,changedFiles:i,configDeltas:i.filter(e=>function(e){return/(^|\/)(\.env|[^/]*(?:config|rc)\.[cm]?[jt]s|tsconfig\.json|vite\.config|next\.config|wrangler\.toml)$/i.test(e)}(e)),dependencyDeltas:i.filter(e=>function(e){return/(^|\/)(package(?:-lock)?\.json|pnpm-lock\.yaml|yarn\.lock|bun\.lockb)$/i.test(e)}(e))}}async runFailingCommand(e,t){const o=g(e);try{const{stdout:n,stderr:i}=await b(o.command,o.args,{cwd:this.workspaceRoot,timeout:t,maxBuffer:1048576}),a=`${i}\n${n}`;return{kind:"command-run",command:e,ok:!0,exitCode:0,errorSummary:s(a),stackFrames:r(a,this.workspaceRoot),causalChain:["Command completed successfully; autopsy should be used to explain non-crash symptoms."],stdout:u(n),stderr:u(i)}}catch(t){const o=String(t.stdout??""),n=String(t.stderr??t.message??""),i=`${n}\n${o}`;return{kind:"command-run",command:e,ok:!1,exitCode:"number"==typeof t.code?t.code:1,errorSummary:s(i),stackFrames:r(i,this.workspaceRoot),causalChain:[`Command failed with exit code ${"number"==typeof t.code?t.code:1}.`],stdout:u(o),stderr:u(n)}}}async readRuntimeRun(e){const t=e.replace(/[^a-zA-Z0-9._-]+/g,"");if(!t)return n();const 
o=d.join(function(e){const t=f.createHash("sha256").update(d.resolve(e)).digest("hex").slice(0,24),o=process.env.MESH_STATE_DIR||d.join(l.homedir(),".config","mesh");return d.join(o,"runtime",t)}(this.workspaceRoot),t),a=await k(d.join(o,"run.json"),null);if(!a)return n(`Runtime run ${t} was not found.`);const c=await k(a.autopsyPath??d.join(o,"autopsy.json"),null),p=await m.readFile(a.stdoutPath,"utf8").catch(()=>""),h=await m.readFile(a.stderrPath,"utf8").catch(()=>"");if(c)return{kind:"runtime-run",runId:t,command:a.command,ok:"exited"===a.status,exitCode:a.exitCode,status:a.status,errorSummary:String(c.errorSummary??s(`${h}\n${p}`)),stackFrames:(g=c.frames??[],y=this.workspaceRoot,g.map(e=>({file:i(String(e.file??""),y),line:Number(e.line??0),column:"number"==typeof e.column?e.column:void 0,raw:String(e.raw??"")})).filter(e=>Boolean(e.file))),causalChain:Array.isArray(c.causalChain)?c.causalChain.map(String):[],stdout:u(p),stderr:u(h)};var g,y;const w=`${h}\n${p}`;return{kind:"runtime-run",runId:t,command:a.command,ok:"exited"===a.status,exitCode:a.exitCode,status:a.status,errorSummary:s(w),stackFrames:r(w,this.workspaceRoot),causalChain:["Runtime autopsy report was missing; using captured logs."],stdout:u(p),stderr:u(h)}}async gitEvidence(){const e=await a(this.workspaceRoot,["status","--porcelain"]),t=await a(this.workspaceRoot,["diff","--stat","HEAD"]),o=await a(this.workspaceRoot,["log","--oneline","--max-count=8"]);return{gitAvailable:e.ok||t.ok||o.ok,status:e.stdout.trim(),diffStat:t.stdout.trim(),recentCommits:o.stdout.split(/\r?\n/g).map(e=>e.trim()).filter(Boolean),changedFiles:(n=e.stdout,n.split(/\r?\n/g).map(e=>e.trimEnd()).filter(Boolean).map(e=>{const t=e.slice(3).trim();return c(t.includes(" -> ")?t.split(" -> ").pop()??t:t)}).filter(Boolean))};var n}async ledgerEvidence(){return{proof:await k(d.join(this.workspaceRoot,".mesh","proof-carrying-change","proof.json"),null),selfDefense:await 
k(d.join(this.workspaceRoot,".mesh","security","last-self-defense.json"),null),precrime:await k(d.join(this.workspaceRoot,".mesh","precrime","predictions.json"),null),shadowDeploy:await k(d.join(this.workspaceRoot,".mesh","shadow-deploy","last-ledger.json"),null),specCode:await k(d.join(this.workspaceRoot,".mesh","spec-code","contracts.json"),null),fluidMesh:await k(d.join(this.workspaceRoot,".mesh","fluid-mesh","capabilities.json"),null),livingSoftware:await k(d.join(this.workspaceRoot,".mesh","living-software","pulse.json"),null)}}async symptomMatches(e){const t=function(e){return Array.from(new Set(e.toLowerCase().split(/[^a-z0-9_/-]+/g).map(e=>e.trim()).filter(e=>e.length>=4&&!["with","from","that","this","when","then"].includes(e)))).slice(0,12)}(e);if(0===t.length)return[];const o=await w(this.workspaceRoot,{maxFiles:1e3}),n=[];for(const e of o){const o=`${e}\n${await m.readFile(d.join(this.workspaceRoot,e),"utf8").catch(()=>"")}`.toLowerCase(),s=t.filter(e=>o.includes(e));s.length>0&&n.push({file:e,matches:s})}return n.slice(0,50)}ledgerPath(){return d.join(this.workspaceRoot,".mesh","causal-autopsy","last-autopsy.json")}}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
import { promises as fsp } from "node:fs";
import path from "node:path";

// Directory segments that are never scanned for workspace files.
const SKIPPED_DIRS = /(^|\/)(\.git|node_modules|dist|coverage|\.next|\.turbo|\.cache|\.mesh)(\/|$)/;

/**
 * Depth-first walk under `dir`, pushing workspace-relative POSIX paths of
 * files whose extension is in `extensions` into `found`, stopping once
 * `limit` entries have been collected.
 *
 * @param {string} root - Workspace root (paths are reported relative to it).
 * @param {string} dir - Directory currently being scanned.
 * @param {string[]} found - Accumulator of relative POSIX paths (mutated).
 * @param {Set<string>} extensions - File extensions to keep (with leading dot).
 * @param {number} limit - Maximum number of files to collect.
 */
async function walk(root, dir, found, extensions, limit) {
  if (found.length >= limit) return;
  // Best effort: an unreadable directory is treated as empty.
  const entries = await fsp.readdir(dir, { withFileTypes: true }).catch(() => []);
  for (const entry of entries) {
    if (found.length >= limit) return;
    const absolute = path.join(dir, entry.name);
    const relative = toPosix(path.relative(root, absolute));
    if (SKIPPED_DIRS.test(relative)) continue;
    if (entry.isDirectory()) {
      await walk(root, absolute, found, extensions, limit);
    } else if (entry.isFile() && extensions.has(path.extname(entry.name))) {
      found.push(relative);
    }
  }
}

/**
 * Collect source files under `root`, skipping vendored/build directories.
 *
 * @param {string} root - Workspace root directory.
 * @param {{extensions?: string[], maxFiles?: number}} [options]
 * @returns {Promise<string[]>} Sorted workspace-relative POSIX paths.
 */
export async function collectWorkspaceFiles(root, options = {}) {
  const extensions = new Set(options.extensions ?? [".ts", ".tsx", ".js", ".jsx", ".mjs", ".cjs"]);
  // Clamp the cap into [1, 5000]; default is 500.
  const limit = Math.max(1, Math.min(options.maxFiles ?? 500, 5000));
  const found = [];
  await walk(root, root, found, extensions, limit);
  return found.sort((a, b) => a.localeCompare(b));
}

/**
 * Read and parse a JSON file; return `fallback` on any read/parse failure.
 */
export async function readJson(filePath, fallback) {
  try {
    return JSON.parse(await fsp.readFile(filePath, "utf8"));
  } catch {
    return fallback;
  }
}

/**
 * Serialize `value` as compact JSON to `filePath`, creating parent directories.
 */
export async function writeJson(filePath, value) {
  await fsp.mkdir(path.dirname(filePath), { recursive: true });
  await fsp.writeFile(filePath, JSON.stringify(value), "utf8");
}

/**
 * Append `value` as one JSON line (JSONL) to `filePath`, creating parent directories.
 */
export async function appendJsonl(filePath, value) {
  await fsp.mkdir(path.dirname(filePath), { recursive: true });
  await fsp.appendFile(filePath, JSON.stringify(value) + "\n", "utf8");
}

/**
 * Convert a platform-specific path to POSIX separators.
 */
export function toPosix(p) {
  return p.split(path.sep).join("/");
}

/**
 * Coerce `value` to an integer clamped into [min, max]; return `fallback`
 * when the coerced value is not finite.
 */
export function clampNumber(value, fallback, min, max) {
  const parsed = Number(value);
  return Number.isFinite(parsed) ? Math.max(min, Math.min(max, Math.trunc(parsed))) : fallback;
}

/**
 * 1-based line number of character `offset` within `text` (negative offsets
 * are treated as 0).
 */
export function lineNumberAt(text, offset) {
  return text.slice(0, Math.max(0, offset)).split(/\r?\n/g).length;
}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
import { promises as fsp } from "node:fs";
import path from "node:path";
import { collectWorkspaceFiles, lineNumberAt, readJson, writeJson } from "./common.js";

// Workspace-relative location of the symbol memory (reported in results).
const MEMORY_RELATIVE_PATH = ".mesh/conversations/symbol-memory.json";

// Matches top-level-looking declarations: optional export/async, a declaration
// keyword, then an identifier.
const DECLARATION_RE = /\b(?:export\s+)?(?:async\s+)?(function|class|const|let|var|interface|type)\s+([A-Za-z_$][\w$]*)/g;

/**
 * Extract declared symbols from one source file.
 *
 * @param {string} file - Workspace-relative path of the file.
 * @param {string} source - File contents.
 * @returns {Array<{symbol:string,file:string,line:number,kind:string,notes:any[]}>}
 */
function extractSymbols(file, source) {
  const found = [];
  for (const hit of source.matchAll(DECLARATION_RE)) {
    found.push({
      symbol: hit[2],
      file,
      line: lineNumberAt(source, hit.index ?? 0),
      kind: hit[1],
      notes: [],
    });
  }
  return found;
}

/**
 * Rank memory entries against a query: exact symbol match 100, symbol
 * substring 40, file substring 20, plus 10 per note containing the query.
 * Entries scoring 0 are dropped; the rest are returned best-first.
 */
function rankSymbols(symbols, query) {
  const needle = query.toLowerCase();
  const scored = [...symbols].map((item) => {
    let score = 0;
    if (item.symbol.toLowerCase() === needle) score += 100;
    if (item.symbol.toLowerCase().includes(needle)) score += 40;
    if (item.file.toLowerCase().includes(needle)) score += 20;
    for (const entry of item.notes) {
      if (entry.note.toLowerCase().includes(needle)) score += 10;
    }
    return { item, score };
  });
  return scored
    .filter((entry) => entry.score > 0)
    .sort((a, b) => b.score - a.score)
    .map((entry) => entry.item);
}

/**
 * Render a one-line answer for the strongest match, including up to three
 * of its recorded notes.
 */
function formatAnswer(query, matches) {
  const best = matches[0];
  const noteText = best.notes.slice(0, 3).map((entry) => ` note: ${entry.note}`).join("");
  return `For "${query}", the strongest match is ${best.symbol} (${best.kind}) in ${best.file}:${best.line}.${noteText}`;
}

/**
 * Symbol-level "memory" over a workspace: maps declared symbols to files/lines,
 * lets users attach notes, and answers fuzzy symbol queries from that memory.
 * State lives in .mesh/conversations/symbol-memory.json.
 */
export class ConversationalCodebaseEngine {
  workspaceRoot;

  constructor(workspaceRoot) {
    this.workspaceRoot = workspaceRoot;
  }

  /**
   * Dispatch on `options.action` (ask|record|map|status; default "ask").
   * For "ask", ranks the symbol memory (rebuilding it if empty) against
   * `options.query`/`options.symbol`, persists the answer to
   * .mesh/conversations/last-answer.json, and returns it.
   */
  async run(options = {}) {
    const action = String(options.action ?? "ask").trim().toLowerCase();
    if (action === "status") return this.status();
    if (action === "record") return this.record(options);
    if (action === "map") return this.map();
    if (action !== "ask") {
      throw new Error("workspace.conversational_codebase action must be ask|record|map|status");
    }
    const query = String(options.query ?? options.symbol ?? "").trim();
    if (!query) {
      throw new Error("workspace.conversational_codebase ask requires query or symbol");
    }
    const memory = await this.loadMemory();
    const pool = memory.symbols.length > 0 ? memory.symbols : (await this.rebuildMemory()).symbols;
    const matches = rankSymbols(pool, query).slice(0, 8);
    const answer = matches.length === 0
      ? `I do not have a symbol-level memory for "${query}" yet. Run action=map to refresh the codebase memory.`
      : formatAnswer(query, matches);
    const result = { ok: true, action, query, answer, matches, memoryPath: MEMORY_RELATIVE_PATH };
    await writeJson(path.join(this.workspaceRoot, ".mesh", "conversations", "last-answer.json"), result);
    return result;
  }

  /**
   * Attach a user note to a symbol (creating a placeholder entry if the
   * symbol is not yet mapped). Keeps at most 25 notes, newest first.
   */
  async record(options) {
    const symbol = String(options.symbol ?? "").trim();
    const note = String(options.note ?? "").trim();
    if (!symbol || !note) {
      throw new Error("workspace.conversational_codebase record requires symbol and note");
    }
    const memory = await this.loadMemory();
    let entry = memory.symbols.find((item) => item.symbol === symbol);
    if (!entry) {
      entry = { symbol, file: "unknown", line: 0, kind: "unknown", notes: [] };
      memory.symbols.push(entry);
    }
    entry.notes.unshift({ at: new Date().toISOString(), note, source: "user" });
    entry.notes = entry.notes.slice(0, 25);
    await writeJson(this.memoryPath(), memory);
    return { ok: true, action: "record", symbol, notes: entry.notes.length, memoryPath: MEMORY_RELATIVE_PATH };
  }

  /** Rebuild the symbol memory from the workspace and report its size. */
  async map() {
    const rebuilt = await this.rebuildMemory();
    return { ok: true, action: "map", symbols: rebuilt.symbols.length, memoryPath: MEMORY_RELATIVE_PATH };
  }

  /** Report the current memory size without touching the workspace. */
  async status() {
    const memory = await this.loadMemory();
    return { ok: true, action: "status", symbols: memory.symbols.length, memoryPath: MEMORY_RELATIVE_PATH };
  }

  /**
   * Re-scan up to 1500 workspace files for declarations, carrying over
   * existing notes keyed by "file:symbol", then persist the fresh memory.
   */
  async rebuildMemory() {
    const previous = await this.loadMemory();
    const notesByKey = new Map(previous.symbols.map((item) => [`${item.file}:${item.symbol}`, item.notes]));
    const symbols = [];
    const files = await collectWorkspaceFiles(this.workspaceRoot, { maxFiles: 1500 });
    for (const file of files) {
      // Unreadable files are treated as empty rather than aborting the scan.
      const source = await fsp.readFile(path.join(this.workspaceRoot, file), "utf8").catch(() => "");
      for (const found of extractSymbols(file, source)) {
        found.notes = notesByKey.get(`${found.file}:${found.symbol}`) ?? [];
        symbols.push(found);
      }
    }
    const memory = { symbols, updatedAt: new Date().toISOString() };
    await writeJson(this.memoryPath(), memory);
    return memory;
  }

  /** Load the persisted memory, defaulting to an empty symbol list. */
  async loadMemory() {
    return readJson(this.memoryPath(), { symbols: [] });
  }

  /** Absolute path of the persisted symbol memory file. */
  memoryPath() {
    return path.join(this.workspaceRoot, ".mesh", "conversations", "symbol-memory.json");
  }
}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
// EphemeralExecutionEngine — manages a demo "zero-source" HTTP server.
//
// action=status: reads .mesh/ephemeral-execution/last-run.json and spreads it
// over a {status:"not_running"} fallback.
//
// action=start: records a ledger entry (startedAt/port/specPath/status) via
// writeJson, writes a self-contained Node HTTP server script to
// .mesh/ephemeral-execution/server.js, spawns it detached with stdio:"ignore",
// and unref()s the child so the parent can exit; returns the child pid and
// ledger path. Port defaults to 3000 (`s.port ?? 3e3`); specPath defaults to
// "openapi.yaml" and is recorded but never read here. Any other action throws.
//
// The generated server builds each response handler at request time with
// `new AsyncFunction(...)` — an eval-equivalent that executes generated code.
// Here the compiled body is a fixed template string, but NOTE(review): if the
// template is ever derived from external input (the "Mesh IPC / spec" path the
// embedded comments describe), that input must be treated as untrusted.
//
// `callTool` is stored by the constructor but unused in this file — presumably
// for the full LLM-backed implementation; TODO confirm against callers.
// The large template literal below is the *source text* of the child server
// (its \n sequences are escapes inside the literal, not raw newlines).
import{promises as e}from"node:fs";import t from"node:path";import{spawn as n}from"node:child_process";import{writeJson as o,readJson as r}from"./common.js";export class EphemeralExecutionEngine{workspaceRoot;callTool;constructor(e,t){this.workspaceRoot=e,this.callTool=t}async run(s={}){const a=String(s.action??"start").trim().toLowerCase();if("status"===a){const e=".mesh/ephemeral-execution/last-run.json";return{ok:!0,action:"status",...await r(t.join(this.workspaceRoot,e),{status:"not_running"})}}if("start"===a){const r=Number(s.port??3e3),i=String(s.specPath??"openapi.yaml"),c=".mesh/ephemeral-execution/last-run.json",l=t.join(this.workspaceRoot,".mesh","ephemeral-execution");await e.mkdir(l,{recursive:!0}),await o(t.join(this.workspaceRoot,c),{startedAt:(new Date).toISOString(),port:r,specPath:i,status:"listening"});const u=`\nconst http = require("node:http");\nconst { performance } = require("node:perf_hooks");\n\nconst server = http.createServer(async (req, res) => {\n const start = performance.now();\n const route = req.method + " " + req.url;\n \n // 1. Intercept Request\n console.log(\`[EPHEMERAL] Intercepted \${route}. No source code exists.\`);\n \n // 2. Pause & JIT Compile (Simulated LLM call taking ~50-100ms)\n // In a full implementation, this calls Mesh IPC to stream the AST based on the spec\n await new Promise(r => setTimeout(r, 60));\n \n // 3. Hallucinate Function in V8 Memory\n const hallucinatedAst = \`\n return {\n status: 200,\n body: {\n message: "Hello from the Ephemeral Void",\n route: "\${route}",\n timestamp: "\${new Date().toISOString()}",\n note: "This function was generated 1ms ago and will be destroyed in 1ms. Technical debt is zero."\n }\n };\n \`;\n \n // 4. Execute Zero-Source Code\n const AsyncFunction = Object.getPrototypeOf(async function(){}).constructor;\n const ephemeralFn = new AsyncFunction('req', hallucinatedAst);\n \n try {\n const result = await ephemeralFn(req);\n \n // 5. 
Respond\n res.writeHead(result.status, { "Content-Type": "application/json" });\n res.end(JSON.stringify(result.body));\n \n // 6. Delete from Memory (GC will collect it as references are lost)\n const end = performance.now();\n console.log(\`[EPHEMERAL] Request served in \${Math.round(end - start)}ms. Function destroyed.\`);\n } catch (err) {\n res.writeHead(500);\n res.end(JSON.stringify({ error: err.message }));\n }\n});\n\nserver.listen(${r}, () => {\n console.log(\`[EPHEMERAL] Zero-Source Execution Engine listening on port ${r}\`);\n});\n`,p=t.join(l,"server.js");await e.writeFile(p,u,"utf8");const m=n(process.execPath,[p],{cwd:this.workspaceRoot,detached:!0,stdio:"ignore"});return m.unref(),{ok:!0,action:a,status:"started",port:r,pid:m.pid,message:"Zero-Source Ephemeral Engine started. The server has no code. It will JIT compile handlers per request and destroy them.",ledgerPath:c}}throw new Error("workspace.ephemeral_execution action must be start or status")}}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
import { promises as fsp } from "node:fs";
import path from "node:path";
import { collectWorkspaceFiles, lineNumberAt, writeJson, readJson } from "./common.js";

// Workspace-relative location of the capability manifest (reported in results).
const MANIFEST_RELATIVE_PATH = ".mesh/fluid-mesh/capabilities.json";

// app/router/server HTTP route registrations, e.g. app.get("/x", ...).
const ROUTE_RE = /\b(?:app|router|server)\.(get|post|put|patch|delete|all)\(\s*(["'`])([^"'`]+)\2/g;
// Tool registrations named workspace.* / agent.* / runtime.*.
const TOOL_NAME_RE = /name:\s*(["'`])(workspace\.[A-Za-z0-9_.-]+|agent\.[A-Za-z0-9_.-]+|runtime\.[A-Za-z0-9_.-]+)\1/g;
// Exported function/class/const declarations.
const EXPORT_RE = /\bexport\s+(?:async\s+)?(?:function|class|const)\s+([A-Za-z_$][\w$]*)/g;
// Module specifiers in `from "..."` clauses.
const IMPORT_SOURCE_RE = /from\s+(["'`])([^"'`]+)\1/g;

/**
 * Normalize arbitrary text into a lowercase, dash-separated id of at most
 * 120 characters.
 */
function slugify(text) {
  return text.toLowerCase().replace(/[^a-z0-9]+/g, "-").replace(/^-+|-+$/g, "").slice(0, 120);
}

/**
 * List up to 20 module specifiers imported by `source`.
 */
function moduleImports(source) {
  return Array.from(source.matchAll(IMPORT_SOURCE_RE)).map((hit) => hit[2]).slice(0, 20);
}

/**
 * Extract the capabilities one source file provides: HTTP routes, named
 * tools, and exported symbols — in that order. Each capability carries its
 * own copy of the file's import list as `dependsOn`.
 *
 * @param {string} file - Workspace-relative path of the file.
 * @param {string} source - File contents.
 */
function extractCapabilities(file, source) {
  const capabilities = [];
  const imports = moduleImports(source);
  for (const hit of source.matchAll(ROUTE_RE)) {
    const method = hit[1].toUpperCase();
    capabilities.push({
      id: slugify(`route:${hit[1]}:${hit[3]}:${file}`),
      name: `${method} ${hit[3]}`,
      file,
      line: lineNumberAt(source, hit.index ?? 0),
      kind: "route",
      provides: [`http:${method}:${hit[3]}`],
      dependsOn: [...imports],
    });
  }
  for (const hit of source.matchAll(TOOL_NAME_RE)) {
    capabilities.push({
      id: slugify(`tool:${hit[2]}`),
      name: hit[2],
      file,
      line: lineNumberAt(source, hit.index ?? 0),
      kind: "tool",
      provides: [`tool:${hit[2]}`],
      dependsOn: [...imports],
    });
  }
  for (const hit of source.matchAll(EXPORT_RE)) {
    capabilities.push({
      id: slugify(`export:${file}:${hit[1]}`),
      name: hit[1],
      file,
      line: lineNumberAt(source, hit.index ?? 0),
      kind: "export",
      provides: [`symbol:${hit[1]}`],
      dependsOn: [...imports],
    });
  }
  return capabilities;
}

/**
 * Maps a workspace into a capability manifest (package.json scripts, HTTP
 * routes, registered tools, exported symbols) persisted under
 * .mesh/fluid-mesh/capabilities.json.
 */
export class FluidMeshEngine {
  workspaceRoot;

  constructor(workspaceRoot) {
    this.workspaceRoot = workspaceRoot;
  }

  /**
   * Dispatch on `options.action` (map|status; default "map"). For "map",
   * scans package.json scripts plus up to 1500 workspace files, persists the
   * manifest, and returns it.
   */
  async run(options = {}) {
    const action = String(options.action ?? "map").trim().toLowerCase();
    if (action === "status") return this.status();
    if (action !== "map") {
      throw new Error("workspace.fluid_mesh action must be map|status");
    }
    const capabilities = [];
    capabilities.push(...await this.packageScripts());
    const files = await collectWorkspaceFiles(this.workspaceRoot, { maxFiles: 1500 });
    for (const file of files) {
      // Unreadable files are treated as empty rather than aborting the scan.
      const source = await fsp.readFile(path.join(this.workspaceRoot, file), "utf8").catch(() => "");
      capabilities.push(...extractCapabilities(file, source));
    }
    const manifest = {
      ok: true,
      action,
      generatedAt: new Date().toISOString(),
      capabilities,
      graph: capabilities.map((cap) => ({ id: cap.id, provides: cap.provides, dependsOn: cap.dependsOn })),
      exportable: capabilities.filter((cap) => cap.kind === "script" || cap.kind === "export").length,
      manifestPath: MANIFEST_RELATIVE_PATH,
    };
    await writeJson(this.manifestPath(), manifest);
    return manifest;
  }

  /**
   * Return the persisted manifest, or a placeholder telling the caller to
   * run action=map when none exists.
   */
  async status() {
    return readJson(this.manifestPath(), {
      ok: true,
      action: "status",
      capabilities: [],
      message: "No fluid mesh capability manifest exists yet. Run action=map.",
    });
  }

  /**
   * Turn package.json scripts into "script" capabilities; `dependsOn` lists
   * the known tool names mentioned in each command. Missing or unparsable
   * package.json yields an empty list.
   */
  async packageScripts() {
    const raw = await fsp.readFile(path.join(this.workspaceRoot, "package.json"), "utf8").catch(() => "");
    if (!raw) return [];
    let pkg;
    try {
      pkg = JSON.parse(raw);
    } catch {
      pkg = {};
    }
    return Object.entries(pkg.scripts ?? {}).map(([name, command]) => ({
      id: slugify(`script:${name}`),
      name,
      file: "package.json",
      line: 1,
      kind: "script",
      provides: [`script:${name}`],
      dependsOn: String(command).match(/\b(tsc|node|npm|vitest|jest|tsx|wrangler)\b/g) ?? [],
    }));
  }

  /** Absolute path of the persisted capability manifest. */
  manifestPath() {
    return path.join(this.workspaceRoot, ".mesh", "fluid-mesh", "capabilities.json");
  }
}
|