@zibby/cli 0.3.0 → 0.4.1

Files changed (38)
  1. package/README.md +117 -16
  2. package/dist/bin/zibby.js +3 -3
  3. package/dist/commands/init.js +184 -93
  4. package/dist/commands/memory.js +41 -23
  5. package/dist/commands/template.js +4 -4
  6. package/dist/commands/uninstall.js +15 -14
  7. package/dist/commands/workflows/agent-helpers.js +7 -7
  8. package/dist/commands/workflows/cloud-creds-check.js +2 -0
  9. package/dist/commands/workflows/deploy.js +47 -46
  10. package/dist/commands/workflows/generate.js +60 -37
  11. package/dist/commands/workflows/run-local.js +10 -10
  12. package/dist/commands/workflows/run.js +5 -5
  13. package/dist/commands/workflows/start.js +6 -6
  14. package/dist/package.json +4 -4
  15. package/dist/templates/zibby-workflow-claude/agents-md-block.md +65 -5
  16. package/dist/templates/zibby-workflow-claude/claude/agents/zibby-test-author.md +16 -1
  17. package/dist/templates/zibby-workflow-claude/claude/agents/zibby-workflow-builder.md +22 -2
  18. package/dist/templates/zibby-workflow-claude/claude/commands/zibby-add-node.md +1 -1
  19. package/dist/templates/zibby-workflow-claude/claude/commands/zibby-debug.md +1 -1
  20. package/dist/templates/zibby-workflow-claude/claude/commands/zibby-delete.md +1 -1
  21. package/dist/templates/zibby-workflow-claude/claude/commands/zibby-deploy.md +24 -14
  22. package/dist/templates/zibby-workflow-claude/claude/commands/zibby-list.md +2 -2
  23. package/dist/templates/zibby-workflow-claude/claude/commands/zibby-memory-cost.md +39 -0
  24. package/dist/templates/zibby-workflow-claude/claude/commands/zibby-memory-pull.md +47 -0
  25. package/dist/templates/zibby-workflow-claude/claude/commands/zibby-memory-remote-use-hosted.md +61 -0
  26. package/dist/templates/zibby-workflow-claude/claude/commands/zibby-memory-stats.md +38 -0
  27. package/dist/templates/zibby-workflow-claude/claude/commands/zibby-static-ip.md +8 -6
  28. package/dist/templates/zibby-workflow-claude/claude/commands/zibby-tail.md +1 -1
  29. package/dist/templates/zibby-workflow-claude/claude/commands/zibby-test-debug.md +2 -2
  30. package/dist/templates/zibby-workflow-claude/claude/commands/zibby-test-generate.md +1 -1
  31. package/dist/templates/zibby-workflow-claude/claude/commands/zibby-test-run.md +3 -2
  32. package/dist/templates/zibby-workflow-claude/claude/commands/zibby-test-write.md +1 -1
  33. package/dist/templates/zibby-workflow-claude/claude/commands/zibby-trigger.md +10 -6
  34. package/dist/templates/zibby-workflow-claude/cursor/rules/zibby-workflows.mdc +76 -13
  35. package/dist/templates/zibby-workflow-claude/manifest.json +5 -1
  36. package/dist/utils/apply-memory-sync-config.js +1 -0
  37. package/dist/utils/hosted-memory-sync.js +1 -0
  38. package/package.json +4 -4
package/dist/commands/workflows/run.js CHANGED
@@ -1,6 +1,6 @@
  #!/usr/bin/env node
- import{mkdirSync as j,writeFileSync as oe,existsSync as w,readFileSync as K}from"fs";import{join as g,dirname as te,resolve as G}from"path";import{pathToFileURL as x}from"url";import{execSync as ne,spawn as W}from"node:child_process";import{SQSClient as Y,SendMessageCommand as X}from"@aws-sdk/client-sqs";var P=null;function V(){return P||(P=new Y({region:process.env.AWS_REGION||"ap-southeast-2"})),P}async function L(s,{status:e,error:o}){let{EXECUTION_ID:t,SQS_AUTH_TOKEN:c,PROGRESS_API_URL:i,PROGRESS_QUEUE_URL:r,PROJECT_API_TOKEN:a}=s;if(!t)return;let f={executionId:t,...c&&{sqsAuthToken:c},status:e,...o&&{error:o},timestamp:new Date().toISOString()},l=i?"HTTP":r?"SQS":"NONE",u=JSON.stringify(f).length;console.log(`Sending final status: ${e} via ${l} (${(u/1024).toFixed(1)}KB)`);try{if(i)await Z(i,t,f,a);else if(r){let d=["completed","failed","insufficient_context","blocked"].includes(e)?"execution_completed":"progress_update";await ee(r,t,f,d)}else{console.warn("No transport configured for final status \u2014 neither PROGRESS_API_URL nor PROGRESS_QUEUE_URL set");return}console.log(`Final status ${e} sent via ${l}`)}catch(d){console.error(`Failed to send final status (${e}) via ${l}:`),console.error(` Payload: ${(u/1024).toFixed(1)}KB`),console.error(` Error: ${d.message}`),d.name&&console.error(` Error type: ${d.name}`),d.code&&console.error(` Error code: ${d.code}`)}}async function Z(s,e,o,t){let c=`${s}/${e}/progress`,i={"Content-Type":"application/json"};t&&(i.Authorization=`Bearer ${t}`);let r=await fetch(c,{method:"POST",headers:i,body:JSON.stringify(o)});if(!r.ok){let a=await r.text();throw new Error(`HTTP ${r.status}: ${a}`)}}async function ee(s,e,o,t="progress_update"){let c=JSON.stringify(o),i=(c.length/1024).toFixed(1);c.length>256*1024&&console.error(`\u274C SQS message too large: ${i}KB (limit 256KB) for ${e} [${t}]`),await V().send(new X({QueueUrl:s,MessageBody:c,MessageGroupId:e,MessageAttributes:{executionId:{DataType:"String",StringValue:e},messageType:{DataType:"String",StringValue:t}}}))}function D({workflowType:s,jobId:e,projectId:o,agentType:t,model:c}){let i="\u2500".repeat(60),r=`${t||"default"} (model: ${c||"auto"})`;return["",i,` Workflow: ${s}`,` Job: ${e||"local"}`,` Project: ${o||"none"}`,` Agent: ${r}`,i].join(`
- `)}import"@zibby/core";var T=process.env.WORKSPACE||"/workspace";async function se(s,e){j(e,{recursive:!0});let o=Date.now();console.log("[setup] Fetching bundle...");let t=setInterval(()=>{let i=((Date.now()-o)/1e3).toFixed(1);console.log(`[setup] still fetching (${i}s elapsed)`)},3e3);try{await new Promise((i,r)=>{let a=W("curl",["-fsSL",s],{stdio:["ignore","pipe","inherit"]}),f=W("tar",["-xzf","-","-C",e],{stdio:["pipe","inherit","inherit"]});a.stdout.pipe(f.stdin);let l,u,d=()=>{if(l!==void 0&&u!==void 0){if(l!==0)return r(new Error(`curl exited ${l}`));if(u!==0)return r(new Error(`tar exited ${u}`));i()}};a.on("close",S=>{l=S,d()}),f.on("close",S=>{u=S,d()}),a.on("error",r),f.on("error",r)})}finally{clearInterval(t)}let c=((Date.now()-o)/1e3).toFixed(1);return console.log(`[setup] Bundle extracted (${c}s)`),e}async function z(){let s=process.env.WORKFLOW_SOURCES_URL;if(!s)throw new Error("WORKFLOW_SOURCES_URL env var is required");let e=await fetch(s);if(!e.ok)throw new Error(`Failed to fetch sources: ${e.status} ${e.statusText}`);let o=await e.json();if(!o.sources||typeof o.sources!="object")throw new Error('Invalid sources payload \u2014 missing "sources" map');return o}function re(s,e){let o=G(e),t=0;for(let[c,i]of Object.entries(s)){let r=G(e,c);if(!r.startsWith(`${o}/`)&&r!==o){console.error(` \u26D4 Skipping unsafe path: ${c}`);continue}j(te(r),{recursive:!0}),oe(r,i,"utf-8"),t++}return t}async function ie(){let s=process.env.ZIBBY_EGRESS_PROXY_URL,e=process.env.ZIBBY_EGRESS_TOKEN;if(!(!s||!e))try{let o=await import("undici"),t=new o.ProxyAgent({uri:s,token:`Bearer ${e}`});o.setGlobalDispatcher(t),console.log(`[setup] Egress proxy active \u2192 ${s}`)}catch(o){console.warn(`[setup] Failed to install egress proxy dispatcher: ${o.message}`)}}async function ae(s,e){let o=g(s,"graph.mjs");if(!w(o))throw new Error(`graph.mjs not found at ${o}`);let t=await import(x(o).href),c=e?.entryClass,i=c&&t[c]||t.default||Object.values(t).find(r=>typeof r=="function"&&r.prototype?.buildGraph);if(!i)throw new Error("No WorkflowAgent class found in graph.mjs");return i}async function me(){if(!process.env.NODE_PATH){process.env.NODE_PATH="/opt/zibby/packages";let n=await import("module");n.default._initPaths&&n.default._initPaths()}await ie();let{WORKFLOW_JOB_ID:s,WORKFLOW_TYPE:e,PROJECT_ID:o,AGENT_TYPE:t,MODEL:c}=process.env;e||(console.error("Missing WORKFLOW_TYPE env var"),process.exit(1));let i=D({workflowType:e,jobId:s,projectId:o,agentType:t,model:c});console.log(i);let r=process.env.WORKFLOW_BUNDLE_URL,a,f={},l,u;if(r){l=e,a=g(T,".zibby","workflows",l);try{await se(r,a);try{let n=await z();f=n.input||{},u=n.version}catch(n){console.warn(`[setup] Could not fetch input payload: ${n.message}`)}}catch(n){console.warn(`[setup] Bundle extract failed (${n.message}); falling back to source install`),a=null}}if(!a){let n=await z(),{sources:p,input:m,workflowType:O,version:E}=n;f=m||{},l=O||e,u=E,console.log(`[setup] Workflow v${u||"?"} (${Object.keys(p).length} files)`),a=g(T,".zibby","workflows",l);let b=re(p,a);console.log(`[setup] Wrote ${b} files`),console.log("[setup] Installing dependencies...");try{ne("npm install --silent --no-audit --no-fund",{cwd:a,stdio:"inherit"}),console.log("[setup] Dependencies installed")}catch($){console.warn(`[setup] npm install failed: ${$.message}`)}}let d={},S=g(a,"workflow.json");w(S)&&(d=JSON.parse(K(S,"utf-8")));let A={},I=g(a,"zibby.config.json");if(w(I))try{A=JSON.parse(K(I,"utf-8")),console.log("[setup] Loaded user config from zibby.config.json")}catch(n){console.warn(`[setup] Failed to parse zibby.config.json: ${n.message} \u2014 falling back to defaults`)}let k=await ae(a,d);console.log(`[setup] Loaded ${k.name}`);let y=[],C=g(a,"node_modules","@zibby","agent-workflow"),v=g(a,"node_modules","@zibby","core","node_modules","@zibby","agent-workflow");w(C)&&y.push({kind:"hoisted",path:C}),w(v)&&y.push({kind:"nested",path:v});let R=process.env.ZIBBY_RUN_DIAG==="1";if(R){let{readdirSync:n}=await import("fs");console.log(` [diag] @zibby/agent-workflow copies in bundle: ${y.length}`);for(let p of y)console.log(` [diag] ${p.kind}: ${p.path}`);try{let p=g(a,"node_modules","@zibby");w(p)&&console.log(` [diag] node_modules/@zibby/ contents: [${n(p).join(", ")}]`)}catch{}}let N=g(a,"node_modules","@zibby","core","dist","index.js");if(w(N)&&y.length>0)try{let n=await import(x(N).href),p=[n.AssistantStrategy,n.CursorAgentStrategy,n.ClaudeAgentStrategy,n.CodexAgentStrategy,n.GeminiAgentStrategy].filter(Boolean);for(let m of y){let O=g(m.path,"dist","index.js");if(!w(O))continue;let E=await import(x(O).href),b=R?E.listStrategies():null;for(let $ of p)try{E.registerStrategy(new $)}catch(q){console.warn(` register ${$.name} into ${m.kind} failed: ${q.message}`)}R&&console.log(` [diag] ${m.kind} registry: before=[${b.join(",")||"empty"}] after=[${E.listStrategies().join(",")||"empty"}]`)}console.log("[setup] Registered 5 agent strategies (assistant, cursor, claude, codex, gemini)")}catch(n){console.warn(`[setup] Failed to bridge strategies: ${n.message}`)}else console.warn("[setup] No @zibby/core or @zibby/agent-workflow in bundle \u2014 agent strategies may be unavailable");let Q=Date.now(),_=new k({workflow:l||e}),J=_.buildGraph(),H={input:f||{},cwd:T,runId:s||`run-${Date.now()}`,config:A};console.log("");let h;try{h=await J.run(_,H)}catch(n){console.error(`
- Workflow execution failed: ${n.message}`),console.error(n.stack),await U("failed",n.message),process.exit(1)}let F=((Date.now()-Q)/1e3).toFixed(1),M=h?.success!==!1,B=l||e;M?(console.log(`
- [done] ${B} completed in ${F}s`),await U("completed")):(console.error(`
- [done] ${B} failed after ${F}s`),await U("failed",h?.error||"Workflow execution failed"),process.exit(1)),_.onComplete&&await _.onComplete(h)}async function U(s,e=null){let o={EXECUTION_ID:process.env.WORKFLOW_JOB_ID,PROGRESS_API_URL:process.env.PROGRESS_API_URL,PROGRESS_QUEUE_URL:process.env.PROGRESS_QUEUE_URL,PROJECT_API_TOKEN:process.env.PROJECT_API_TOKEN,SQS_AUTH_TOKEN:process.env.SQS_AUTH_TOKEN};if(o.EXECUTION_ID)try{await L(o,{status:s,...e&&{error:e}})}catch(t){console.error(`\u26A0\uFE0F Failed to report status: ${t.message}`)}}export{me as runWorkflowCommand};
+ import{mkdirSync as j,writeFileSync as oe,existsSync as w,readFileSync as K}from"fs";import{join as g,dirname as te,resolve as G}from"path";import{pathToFileURL as U}from"url";import{execSync as ne,spawn as W}from"node:child_process";import{SQSClient as Y,SendMessageCommand as X}from"@aws-sdk/client-sqs";var b=null;function V(){return b||(b=new Y({region:process.env.AWS_REGION||"ap-southeast-2"})),b}async function L(s,{status:e,error:o}){let{EXECUTION_ID:t,SQS_AUTH_TOKEN:c,PROGRESS_API_URL:i,PROGRESS_QUEUE_URL:r,PROJECT_API_TOKEN:a}=s;if(!t)return;let f={executionId:t,...c&&{sqsAuthToken:c},status:e,...o&&{error:o},timestamp:new Date().toISOString()},l=i?"HTTP":r?"SQS":"NONE",u=JSON.stringify(f).length;console.log(`Sending final status: ${e} via ${l} (${(u/1024).toFixed(1)}KB)`);try{if(i)await Z(i,t,f,a);else if(r){let d=["completed","failed","insufficient_context","blocked"].includes(e)?"execution_completed":"progress_update";await ee(r,t,f,d)}else{console.warn("No transport configured for final status \u2014 neither PROGRESS_API_URL nor PROGRESS_QUEUE_URL set");return}console.log(`Final status ${e} sent via ${l}`)}catch(d){console.error(`Failed to send final status (${e}) via ${l}:`),console.error(` Payload: ${(u/1024).toFixed(1)}KB`),console.error(` Error: ${d.message}`),d.name&&console.error(` Error type: ${d.name}`),d.code&&console.error(` Error code: ${d.code}`)}}async function Z(s,e,o,t){let c=`${s}/${e}/progress`,i={"Content-Type":"application/json"};t&&(i.Authorization=`Bearer ${t}`);let r=await fetch(c,{method:"POST",headers:i,body:JSON.stringify(o)});if(!r.ok){let a=await r.text();throw new Error(`HTTP ${r.status}: ${a}`)}}async function ee(s,e,o,t="progress_update"){let c=JSON.stringify(o),i=(c.length/1024).toFixed(1);c.length>256*1024&&console.error(`\u274C SQS message too large: ${i}KB (limit 256KB) for ${e} [${t}]`),await V().send(new X({QueueUrl:s,MessageBody:c,MessageGroupId:e,MessageAttributes:{executionId:{DataType:"String",StringValue:e},messageType:{DataType:"String",StringValue:t}}}))}function D({workflowType:s,jobId:e,projectId:o,agentType:t,model:c}){let i="\u2500".repeat(60),r=`${t||"default"} (model: ${c||"auto"})`;return["",i,` Workflow: ${s}`,` Job: ${e||"local"}`,` Project: ${o||"none"}`,` Agent: ${r}`,i].join(`
+ `)}import"@zibby/core";var P=process.env.WORKSPACE||"/workspace";async function se(s,e){j(e,{recursive:!0});let o=Date.now();console.log("[setup] Fetching bundle...");let t=setInterval(()=>{let i=((Date.now()-o)/1e3).toFixed(1);console.log(`[setup] still fetching (${i}s elapsed)`)},3e3);try{await new Promise((i,r)=>{let a=W("curl",["-fsSL",s],{stdio:["ignore","pipe","inherit"]}),f=W("tar",["-xzf","-","-C",e],{stdio:["pipe","inherit","inherit"]});a.stdout.pipe(f.stdin);let l,u,d=()=>{if(l!==void 0&&u!==void 0){if(l!==0)return r(new Error(`curl exited ${l}`));if(u!==0)return r(new Error(`tar exited ${u}`));i()}};a.on("close",S=>{l=S,d()}),f.on("close",S=>{u=S,d()}),a.on("error",r),f.on("error",r)})}finally{clearInterval(t)}let c=((Date.now()-o)/1e3).toFixed(1);return console.log(`[setup] Bundle extracted (${c}s)`),e}async function z(){let s=process.env.WORKFLOW_SOURCES_URL;if(!s)throw new Error("WORKFLOW_SOURCES_URL env var is required");let e=await fetch(s);if(!e.ok)throw new Error(`Failed to fetch sources: ${e.status} ${e.statusText}`);let o=await e.json();if(!o.sources||typeof o.sources!="object")throw new Error('Invalid sources payload \u2014 missing "sources" map');return o}function re(s,e){let o=G(e),t=0;for(let[c,i]of Object.entries(s)){let r=G(e,c);if(!r.startsWith(`${o}/`)&&r!==o){console.error(` \u26D4 Skipping unsafe path: ${c}`);continue}j(te(r),{recursive:!0}),oe(r,i,"utf-8"),t++}return t}async function ie(){let s=process.env.ZIBBY_EGRESS_PROXY_URL,e=process.env.ZIBBY_EGRESS_TOKEN;if(!(!s||!e))try{let o=await import("undici"),t=new o.ProxyAgent({uri:s,token:`Bearer ${e}`});o.setGlobalDispatcher(t),console.log(`[setup] Egress proxy active \u2192 ${s}`)}catch(o){console.warn(`[setup] Failed to install egress proxy dispatcher: ${o.message}`)}}async function ae(s,e){let o=g(s,"graph.mjs");if(!w(o))throw new Error(`graph.mjs not found at ${o}`);let t=await import(U(o).href),c=e?.entryClass,i=c&&t[c]||t.default||Object.values(t).find(r=>typeof r=="function"&&r.prototype?.buildGraph);if(!i)throw new Error("No WorkflowAgent class found in graph.mjs");return i}async function me(){if(!process.env.NODE_PATH){process.env.NODE_PATH="/opt/zibby/packages";let n=await import("module");n.default._initPaths&&n.default._initPaths()}await ie();let{WORKFLOW_JOB_ID:s,WORKFLOW_TYPE:e,PROJECT_ID:o,AGENT_TYPE:t,MODEL:c}=process.env;e||(console.error("Missing WORKFLOW_TYPE env var"),process.exit(1));let i=D({workflowType:e,jobId:s,projectId:o,agentType:t,model:c});console.log(i);let r=process.env.WORKFLOW_BUNDLE_URL,a,f={},l,u;if(r){l=e,a=g(P,".zibby","workflows",l);try{await se(r,a);try{let n=await z();f=n.input||{},u=n.version}catch(n){console.warn(`[setup] Could not fetch input payload: ${n.message}`)}}catch(n){console.warn(`[setup] Bundle extract failed (${n.message}); falling back to source install`),a=null}}if(!a){let n=await z(),{sources:p,input:m,workflowType:_,version:E}=n;f=m||{},l=_||e,u=E,console.log(`[setup] Workflow v${u||"?"} (${Object.keys(p).length} files)`),a=g(P,".zibby","workflows",l);let R=re(p,a);console.log(`[setup] Wrote ${R} files`),console.log("[setup] Installing dependencies...");try{ne("npm install --silent --no-audit --no-fund",{cwd:a,stdio:"inherit"}),console.log("[setup] Dependencies installed")}catch(h){console.warn(`[setup] npm install failed: ${h.message}`)}}let d={},S=g(a,"workflow.json");w(S)&&(d=JSON.parse(K(S,"utf-8")));let x={},A=g(a,"zibby.config.json");if(w(A))try{x=JSON.parse(K(A,"utf-8")),console.log("[setup] Loaded user config from zibby.config.json")}catch(n){console.warn(`[setup] Failed to parse zibby.config.json: ${n.message} \u2014 falling back to defaults`)}let I=await ae(a,d);console.log(`[setup] Loaded ${I.name}`);let y=[],k=g(a,"node_modules","@zibby","agent-workflow"),v=g(a,"node_modules","@zibby","core","node_modules","@zibby","agent-workflow");w(k)&&y.push({kind:"hoisted",path:k}),w(v)&&y.push({kind:"nested",path:v});let O=process.env.ZIBBY_RUN_DIAG==="1";if(O){let{readdirSync:n}=await import("fs");console.log(` [diag] @zibby/agent-workflow copies in bundle: ${y.length}`);for(let p of y)console.log(` [diag] ${p.kind}: ${p.path}`);try{let p=g(a,"node_modules","@zibby");w(p)&&console.log(` [diag] node_modules/@zibby/ contents: [${n(p).join(", ")}]`)}catch{}}let C=g(a,"node_modules","@zibby","core","dist","index.js");if(w(C)&&y.length>0)try{let n=await import(U(C).href),p=[n.AssistantStrategy,n.CursorAgentStrategy,n.ClaudeAgentStrategy,n.CodexAgentStrategy,n.GeminiAgentStrategy].filter(Boolean);for(let m of y){let _=g(m.path,"dist","index.js");if(!w(_))continue;let E=await import(U(_).href),R=O?E.listStrategies():null;for(let h of p)try{E.registerStrategy(new h)}catch(q){console.warn(` register ${h.name} into ${m.kind} failed: ${q.message}`)}O&&console.log(` [diag] ${m.kind} registry: before=[${R.join(",")||"empty"}] after=[${E.listStrategies().join(",")||"empty"}]`)}console.log("[setup] Registered 5 agent strategies (assistant, cursor, claude, codex, gemini)")}catch(n){console.warn(`[setup] Failed to bridge strategies: ${n.message}`)}else console.warn("[setup] No @zibby/core or @zibby/agent-workflow in bundle \u2014 agent strategies may be unavailable");let Q=Date.now(),N=new I({workflow:l||e}),J=N.buildGraph(),H={input:f||{},cwd:P,runId:s||`run-${Date.now()}`,config:x};console.log("");let $;try{$=await J.run(N,H)}catch(n){console.error(`
+ Workflow execution failed: ${n.message}`),console.error(n.stack),await T("failed",n.message),process.exit(1)}let F=((Date.now()-Q)/1e3).toFixed(1),M=$?.success!==!1,B=l||e;M?(console.log(`
+ [done] ${B} completed in ${F}s`),await T("completed")):(console.error(`
+ [done] ${B} failed after ${F}s`),await T("failed",$?.error||"Workflow execution failed"),process.exit(1))}async function T(s,e=null){let o={EXECUTION_ID:process.env.WORKFLOW_JOB_ID,PROGRESS_API_URL:process.env.PROGRESS_API_URL,PROGRESS_QUEUE_URL:process.env.PROGRESS_QUEUE_URL,PROJECT_API_TOKEN:process.env.PROJECT_API_TOKEN,SQS_AUTH_TOKEN:process.env.SQS_AUTH_TOKEN};if(o.EXECUTION_ID)try{await L(o,{status:s,...e&&{error:e}})}catch(t){console.error(`\u26A0\uFE0F Failed to report status: ${t.message}`)}}export{me as runWorkflowCommand};
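For orientation, the loader contract embedded in the hunk above: `runWorkflowCommand` extracts the bundle, imports `graph.mjs`, prefers the export named by `entryClass` in `workflow.json`, falls back to the default export or any export whose prototype has `buildGraph()`, instantiates it with `{ workflow }`, then awaits `graph.run(agent, { input, cwd, runId, config })` and treats `result?.success !== false` as a pass. A minimal sketch of a `graph.mjs` satisfying that contract (hypothetical, not shipped in this package):

```js
// graph.mjs — hypothetical workflow entry matching the loader in run.js above.
// The class name and node logic are illustrative; only the shape is grounded
// in the minified code (default export, buildGraph(), graph.run(agent, ctx)).
export default class HelloWorkflow {
  constructor({ workflow }) {
    this.workflow = workflow; // workflow slug passed in by the runner
  }

  buildGraph() {
    return {
      // ctx carries { input, cwd, runId, config } per the runner's call site
      async run(agent, ctx) {
        console.log(`[${ctx.runId}] ${agent.workflow} input:`, ctx.input);
        return { success: true }; // success !== false => reported as "completed"
      },
    };
  }
}
```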
package/dist/commands/workflows/start.js CHANGED
@@ -1,13 +1,13 @@
- var j=Object.defineProperty;var I=(n,t)=>()=>(n&&(t=n(n=0)),t);var K=(n,t)=>{for(var o in t)j(n,o,{get:t[o],enumerable:!0})};var $={};K($,{_resetLoaderCacheForTests:()=>G,loadCredentialsIntoEnv:()=>Y});import k from"node:fs";import R from"node:path";import W from"node:os";async function N(){if(!(process.env.__ZIBBY_CLAUDE_PLAN||process.platform!=="darwin"||!(process.env.CLAUDE_CODE_OAUTH_TOKEN||process.env.CLAUDE_CODE_OAUTH_TOKEN_POOL||process.env.ANTHROPIC_AUTH_TOKEN)))try{let{execSync:t}=await import("node:child_process"),o=t('security find-generic-password -s "Claude Code-credentials" -w',{encoding:"utf-8",stdio:["ignore","pipe","ignore"]}).trim();if(o){let f=JSON.parse(o)?.claudeAiOauth?.subscriptionType;f&&(process.env.__ZIBBY_CLAUDE_PLAN=f)}}catch{}}function F(n){try{if(!k.existsSync(n))return{};let t=k.readFileSync(n,"utf-8"),o=JSON.parse(t);return o&&o.agentKeys&&typeof o.agentKeys=="object"?o.agentKeys:{}}catch{return{}}}async function Y(n={}){let{verbose:t=!1,force:o=!1,configPath:i}=n;if(m&&!o)return w;if(!!(process.env.CLAUDE_CODE_OAUTH_TOKEN_POOL||process.env.ANTHROPIC_API_KEY_POOL||process.env.CLAUDE_CODE_OAUTH_TOKEN||process.env.ANTHROPIC_API_KEY||process.env.ANTHROPIC_AUTH_TOKEN))return await N(),m=!0,w={oauthCount:0,apiCount:0,source:"cloud-env"},t&&console.log("[credentials-loader] env vars already set \u2014 skipping local discovery"),w;let l=i||R.join(W.homedir(),".zibby","config.json"),a=F(l),c="none",s=0,p=0;for(let r of z)a[r]&&!process.env[r]&&(process.env[r]=String(a[r]).trim(),c="config.json",r.endsWith("_POOL")?s+=String(a[r]).split(",").filter(g=>g.trim()).length:s+=1);for(let r of B)a[r]&&!process.env[r]&&(process.env[r]=String(a[r]).trim(),c="config.json",r.endsWith("_POOL")?p+=String(a[r]).split(",").filter(g=>g.trim()).length:p+=1);return await N(),m=!0,w={oauthCount:s,apiCount:p,source:c},t&&s+p>0&&console.log(`[credentials-loader] loaded ${s} OAuth + ${p} API from ${c} (~/.zibby/config.json)`),w}function G(){m=!1,w=null}var m,w,z,B,v=I(()=>{m=!1,w=null,z=["CLAUDE_CODE_OAUTH_TOKEN","CLAUDE_CODE_OAUTH_TOKEN_POOL","ANTHROPIC_AUTH_TOKEN","ANTHROPIC_AUTH_TOKEN_POOL"],B=["ANTHROPIC_API_KEY","ANTHROPIC_API_KEY_POOL"]});import{existsSync as C}from"fs";import{readFile as J}from"fs/promises";import{join as A}from"path";import{pathToFileURL as M}from"url";import e from"chalk";import Z from"ora";import{existsSync as H}from"fs";import{join as D}from"path";import{pathToFileURL as x}from"url";async function b(n){let t=D(n,".zibby.config.mjs");if(!H(t))throw new Error(".zibby.config.mjs not found");try{let o=await import(x(t).href);return o.default||o}catch(o){throw new Error(`Failed to load .zibby.config.mjs: ${o.message}`,{cause:o})}}var q=3848;async function V(n){try{let t=await b(n);return{userConfig:t,workflowsBasePath:t?.paths?.workflows||".zibby/workflows"}}catch{return{userConfig:null,workflowsBasePath:".zibby/workflows"}}}async function X(n,t,o){let i=A(n,"graph.mjs");if(!C(i))throw new Error(`graph.mjs not found in ${o}/${t}/`);let f=await Q(n,t),l=await import(M(i).href),a=f.entryClass,c=a&&l[a]||l.default||Object.values(l).find(s=>typeof s=="function"&&s.prototype?.buildGraph);if(!c)throw new Error("No WorkflowAgent class found in graph.mjs. Export a class with buildGraph() method.");return{AgentClass:c,manifest:f}}async function Q(n,t){let o=A(n,"workflow.json");if(!C(o))return{name:t,triggers:{api:!0}};let i=await J(o,"utf-8");return JSON.parse(i)}async function yo(n,t){n||(console.log(e.red(`
+ var j=Object.defineProperty;var I=(n,t)=>()=>(n&&(t=n(n=0)),t);var K=(n,t)=>{for(var o in t)j(n,o,{get:t[o],enumerable:!0})};var N={};K(N,{_resetLoaderCacheForTests:()=>G,loadCredentialsIntoEnv:()=>Y});import b from"node:fs";import R from"node:path";import W from"node:os";async function k(){if(!(process.env.__ZIBBY_CLAUDE_PLAN||process.platform!=="darwin"||!(process.env.CLAUDE_CODE_OAUTH_TOKEN||process.env.CLAUDE_CODE_OAUTH_TOKEN_POOL||process.env.ANTHROPIC_AUTH_TOKEN)))try{let{execSync:t}=await import("node:child_process"),o=t('security find-generic-password -s "Claude Code-credentials" -w',{encoding:"utf-8",stdio:["ignore","pipe","ignore"]}).trim();if(o){let f=JSON.parse(o)?.claudeAiOauth?.subscriptionType;f&&(process.env.__ZIBBY_CLAUDE_PLAN=f)}}catch{}}function F(n){try{if(!b.existsSync(n))return{};let t=b.readFileSync(n,"utf-8"),o=JSON.parse(t);return o&&o.agentKeys&&typeof o.agentKeys=="object"?o.agentKeys:{}}catch{return{}}}async function Y(n={}){let{verbose:t=!1,force:o=!1,configPath:c}=n;if(h&&!o)return w;if(!!(process.env.CLAUDE_CODE_OAUTH_TOKEN_POOL||process.env.ANTHROPIC_API_KEY_POOL||process.env.CLAUDE_CODE_OAUTH_TOKEN||process.env.ANTHROPIC_API_KEY||process.env.ANTHROPIC_AUTH_TOKEN))return await k(),h=!0,w={oauthCount:0,apiCount:0,source:"cloud-env"},t&&console.log("[credentials-loader] env vars already set \u2014 skipping local discovery"),w;let l=c||R.join(W.homedir(),".zibby","config.json"),a=F(l),i="none",s=0,g=0;for(let r of z)a[r]&&!process.env[r]&&(process.env[r]=String(a[r]).trim(),i="config.json",r.endsWith("_POOL")?s+=String(a[r]).split(",").filter(p=>p.trim()).length:s+=1);for(let r of B)a[r]&&!process.env[r]&&(process.env[r]=String(a[r]).trim(),i="config.json",r.endsWith("_POOL")?g+=String(a[r]).split(",").filter(p=>p.trim()).length:g+=1);return await k(),h=!0,w={oauthCount:s,apiCount:g,source:i},t&&s+g>0&&console.log(`[credentials-loader] loaded ${s} OAuth + ${g} API from ${i} (~/.zibby/config.json)`),w}function G(){h=!1,w=null}var h,w,z,B,$=I(()=>{h=!1,w=null,z=["CLAUDE_CODE_OAUTH_TOKEN","CLAUDE_CODE_OAUTH_TOKEN_POOL","ANTHROPIC_AUTH_TOKEN","ANTHROPIC_AUTH_TOKEN_POOL"],B=["ANTHROPIC_API_KEY","ANTHROPIC_API_KEY_POOL"]});import{existsSync as _}from"fs";import{readFile as J}from"fs/promises";import{join as C}from"path";import{pathToFileURL as M}from"url";import e from"chalk";import Z from"ora";import{existsSync as H}from"fs";import{join as D}from"path";import{pathToFileURL as x}from"url";async function P(n){let t=D(n,".zibby.config.mjs");if(!H(t))throw new Error(".zibby.config.mjs not found");try{let o=await import(x(t).href);return o.default||o}catch(o){throw new Error(`Failed to load .zibby.config.mjs: ${o.message}`,{cause:o})}}var q=3848;async function V(n){try{let t=await P(n);return{userConfig:t,workflowsBasePath:t?.paths?.workflows||".zibby/workflows"}}catch{return{userConfig:null,workflowsBasePath:".zibby/workflows"}}}async function X(n,t,o){let c=C(n,"graph.mjs");if(!_(c))throw new Error(`graph.mjs not found in ${o}/${t}/`);let f=await Q(n,t),l=await import(M(c).href),a=f.entryClass,i=a&&l[a]||l.default||Object.values(l).find(s=>typeof s=="function"&&s.prototype?.buildGraph);if(!i)throw new Error("No WorkflowAgent class found in graph.mjs. Export a class with buildGraph() method.");return{AgentClass:i,manifest:f}}async function Q(n,t){let o=C(n,"workflow.json");if(!_(o))return{name:t,triggers:{api:!0}};let c=await J(o,"utf-8");return JSON.parse(c)}async function yo(n,t){n||(console.log(e.red(`
  Workflow name is required`)),console.log(e.gray(" Usage: zibby workflow start <workflow-name>")),console.log(e.gray(` Example: zibby workflow start ticket-triage
- `)),process.exit(1));let o=n.toLowerCase(),i=process.cwd();try{let{loadCredentialsIntoEnv:d}=await Promise.resolve().then(()=>(v(),$));await d({verbose:!!process.env.ZIBBY_DEBUG})}catch{}let{userConfig:f,workflowsBasePath:l}=await V(i),a=A(i,l,o);C(a)||(console.log(e.red(`
+ `)),process.exit(1));let o=n.toLowerCase(),c=process.cwd();try{let{loadCredentialsIntoEnv:d}=await Promise.resolve().then(()=>($(),N));await d({verbose:!!process.env.ZIBBY_DEBUG})}catch{}let{userConfig:f,workflowsBasePath:l}=await V(c),a=C(c,l,o);_(a)||(console.log(e.red(`
  Workflow not found: ${l}/${o}/`)),console.log(e.gray(" Create one first:")),console.log(e.cyan(` zibby workflow new ${o}
- `)),process.exit(1));let c=Z(` Loading workflow "${o}"...`).start(),s,p;try{({AgentClass:s,manifest:p}=await X(a,o,l)),c.succeed(` Loaded ${e.bold(p.entryClass||s.name)} (${o})`)}catch(d){c.fail(" Failed to load workflow"),console.log(e.red(`
+ `)),process.exit(1));let i=Z(` Loading workflow "${o}"...`).start(),s,g;try{({AgentClass:s,manifest:g}=await X(a,o,l)),i.succeed(` Loaded ${e.bold(g.entryClass||s.name)} (${o})`)}catch(d){i.fail(" Failed to load workflow"),console.log(e.red(`
  ${d.message}
- `)),process.exit(1)}let r=parseInt(t.port,10)||q,g;try{g=(await import("express")).default}catch{console.log(e.red(`
+ `)),process.exit(1)}let r=parseInt(t.port,10)||q,p;try{p=(await import("express")).default}catch{console.log(e.red(`
  express is required for local workflow server`)),console.log(e.gray(` npm install express
- `)),process.exit(1)}let y=g();y.use(g.json({limit:"1mb"})),y.get("/health",(d,O)=>{O.json({status:"ok",workflow:o,class:s.name})}),y.post("/trigger",async(d,O)=>{let u=`local-${Date.now()}`,E=d.body.input||d.body||{};console.log(e.cyan(`
- \u25B6 Run ${u} triggered`)),console.log(e.gray(` input: ${JSON.stringify(E).slice(0,200)}`)),O.status(202).json({runId:u,status:"running",workflow:o});try{let _=Date.now(),h=new s({workflow:o}),U=h.buildGraph(),L={input:E,cwd:process.cwd(),runId:u,config:f||{}},T=await U.run(h,L),P=((Date.now()-_)/1e3).toFixed(1),S=T?.success!==!1;console.log(S?e.green(` \u2714 Run ${u} succeeded (${P}s)`):e.red(` \u2716 Run ${u} failed (${P}s)`)),h.onComplete&&await h.onComplete(T)}catch(_){console.log(e.red(` \u2716 Run ${u} error: ${_.message}`))}}),y.listen(r,()=>{console.log(e.bold.cyan(`
+ `)),process.exit(1)}let y=p();y.use(p.json({limit:"1mb"})),y.get("/health",(d,m)=>{m.json({status:"ok",workflow:o,class:s.name})}),y.post("/trigger",async(d,m)=>{let u=`local-${Date.now()}`,A=d.body.input||d.body||{};console.log(e.cyan(`
+ \u25B6 Run ${u} triggered`)),console.log(e.gray(` input: ${JSON.stringify(A).slice(0,200)}`)),m.status(202).json({runId:u,status:"running",workflow:o});try{let O=Date.now(),E=new s({workflow:o}),v=E.buildGraph(),U={input:A,cwd:process.cwd(),runId:u,config:f||{}},L=await v.run(E,U),T=((Date.now()-O)/1e3).toFixed(1),S=L?.success!==!1;console.log(S?e.green(` \u2714 Run ${u} succeeded (${T}s)`):e.red(` \u2716 Run ${u} failed (${T}s)`))}catch(O){console.log(e.red(` \u2716 Run ${u} error: ${O.message}`))}}),y.listen(r,()=>{console.log(e.bold.cyan(`
  Zibby Workflow Server \u2014 ${o}
  `)),console.log(e.gray(" ".padEnd(56,"-"))),console.log(e.white(` Workflow: ${e.cyan(o)}`)),console.log(e.white(` Class: ${e.cyan(s.name)}`)),console.log(e.white(` Port: ${e.cyan(r)}`)),console.log(e.gray(" ".padEnd(56,"-"))),console.log(e.white(`
  Endpoints:`)),console.log(e.gray(` GET http://localhost:${r}/health`)),console.log(e.cyan(` POST http://localhost:${r}/trigger`)),console.log(e.white(`
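The `start.js` hunk above wires a small express server with exactly two endpoints (default port 3848, per `var q=3848`). A hedged smoke-test sketch against it — assumes `zibby workflow start <name>` is already running, and the input shape is workflow-specific:

```js
// poke-workflow.mjs — hypothetical client for the local dev server above.
const base = 'http://localhost:3848';

// GET /health → { status: 'ok', workflow, class }
console.log(await (await fetch(`${base}/health`)).json());

// POST /trigger replies 202 immediately ({ runId, status: 'running', workflow });
// the run's pass/fail is printed in the server's own console, not returned here.
const res = await fetch(`${base}/trigger`, {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({ input: { example: true } }),
});
console.log(res.status, await res.json());
```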
package/dist/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@zibby/cli",
- "version": "0.3.0",
+ "version": "0.4.1",
  "description": "Zibby CLI - Test automation generator and runner",
  "type": "module",
  "bin": {
@@ -8,7 +8,7 @@
  },
  "scripts": {
  "build": "node ../scripts/build.mjs --extra-dirs bin",
- "test": "vitest run test/auth*.test.js test/two-layer-auth.test.js test/trigger-params.test.js test/trigger-helpers.test.js test/deploy-helpers.test.js test/deploy-bundles-user-config.test.js test/run-loads-user-config.test.js test/env-helpers.test.js test/env-cli.test.js test/chat-agents.test.js test/chat-agents-api.test.js test/chat-agents-picker.test.js test/chat-sandbox-attach.test.js test/credentials-file.test.js test/credentials-api.test.js test/credentials-loader.test.js test/cli-namespace-consistency.test.js test/cli-workflow-subcommands.test.js test/run-bundle-core-import.test.js test/start-respects-config.test.js test/sse-backoff.test.js test/sse-reconnect-loop.test.js test/run-helpers.test.js test/sse-parser.test.js",
+ "test": "vitest run test/auth*.test.js test/two-layer-auth.test.js test/trigger-params.test.js test/trigger-helpers.test.js test/deploy-helpers.test.js test/deploy-bundles-user-config.test.js test/run-loads-user-config.test.js test/env-helpers.test.js test/env-cli.test.js test/chat-agents.test.js test/chat-agents-api.test.js test/chat-agents-picker.test.js test/chat-sandbox-attach.test.js test/credentials-file.test.js test/credentials-api.test.js test/credentials-loader.test.js test/cli-namespace-consistency.test.js test/cli-workflow-subcommands.test.js test/run-bundle-core-import.test.js test/start-respects-config.test.js test/sse-backoff.test.js test/sse-reconnect-loop.test.js test/run-helpers.test.js test/sse-parser.test.js test/cloud-creds-check.test.js",
  "test:unit": "vitest run src/",
  "test:auth": "vitest run test/auth*.test.js test/two-layer-auth.test.js",
  "lint": "eslint .",
@@ -33,8 +33,8 @@
  },
  "dependencies": {
  "@aws-sdk/client-sqs": "^3.1038.0",
- "@zibby/agent-workflow": "^0.3.0",
- "@zibby/core": "^0.3.0",
+ "@zibby/agent-workflow": "^0.3.2",
+ "@zibby/core": "^0.3.3",
  "@zibby/ui-memory": "^1.0.0",
  "@zibby/skills": "^0.1.11",
  "adm-zip": "^0.5.17",
package/dist/templates/zibby-workflow-claude/agents-md-block.md CHANGED
@@ -1,4 +1,4 @@
- <!-- BEGIN zibby-workflows zibby-template-version: 3 -->
+ <!-- BEGIN zibby-workflows zibby-template-version: 4 -->
  ## Zibby

  This project uses **Zibby** — there are two surfaces:
@@ -16,7 +16,7 @@ Both share `.zibby.config.mjs` at the project root.
  Files:
  ```
  <paths.workflows or .zibby/workflows>/<name>/
- ├── workflow.json name, entryClass, triggers, schemas
+ ├── workflow.json name, entryClass, triggers, schemas (manifest)
  ├── graph.mjs nodes + edges from START to END
  ├── nodes/
  │ ├── index.mjs barrel export
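The `workflow.json` line above names the manifest's four fields. A hypothetical minimal manifest — the `name` and `entryClass` values are invented, `triggers: { api: true }` mirrors the default the CLI assumes when the manifest is missing, and the `schemas` shape isn't shown in this diff, so it's left empty:

```json
{
  "name": "ticket-triage",
  "entryClass": "TicketTriageWorkflow",
  "triggers": { "api": true },
  "schemas": {}
}
```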
@@ -33,19 +33,42 @@ Each node has `async run(ctx)` where `ctx` provides:
  Common dev loop:
  ```
  zibby workflow new <name> # scaffold
- zibby workflow run <name> # one-shot local run (mirrors trigger flags)
+ zibby workflow run <name> # one-shot local run (preferred for the dev loop)
  zibby workflow run <name> -p k=v # with input
  zibby workflow deploy <name> # build + push to Zibby Cloud
  zibby workflow trigger <uuid> # invoke the cloud workflow
  zibby workflow logs <uuid> -t # tail live logs (docker-compose-style)
- zibby workflow list # find UUIDs and statuses
+ zibby workflow list # find UUIDs and statuses (local + cloud)
+ zibby workflow download <uuid> # pull the cloud workflow source back to .zibby/workflows/
  zibby workflow delete <uuid> # remove a deployed workflow
  ```

- `run` and `trigger` accept the same input flags (`-p key=value`, `--input '<json>'`, `--input-file path.json`) flip the verb to switch between local and cloud. `workflow start` exists too but is the long-lived dev server (Studio integration); for plain CLI iteration prefer `run`.
+ **`run` vs `start`.** `workflow run` is the one-shot CLI iteration command: load the graph, execute it once, print the result, exit. That's the right primitive for the dev loop and for CI/CD. `workflow start` is a *long-lived* local dev server (default port 3848) used by Studio for replay/debug; for plain CLI iteration always prefer `run`.
+
+ `run` and `trigger` accept the same input flag surface — flip the verb to switch between local and cloud (see the example after this list):
+ - `-p key=value` (repeatable) — highest precedence
+ - `--input '<json>'` — JSON string
+ - `--input-file path.json` — JSON file, lowest precedence
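Illustrating that shared flag surface — same flags, verb flipped between local and cloud; the values are made up and the UUID comes from `zibby workflow list`:

```
zibby workflow run ticket-triage --input-file base.json --input '{"env":"staging"}' -p ticketId=ENG-1234
zibby workflow trigger <uuid> --input-file base.json --input '{"env":"staging"}' -p ticketId=ENG-1234
```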

  Static outbound IPs (for customers behind firewalls): see `--dedicated-ip` flag on `deploy`.

+ #### Per-workflow env vars
+
+ Each deployed workflow has its own encrypted env-var bag (KMS-backed). Vars get injected into the Fargate task at trigger time, and **workflow env wins over project secrets on conflict**. Use this for per-pipeline credentials (different `ANTHROPIC_API_KEY` per workflow, a workflow-only `DATABASE_URL`, etc.).
+
+ ```
+ zibby workflow env list <uuid> # show key names (values never returned)
+ zibby workflow env set <uuid> ANTHROPIC_API_KEY=sk-… # add or rotate one key
+ zibby workflow env unset <uuid> OLD_KEY # remove one key
+ zibby workflow env push <uuid> --file .env [--file .env.prod] # bulk replace from .env files
+ ```
+
+ Fast path on first deploy — sync a `.env` in one shot:
+ ```
+ zibby workflow deploy my-pipeline --env .env [--env .env.prod]
+ ```
+ The CLI deploys, then runs `push` against the freshly-minted UUID.
+
  ---

  ### Tests
@@ -55,6 +78,7 @@ Files:
  test-specs/ source `.txt` specs (paths.specs)
  tests/ generated `.spec.js` (paths.generated; regenerated each run)
  test-results/ videos, traces, JSON results per run
+ .zibby/memory/.dolt/ local test memory DB (selectors, page model, history)
  playwright.config.js
  ```

@@ -64,6 +88,8 @@ Common dev loop:
  ```
  zibby test test-specs/<name>.txt # run a spec
  zibby test "go to example.com and ..." # inline, no file
+ zibby test <spec> --agent claude # override the configured agent (claude|cursor|codex|gemini)
+ zibby test --sources <ids> --execution <id> # cloud test cases (run from a stored execution)
  zibby generate -t ENG-1234 # generate specs from a Jira ticket
  zibby video # organize videos next to spec files
  zibby upload <spec-path> # upload existing artifacts to cloud
@@ -71,6 +97,39 @@ zibby upload <spec-path> # upload existing artifacts to cloud

  When debugging a failed test, watch the video at `test-results/<spec>/video.webm` — that's almost always faster than reading logs.

+ #### Test memory
+
+ `.zibby/memory/.dolt/` is a **local-first Dolt SQL database** (Git-for-data) that learns from every test run — selectors that worked, page-element fingerprints, navigation transitions, timing quirks, recorded insights. The runner auto-pulls before a run and auto-pushes after a passing run. Keying is **per-domain** (not per-spec), so any spec that hits `myapp.com` benefits from selectors learned by every other spec on the same domain.
+
+ When `zibby test` runs and `.zibby/memory/.dolt/` exists, the agent gets 5 MCP tools auto-exposed:
+
+ - `memory_get_test_history` — query recent runs (pass/fail/timing); filter by spec-path substring
+ - `memory_get_selectors` — query known selectors per page with stability metrics (success/fail counts)
+ - `memory_get_page_model` — query page structure (elements, roles, accessible names, best selector)
+ - `memory_get_navigation` — known page-to-page transitions (what click/submit produced what URL)
+ - `memory_save_insight` — save an observation. Categories: `selector_tip | timing | navigation | workaround | flaky | general`
+
+ > **AFTER completing the test, you MUST call `memory_save_insight` at least once.** Save any useful finding: reliable selectors, timing quirks, navigation patterns, workarounds. Be specific — future runs will read your insights. (Lifted from the memory skill's `promptFragment`.)
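What a saved insight might look like — the tool's argument schema isn't shown in this diff, so the field names below are hypothetical; only the category values are documented above:

```json
{
  "category": "selector_tip",
  "insight": "Sign-in is reliably reachable via role=button name='Sign in'; the #login-btn id changes across deploys."
}
```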
+
+ Local CLI:
+ ```
+ zibby memory stats # row counts, last commit, per-spec breakdown
+ zibby memory cost # real LLM token spend per spec / per domain
+ zibby memory compact # prune old runs + Dolt GC (--max-runs 50, --max-age 90d)
+ zibby memory reset -f # wipe the DB
+ ```
+
+ **Team sync.** Memory is local-first; opt into a shared remote so teammates' learnings flow back to you:
+
+ ```
+ zibby memory remote add aws://my-bucket/team/proj/main # BYO S3 / GCS / DoltHub / file:///
+ zibby memory remote use --hosted # OR: Zibby-managed S3 (signed-in only)
+ zibby memory pull # manual override (auto on test start)
+ zibby memory push # manual override (auto on passing test)
+ ```
+
+ Set `memorySync.remote` in `.zibby.config.mjs` (`'hosted'` or an `aws://...` URL) and `zibby init` auto-wires the remote — teammates clone the repo, run `zibby init`, and they're plugged into the same memory.
+
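A sketch of that wiring — `.zibby.config.mjs` is an ES module with a default export (per the CLI's config loader in the start.js hunk above), and only the `memorySync.remote` key is documented in this diff; everything else is omitted:

```js
// .zibby.config.mjs — hypothetical minimal memory-sync config.
export default {
  memorySync: {
    remote: 'hosted', // or a BYO URL, e.g. 'aws://my-bucket/team/proj/main'
  },
};
```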
  ---

  ### How to invoke the CLI
@@ -97,6 +156,7 @@ Don't waste time on `npx @zibby/cli` — not always published.
  - Deploying & bundling: https://docs.zibby.app/workflows/deploying
  - Triggering & inputs: https://docs.zibby.app/workflows/triggers
  - Live log streaming: https://docs.zibby.app/workflows/logs
+ - Per-workflow env vars: https://docs.zibby.app/cloud/env-vars
  - Egress proxy / static IPs: https://docs.zibby.app/workflows/egress
  - Security & secrets: https://docs.zibby.app/workflows/security
  - Debugging: https://docs.zibby.app/workflows/debugging
package/dist/templates/zibby-workflow-claude/claude/agents/zibby-test-author.md CHANGED
@@ -1,4 +1,4 @@
- <!-- zibby-template-version: 1 -->
+ <!-- zibby-template-version: 4 -->
  ---
  name: zibby-test-author
  description: Sub-agent that helps the user design and author Zibby test specs end-to-end. Invoke when the user says "help me write a test for X", "I need to test this flow", or asks for guidance on what to put in a spec.
@@ -53,12 +53,27 @@ A spec is unambiguous English with one action per line. See `/zibby-test-write`

  5. **Stop when the spec exercises the goal end-to-end.** Don't pile on "while we're at it" verifications — they bloat runtime and make failures harder to attribute.

+ ## Test memory (`.zibby/memory/.dolt/`)
+
+ When `zibby test` runs and `.zibby/memory/.dolt/` exists (initialized by `zibby memory init` or auto-created on first run with `-m` / a `memorySync.remote` config), the agent gets 5 MCP tools auto-exposed. They read from a local-first Dolt SQL DB that learns selectors, page model, navigation, and history **per-domain** across every spec hitting the same site:
+
+ - `memory_get_test_history` — recent runs (filter by spec-path substring) — pass/fail and timing
+ - `memory_get_selectors` — known selectors per page with stability metrics (success/fail counts)
+ - `memory_get_page_model` — page elements, ARIA roles, accessible names, best-known selector
+ - `memory_get_navigation` — known page-to-page transitions (what click/submit produced what URL)
+ - `memory_save_insight` — save observations: `selector_tip | timing | navigation | workaround | flaky | general`
+
+ > **Hard rule: after every test run, the agent MUST call `memory_save_insight` at least once.** Save reliable selectors, timing quirks, navigation patterns, workarounds — be specific. Future runs read these. (This is in the memory skill's prompt fragment; surface it to the user if they ask why their tests keep getting smarter.)
+
+ Team sync (optional): a project may have `memorySync.remote: 'hosted'` (Zibby-managed S3, signed-in only) or `'aws://...' / 'gs://...'` (BYO) configured in `.zibby.config.mjs`. If set, the runner auto-pulls before each run and auto-pushes after passing runs. Manual override: `zibby memory pull` / `zibby memory push`.
+
  ## Hard rules

  - **Never recommend `--headless` for first runs.** Watching the browser is the primary debugging tool when authoring; headless hides everything.
  - **Never recommend disabling video.** Videos are 99% of post-mortem signal; they're cheap.
  - **Don't write CSS selectors into specs.** Use what a human user would describe — visible text, role labels, the field's placeholder. Selectors belong in generated `.spec.js`, not the source.
  - **Don't suggest `npx playwright test` directly** to bypass Zibby for "speed". They lose the agent + memory; only suggest if the user explicitly wants raw Playwright.
+ - **Always call `memory_save_insight` at the end of a test run.** This is non-negotiable — without it, memory degrades to the seeded baseline and stops compounding.

  ## Reference

package/dist/templates/zibby-workflow-claude/claude/agents/zibby-workflow-builder.md CHANGED
@@ -1,4 +1,4 @@
- <!-- zibby-template-version: 2 -->
+ <!-- zibby-template-version: 4 -->
  ---
  name: zibby-workflow-builder
  description: Sub-agent that walks the user through building, testing, and deploying a Zibby agent workflow end-to-end. Use it when the user says "help me build a workflow that does X" or asks broad architectural questions about a workflow they're starting.
@@ -49,7 +49,7 @@ The return value of `run()` is the node's output, available to downstream nodes

  4. **Generate the scaffold** if they don't have one yet:
  ```
- zibby workflow generate <slug>
+ zibby workflow new <slug>
  ```
  Then add nodes one at a time using the `/zibby-add-node` command.
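For reference while adding nodes: the only node contract this template documents is `async run(ctx)`, whose return value becomes the node's output for downstream nodes. A hedged sketch — the export shape and field names are illustrative, not the scaffold's actual layout:

```js
// nodes/fetch-ticket.mjs — hypothetical node body.
export async function run(ctx) {
  const { ticketId } = ctx.input; // ctx also carries cwd, runId, config
  // ...do the node's work here...
  return { ticketId, triaged: true }; // flows to downstream nodes
}
```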

@@ -64,6 +64,26 @@ The return value of `run()` is the node's output, available to downstream nodes

  6. **Stop when the workflow does the goal end-to-end.** Don't pile on speculative nodes.

+ ## Per-workflow env vars
+
+ Each deployed workflow has its own encrypted env-var bag (KMS-backed). Workflow env wins over project secrets on conflict.
+
+ - `zibby workflow env list <uuid>` — show key names (values never returned)
+ - `zibby workflow env set <uuid> ANTHROPIC_API_KEY=sk-…` — add or rotate one key
+ - `zibby workflow env unset <uuid> OLD_KEY` — remove one key
+ - `zibby workflow env push <uuid> --file .env [--file .env.prod]` — bulk replace from .env files (later files override)
+ - `zibby workflow deploy <slug> --env .env` — fast path: deploy + auto-`push` of .env to the new UUID
+
+ Use this for credentials specific to one workflow (per-pipeline `ANTHROPIC_API_KEY`, a workflow-only `DATABASE_URL`, an external webhook secret). Project-wide secrets stay on the project record.
+
+ ## Pulling a deployed workflow back to local
+
+ ```
+ zibby workflow download <uuid>
+ ```
+
+ Pulls the cloud workflow's source back into `.zibby/workflows/<name>/`. Useful when collaborators need the source from cloud (e.g. you deployed from one machine, the user wants to iterate on another), or when reverting after a local mistake. UUIDs come from `zibby workflow list`.
+
  ## Hard rules

  - **Never recommend `--force` flags or skipping checks** to make a deploy go faster. Build problems are signal.
package/dist/templates/zibby-workflow-claude/claude/commands/zibby-add-node.md CHANGED
@@ -1,4 +1,4 @@
- <!-- zibby-template-version: 2 -->
+ <!-- zibby-template-version: 4 -->
  # /zibby-add-node — scaffold a new node in a Zibby workflow

  You are helping the user add a new **node** to one of their Zibby agent workflows.
package/dist/templates/zibby-workflow-claude/claude/commands/zibby-debug.md CHANGED
@@ -1,4 +1,4 @@
- <!-- zibby-template-version: 1 -->
+ <!-- zibby-template-version: 4 -->
  # /zibby-debug — diagnose a failing or stuck Zibby workflow

  You are helping the user debug a workflow that didn't behave as expected.
package/dist/templates/zibby-workflow-claude/claude/commands/zibby-delete.md CHANGED
@@ -1,4 +1,4 @@
- <!-- zibby-template-version: 1 -->
+ <!-- zibby-template-version: 4 -->
  # /zibby-delete — delete a deployed Zibby workflow

  You are helping the user remove a workflow from Zibby Cloud.
package/dist/templates/zibby-workflow-claude/claude/commands/zibby-deploy.md CHANGED
@@ -1,4 +1,4 @@
- <!-- zibby-template-version: 1 -->
+ <!-- zibby-template-version: 4 -->
  # /zibby-deploy — deploy a Zibby workflow to the cloud

  You are helping the user deploy a workflow they've been building locally.
@@ -47,31 +47,41 @@ Canonical docs: **https://docs.zibby.app/workflows/deploying**

  ## Optional flags worth knowing

+ `zibby workflow deploy` accepts:
  - `--project <id>` — skip the interactive project picker
  - `--api-key <key>` — use a PAT instead of the session token (for CI)
+ - `--env <path>` — sync a `.env` file into per-workflow env vars after deploy. Repeatable; later files override.
  - `--verbose` — print raw CodeBuild output during the build (helpful for debugging build failures)
- - `--dedicated-ip <action>` — opt this workflow into the dedicated egress addon (static outbound IP). See `/zibby-static-ip` for setup.

- ## Static outbound IP (dedicated egress) at deploy time
+ ### Seeding per-workflow env on first deploy

- If the user's workflow needs to call APIs that require IP allowlisting (corporate GitHub, GitLab Enterprise, paranoid SaaS firewalls), the workflow needs the **dedicated egress IP** addon enabled on their account, AND the workflow must opt in.
+ If the workflow needs its own `ANTHROPIC_API_KEY`, `DATABASE_URL`, etc., put them in a `.env` and pass `--env`:

- Three flags map to three things:
+ ```
+ zibby workflow deploy <name> --env .env
+ zibby workflow deploy <name> --env .env --env .env.prod # later files win
+ ```
+
+ After deploy, manage them surgically with `zibby workflow env set/unset/list/push <uuid>`. See `/zibby-list` to recover the UUID; full guide at https://docs.zibby.app/cloud/env-vars.
+
+ ## Static outbound IP (dedicated egress)
+
+ If the user's workflow needs to call APIs that require IP allowlisting (corporate GitHub, GitLab Enterprise, paranoid SaaS firewalls), the workflow needs the **dedicated egress IP** addon. The flag lives on the legacy alias `zibby deploy` (NOT `zibby workflow deploy`):

  | Flag | What it does |
  |------|-------------|
- | `--dedicated-ip status` | Show current addon state for the account (active / inactive / billing) |
- | `--dedicated-ip enable` | Enable the addon on the account (Pro subscription required, ~$50/mo). One-time per account. |
- | `--dedicated-ip use` | Mark THIS workflow as using the static egress IP (per-workflow opt-in, after `enable`) |
- | `--dedicated-ip unuse` | Stop routing this workflow through the static IP |
- | `--dedicated-ip disable` | Disable the addon for the whole account |
+ | `zibby deploy <name> --dedicated-ip status` | Show current addon state for the account (active / inactive / billing) |
+ | `zibby deploy <name> --dedicated-ip enable` | Enable the addon on the account (Pro subscription required, ~$50/mo). One-time per account. |
+ | `zibby deploy <name> --dedicated-ip use` | Mark THIS workflow as using the static egress IP (per-workflow opt-in, after `enable`) |
+ | `zibby deploy <name> --dedicated-ip unuse` | Stop routing this workflow through the static IP |
+ | `zibby deploy <name> --dedicated-ip disable` | Disable the addon for the whole account |

  Typical first-time flow when the user says "I need a static outbound IP":
- 1. `zibby workflow deploy <name> --dedicated-ip status` — check whether they have it
- 2. If inactive → `zibby workflow deploy <name> --dedicated-ip enable` — enables the account-wide addon (interactive billing prompt; prerequisite Pro subscription)
- 3. `zibby workflow deploy <name> --dedicated-ip use` — opts this specific workflow in
+ 1. `zibby deploy <name> --dedicated-ip status` — check whether they have it
+ 2. If inactive → `zibby deploy <name> --dedicated-ip enable` — enables the account-wide addon (interactive billing prompt; prerequisite Pro subscription)
+ 3. `zibby deploy <name> --dedicated-ip use` — opts this specific workflow in
  4. Regular `zibby workflow deploy <name>` from now on uses the static IP

  After `--dedicated-ip use`, every node in this workflow gets its outbound HTTP routed through the egress proxy, and `process.env.HTTP_PROXY` / `HTTPS_PROXY` are set in the sandbox automatically. Their static IPs are visible to customers via `https://docs.zibby.app/workflows/egress`.

- **Don't** run `--dedicated-ip enable` without confirming with the user — it has billing impact ($50/mo addon). Always confirm.
+ **Don't** run `--dedicated-ip enable` without confirming with the user — it has billing impact ($50/mo addon). Always confirm. See `/zibby-static-ip` for the deeper walkthrough.
package/dist/templates/zibby-workflow-claude/claude/commands/zibby-list.md CHANGED
@@ -1,9 +1,9 @@
- <!-- zibby-template-version: 1 -->
+ <!-- zibby-template-version: 4 -->
  # /zibby-list — list workflows (local + cloud) with their UUIDs and statuses

  You are helping the user see what workflows exist — locally scaffolded and remotely deployed.

- Canonical docs: **https://docs.zibby.app/workflows/listing**
+ Canonical docs: **https://docs.zibby.app/cli-reference#workflow-list**

  ## Steps

package/dist/templates/zibby-workflow-claude/claude/commands/zibby-memory-cost.md ADDED
@@ -0,0 +1,39 @@
+ <!-- zibby-template-version: 4 -->
+ # /zibby-memory-cost — show real LLM token spend across past test runs
+
+ You are helping the user see how many input/output/cache tokens their tests have actually burned, broken down per spec and per domain. This is real measured spend (read off run records in `.zibby/memory/.dolt/`), not an estimate.
+
+ Canonical docs: **https://docs.zibby.app/tests/memory**
+
+ ## What the command shows
+
+ ```
+ Bash(zibby memory cost)
+ ```
+
+ Per-spec and per-domain rollup of:
+ - Input tokens
+ - Output tokens
+ - Cache hit / cache write tokens (when the agent supports prompt caching)
+ - Estimated $ cost (uses current public model pricing)
+ - Recent-runs trend, so you can see if a spec is getting cheaper or more expensive over time
+
+ The numbers are pulled from `test_runs` rows in the Dolt DB — every test run records the agent's actual usage on completion.
+
+ ## When to invoke
+
+ - User asks "how much are my tests costing me?" or "which spec is the expensive one?"
+ - After enabling prompt caching to confirm cache hits are landing
+ - When deciding whether to swap to a cheaper agent on hot specs (`--agent` per run)
+ - When triaging a regression in test runtime — high token counts often correlate with the agent retrying
+
+ ## Caveats
+
+ - **Only counts what's in local memory.** Runs on machines that haven't pulled from the team remote won't appear. Run `/zibby-memory-pull` first if you want the full team picture.
+ - **Pricing is informational.** Public API pricing changes; treat the $ column as a guide, not a bill. The token counts themselves are exact.
+ - **Empty if you've never run a test with memory enabled.** Confirm the runs are in there with `/zibby-memory-stats` first.
+
+ ## Related
+
+ - `/zibby-memory-stats` — what's in the DB at all
+ - `/zibby-memory-pull` — refresh from team remote before reading cost
package/dist/templates/zibby-workflow-claude/claude/commands/zibby-memory-pull.md ADDED
@@ -0,0 +1,47 @@
+ <!-- zibby-template-version: 4 -->
+ # /zibby-memory-pull — pull the team's latest test memory from the configured remote
+
+ You are helping the user fetch the team's latest learnings (selectors, page model, insights, run history) from the project's configured memory remote into local `.zibby/memory/.dolt/`.
+
+ Canonical docs: **https://docs.zibby.app/tests/memory**
+
+ ## When this is needed (vs. just runs automatically)
+
+ `zibby test` auto-pulls before every run when a remote is configured, and auto-pushes after every passing run. So most of the time the user doesn't need to invoke pull manually. Manual pull is for:
+
+ - Fresh clone of the repo — first sync to seed `.zibby/memory/.dolt/` from the remote
+ - After a teammate landed a big batch of new learnings and the user wants them before running anything
+ - Inspecting team memory (`/zibby-memory-stats`, `/zibby-memory-cost`) without running a test
+ - Reconciling after a manual conflict in the Dolt DB
+
+ ## How to run
+
+ ```
+ Bash(zibby memory pull)
+ ```
+
+ The CLI fetches from whatever remote `zibby memory remote info` reports — BYO S3/GCS/DoltHub URL or the Zibby-hosted backend. No flags.
+
+ ## Pre-flight: is a remote configured?
+
+ Before suggesting `pull`, check:
+
+ ```
+ Bash(zibby memory remote info)
+ ```
+
+ - **No remote configured** → pull errors out. Tell the user to either:
+   - Add their own: `zibby memory remote add aws://my-bucket/team/proj/main`
+   - Use the hosted one: `zibby memory remote use --hosted` (requires `zibby login`)
+   - See `/zibby-memory-remote-use-hosted` for the hosted path.
+ - **Hosted remote, signed out** → `zibby login` first.
+
+ ## After pulling
+
+ Confirm the pull landed with `/zibby-memory-stats` — row counts should jump (selectors, runs, insights) compared to before.
+
+ ## Related
+
+ - `zibby memory push` — manual push (auto on passing test, but sometimes you want to share now)
+ - `/zibby-memory-stats` — verify what came in
+ - `/zibby-memory-remote-use-hosted` — switch to the Zibby-managed S3 backend
package/dist/templates/zibby-workflow-claude/claude/commands/zibby-memory-remote-use-hosted.md ADDED
@@ -0,0 +1,61 @@
+ <!-- zibby-template-version: 4 -->
+ # /zibby-memory-remote-use-hosted — switch this project's memory remote to Zibby-managed S3
+
+ You are helping the user point their `.zibby/memory/.dolt/` at Zibby's hosted S3 backend, instead of running their own S3 bucket / GCS / DoltHub repo.
+
+ Canonical docs: **https://docs.zibby.app/tests/memory**
+
+ ## What this does
+
+ ```
+ Bash(zibby memory remote use --hosted)
+ ```
+
+ Allocates a tenant-scoped prefix on Zibby-managed S3 for this project (keyed on the projectId in `.zibby.config.mjs`) and writes that as the local Dolt remote. After this, every `zibby test` run auto-pulls before and auto-pushes after — same as a BYO remote, just without the bucket plumbing.
+
+ ## Prerequisite: signed in
+
+ Hosted remote is **signed-in users only**. Verify:
+
+ ```
+ Bash(zibby status)
+ ```
+
+ If not signed in, run `zibby login` first. The CLI uses the saved session to derive the tenant prefix; it won't fall back to anonymous.
+
+ ## When to use hosted vs BYO
+
+ | | Hosted (`--hosted`) | BYO (`zibby memory remote add aws://...`) |
+ |---|---|---|
+ | Setup time | Zero — `--hosted` and you're done | Provision an S3 bucket, IAM, optional KMS |
+ | Who can read | Everyone with project access on Zibby | Whoever you grant in IAM |
+ | Where data lives | Zibby-managed AWS account | Your account |
+ | Compliance / data-residency | Limited regions | Wherever you want |
+ | Cost | Included in plan | Your S3 bill |
+
+ If the user has any data-residency requirement or a regulated workload, prefer BYO. Otherwise hosted is the path of least resistance.
+
+ ## Switching from BYO to hosted
+
+ `zibby memory remote use --hosted` overwrites the existing remote. If they had a BYO remote and might want to keep its history, run `zibby memory push` against the old remote first so nothing's lost — then switch.
+
+ ## After switching
+
+ 1. `zibby memory pull` — seed `.zibby/memory/.dolt/` from the hosted prefix (no-op the very first time per project)
+ 2. `/zibby-memory-stats` — confirm
+ 3. Commit `.zibby.config.mjs` if you set `memorySync.remote: 'hosted'` so teammates auto-wire on next `zibby init`
+
+ ## Reverting
+
+ ```
+ Bash(zibby memory remote remove)
+ ```
+
+ Drops the remote — memory becomes local-only again. The data on Zibby's S3 isn't deleted (it's still tenant-scoped), but nothing pushes or pulls until a new remote is configured.
+
+ ## Related
+
+ - `/zibby-memory-pull` — manual pull (auto on test start)
+ - `/zibby-memory-stats` — verify what's in the local DB
+ - `zibby memory remote info` — show current remote config
+ - `zibby memory remote add <url>` — BYO remote (S3/GCS/DoltHub/file:///)
package/dist/templates/zibby-workflow-claude/claude/commands/zibby-memory-stats.md ADDED
@@ -0,0 +1,38 @@
+ <!-- zibby-template-version: 4 -->
+ # /zibby-memory-stats — inspect the local test memory database
+
+ You are helping the user see what's in their `.zibby/memory/.dolt/` test-memory DB — row counts per table, last commit, and per-spec breakdown.
+
+ Canonical docs: **https://docs.zibby.app/tests/memory**
+
+ ## What the command shows
+
+ ```
+ Bash(zibby memory stats)
+ ```
+
+ Prints a summary of the local Dolt database:
+ - **Test runs** — total runs recorded, pass/fail split, last run timestamp
+ - **Selectors** — total cached selectors, top pages by selector count
+ - **Page model** — pages mapped, total elements
+ - **Navigation** — known transitions
+ - **Insights** — count by category (`selector_tip | timing | navigation | workaround | flaky | general`)
+ - **Dolt status** — current branch, last commit hash, uncommitted changes
+
+ ## When to invoke
+
+ - User asks "what does Zibby know about my app?" or "show me what's in test memory"
+ - After running a few tests, to confirm the agent is actually persisting learnings
+ - Before a `zibby memory compact` to see how much there is to prune
+ - Before a `zibby memory remote add` to know what's about to ship to the team
+
+ ## Empty database?
+
+ If the user just ran `zibby memory init` (or it auto-initialized on first `zibby test`), most counts will be 0. That's fine — selectors and page model populate after the first successful run. Suggest running a test first.
+
+ ## Related commands
+
+ - `/zibby-memory-cost` — real LLM token spend per spec / per domain
+ - `/zibby-memory-pull` — pull team's latest learnings from the configured remote
+ - `zibby memory compact` — prune old runs (`--max-runs N`, `--max-age <days>`)
+ - `zibby memory reset -f` — wipe the DB (destructive — confirm first)