@zibby/cli 0.1.87 → 0.1.95
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +94 -848
- package/dist/bin/zibby.js +47 -2
- package/dist/commands/chat.js +1 -1
- package/dist/commands/workflow.js +19 -19
- package/dist/commands/workflows/agent-helpers.js +18 -0
- package/dist/commands/workflows/deploy-helpers.js +1 -1
- package/dist/commands/workflows/deploy.js +49 -26
- package/dist/commands/workflows/env-helpers.js +2 -0
- package/dist/commands/workflows/env.js +39 -0
- package/dist/commands/workflows/generate.js +48 -31
- package/dist/commands/workflows/input-helpers.js +7 -0
- package/dist/commands/workflows/list.js +2 -2
- package/dist/commands/workflows/logs.js +33 -33
- package/dist/commands/workflows/run-helpers.js +2 -0
- package/dist/commands/workflows/run-local.js +19 -0
- package/dist/commands/workflows/run.js +5 -7
- package/dist/commands/workflows/start.js +11 -11
- package/dist/commands/workflows/trigger.js +9 -9
- package/dist/package.json +4 -4
- package/dist/templates/zibby-workflow-claude/agents-md-block.md +113 -0
- package/dist/templates/zibby-workflow-claude/claude/agents/zibby-test-author.md +72 -0
- package/dist/templates/zibby-workflow-claude/claude/agents/zibby-workflow-builder.md +81 -0
- package/dist/templates/zibby-workflow-claude/claude/commands/zibby-add-node.md +75 -0
- package/dist/templates/zibby-workflow-claude/claude/commands/zibby-debug.md +67 -0
- package/dist/templates/zibby-workflow-claude/claude/commands/zibby-delete.md +37 -0
- package/dist/templates/zibby-workflow-claude/claude/commands/zibby-deploy.md +77 -0
- package/dist/templates/zibby-workflow-claude/claude/commands/zibby-list.md +30 -0
- package/dist/templates/zibby-workflow-claude/claude/commands/zibby-static-ip.md +68 -0
- package/dist/templates/zibby-workflow-claude/claude/commands/zibby-tail.md +53 -0
- package/dist/templates/zibby-workflow-claude/claude/commands/zibby-test-debug.md +59 -0
- package/dist/templates/zibby-workflow-claude/claude/commands/zibby-test-generate.md +39 -0
- package/dist/templates/zibby-workflow-claude/claude/commands/zibby-test-run.md +48 -0
- package/dist/templates/zibby-workflow-claude/claude/commands/zibby-test-write.md +46 -0
- package/dist/templates/zibby-workflow-claude/claude/commands/zibby-trigger.md +52 -0
- package/dist/templates/zibby-workflow-claude/claude/settings.json +10 -0
- package/dist/templates/zibby-workflow-claude/cursor/rules/zibby-workflows.mdc +56 -0
- package/dist/templates/zibby-workflow-claude/manifest.json +43 -0
- package/package.json +4 -4
|
@@ -1,22 +1,22 @@
|
|
|
1
|
-
import h from"ora";import{select as
|
|
1
|
+
import h from"ora";import{select as v}from"@inquirer/prompts";import{readFileSync as x,existsSync as N}from"fs";import{homedir as R}from"os";import{join as T}from"path";var u={local:{name:"Local Development",apiUrl:"http://localhost:3001",accountApiUrl:"http://localhost:3001",frontendUrl:"http://localhost:3000",description:"Local backend running on port 3001"},prod:{name:"Production",apiUrl:process.env.ZIBBY_PROD_API_URL||"https://api-prod.zibby.app",accountApiUrl:process.env.ZIBBY_PROD_ACCOUNT_API_URL||"https://account-api-prod.zibby.app",frontendUrl:process.env.ZIBBY_PROD_FRONTEND_URL||"https://studio.zibby.app",description:"Production environment"}};function g(){let o;if(process.env.ZIBBY_API_URL)o=process.env.ZIBBY_API_URL;else{let e=process.env.ZIBBY_ENV||"prod";u[e]?o=u[e].apiUrl:o=u.prod.apiUrl}try{let e=new URL(o);return e.protocol!=="http:"&&e.protocol!=="https:"?(console.error(`\u26A0\uFE0F Invalid API URL protocol: ${e.protocol} (only http/https allowed)`),u.prod.apiUrl):o}catch{return console.error(`\u26A0\uFE0F Invalid API URL: ${o}`),u.prod.apiUrl}}var b=/^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/i;function y(o){return o?b.test(o)?{ok:!0}:{ok:!1,error:`'${o}' is not a UUID. Cloud workflows are identified by UUID only. Run \`zibby workflow list\` to find yours, or run \`zibby workflow trigger\` with no argument for interactive selection.`}:{ok:!0}}import{existsSync as j,readFileSync as _}from"fs";import{resolve as B}from"path";function I(o){return o==="true"?!0:o==="false"?!1:o==="null"?null:o!==""&&!isNaN(Number(o))?Number(o):o}function $(o){let e={};for(let r of o||[]){let i=r.indexOf("=");if(i===-1){console.warn(` Warning: ignored param "${r}" \u2014 expected key=value format`);continue}let t=r.slice(0,i).trim(),n=I(r.slice(i+1)),l=t.split("."),c=e;for(let s=0;s<l.length-1;s++)(typeof c[l[s]]!="object"||c[l[s]]===null)&&(c[l[s]]={}),c=c[l[s]];c[l[l.length-1]]=n}return e}function E(o){let e=B(o);j(e)||(console.log(`
|
|
2
2
|
Error: --input-file not found: ${o}
|
|
3
|
-
`),process.exit(1));try{return JSON.parse(
|
|
3
|
+
`),process.exit(1));try{return JSON.parse(_(e,"utf-8"))}catch(r){console.log(`
|
|
4
4
|
Error: --input-file is not valid JSON: ${r.message}
|
|
5
|
-
`),process.exit(1)}}function
|
|
5
|
+
`),process.exit(1)}}function U(o){let e={};if(o.inputFile&&(e={...E(o.inputFile)}),o.input)try{e={...e,...JSON.parse(o.input)}}catch(r){console.log(`
|
|
6
6
|
Error: --input is not valid JSON`),console.log(` ${r.message}
|
|
7
|
-
`),process.exit(1)}return o.param?.length&&(e={...e
|
|
7
|
+
`),process.exit(1)}return o.param?.length&&(e={...e,...$(o.param)}),e}function A(){let o=T(R(),".zibby","config.json");if(N(o))try{let r=JSON.parse(x(o,"utf-8"));if(r.sessionToken)return r.sessionToken}catch{}let e=process.env.ZIBBY_API_KEY;if(e)return e;console.log(`
|
|
8
8
|
Not authenticated`),console.log(" Run: zibby login"),console.log(` OR set ZIBBY_API_KEY env var (for CI/CD)
|
|
9
|
-
`),process.exit(1)}async function
|
|
9
|
+
`),process.exit(1)}async function D(o){let e=g(),r=h("Fetching projects...").start();try{let i=await fetch(`${e}/projects`,{method:"GET",headers:{"Content-Type":"application/json",Authorization:`Bearer ${o}`}});i.ok||(r.fail("Failed to fetch projects"),process.exit(1));let t=await i.json();Array.isArray(t)||(t.projects?t=t.projects:t.data&&(t=t.data)),(!t||t.length===0)&&(r.fail("No projects found"),process.exit(1)),r.succeed(`Found ${t.length} project${t.length===1?"":"s"}`),console.log("");let n=t.map(l=>({name:`${l.name||"Unnamed"} (${l.projectId||l.id})`,value:l.projectId||l.id}));return await v({message:"Select a project:",choices:n})}catch(i){r.fail(`Error: ${i.message}`),process.exit(1)}}async function P(o,e){let r=g(),i=h("Fetching deployed workflows...").start();try{let t=["analysis","implementation","run_test"],n=[];for(let c of t){let s=await fetch(`${r}/projects/${o}/workflows/${c}`,{method:"GET",headers:{"Content-Type":"application/json",Authorization:`Bearer ${e}`}});if(s.ok){let a=await s.json();a.graph&&n.push({name:c,version:a.version||0,isDefault:a.isDefault!==!1})}}n.length===0&&(i.fail("No deployed workflows found for this project"),process.exit(1)),i.succeed(`Found ${n.length} deployed workflow${n.length===1?"":"s"}`),console.log("");let l=n.map(c=>({name:`${c.name} (v${c.version})${c.isDefault?" [default]":""}`,value:c.name}));return await v({message:"Select a workflow to trigger:",choices:l})}catch(t){i.fail(`Error: ${t.message}`),process.exit(1)}}async function M(o,e={}){let r=y(o);r.ok||(console.log(`
|
|
10
10
|
Error: ${r.error}
|
|
11
|
-
`),process.exit(1));let i=
|
|
11
|
+
`),process.exit(1));let i=A(),t=e.project||process.env.ZIBBY_PROJECT_ID,n;if(o){let s=g();try{let a=await fetch(`${s}/projects`,{method:"GET",headers:{"Content-Type":"application/json",Authorization:`Bearer ${i}`}});if(a.ok){let d=(await a.json()).projects||[];for(let p of d){let m=await fetch(`${s}/projects/${p.projectId}/workflows`,{method:"GET",headers:{"Content-Type":"application/json",Authorization:`Bearer ${i}`}});if(m.ok){let w=(await m.json()).find(k=>k.uuid===o);if(w){t=p.projectId,n=w.workflowType||w.name,console.log(`
|
|
12
12
|
\u2713 Found workflow "${n}" (UUID: ${o})
|
|
13
13
|
`);break}}}(!n||n===o)&&(console.log(`
|
|
14
14
|
Error: Workflow with UUID "${o}" not found`),console.log(` Check: zibby workflow list
|
|
15
15
|
`),process.exit(1))}}catch(a){console.log(`
|
|
16
16
|
Error looking up workflow UUID: ${a.message}
|
|
17
|
-
`),process.exit(1)}}t||(console.log(""),t=await
|
|
17
|
+
`),process.exit(1)}}t||(console.log(""),t=await D(i)),n||(console.log(""),n=await P(t,i));let l=U(e);if(console.log(`
|
|
18
18
|
Triggering Workflow
|
|
19
|
-
`),console.log(" ".padEnd(60,"-")),console.log(` Workflow: ${n}`),console.log(` Project: ${t}`),Object.keys(l).length>0){let s=JSON.stringify(l);console.log(` Input: ${s.length>60?`${s.substring(0,57)}...`:s}`)}e.idempotencyKey&&console.log(` Idempotency: ${e.idempotencyKey}`),console.log(" ".padEnd(60,"-")),console.log("");let c=h("Triggering workflow execution...").start();try{let s=
|
|
19
|
+
`),console.log(" ".padEnd(60,"-")),console.log(` Workflow: ${n}`),console.log(` Project: ${t}`),Object.keys(l).length>0){let s=JSON.stringify(l);console.log(` Input: ${s.length>60?`${s.substring(0,57)}...`:s}`)}e.idempotencyKey&&console.log(` Idempotency: ${e.idempotencyKey}`),console.log(" ".padEnd(60,"-")),console.log("");let c=h("Triggering workflow execution...").start();try{let s=g(),a={input:l};e.idempotencyKey&&(a.idempotencyKey=e.idempotencyKey);let f=await fetch(`${s}/projects/${t}/workflows/${n}/trigger`,{method:"POST",headers:{"Content-Type":"application/json",Authorization:`Bearer ${i}`},body:JSON.stringify(a)});if(!f.ok){let p=await f.json().catch(()=>({}));f.status===429&&(c.fail("Quota exceeded"),console.log(`
|
|
20
20
|
Your workflow execution quota has been exceeded`),p.quotaInfo&&(console.log(` Used: ${p.quotaInfo.used}/${p.quotaInfo.limit} executions`),console.log(` Plan: ${p.quotaInfo.planId}`),p.quotaInfo.periodEnd&&console.log(` Resets: ${new Date(p.quotaInfo.periodEnd).toLocaleDateString()}`)),console.log(""),process.exit(1)),c.fail("Trigger failed"),console.log(` Error: ${p.message||f.statusText}
|
|
21
21
|
`),process.exit(1)}let d=await f.json();c.succeed("Workflow triggered successfully"),console.log(""),console.log(" Job Details:"),console.log(` Job ID: ${d.jobId}`),console.log(` Status: ${d.status}`),console.log(` Version: ${d.version}`),console.log(` Triggered: ${new Date(d.triggeredAt).toLocaleString()}`),console.log(""),console.log(" Monitor execution:"),o?(console.log(` zibby workflow logs ${o}`),console.log(` zibby workflow logs ${o} -t`)):(console.log(` zibby workflow logs --workflow ${n} --project ${t}`),console.log(` zibby workflow logs --workflow ${n} --project ${t} -t`)),console.log("")}catch(s){c.fail("Trigger failed"),console.log(` Error: ${s.message}
|
|
22
|
-
`),process.exit(1)}}export{
|
|
22
|
+
`),process.exit(1)}}export{I as coerceValue,$ as parseParams,U as resolveInput,M as triggerWorkflowCommand};
|
package/dist/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@zibby/cli",
|
|
3
|
-
"version": "0.1.
|
|
3
|
+
"version": "0.1.95",
|
|
4
4
|
"description": "Zibby CLI - Test automation generator and runner",
|
|
5
5
|
"type": "module",
|
|
6
6
|
"bin": {
|
|
@@ -8,7 +8,7 @@
|
|
|
8
8
|
},
|
|
9
9
|
"scripts": {
|
|
10
10
|
"build": "node ../scripts/build.mjs --extra-dirs bin",
|
|
11
|
-
"test": "vitest run test/auth*.test.js test/two-layer-auth.test.js test/trigger-params.test.js test/trigger-helpers.test.js test/deploy-helpers.test.js test/cli-namespace-consistency.test.js test/cli-workflow-subcommands.test.js test/run-bundle-core-import.test.js test/start-respects-config.test.js test/sse-backoff.test.js test/sse-reconnect-loop.test.js",
|
|
11
|
+
"test": "vitest run test/auth*.test.js test/two-layer-auth.test.js test/trigger-params.test.js test/trigger-helpers.test.js test/deploy-helpers.test.js test/deploy-bundles-user-config.test.js test/run-loads-user-config.test.js test/env-helpers.test.js test/env-cli.test.js test/cli-namespace-consistency.test.js test/cli-workflow-subcommands.test.js test/run-bundle-core-import.test.js test/start-respects-config.test.js test/sse-backoff.test.js test/sse-reconnect-loop.test.js test/run-helpers.test.js test/sse-parser.test.js",
|
|
12
12
|
"test:unit": "vitest run src/",
|
|
13
13
|
"test:auth": "vitest run test/auth*.test.js test/two-layer-auth.test.js",
|
|
14
14
|
"lint": "eslint .",
|
|
@@ -33,8 +33,8 @@
|
|
|
33
33
|
},
|
|
34
34
|
"dependencies": {
|
|
35
35
|
"@aws-sdk/client-sqs": "^3.1038.0",
|
|
36
|
-
"@zibby/agent-workflow": "^0.
|
|
37
|
-
"@zibby/core": "^0.
|
|
36
|
+
"@zibby/agent-workflow": "^0.2.0",
|
|
37
|
+
"@zibby/core": "^0.2.0",
|
|
38
38
|
"@zibby/memory": "^0.1.5",
|
|
39
39
|
"@zibby/skills": "^0.1.11",
|
|
40
40
|
"adm-zip": "^0.5.17",
|
|
@@ -0,0 +1,113 @@
|
|
|
1
|
+
<!-- BEGIN zibby-workflows zibby-template-version: 3 -->
|
|
2
|
+
## Zibby
|
|
3
|
+
|
|
4
|
+
This project uses **Zibby** — there are two surfaces:
|
|
5
|
+
|
|
6
|
+
1. **Workflows** — graphs of AI-agent-driven steps that run inside an ECS Fargate sandbox in Zibby Cloud. Used for automation that needs an LLM in the loop (analyze tickets, draft replies, write code, etc.).
|
|
7
|
+
|
|
8
|
+
2. **Tests** — plain-language `.txt` specs that Zibby's runner converts to Playwright executions. Produces video + JSON results. Used for end-to-end UI testing where specs survive UI churn better than raw selector-based tests.
|
|
9
|
+
|
|
10
|
+
Both share `.zibby.config.mjs` at the project root.
|
|
11
|
+
|
|
12
|
+
---
|
|
13
|
+
|
|
14
|
+
### Workflows
|
|
15
|
+
|
|
16
|
+
Files:
|
|
17
|
+
```
|
|
18
|
+
<paths.workflows or .zibby/workflows>/<name>/
|
|
19
|
+
├── workflow.json name, entryClass, triggers, schemas
|
|
20
|
+
├── graph.mjs nodes + edges from START to END
|
|
21
|
+
├── nodes/
|
|
22
|
+
│ ├── index.mjs barrel export
|
|
23
|
+
│ └── *.mjs one node per file: { id, description, run(ctx) }
|
|
24
|
+
└── package.json deps; bundled at deploy time
|
|
25
|
+
```
|
|
26
|
+
|
|
27
|
+
Each node has `async run(ctx)` where `ctx` provides:
|
|
28
|
+
- `ctx.input` — outputs from upstream nodes
|
|
29
|
+
- `ctx.agent({ prompt, schema })` — call the configured LLM with structured output
|
|
30
|
+
- `ctx.shell(cmd)` — run shell in the sandbox (egress proxy enabled)
|
|
31
|
+
- `ctx.log(...)` — emit a log line (visible via `zibby workflow logs`)
|
|
32
|
+
|
|
33
|
+
Common dev loop:
|
|
34
|
+
```
|
|
35
|
+
zibby workflow new <name> # scaffold
|
|
36
|
+
zibby workflow run <name> # one-shot local run (mirrors trigger flags)
|
|
37
|
+
zibby workflow run <name> -p k=v # with input
|
|
38
|
+
zibby workflow deploy <name> # build + push to Zibby Cloud
|
|
39
|
+
zibby workflow trigger <uuid> # invoke the cloud workflow
|
|
40
|
+
zibby workflow logs <uuid> -t # tail live logs (docker-compose-style)
|
|
41
|
+
zibby workflow list # find UUIDs and statuses
|
|
42
|
+
zibby workflow delete <uuid> # remove a deployed workflow
|
|
43
|
+
```
|
|
44
|
+
|
|
45
|
+
`run` and `trigger` accept the same input flags (`-p key=value`, `--input '<json>'`, `--input-file path.json`) — flip the verb to switch between local and cloud. `workflow start` exists too but is the long-lived dev server (Studio integration); for plain CLI iteration prefer `run`.
|
|
46
|
+
|
|
47
|
+
Static outbound IPs (for customers behind firewalls): see `--dedicated-ip` flag on `deploy`.
|
|
48
|
+
|
|
49
|
+
---
|
|
50
|
+
|
|
51
|
+
### Tests
|
|
52
|
+
|
|
53
|
+
Files:
|
|
54
|
+
```
|
|
55
|
+
test-specs/ source `.txt` specs (paths.specs)
|
|
56
|
+
tests/ generated `.spec.js` (paths.generated; regenerated each run)
|
|
57
|
+
test-results/ videos, traces, JSON results per run
|
|
58
|
+
playwright.config.js
|
|
59
|
+
```
|
|
60
|
+
|
|
61
|
+
A spec is plain-language imperative English describing what to test. Zibby's runner reads the spec, drives the browser via MCP, generates Playwright, and produces a video.
|
|
62
|
+
|
|
63
|
+
Common dev loop:
|
|
64
|
+
```
|
|
65
|
+
zibby test test-specs/<name>.txt # run a spec
|
|
66
|
+
zibby test "go to example.com and ..." # inline, no file
|
|
67
|
+
zibby generate -t ENG-1234 # generate specs from a Jira ticket
|
|
68
|
+
zibby video # organize videos next to spec files
|
|
69
|
+
zibby upload <spec-path> # upload existing artifacts to cloud
|
|
70
|
+
```
|
|
71
|
+
|
|
72
|
+
When debugging a failed test, watch the video at `test-results/<spec>/video.webm` — that's almost always faster than reading logs.
|
|
73
|
+
|
|
74
|
+
---
|
|
75
|
+
|
|
76
|
+
### How to invoke the CLI
|
|
77
|
+
|
|
78
|
+
The `zibby` command might be on PATH (if installed globally via npm) OR not — depending on the user's setup. **If `zibby` returns "command not found", fall back to `./.zibby/bin/zibby`** — a project-local shim auto-generated by the scaffolder that routes to whichever CLI binary the user has. Always exists in this project.
|
|
79
|
+
|
|
80
|
+
```
|
|
81
|
+
# Try first:
|
|
82
|
+
zibby workflow list
|
|
83
|
+
|
|
84
|
+
# If "command not found":
|
|
85
|
+
./.zibby/bin/zibby workflow list
|
|
86
|
+
```
|
|
87
|
+
|
|
88
|
+
Don't waste time on `npx @zibby/cli` — not always published.
|
|
89
|
+
|
|
90
|
+
---
|
|
91
|
+
|
|
92
|
+
### Reference (always prefer canonical docs over these notes)
|
|
93
|
+
|
|
94
|
+
**Workflows**
|
|
95
|
+
- Concepts: https://docs.zibby.app/workflows
|
|
96
|
+
- Node SDK (ctx.*): https://docs.zibby.app/workflows/sdk
|
|
97
|
+
- Deploying & bundling: https://docs.zibby.app/workflows/deploying
|
|
98
|
+
- Triggering & inputs: https://docs.zibby.app/workflows/triggers
|
|
99
|
+
- Live log streaming: https://docs.zibby.app/workflows/logs
|
|
100
|
+
- Egress proxy / static IPs: https://docs.zibby.app/workflows/egress
|
|
101
|
+
- Security & secrets: https://docs.zibby.app/workflows/security
|
|
102
|
+
- Debugging: https://docs.zibby.app/workflows/debugging
|
|
103
|
+
|
|
104
|
+
**Tests**
|
|
105
|
+
- Spec format: https://docs.zibby.app/tests/specs
|
|
106
|
+
- Running (`zibby test`): https://docs.zibby.app/tests/running
|
|
107
|
+
- Generating from Jira: https://docs.zibby.app/tests/generating
|
|
108
|
+
- Test memory: https://docs.zibby.app/tests/memory
|
|
109
|
+
- Debugging: https://docs.zibby.app/tests/debugging
|
|
110
|
+
- MCP browser config: https://docs.zibby.app/tests/playwright-mcp
|
|
111
|
+
|
|
112
|
+
When in doubt about behavior, fetch the docs URL — these notes are a snapshot, the docs are kept current.
|
|
113
|
+
<!-- END zibby-workflows -->
|
|
@@ -0,0 +1,72 @@
|
|
|
1
|
+
<!-- zibby-template-version: 1 -->
|
|
2
|
+
---
|
|
3
|
+
name: zibby-test-author
|
|
4
|
+
description: Sub-agent that helps the user design and author Zibby test specs end-to-end. Invoke when the user says "help me write a test for X", "I need to test this flow", or asks for guidance on what to put in a spec.
|
|
5
|
+
---
|
|
6
|
+
|
|
7
|
+
You are an expert at authoring Zibby test specs and running them. The user has invoked you because they want guidance on testing a feature or flow.
|
|
8
|
+
|
|
9
|
+
## What you know
|
|
10
|
+
|
|
11
|
+
A **Zibby test spec** is a plain-language `.txt` file that Zibby's runner converts to a Playwright execution at runtime. The runner's AI agent (configured per-project in `.zibby.config.mjs`) reads the spec, navigates the browser via MCP, generates a Playwright script, and produces a video + JSON results.
|
|
12
|
+
|
|
13
|
+
It's the right tool when:
|
|
14
|
+
- The user wants tests that survive UI churn (specs are higher-level than CSS selectors)
|
|
15
|
+
- They have non-engineers writing test descriptions
|
|
16
|
+
- They want test memory across runs (Dolt-backed, so the agent learns the app over time)
|
|
17
|
+
|
|
18
|
+
It's NOT the right tool when:
|
|
19
|
+
- The user wants 1000s of micro-tests in a tight CI loop (Zibby runs are LLM-mediated; slower than raw Playwright)
|
|
20
|
+
- They have a fully-deterministic API testing need (use plain `pytest` or similar)
|
|
21
|
+
|
|
22
|
+
## Spec layout
|
|
23
|
+
|
|
24
|
+
```
|
|
25
|
+
<workflowsBasePath if any>/...
|
|
26
|
+
├── .zibby.config.mjs
|
|
27
|
+
├── test-specs/ ← spec source (paths.specs)
|
|
28
|
+
│ ├── login-happy-path.txt
|
|
29
|
+
│ ├── checkout-flow.txt
|
|
30
|
+
│ └── ...
|
|
31
|
+
├── tests/ ← Generated Playwright (paths.generated)
|
|
32
|
+
│ └── *.spec.js ← regenerated each run by default
|
|
33
|
+
├── test-results/ ← Videos, traces, JSON results per run
|
|
34
|
+
└── playwright.config.js
|
|
35
|
+
```
|
|
36
|
+
|
|
37
|
+
A spec is unambiguous English with one action per line. See `/zibby-test-write` for the format.
|
|
38
|
+
|
|
39
|
+
## Your job in this conversation
|
|
40
|
+
|
|
41
|
+
1. **Listen for the goal.** What user-facing behavior is being tested? What's the success criterion? Be skeptical of vague specs.
|
|
42
|
+
|
|
43
|
+
2. **Decompose into one user goal per spec.** Don't write a spec that does login + signup + checkout + admin in one file — that's four specs. Smaller specs = easier to debug, easier to localize regressions.
|
|
44
|
+
|
|
45
|
+
3. **Write the spec(s)** to `test-specs/<kebab-name>.txt` — concrete, one action per line, stable selectors (visible text, ARIA labels, not CSS classes).
|
|
46
|
+
|
|
47
|
+
4. **Run iteratively.** Author → run → watch the video → tighten ambiguous lines → re-run. Encourage:
|
|
48
|
+
```
|
|
49
|
+
zibby test test-specs/<name>.txt # run it
|
|
50
|
+
open test-results/<name>/video.webm # watch what the agent did
|
|
51
|
+
```
|
|
52
|
+
When the run fails, the video usually pinpoints the issue in 30 seconds.
|
|
53
|
+
|
|
54
|
+
5. **Stop when the spec exercises the goal end-to-end.** Don't pile on "while we're at it" verifications — they bloat runtime and make failures harder to attribute.
|
|
55
|
+
|
|
56
|
+
## Hard rules
|
|
57
|
+
|
|
58
|
+
- **Never recommend `--headless` for first runs.** Watching the browser is the primary debugging tool when authoring; headless hides everything.
|
|
59
|
+
- **Never recommend disabling video.** Videos are 99% of post-mortem signal; they're cheap.
|
|
60
|
+
- **Don't write CSS selectors into specs.** Use what a human user would describe — visible text, role labels, the field's placeholder. Selectors belong in generated `.spec.js`, not the source.
|
|
61
|
+
- **Don't suggest `npx playwright test` directly** to bypass Zibby for "speed". They lose the agent + memory; only suggest if the user explicitly wants raw Playwright.
|
|
62
|
+
|
|
63
|
+
## Reference
|
|
64
|
+
|
|
65
|
+
- Spec format and conventions: https://docs.zibby.app/tests/specs
|
|
66
|
+
- Running specs (`zibby test`): https://docs.zibby.app/tests/running
|
|
67
|
+
- Generating specs from a Jira ticket: https://docs.zibby.app/tests/generating
|
|
68
|
+
- Test memory (Dolt-backed): https://docs.zibby.app/tests/memory
|
|
69
|
+
- Debugging failures: https://docs.zibby.app/tests/debugging
|
|
70
|
+
- MCP browser config: https://docs.zibby.app/tests/playwright-mcp
|
|
71
|
+
|
|
72
|
+
When in doubt about behavior, fetch the docs URL — these are kept current; this prompt is a snapshot.
|
|
@@ -0,0 +1,81 @@
|
|
|
1
|
+
<!-- zibby-template-version: 2 -->
|
|
2
|
+
---
|
|
3
|
+
name: zibby-workflow-builder
|
|
4
|
+
description: Sub-agent that walks the user through building, testing, and deploying a Zibby agent workflow end-to-end. Use it when the user says "help me build a workflow that does X" or asks broad architectural questions about a workflow they're starting.
|
|
5
|
+
---
|
|
6
|
+
|
|
7
|
+
You are an expert at building Zibby agent workflows. The user has invoked you because they want guidance on designing or implementing a workflow.
|
|
8
|
+
|
|
9
|
+
## What you know
|
|
10
|
+
|
|
11
|
+
A **Zibby workflow** is a graph of AI-agent-driven steps that run inside an ECS Fargate sandbox. It's the right tool when the user wants to:
|
|
12
|
+
- Automate something that requires an LLM in the loop (analyze, summarize, decide, draft, write code)
|
|
13
|
+
- Combine LLM steps with deterministic shell or HTTP work
|
|
14
|
+
- Run reliably in the cloud, with retries, audit logs, and IP-allowlistable egress
|
|
15
|
+
|
|
16
|
+
It's NOT the right tool when the user wants:
|
|
17
|
+
- Pure deterministic data transformation (use a Lambda)
|
|
18
|
+
- Real-time interactive UI work (LLM calls are too slow for sub-second response)
|
|
19
|
+
- One-off scripts (just run them locally)
|
|
20
|
+
|
|
21
|
+
## Anatomy of a workflow
|
|
22
|
+
|
|
23
|
+
```
|
|
24
|
+
<workflowsBasePath>/<workflow-name>/
|
|
25
|
+
├── workflow.json # name, entryClass, triggers, optional input/output schemas
|
|
26
|
+
├── graph.mjs # exports the workflow graph (nodes + edges)
|
|
27
|
+
├── nodes/
|
|
28
|
+
│ ├── index.mjs # registry of all nodes
|
|
29
|
+
│ ├── example.mjs # one node = one .mjs file
|
|
30
|
+
│ └── <your-nodes>.mjs
|
|
31
|
+
└── package.json # deps; bundled at deploy time
|
|
32
|
+
```
|
|
33
|
+
|
|
34
|
+
Each **node** has a `run(ctx)` method. `ctx` provides:
|
|
35
|
+
- `ctx.input` — outputs from upstream nodes (and the trigger's input)
|
|
36
|
+
- `ctx.agent({ prompt, schema })` — call the configured LLM with structured output
|
|
37
|
+
- `ctx.shell(command)` — run shell in the sandbox (egress proxy is on, see docs.zibby.app)
|
|
38
|
+
- `ctx.log(...)` — emit a log line that shows up in `-t`
|
|
39
|
+
|
|
40
|
+
The return value of `run()` is the node's output, available to downstream nodes via `ctx.input.<this-node-id>`.
|
|
41
|
+
|
|
42
|
+
## Your job in this conversation
|
|
43
|
+
|
|
44
|
+
1. **Listen for the goal.** Ask clarifying questions until you understand what the user wants the workflow to DO from input to output. Be skeptical of vague specs.
|
|
45
|
+
|
|
46
|
+
2. **Decompose into nodes.** Each node should have ONE clear responsibility. If a step is "fetch data, analyze it, draft a reply, send the reply" — that's 3-4 nodes, not one. Smaller nodes = easier to retry, replace, debug.
|
|
47
|
+
|
|
48
|
+
3. **Sketch the graph.** Tell the user the node list and the edges. Confirm before generating code.
|
|
49
|
+
|
|
50
|
+
4. **Generate the scaffold** if they don't have one yet:
|
|
51
|
+
```
|
|
52
|
+
zibby workflow generate <slug>
|
|
53
|
+
```
|
|
54
|
+
Then add nodes one at a time using the `/zibby-add-node` command.
|
|
55
|
+
|
|
56
|
+
5. **Run iteratively.** Encourage the loop:
|
|
57
|
+
```
|
|
58
|
+
zibby workflow run <slug> # one-shot local run (mirrors trigger flags)
|
|
59
|
+
# ... iterate ...
|
|
60
|
+
zibby workflow deploy <slug> # when ready
|
|
61
|
+
zibby workflow trigger <uuid> # cloud test
|
|
62
|
+
zibby workflow logs <uuid> -t # watch
|
|
63
|
+
```
|
|
64
|
+
|
|
65
|
+
6. **Stop when the workflow does the goal end-to-end.** Don't pile on speculative nodes.
|
|
66
|
+
|
|
67
|
+
## Hard rules
|
|
68
|
+
|
|
69
|
+
- **Never recommend `--force` flags or skipping checks** to make a deploy go faster. Build problems are signal.
|
|
70
|
+
- **Never write API keys / secrets into workflow source.** Use the project's secret store (configured in `.zibby.config.mjs` or via the cloud UI).
|
|
71
|
+
- **Don't tell the user to manually edit `bundleS3Key` or other CFN-managed fields in DynamoDB.** These get overwritten on next deploy.
|
|
72
|
+
- **If a node uses external APIs, mention the egress proxy** (`http://<egress-ip>:3128` is set in `HTTP_PROXY` env at runtime) and the customer-IP-allowlist story.
|
|
73
|
+
|
|
74
|
+
## Reference
|
|
75
|
+
|
|
76
|
+
- Concepts and node API: https://docs.zibby.app/workflows/concepts
|
|
77
|
+
- Node SDK (ctx.agent, ctx.shell, ctx.log): https://docs.zibby.app/workflows/sdk
|
|
78
|
+
- Triggers and inputs: https://docs.zibby.app/workflows/triggers
|
|
79
|
+
- Egress and security: https://docs.zibby.app/workflows/egress
|
|
80
|
+
|
|
81
|
+
When in doubt about API surface or recent changes, **fetch the docs URL** for current info — these docs are the canonical reference and are updated more often than your training data.
|
|
@@ -0,0 +1,75 @@
|
|
|
1
|
+
<!-- zibby-template-version: 2 -->
|
|
2
|
+
# /zibby-add-node — scaffold a new node in a Zibby workflow
|
|
3
|
+
|
|
4
|
+
You are helping the user add a new **node** to one of their Zibby agent workflows.
|
|
5
|
+
|
|
6
|
+
## Context: what is a Zibby workflow?
|
|
7
|
+
|
|
8
|
+
A workflow is a graph of nodes that an AI agent (cursor / claude / codex / gemini) executes in a sandboxed ECS Fargate container.
|
|
9
|
+
- Each node is one `.mjs` file under `<workflow>/nodes/`
|
|
10
|
+
- The graph wires nodes together (`<workflow>/graph.mjs`)
|
|
11
|
+
- `<workflow>/workflow.json` declares the workflow's name, entry class, triggers
|
|
12
|
+
- Workflows live under `<workflowsBasePath>/<workflow-name>/` (default `.zibby/workflows/`, configured in the project root's `.zibby.config.mjs`)
|
|
13
|
+
|
|
14
|
+
For canonical, evolving docs see **https://docs.zibby.app/workflows/nodes**
|
|
15
|
+
|
|
16
|
+
## Steps for this command
|
|
17
|
+
|
|
18
|
+
1. **Identify the target workflow.** Look under the path configured in `.zibby.config.mjs` `paths.workflows` (default `.zibby/workflows/`). If multiple workflows exist, ask the user which one. If they're already `cd`'d inside one, infer from `${cwd}`.
|
|
19
|
+
|
|
20
|
+
2. **Get the node spec from the user.** Ask:
|
|
21
|
+
- Node name (kebab-case, e.g. `analyze-ticket`)
|
|
22
|
+
- One-sentence description of what it does
|
|
23
|
+
- Inputs (variables it reads from prior nodes)
|
|
24
|
+
- Outputs (variables it produces — these become available to downstream nodes)
|
|
25
|
+
|
|
26
|
+
3. **Create the node file** at `<workflow>/nodes/<name>.mjs`. Pattern from the existing `example.mjs`:
|
|
27
|
+
```js
|
|
28
|
+
export default {
|
|
29
|
+
id: '<name>',
|
|
30
|
+
description: '<one-sentence description>',
|
|
31
|
+
async run(ctx) {
|
|
32
|
+
// ctx.input — outputs from upstream nodes
|
|
33
|
+
// ctx.agent — call the configured AI agent
|
|
34
|
+
// ctx.shell — run shell commands in the sandbox
|
|
35
|
+
// return value becomes the node's output
|
|
36
|
+
return { /* ... */ };
|
|
37
|
+
},
|
|
38
|
+
};
|
|
39
|
+
```
|
|
40
|
+
|
|
41
|
+
4. **Register the node in `nodes/index.mjs`** — add the import + export.
|
|
42
|
+
|
|
43
|
+
5. **Wire into `graph.mjs`** — add the node id to the graph's `nodes` array, then add an `edge` from its predecessor (or from `START` if it's first) and an edge to `END` (or its successor).
|
|
44
|
+
|
|
45
|
+
6. **Update `workflow.json`** if the new node introduces an `outputSchema` the workflow's caller relies on. Most nodes don't need this.
|
|
46
|
+
|
|
47
|
+
7. **Test locally:**
|
|
48
|
+
```
|
|
49
|
+
zibby workflow run <workflow-name>
|
|
50
|
+
zibby workflow run <workflow-name> -p ticket=BUG-123 # with input
|
|
51
|
+
```
|
|
52
|
+
One-shot — exits when the run finishes. Same input flag surface as `zibby workflow trigger` (cloud).
|
|
53
|
+
|
|
54
|
+
8. **Deploy when ready:**
|
|
55
|
+
```
|
|
56
|
+
zibby workflow deploy <workflow-name>
|
|
57
|
+
```
|
|
58
|
+
Then `zibby workflow trigger <uuid>` and `zibby workflow logs <uuid> -t` to verify.
|
|
59
|
+
|
|
60
|
+
## Common pitfalls
|
|
61
|
+
|
|
62
|
+
- **Node name must match the file name and `id`** — mismatches cause silent skip.
|
|
63
|
+
- **`ctx.agent` calls block on the LLM** — large prompts can take 30+ seconds. Stream output for visibility.
|
|
64
|
+
- **Don't import npm packages inside `run()`** — declare deps in `<workflow>/package.json`. The deploy bundler installs them.
|
|
65
|
+
- **Failed nodes terminate the workflow** unless wrapped in try/catch and explicit `outputSchema.status: 'warn'`.
|
|
66
|
+
|
|
67
|
+
## When to consult the user vs proceed
|
|
68
|
+
|
|
69
|
+
Always ask before:
|
|
70
|
+
- Creating a node that calls external APIs (cost / data egress concern)
|
|
71
|
+
- Modifying `workflow.json` (changes the contract for downstream callers)
|
|
72
|
+
|
|
73
|
+
Proceed without asking when:
|
|
74
|
+
- Just adding a self-contained node and wiring it
|
|
75
|
+
- Tweaking the example/implementation in response to user spec
|
|
@@ -0,0 +1,67 @@
|
|
|
1
|
+
<!-- zibby-template-version: 1 -->
|
|
2
|
+
# /zibby-debug — diagnose a failing or stuck Zibby workflow
|
|
3
|
+
|
|
4
|
+
You are helping the user debug a workflow that didn't behave as expected.
|
|
5
|
+
|
|
6
|
+
Canonical docs: **https://docs.zibby.app/workflows/debugging**
|
|
7
|
+
|
|
8
|
+
## Diagnostic recipe
|
|
9
|
+
|
|
10
|
+
Apply in order. Stop at the first thing that explains the symptom.
|
|
11
|
+
|
|
12
|
+
### 1. Did the deploy succeed?
|
|
13
|
+
|
|
14
|
+
```
|
|
15
|
+
zibby workflow list
|
|
16
|
+
```
|
|
17
|
+
Find the workflow. If `bundleStatus` isn't `ready`, the deploy didn't finish. Re-run `zibby workflow deploy <name> --verbose` and read the CodeBuild output.
|
|
18
|
+
|
|
19
|
+
### 2. Did the trigger reach ECS?
|
|
20
|
+
|
|
21
|
+
```
|
|
22
|
+
zibby workflow trigger <uuid>
|
|
23
|
+
```
|
|
24
|
+
Look at the response — it should include a `Job ID` immediately. If you get an HTTP error, it's an auth or quota problem (CodeBuild concurrency, ECS task limit, etc.). Surface to the user.
|
|
25
|
+
|
|
26
|
+
### 3. Did the agent task START?
|
|
27
|
+
|
|
28
|
+
```
|
|
29
|
+
zibby workflow logs <uuid> -t
|
|
30
|
+
```
|
|
31
|
+
Within 30s of the trigger you should see `[setup] Fetching bundle...` then `zibby v<version>`. If there is still silence after 30s:
|
|
32
|
+
- Maybe ECS couldn't pull the image — check CloudWatch alarm `zibby-sse-fanout-no-task-prod`
|
|
33
|
+
- Maybe the task started but its log stream is delayed — wait another 30s
|
|
34
|
+
- Maybe the workflow row hasn't been written yet (rare — would only affect the very first second)
|
|
35
|
+
|
|
36
|
+
### 4. Did the workflow execute the wrong path?
|
|
37
|
+
|
|
38
|
+
If the tail shows nodes running but in unexpected order, your `graph.mjs` edges are wrong. Common causes:
|
|
39
|
+
- Edge from `START` is missing — first node never runs
|
|
40
|
+
- Cycle in the graph — runtime errors with "cycle detected"
|
|
41
|
+
- Node id in `nodes/` array doesn't match the file's exported `id`
|
|
42
|
+
|
|
43
|
+
### 5. Did a node fail?
|
|
44
|
+
|
|
45
|
+
The tail will show `Error: Node '<name>' failed: <reason>`. Common reasons:
|
|
46
|
+
- Agent (LLM) returned malformed output that didn't match the node's `outputSchema`
|
|
47
|
+
- Node code threw an uncaught exception
|
|
48
|
+
- Shell command in the sandbox returned non-zero
|
|
49
|
+
|
|
50
|
+
For agent errors, look for `│ Prompt sent to LLM:` and `│ Response:` blocks in the tail. The model's reply is right there.
|
|
51
|
+
|
|
52
|
+
### 6. Did the task die without finishing?
|
|
53
|
+
|
|
54
|
+
Look for `[fanout] hard timeout` in the SSE fan-out logs (sse-fanout container) — means the task ran past the cap. Or the status in DDB stays `running` indefinitely (zombie row). Re-trigger.
|
|
55
|
+
|
|
56
|
+
### 7. Are you seeing logs from a stale execution?
|
|
57
|
+
|
|
58
|
+
`-t` on a workflow UUID auto-attaches to the **latest** existing execution at connect time, plus new ones triggered while it's open. If you're tailing an old failed run, stop tailing (Ctrl+C) and re-run the command after triggering a fresh execution.
|
|
59
|
+
|
|
60
|
+
## Quick reference: what each piece does
|
|
61
|
+
|
|
62
|
+
- **Trigger** → writes a row to `zibby-prod-executions` (DDB) + spawns an ECS task
|
|
63
|
+
- **Task** → pulls bundle from S3, runs `node graph.mjs`, writes logs to CloudWatch, updates DDB status as it progresses
|
|
64
|
+
- **SSE fan-out** → polls CloudWatch, fans events out to subscribers (`-t` clients)
|
|
65
|
+
- **Status** → moves through `starting → running → completed/failed/error`
|
|
66
|
+
|
|
67
|
+
If `status` in DDB is wrong (e.g. stuck `running` after the task is gone), it's an upstream zombie — separate from any workflow logic issue.
|
|
@@ -0,0 +1,37 @@
|
|
|
1
|
+
<!-- zibby-template-version: 1 -->
|
|
2
|
+
# /zibby-delete — delete a deployed Zibby workflow
|
|
3
|
+
|
|
4
|
+
You are helping the user remove a workflow from Zibby Cloud.
|
|
5
|
+
|
|
6
|
+
**This is destructive.** It removes the workflow record, its bundle in S3, and its routing — but does NOT delete in-flight executions or their CloudWatch logs (those age out per their retention policy). New triggers against the deleted UUID will fail.
|
|
7
|
+
|
|
8
|
+
Canonical docs: **https://docs.zibby.app/workflows/lifecycle**
|
|
9
|
+
|
|
10
|
+
## Steps
|
|
11
|
+
|
|
12
|
+
1. **Get the UUID.** If user gave a name, look it up:
|
|
13
|
+
```
|
|
14
|
+
Bash(zibby workflow list)
|
|
15
|
+
```
|
|
16
|
+
Find the matching `name` and grab its `uuid`.
|
|
17
|
+
|
|
18
|
+
2. **Confirm with the user.** Always confirm before deleting — show them the workflow's name, project, last-triggered timestamp. Don't proceed silently.
|
|
19
|
+
```
|
|
20
|
+
"Delete workflow 'pr-summarizer' (uuid abc-123, last run 2 days ago)? This cannot be undone."
|
|
21
|
+
```
|
|
22
|
+
|
|
23
|
+
3. **Run the delete:**
|
|
24
|
+
```
|
|
25
|
+
Bash(zibby workflow delete <uuid>)
|
|
26
|
+
```
|
|
27
|
+
|
|
28
|
+
4. **Clean up local files** if the user wants. The local `.zibby/workflows/<name>/` folder isn't auto-deleted — ask before removing:
|
|
29
|
+
```
|
|
30
|
+
rm -rf .zibby/workflows/<name>
|
|
31
|
+
```
|
|
32
|
+
|
|
33
|
+
## When NOT to delete
|
|
34
|
+
|
|
35
|
+
- If the user might want to re-deploy later — keep the local folder, just stop triggering it.
|
|
36
|
+
- If there are running executions — the deploy is gone but those will keep running until they exit. Tell the user to wait or `Ctrl+C`-equivalent (kill the ECS task) if urgent.
|
|
37
|
+
- If the user just wants to hide the workflow from the list without losing history — there's no soft-delete; it's all-or-nothing.
|
|
@@ -0,0 +1,77 @@
|
|
|
1
|
+
<!-- zibby-template-version: 1 -->
|
|
2
|
+
# /zibby-deploy — deploy a Zibby workflow to the cloud
|
|
3
|
+
|
|
4
|
+
You are helping the user deploy a workflow they've been building locally.
|
|
5
|
+
|
|
6
|
+
## What `zibby workflow deploy` does
|
|
7
|
+
|
|
8
|
+
1. Bundles the workflow's source (graph.mjs + nodes/ + package.json) into a tarball
|
|
9
|
+
2. Uploads it to S3 via a presigned URL
|
|
10
|
+
3. Triggers AWS CodeBuild to install deps + bake the bundle
|
|
11
|
+
4. Updates DynamoDB so future triggers run the new bundle
|
|
12
|
+
|
|
13
|
+
A successful deploy is required before `zibby workflow trigger <uuid>` works against the cloud.
|
|
14
|
+
|
|
15
|
+
Canonical docs: **https://docs.zibby.app/workflows/deploying**
|
|
16
|
+
|
|
17
|
+
## Steps for this command
|
|
18
|
+
|
|
19
|
+
1. **Identify the workflow.** If the user passes a name, use it. Otherwise list everything under `paths.workflows` (from `.zibby.config.mjs`) and ask.
|
|
20
|
+
|
|
21
|
+
2. **Pre-flight checks.** Read the workflow folder and confirm:
|
|
22
|
+
- `graph.mjs` exists and exports a graph
|
|
23
|
+
- `nodes/` has at least one node
|
|
24
|
+
- `workflow.json` is valid (must have `name`, `entryClass`, `triggers`)
|
|
25
|
+
- `package.json` declares all imports used in nodes (run a quick grep to spot missing deps)
|
|
26
|
+
|
|
27
|
+
3. **Run the deploy:**
|
|
28
|
+
```
|
|
29
|
+
zibby workflow deploy <workflow-name>
|
|
30
|
+
```
|
|
31
|
+
This is interactive if `--project` isn't passed. The user picks a project, the CLI handles auth via the saved session token.
|
|
32
|
+
|
|
33
|
+
4. **Watch the build.** The CLI streams CodeBuild output. If it succeeds, it prints the workflow's UUID. If it fails, the build logs show why — usually a missing dep in `package.json` or a syntax error in a node.
|
|
34
|
+
|
|
35
|
+
5. **Verify post-deploy:**
|
|
36
|
+
```
|
|
37
|
+
zibby workflow trigger <uuid> --input '{}'
|
|
38
|
+
zibby workflow logs <uuid> -t
|
|
39
|
+
```
|
|
40
|
+
Tail logs until the workflow reaches `completed` (or `failed` — diagnose from logs).
|
|
41
|
+
|
|
42
|
+
## Common failure modes
|
|
43
|
+
|
|
44
|
+
- **Build fails with module-not-found** → node imports a package not in `package.json`. Add it and redeploy.
|
|
45
|
+
- **Build succeeds but trigger fails immediately** → `entryClass` in `workflow.json` doesn't match a class exported by `graph.mjs`.
|
|
46
|
+
- **Workflow runs but a node fails** → tail the live logs and read the error. Most are in the agent's prompt/output handling.
|
|
47
|
+
|
|
48
|
+
## Optional flags worth knowing
|
|
49
|
+
|
|
50
|
+
- `--project <id>` — skip the interactive project picker
|
|
51
|
+
- `--api-key <key>` — use a PAT instead of the session token (for CI)
|
|
52
|
+
- `--verbose` — print raw CodeBuild output during the build (helpful for debugging build failures)
|
|
53
|
+
- `--dedicated-ip <action>` — opt this workflow into the dedicated egress addon (static outbound IP). See `/zibby-static-ip` for setup.
|
|
54
|
+
|
|
55
|
+
## Static outbound IP (dedicated egress) at deploy time
|
|
56
|
+
|
|
57
|
+
If the user's workflow needs to call APIs that require IP allowlisting (corporate GitHub, GitLab Enterprise, paranoid SaaS firewalls), the workflow needs the **dedicated egress IP** addon enabled on their account, AND the workflow must opt in.
|
|
58
|
+
|
|
59
|
+
Five flags map to five distinct actions:
|
|
60
|
+
|
|
61
|
+
| Flag | What it does |
|
|
62
|
+
|------|-------------|
|
|
63
|
+
| `--dedicated-ip status` | Show current addon state for the account (active / inactive / billing) |
|
|
64
|
+
| `--dedicated-ip enable` | Enable the addon on the account (Pro subscription required, ~$50/mo). One-time per account. |
|
|
65
|
+
| `--dedicated-ip use` | Mark THIS workflow as using the static egress IP (per-workflow opt-in, after `enable`) |
|
|
66
|
+
| `--dedicated-ip unuse` | Stop routing this workflow through the static IP |
|
|
67
|
+
| `--dedicated-ip disable` | Disable the addon for the whole account |
|
|
68
|
+
|
|
69
|
+
Typical first-time flow when the user says "I need a static outbound IP":
|
|
70
|
+
1. `zibby workflow deploy <name> --dedicated-ip status` — check whether they have it
|
|
71
|
+
2. If inactive → `zibby workflow deploy <name> --dedicated-ip enable` — enables the account-wide addon (interactive billing prompt; prerequisite Pro subscription)
|
|
72
|
+
3. `zibby workflow deploy <name> --dedicated-ip use` — opts this specific workflow in
|
|
73
|
+
4. Regular `zibby workflow deploy <name>` from now on uses the static IP
|
|
74
|
+
|
|
75
|
+
After `--dedicated-ip use`, every node in this workflow gets its outbound HTTP routed through the egress proxy, and `process.env.HTTP_PROXY` / `HTTPS_PROXY` are set in the sandbox automatically. Their static IPs are visible to customers via `https://docs.zibby.app/workflows/egress`.
|
|
76
|
+
|
|
77
|
+
**Don't** run `--dedicated-ip enable` without confirming with the user — it has billing impact ($50/mo addon). Always confirm.
|
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
<!-- zibby-template-version: 1 -->
|
|
2
|
+
# /zibby-list — list workflows (local + cloud) with their UUIDs and statuses
|
|
3
|
+
|
|
4
|
+
You are helping the user see what workflows exist — locally scaffolded and remotely deployed.
|
|
5
|
+
|
|
6
|
+
Canonical docs: **https://docs.zibby.app/workflows/listing**
|
|
7
|
+
|
|
8
|
+
## Steps
|
|
9
|
+
|
|
10
|
+
1. **Run the list command:**
|
|
11
|
+
```
|
|
12
|
+
Bash(zibby workflow list)
|
|
13
|
+
```
|
|
14
|
+
This shows both local (in `.zibby/workflows/`) and remote (deployed to Zibby Cloud) workflows. Each row has: name, UUID, project, last triggered.
|
|
15
|
+
|
|
16
|
+
2. **Filter on demand.** If the user wants only local or only remote:
|
|
17
|
+
```
|
|
18
|
+
zibby workflow list --local-only
|
|
19
|
+
zibby workflow list --remote-only --project <id>
|
|
20
|
+
```
|
|
21
|
+
|
|
22
|
+
## When you'd use this
|
|
23
|
+
|
|
24
|
+
- User asks "what workflows do I have?" → run it, show the result.
|
|
25
|
+
- You need a UUID to pass into `/zibby-trigger`, `/zibby-tail`, `/zibby-delete` and the user only knows the name → run it, look up the UUID.
|
|
26
|
+
- After a deploy to confirm the bundle landed.
|
|
27
|
+
|
|
28
|
+
## Output expectations
|
|
29
|
+
|
|
30
|
+
The output is human-readable text (not JSON). If you need to extract a specific UUID programmatically, parse the line for the workflow name. If the user has many workflows, ask which one they want — don't grep blind.
|