@zibby/core 0.1.27 â 0.1.30
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/constants.js +1 -1
- package/dist/framework/agents/cursor-strategy.js +12 -12
- package/dist/framework/constants.js +1 -1
- package/dist/package.json +4 -2
- package/package.json +4 -2
- package/scripts/export-default-workflows.js +51 -0
- package/scripts/patch-cursor-mcp.js +174 -0
- package/scripts/setup-ci.sh +115 -0
- package/scripts/setup-official-playwright-mcp.sh +173 -0
- package/scripts/test-with-video.sh +49 -0
package/dist/constants.js
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
const o={ASSISTANT:"gpt-5.4-nano-2026-03-17",CLAUDE:"claude-sonnet-4-6",CURSOR:"auto",CODEX:"o4-mini",GEMINI:"gemini-2.5-pro",OPENAI_POSTPROCESSING:"gpt-4o-mini"},n={ASSISTANT:"assistant",CLAUDE:"claude",CURSOR:"cursor",CODEX:"codex",GEMINI:"gemini"},e={DEBUG:"debug",INFO:"info",WARN:"warn",ERROR:"error",SILENT:"silent"},t={auto:"claude-sonnet-4-6","sonnet-4.6":"claude-sonnet-4-6","sonnet-4-6":"claude-sonnet-4-6","opus-4.6":"claude-opus-4-6","opus-4-6":"claude-opus-4-6","sonnet-4.5":"claude-sonnet-4-5-20250929","sonnet-4-5":"claude-sonnet-4-5-20250929","opus-4.5":"claude-opus-4-20250514","opus-4-5":"claude-opus-4-20250514","claude-sonnet-4-6":"claude-sonnet-4-6","claude-opus-4-6":"claude-opus-4-6","claude-sonnet-4-5-20250929":"claude-sonnet-4-5-20250929","claude-opus-4-20250514":"claude-opus-4-20250514"},s={auto:"o4-mini","o4-mini":"o4-mini",o3:"o3","o3-mini":"o3-mini","codex-mini":"codex-mini-latest","gpt-4o":"gpt-4o","gpt-4o-mini":"gpt-4o-mini","gpt-5.2-codex":"gpt-5.2-codex","gpt-5.2":"gpt-5.2","gpt-5.3":"gpt-5.3","gpt-5.4":"gpt-5.4"},i={auto:"gemini-2.5-pro","gemini-2.5-pro":"gemini-2.5-pro","gemini-2.5-flash":"gemini-2.5-flash"},p={CURSOR_AGENT_DEFAULT:1200*1e3,OPENAI_REQUEST:
|
|
1
|
+
const o={ASSISTANT:"gpt-5.4-nano-2026-03-17",CLAUDE:"claude-sonnet-4-6",CURSOR:"auto",CODEX:"o4-mini",GEMINI:"gemini-2.5-pro",OPENAI_POSTPROCESSING:"gpt-4o-mini"},n={ASSISTANT:"assistant",CLAUDE:"claude",CURSOR:"cursor",CODEX:"codex",GEMINI:"gemini"},e={DEBUG:"debug",INFO:"info",WARN:"warn",ERROR:"error",SILENT:"silent"},t={auto:"claude-sonnet-4-6","sonnet-4.6":"claude-sonnet-4-6","sonnet-4-6":"claude-sonnet-4-6","opus-4.6":"claude-opus-4-6","opus-4-6":"claude-opus-4-6","sonnet-4.5":"claude-sonnet-4-5-20250929","sonnet-4-5":"claude-sonnet-4-5-20250929","opus-4.5":"claude-opus-4-20250514","opus-4-5":"claude-opus-4-20250514","claude-sonnet-4-6":"claude-sonnet-4-6","claude-opus-4-6":"claude-opus-4-6","claude-sonnet-4-5-20250929":"claude-sonnet-4-5-20250929","claude-opus-4-20250514":"claude-opus-4-20250514"},s={auto:"o4-mini","o4-mini":"o4-mini",o3:"o3","o3-mini":"o3-mini","codex-mini":"codex-mini-latest","gpt-4o":"gpt-4o","gpt-4o-mini":"gpt-4o-mini","gpt-5.2-codex":"gpt-5.2-codex","gpt-5.2":"gpt-5.2","gpt-5.3":"gpt-5.3","gpt-5.4":"gpt-5.4"},i={auto:"gemini-2.5-pro","gemini-2.5-pro":"gemini-2.5-pro","gemini-2.5-flash":"gemini-2.5-flash"},p={CURSOR_AGENT_DEFAULT:1200*1e3,OPENAI_REQUEST:18e4};export{n as AGENT_TYPES,t as CLAUDE_MODEL_MAP,s as CODEX_MODEL_MAP,o as DEFAULT_MODELS,i as GEMINI_MODEL_MAP,e as LOG_LEVELS,p as TIMEOUTS};
|
|
@@ -1,4 +1,4 @@
|
|
|
1
|
-
import{AgentStrategy as Y}from"./base.js";import{spawn as q,execSync as N}from"child_process";import{writeFileSync as j,readFileSync as J,mkdirSync as z,existsSync as H,accessSync as B,constants as W,unlinkSync as V}from"fs";import{join as C,resolve as X}from"path";import{homedir as D}from"os";import{logger as t}from"../../utils/logger.js";import{DEFAULT_MODELS as Q,TIMEOUTS as ee}from"../../constants.js";import{DEFAULT_OUTPUT_BASE as te,SESSION_INFO_FILE as re,STUDIO_STOP_REQUEST_FILE as oe}from"../constants.js";import{getAllSkills as ne,getSkill as Z}from"../skill-registry.js";import{StreamingParser as se}from"../../utils/streaming-parser.js";import{StructuredOutputFormatter as ie}from"./utils/structured-output-formatter.js";import{formatWithOpenAIProxy as le}from"./utils/openai-proxy-formatter.js";import{timeline as G}from"../../utils/timeline.js";import{shouldUseIsolatedCursorMcpHome as ae,createIsolatedCursorAgentHome as ue,removeIsolatedCursorAgentHome as ce}from"../../utils/cursor-mcp-isolated-home.js";class Ee extends Y{constructor(){super("cursor","Cursor (CLI)",100)}canHandle(
|
|
1
|
+
import{AgentStrategy as Y}from"./base.js";import{spawn as q,execSync as N}from"child_process";import{writeFileSync as j,readFileSync as J,mkdirSync as z,existsSync as H,accessSync as B,constants as W,unlinkSync as V}from"fs";import{join as C,resolve as X}from"path";import{homedir as D}from"os";import{logger as t}from"../../utils/logger.js";import{DEFAULT_MODELS as Q,TIMEOUTS as ee}from"../../constants.js";import{DEFAULT_OUTPUT_BASE as te,SESSION_INFO_FILE as re,STUDIO_STOP_REQUEST_FILE as oe}from"../constants.js";import{getAllSkills as ne,getSkill as Z}from"../skill-registry.js";import{StreamingParser as se}from"../../utils/streaming-parser.js";import{StructuredOutputFormatter as ie}from"./utils/structured-output-formatter.js";import{formatWithOpenAIProxy as le}from"./utils/openai-proxy-formatter.js";import{timeline as G}from"../../utils/timeline.js";import{shouldUseIsolatedCursorMcpHome as ae,createIsolatedCursorAgentHome as ue,removeIsolatedCursorAgentHome as ce}from"../../utils/cursor-mcp-isolated-home.js";class Ee extends Y{constructor(){super("cursor","Cursor (CLI)",100)}canHandle(i){const l=[C(D(),".local","bin","cursor-agent"),C(D(),".cursor","bin","cursor-agent"),"/usr/local/bin/cursor-agent","/usr/local/bin/agent","/Applications/Cursor.app/Contents/Resources/app/bin/cursor","agent","cursor-agent"];for(const r of l)try{if(r.startsWith("/")){B(r,W.X_OK);const f=N(`"${r}" --version 2>&1`,{encoding:"utf-8",timeout:3e3,stdio:"pipe"});if(f&&f.length>0)return t.debug(`[Cursor] Found agent at: ${r} (version: ${f.trim().slice(0,50)})`),!0}else{const f=N(`which ${r}`,{encoding:"utf-8",timeout:2e3,stdio:"pipe"}).trim();if(!f)continue;const a=N(`${r} --version 2>&1`,{encoding:"utf-8",timeout:3e3,stdio:"pipe"});if(a&&a.length>0)return t.debug(`[Cursor] Found '${r}' in PATH at ${f} (version: ${a.trim().slice(0,50)})`),!0}}catch{continue}return t.warn("[Cursor] \u274C Cursor Agent CLI not found or not working. 
Run: agent --version"),!1}async invoke(i,l={}){const{workspace:r=process.cwd(),print:f=!1,schema:a=null,skills:b=null,sessionPath:u=null,nodeName:g=null,timeout:m=ee.CURSOR_AGENT_DEFAULT,config:h={}}=l,$=h?.agent?.strictMode||!1,v=l.model??h?.agent?.cursor?.model??Q.CURSOR;t.debug(`[Cursor] Invoking (model: ${v}, timeout: ${m/1e3}s, skills: ${JSON.stringify(b)})`);const E=(this._setupMcpConfig(u,r,h,b,g)||{}).isolatedMcpHome??null,S=[C(D(),".local","bin","cursor-agent"),C(D(),".cursor","bin","cursor-agent"),"/usr/local/bin/cursor-agent","/usr/local/bin/agent","/Applications/Cursor.app/Contents/Resources/app/bin/cursor","agent","cursor-agent"];let n=null;for(const c of S)try{if(c.startsWith("/"))B(c,W.X_OK),N(`"${c}" --version 2>&1`,{encoding:"utf-8",timeout:3e3,stdio:"pipe"});else{if(!N(`which ${c}`,{encoding:"utf-8",timeout:2e3}).trim())throw new Error("not in PATH");N(`${c} --version 2>&1`,{encoding:"utf-8",timeout:3e3,stdio:"pipe"})}n=c,t.debug(`[Agent] Using binary: ${c}`);break}catch(y){t.debug(`[Agent] Binary '${c}' check failed: ${y.message}`);continue}if(!n)throw new Error(`Cursor Agent CLI not found or not working.
|
|
2
2
|
|
|
3
3
|
Checked paths:
|
|
4
4
|
${S.map(c=>` - ${c}`).join(`
|
|
@@ -10,23 +10,23 @@ Install cursor-agent:
|
|
|
10
10
|
Then add to PATH:
|
|
11
11
|
echo 'export PATH="$HOME/.local/bin:$PATH"' >> ~/.zshrc && source ~/.zshrc
|
|
12
12
|
|
|
13
|
-
Test with: agent --version`);let
|
|
13
|
+
Test with: agent --version`);let w=null;if(a){const c=`zibby-result-${Date.now()}.json`;w=C(r,".zibby","tmp",c);const y=C(r,".zibby","tmp");H(y)||z(y,{recursive:!0});const T=ie.generateFileOutputInstructions(a,w);i=`${i}
|
|
14
14
|
|
|
15
15
|
${T}`}const P=process.env.CURSOR_API_KEY,x=P?` | key: ***${P.slice(-4)}`:" | key: not set";console.log(`
|
|
16
16
|
\u25C6 Model: ${v||"auto"}${x}
|
|
17
17
|
`);const M=(await import("chalk")).default;console.log(`
|
|
18
|
-
${M.bold("Prompt sent to LLM:")}`),console.log(M.dim("\u2500".repeat(60))),console.log(M.dim(
|
|
19
|
-
`)}catch{}let _,o=null;try{const c=u||(process.env.ZIBBY_SESSION_PATH?String(process.env.ZIBBY_SESSION_PATH).trim():null);_=await this._spawnWithStreaming(n,L,r,m,null,c,E)}catch(c){o=c}const A=_?.stdout||"";if(a){const c=typeof a.parse=="function";let
|
|
20
|
-
`);let r=null;for(const f of l){const a=f.trim();if(a)try{const b=JSON.parse(a);if(b.type==="assistant"&&b.message?.content){const u=b.message.content;if(Array.isArray(u)){const g=u.filter(m=>m.type==="text"&&m.text).map(m=>m.text).join("");g&&(r=g)}else typeof u=="string"&&u&&(r=u)}}catch{}}return r?.trim()||null}_setupMcpConfig(
|
|
21
|
-
`;if(ae(
|
|
22
|
-
`)){const K=U.replace(`${r}/`,"");F.has(K)||(F.add(K),
|
|
23
|
-
${$.slice(-2e3)}`),o.kill("SIGTERM"),setTimeout(()=>{o.killed||o.kill("SIGKILL")},5e3)},f),k=new se;k.onToolCall=(e,d)=>{let
|
|
24
|
-
`).filter(R=>R.trim());E+=p.length}),o.stderr.on("data",e=>{const d=e.toString();v+=d,I=Date.now(),
|
|
25
|
-
`).filter(p=>p.trim());for(const p of
|
|
18
|
+
${M.bold("Prompt sent to LLM:")}`),console.log(M.dim("\u2500".repeat(60))),console.log(M.dim(i)),console.log(M.dim("\u2500".repeat(60)));const L=["--print","--force","--approve-mcps","--output-format","stream-json","--stream-partial-output","--model",v||"auto"];if(process.env.CURSOR_API_KEY&&L.push("--api-key",process.env.CURSOR_API_KEY),L.push(i),t.debug(`[Agent] Prompt: ${i.length} chars, model: ${v||"auto"}`),t.debug(`[Agent] Workspace: ${r}`),process.env.LOG_LEVEL==="debug"||process.env.ZIBBY_LOG_CURSOR_CLI==="1")try{console.log(`\u{1F527} Cursor CLI --model ${v||"auto"} (from .zibby.config.js agent.cursor.model)
|
|
19
|
+
`)}catch{}let _,o=null;try{const c=u||(process.env.ZIBBY_SESSION_PATH?String(process.env.ZIBBY_SESSION_PATH).trim():null);_=await this._spawnWithStreaming(n,L,r,m,null,c,E)}catch(c){o=c}const A=_?.stdout||"";if(a){const c=typeof a.parse=="function";let y=null;const T=!!(w&&H(w));if(w&&t.info(`[Agent] Result file: ${T?"present":"missing"} at ${w}`),T)try{const e=J(w,"utf-8").trim();y=JSON.parse(e),t.info(`[Agent] Parsed JSON from result file OK (${e.length} chars) \u2192 object ready for validation`),o&&t.debug("[Agent] Agent exited non-zero but result file was written \u2014 recovering")}catch(e){t.warn(`\u26A0\uFE0F [Agent] Result file exists on disk but is not valid JSON: ${e.message}`)}else if(o)t.warn(`[Agent] Result file missing at ${w} (agent process error \u2014 may still recover if strictMode repairs)`);else throw t.error(`\u274C [Agent] Result file was never created at ${w}`),new Error(`Agent did not write required result file at ${w}`);if(y&&c)try{const e=a.parse(y);return t.info("\u2705 [Agent] Zod validation passed for structured result file"),$&&t.debug("[Agent] strictMode enabled but not needed \u2014 agent wrote valid file"),{raw:A,structured:e}}catch(e){t.warn(`\u26A0\uFE0F [Agent] JSON parsed but Zod rejected it (wrong types/shape): ${e.message?.slice(0,400)}`)}else{if(y)return t.info("\u2705 [Agent] File-based output extracted (no Zod parse fn) \u2014 accepting as structured"),$&&t.debug("[Agent] strictMode enabled but not needed \u2014 agent wrote valid file"),{raw:A,structured:y};T&&t.error("\u274C [Agent] Result file exists but produced no in-memory JSON (parse failed earlier)")}if($&&!o){const e=_.parsedText,d=y?JSON.stringify(y):e;t.info(`[Agent] strictMode: calling OpenAI proxy to fix structured output (${d.length} chars in)`);try{const s=await le(d,a);if(c){const p=a.parse(s.structured);return t.info("\u2705 [Agent] Proxy output passed Zod validation"),{raw:A,structured:p}}return{raw:A,...s}}catch(s){if(t.warn(`\u26A0\uFE0F [Agent] 
strictMode proxy failed: ${s.message}`),y)return t.warn("[Agent] Using agent's original result file as fallback"),{raw:A,structured:y}}}if(o)throw o;const k=T?y==null?"file existed but JSON.parse failed \u2014 see WARN log above":c?"JSON was valid but Zod validation failed \u2014 see WARN log above":"no structured object after read (unexpected)":"file never appeared (agent may not have run Write tool to the path above)";throw t.error(`\u274C [Agent] No validated structured output: ${k}`),t.error("\u{1F4A1} Tip: Set strictMode=true in .zibby.config.js for OpenAI proxy fallback"),new Error(`Agent did not produce a valid result file at ${w}. Enable strictMode for proxy fallback.`)}if(o)throw o;return this._extractFinalResult(A)||_?.parsedText||A}_extractFinalResult(i){if(!i)return null;const l=i.split(`
|
|
20
|
+
`);let r=null;for(const f of l){const a=f.trim();if(a)try{const b=JSON.parse(a);if(b.type==="assistant"&&b.message?.content){const u=b.message.content;if(Array.isArray(u)){const g=u.filter(m=>m.type==="text"&&m.text).map(m=>m.text).join("");g&&(r=g)}else typeof u=="string"&&u&&(r=u)}}catch{}}return r?.trim()||null}_setupMcpConfig(i,l,r,f=null,a=null){const b=r?.headless,u=C(D(),".cursor"),g=C(u,"mcp.json");let m={};if(H(g))try{m=JSON.parse(J(g,"utf-8"))}catch{}const h=m.mcpServers||{},$=r?.paths?.output||te,v=C(l||process.cwd(),$,re),I=Array.isArray(f)?f.map(n=>Z(n)).filter(Boolean):[...ne()].map(([,n])=>n),E=new Set;for(const n of I)typeof n.resolve=="function"&&(E.has(n.serverName)||(E.add(n.serverName),this._ensureSkillConfigured(h,n,i,v,a,b)));if(i){const n=Z("browser");n&&typeof n.resolve=="function"&&!E.has(n.serverName)&&this._ensureSkillConfigured(h,n,i,v,"execute_live",b)}if(Object.keys(h).length===0)return t.debug("[MCP] No MCP servers configured - agent will run without tool access"),{isolatedMcpHome:null};const S=`${JSON.stringify({mcpServers:h},null,2)}
|
|
21
|
+
`;if(ae(i)){const n=ue(l||process.cwd()),w=C(n,".cursor","mcp.json");return j(w,S,"utf8"),t.debug(`[MCP] Isolated cursor-agent HOME (session-scoped mcp.json): ${n} | servers: ${Object.keys(h).join(", ")}`),{isolatedMcpHome:n}}return H(u)||z(u,{recursive:!0}),j(g,S,"utf8"),t.debug(`[MCP] Global ~/.cursor/mcp.json | servers: ${Object.keys(h).join(", ")}`),{isolatedMcpHome:null}}_ensureSkillConfigured(i,l,r,f,a=null,b){const u=l.cursorKey||l.serverName,g=i[u]?u:i[l.serverName]?l.serverName:null;if(g&&r){const h=typeof l.resolve=="function"?l.resolve({sessionPath:r,nodeName:a,headless:b}):null;h?.args?i[g].args=h.args:i[g].args=(i[g].args||[]).map(I=>I.startsWith("--output-dir=")?`--output-dir=${r}`:I);const $=h?.env||{},v=l.sessionEnvKey?{[l.sessionEnvKey]:f}:{};i[g].env={...i[g].env||{},...$,...v},t.debug(`[MCP] Updated ${g} session \u2192 ${r}`);return}if(g)return;const m=l.resolve({sessionPath:r,nodeName:a,headless:b});m&&(i[u]={...m,...l.sessionEnvKey&&{env:{...m.env||{},[l.sessionEnvKey]:f}}},t.debug(`[MCP] Configured ${u}`))}_spawnWithStreaming(i,l,r,f,a=null,b=null,u=null){return new Promise((g,m)=>{const h=Date.now();let $="",v="",I=Date.now(),E=0,S=!1,n=null,w=!1,P=!1,x=null;if(b)try{x=C(X(String(b)),oe)}catch{x=null}let M=!1;const L=()=>{M||(M=!0,ce(u))},_={...process.env};u&&(_.HOME=u,process.platform==="win32"&&(_.USERPROFILE=u),t.debug(`[Agent] cursor-agent HOME=${u} (isolated MCP config)`));const o=q(i,l,{cwd:r,shell:!1,stdio:["pipe","pipe","pipe"],env:_});t.debug(`[Agent] PID: ${o.pid}`),o.stdin.on("error",e=>{e.code!=="EPIPE"&&t.warn(`[Agent] stdin error: ${e.message}`)}),o.stdout.on("error",e=>{e.code!=="EPIPE"&&t.warn(`[Agent] stdout error: ${e.message}`)}),o.stderr.on("error",e=>{e.code!=="EPIPE"&&t.warn(`[Agent] stderr error: ${e.message}`)}),a?(o.stdin.write(a,e=>{e&&e.code!=="EPIPE"&&t.warn(`[Agent] Failed to write to stdin: ${e.message}`),o.stdin.end()}),t.debug(`[Agent] Prompt also piped to stdin (${a.length} chars)`)):o.stdin.end();let 
A=null;x&&(A=setInterval(()=>{if(!(S||P))try{if(H(x)){S=!0,n="studio-stop";try{V(x)}catch{}t.warn("\u{1F6D1} Studio stop requested \u2014 terminating Cursor agent (and MCP browser session)"),o.kill("SIGTERM"),setTimeout(()=>{o.killed||o.kill("SIGKILL")},2e3)}}catch{}},600));const F=new Set,c=new Date(h).toISOString().replace(/\.\d+Z$/,""),y=setInterval(()=>{const e=Math.round((Date.now()-h)/1e3),d=Math.round((Date.now()-I)/1e3),s=[];try{const R=Math.ceil(e/60)+1,O=N(`find "${r}" -type f -mmin -${R} -not -path '*/node_modules/*' -not -path '*/.git/*' -not -path '*/target/*' 2>/dev/null | head -20`,{encoding:"utf-8",timeout:5e3}).trim();if(O)for(const U of O.split(`
|
|
22
|
+
`)){const K=U.replace(`${r}/`,"");F.has(K)||(F.add(K),s.push(K))}}catch{}let p="";s.length>0&&(p=` | \u{1F4C1} new: ${s.map(O=>O.split("/").pop()).join(", ")}`),F.size>0&&(p+=` | \u{1F4E6} total: ${F.size} files`),t.debug(`\u{1F493} [Agent] Running for ${e}s | ${E} lines output${p}`),E===0&&e>=30&&F.size===0&&(e<35&&t.warn(`\u26A0\uFE0F [Agent] No output after ${e}s \u2014 agent may be stuck. Check your CURSOR_API_KEY.`),e>=60&&(S=!0,n=n||"stall",t.error(`\u274C [Agent] No response after ${e}s \u2014 killing. Verify CURSOR_API_KEY is valid and agent CLI works: agent --version`),o.kill("SIGTERM"),setTimeout(()=>{o.killed||o.kill("SIGKILL")},3e3)))},3e4),T=setTimeout(()=>{S=!0,n=n||"timeout";const e=Math.round((Date.now()-h)/1e3);t.error(`\u23F1\uFE0F [Agent] Timeout after ${e}s \u2014 killing process (PID: ${o.pid})`),$.trim()&&t.warn(`\u{1F4E4} [Agent] Partial output (${$.length} chars) before timeout:
|
|
23
|
+
${$.slice(-2e3)}`),o.kill("SIGTERM"),setTimeout(()=>{o.killed||o.kill("SIGKILL")},5e3)},f),k=new se;k.onToolCall=(e,d)=>{let s=e,p=d;if(e==="mcpToolCall"&&d?.name)s=d.name.replace(/^mcp_+[^_]+_+/,""),s.includes("-")&&s.split("-")[0]===s.split("-")[1]&&(s=s.split("-")[0]),p=d.args??d.input??d;else{if(e==="readToolCall"||e==="editToolCall"||e==="writeToolCall")return;(e.startsWith("mcp__")||e.includes("ToolCall"))&&(s=e.replace(/^mcp_+[^_]+_+/,"").replace(/ToolCall$/,""))}if(s.includes("memory")?G.stepMemory(`Tool: ${s}`):G.stepTool(`Tool: ${s}`),p!=null&&typeof p=="object"&&Object.keys(p).length>0&&!P){const O=JSON.stringify(p),U=O.length>100?`${O.substring(0,100)}...`:O;console.log(` Input: ${U}`)}},o.stdout.on("data",e=>{const d=e.toString();$+=d,I=Date.now(),w||(w=!0);const s=k.processChunk(d);s&&!P&&process.stdout.write(s);const p=d.split(`
|
|
24
|
+
`).filter(R=>R.trim());E+=p.length}),o.stderr.on("data",e=>{const d=e.toString();v+=d,I=Date.now(),w||(w=!0);const s=d.split(`
|
|
25
|
+
`).filter(p=>p.trim());for(const p of s)t.warn(`\u26A0\uFE0F [Agent stderr] ${p}`)}),o.on("close",(e,d)=>{P=!0,L(),clearTimeout(T),clearInterval(y),A&&clearInterval(A),k.flush();const s=Math.round((Date.now()-h)/1e3);if(t.debug(`[Agent] Exited: code=${e}, signal=${d}, elapsed=${s}s, output=${$.length} chars`),S){if(n==="studio-stop"){m(new Error("Stopped from Zibby Studio"));return}m(new Error(`Cursor Agent timed out after ${s}s (limit: ${f/1e3}s). ${E} lines produced. Last output ${Math.round((Date.now()-I)/1e3)}s ago. ${$.trim()?`
|
|
26
26
|
Partial output (last 500 chars):
|
|
27
27
|
${$.slice(-500)}`:"No output captured."}`));return}if(e!==0){m(new Error(`Cursor Agent failed: exit code ${e}, signal ${d}. ${v.trim()?`
|
|
28
28
|
Stderr: ${v.slice(-1e3)}`:""}${$.trim()?`
|
|
29
|
-
Stdout (last 500 chars): ${$.slice(-500)}`:""}`));return}const p=k.getResult(),R=p?JSON.stringify(p,null,2):k.getRawText()||$||"";g({stdout:$||v||"",parsedText:R})}),o.on("error",e=>{L(),clearTimeout(T),clearInterval(
|
|
30
|
-
Binary: ${
|
|
29
|
+
Stdout (last 500 chars): ${$.slice(-500)}`:""}`));return}const p=k.getResult(),R=p?JSON.stringify(p,null,2):k.getRawText()||$||"";g({stdout:$||v||"",parsedText:R})}),o.on("error",e=>{L(),clearTimeout(T),clearInterval(y),A&&clearInterval(A),m(new Error(`Cursor Agent spawn error: ${e.message}
|
|
30
|
+
Binary: ${i}
|
|
31
31
|
This usually means the binary is not in PATH. Try:
|
|
32
32
|
echo 'export PATH="$HOME/.local/bin:$PATH"' >> ~/.zshrc && source ~/.zshrc`))})})}}export{Ee as CursorAgentStrategy};
|
|
@@ -1 +1 @@
|
|
|
1
|
-
const o=".zibby/output",t="sessions",s=".session-info.json",_=".zibby-studio-stop",I="result.json",
|
|
1
|
+
const o=".zibby/output",t="sessions",s=".session-info.json",_=".zibby-studio-stop",I="result.json",r="raw_stream_output.txt",E="events.json",e={BROWSER:"browser",JIRA:"jira",GITHUB:"github",SLACK:"slack",MEMORY:"memory",CHAT_MEMORY:"chat-memory",RUNNER:"runner",SKILL_INSTALLER:"skill-installer",CORE_TOOLS:"core-tools",WORKFLOW_BUILDER:"workflow-builder"},S=["CI_JOB_ID","GITHUB_RUN_ID","CIRCLE_WORKFLOW_ID","BUILD_ID"];export{S as CI_ENV_VARS,o as DEFAULT_OUTPUT_BASE,E as EVENTS_FILE,r as RAW_OUTPUT_FILE,I as RESULT_FILE,t as SESSIONS_DIR,s as SESSION_INFO_FILE,e as SKILLS,_ as STUDIO_STOP_REQUEST_FILE};
|
package/dist/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@zibby/core",
|
|
3
|
-
"version": "0.1.
|
|
3
|
+
"version": "0.1.30",
|
|
4
4
|
"description": "Core test automation engine with multi-agent and multi-MCP support",
|
|
5
5
|
"type": "module",
|
|
6
6
|
"main": "src/index.js",
|
|
@@ -69,6 +69,7 @@
|
|
|
69
69
|
},
|
|
70
70
|
"files": [
|
|
71
71
|
"src/",
|
|
72
|
+
"scripts/",
|
|
72
73
|
"templates/",
|
|
73
74
|
"!templates/**/__tests__/",
|
|
74
75
|
"!templates/**/*.test.js",
|
|
@@ -85,13 +86,14 @@
|
|
|
85
86
|
"@modelcontextprotocol/sdk": "^1.29.0",
|
|
86
87
|
"@openai/codex-sdk": "^0.120.0",
|
|
87
88
|
"@playwright/mcp": "^0.0.70",
|
|
88
|
-
"@zibby/mcp-browser": "^0.1.
|
|
89
|
+
"@zibby/mcp-browser": "^0.1.8",
|
|
89
90
|
"acorn": "^8.15.0",
|
|
90
91
|
"acorn-walk": "^8.3.5",
|
|
91
92
|
"axios": "^1.15.0",
|
|
92
93
|
"chalk": "^5.3.0",
|
|
93
94
|
"dotenv": "^17.4.1",
|
|
94
95
|
"handlebars": "^4.7.9",
|
|
96
|
+
"hono": "^4.12.14",
|
|
95
97
|
"zod": "^4.3.6",
|
|
96
98
|
"zod-to-json-schema": "^3.25.2"
|
|
97
99
|
},
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@zibby/core",
|
|
3
|
-
"version": "0.1.
|
|
3
|
+
"version": "0.1.30",
|
|
4
4
|
"description": "Core test automation engine with multi-agent and multi-MCP support",
|
|
5
5
|
"type": "module",
|
|
6
6
|
"main": "dist/index.js",
|
|
@@ -69,6 +69,7 @@
|
|
|
69
69
|
},
|
|
70
70
|
"files": [
|
|
71
71
|
"dist/",
|
|
72
|
+
"scripts/",
|
|
72
73
|
"templates/",
|
|
73
74
|
"!templates/**/__tests__/",
|
|
74
75
|
"!templates/**/*.test.js",
|
|
@@ -85,13 +86,14 @@
|
|
|
85
86
|
"@modelcontextprotocol/sdk": "^1.29.0",
|
|
86
87
|
"@openai/codex-sdk": "^0.120.0",
|
|
87
88
|
"@playwright/mcp": "^0.0.70",
|
|
88
|
-
"@zibby/mcp-browser": "^0.1.
|
|
89
|
+
"@zibby/mcp-browser": "^0.1.8",
|
|
89
90
|
"acorn": "^8.15.0",
|
|
90
91
|
"acorn-walk": "^8.3.5",
|
|
91
92
|
"axios": "^1.15.0",
|
|
92
93
|
"chalk": "^5.3.0",
|
|
93
94
|
"dotenv": "^17.4.1",
|
|
94
95
|
"handlebars": "^4.7.9",
|
|
96
|
+
"hono": "^4.12.14",
|
|
95
97
|
"zod": "^4.3.6",
|
|
96
98
|
"zod-to-json-schema": "^3.25.2"
|
|
97
99
|
},
|
|
@@ -0,0 +1,51 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
/**
|
|
3
|
+
* Export default workflows as JSON for the backend API
|
|
4
|
+
* Run: node packages/core/scripts/export-default-workflows.js
|
|
5
|
+
*/
|
|
6
|
+
|
|
7
|
+
import { writeFileSync, mkdirSync, existsSync } from 'fs';
|
|
8
|
+
import { join, dirname } from 'path';
|
|
9
|
+
import { fileURLToPath } from 'url';
|
|
10
|
+
import { WorkflowGraph } from '../src/framework/graph.js';
|
|
11
|
+
import { buildAnalysisGraph } from '../templates/graphs/analysisGraph.js';
|
|
12
|
+
|
|
13
|
+
const __dirname = dirname(fileURLToPath(import.meta.url));
|
|
14
|
+
const outputDir = join(__dirname, '../../../backend/src/data');
|
|
15
|
+
|
|
16
|
+
// Ensure output directory exists
|
|
17
|
+
if (!existsSync(outputDir)) {
|
|
18
|
+
mkdirSync(outputDir, { recursive: true });
|
|
19
|
+
}
|
|
20
|
+
|
|
21
|
+
// Build and serialize analysis workflow
|
|
22
|
+
console.log('Building analysis workflow...');
|
|
23
|
+
const analysisGraph = new WorkflowGraph();
|
|
24
|
+
buildAnalysisGraph(analysisGraph);
|
|
25
|
+
const serializedAnalysis = analysisGraph.serialize();
|
|
26
|
+
|
|
27
|
+
// Add workflow metadata
|
|
28
|
+
const analysisWorkflow = {
|
|
29
|
+
id: 'default_analysis',
|
|
30
|
+
name: 'Analysis Workflow',
|
|
31
|
+
description: 'Analyzes tickets and generates code implementation',
|
|
32
|
+
...serializedAnalysis,
|
|
33
|
+
createdAt: new Date().toISOString(),
|
|
34
|
+
isDefault: true
|
|
35
|
+
};
|
|
36
|
+
|
|
37
|
+
// Write to JSON file
|
|
38
|
+
const outputPath = join(outputDir, 'default-workflows.json');
|
|
39
|
+
const workflows = {
|
|
40
|
+
analysis: analysisWorkflow
|
|
41
|
+
};
|
|
42
|
+
|
|
43
|
+
writeFileSync(outputPath, JSON.stringify(workflows, null, 2));
|
|
44
|
+
console.log(`â
Exported default workflows to ${outputPath}`);
|
|
45
|
+
|
|
46
|
+
// Also export as individual files for clarity
|
|
47
|
+
writeFileSync(
|
|
48
|
+
join(outputDir, 'default-analysis-workflow.json'),
|
|
49
|
+
JSON.stringify(analysisWorkflow, null, 2)
|
|
50
|
+
);
|
|
51
|
+
console.log('â
Exported individual workflow files');
|
|
@@ -0,0 +1,174 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
|
|
3
|
+
/**
|
|
4
|
+
* Patch cursor-agent CLI to auto-approve MCP tool calls for CI/CD use.
|
|
5
|
+
*
|
|
6
|
+
* This script modifies the cursor-agent source code to bypass the manual
|
|
7
|
+
* approval prompt for MCP tool executions, making it suitable for automated
|
|
8
|
+
* CI/CD pipelines.
|
|
9
|
+
*
|
|
10
|
+
* Usage:
|
|
11
|
+
* node patch-cursor-mcp.js
|
|
12
|
+
*/
|
|
13
|
+
|
|
14
|
+
import { readFileSync, writeFileSync, existsSync, readdirSync, statSync } from 'fs';
|
|
15
|
+
import { join, dirname } from 'path';
|
|
16
|
+
import { homedir } from 'os';
|
|
17
|
+
import { fileURLToPath } from 'url';
|
|
18
|
+
|
|
19
|
+
const __filename = fileURLToPath(import.meta.url);
|
|
20
|
+
const __dirname = dirname(__filename);
|
|
21
|
+
|
|
22
|
+
function findLatestCursorAgent() {
|
|
23
|
+
const versionsDir = join(homedir(), '.local/share/cursor-agent/versions');
|
|
24
|
+
|
|
25
|
+
if (!existsSync(versionsDir)) {
|
|
26
|
+
console.error(`â Cursor agent versions directory not found: ${versionsDir}`);
|
|
27
|
+
console.error(' Make sure cursor-agent is installed: curl https://cursor.com/install -fsS | bash');
|
|
28
|
+
process.exit(1);
|
|
29
|
+
}
|
|
30
|
+
|
|
31
|
+
const versions = readdirSync(versionsDir)
|
|
32
|
+
.map(name => join(versionsDir, name))
|
|
33
|
+
.filter(path => statSync(path).isDirectory());
|
|
34
|
+
|
|
35
|
+
if (versions.length === 0) {
|
|
36
|
+
console.error(`â No cursor-agent versions found in ${versionsDir}`);
|
|
37
|
+
process.exit(1);
|
|
38
|
+
}
|
|
39
|
+
|
|
40
|
+
// Get the latest version by creation time
|
|
41
|
+
const latestVersion = versions
|
|
42
|
+
.map(path => ({ path, ctime: statSync(path).ctimeMs }))
|
|
43
|
+
.sort((a, b) => b.ctime - a.ctime)[0].path;
|
|
44
|
+
|
|
45
|
+
return join(latestVersion, 'index.js');
|
|
46
|
+
}
|
|
47
|
+
|
|
48
|
+
function backupFile(filePath) {
|
|
49
|
+
const backupPath = `${filePath }.backup`;
|
|
50
|
+
|
|
51
|
+
if (existsSync(backupPath)) {
|
|
52
|
+
console.log(`âšī¸ Backup already exists: ${backupPath}`);
|
|
53
|
+
return backupPath;
|
|
54
|
+
}
|
|
55
|
+
|
|
56
|
+
const content = readFileSync(filePath, 'utf8');
|
|
57
|
+
writeFileSync(backupPath, content, 'utf8');
|
|
58
|
+
|
|
59
|
+
console.log(`â
Backup created: ${backupPath}`);
|
|
60
|
+
return backupPath;
|
|
61
|
+
}
|
|
62
|
+
|
|
63
|
+
function patchMcpApproval(filePath) {
|
|
64
|
+
const content = readFileSync(filePath, 'utf8');
|
|
65
|
+
|
|
66
|
+
// Check if already patched
|
|
67
|
+
if (content.includes('AUTO-APPROVE MCP TOOLS FOR CI/CD')) {
|
|
68
|
+
console.log('â
File is already patched!');
|
|
69
|
+
return false;
|
|
70
|
+
}
|
|
71
|
+
|
|
72
|
+
// Pattern to match the requestApproval call for MCP
|
|
73
|
+
const pattern = /(\s+const result = await this\.pendingDecisionProvider\.requestApproval\(\{\s+type: OperationType\.Mcp,\s+details: \{\s+name: approvalDetails\.name,\s+toolName: approvalDetails\.toolName,\s+providerIdentifier: approvalDetails\.providerIdentifier,\s+args: approvalDetails\.args,\s+\},\s+toolCallId: args\.toolCallId,\s+\}\);)/;
|
|
74
|
+
|
|
75
|
+
const replacement = ` // AUTO-APPROVE MCP TOOLS FOR CI/CD
|
|
76
|
+
const result = { approved: true };
|
|
77
|
+
/*
|
|
78
|
+
const result = await this.pendingDecisionProvider.requestApproval({
|
|
79
|
+
type: OperationType.Mcp,
|
|
80
|
+
details: {
|
|
81
|
+
name: approvalDetails.name,
|
|
82
|
+
toolName: approvalDetails.toolName,
|
|
83
|
+
providerIdentifier: approvalDetails.providerIdentifier,
|
|
84
|
+
args: approvalDetails.args,
|
|
85
|
+
},
|
|
86
|
+
toolCallId: args.toolCallId,
|
|
87
|
+
});
|
|
88
|
+
*/`;
|
|
89
|
+
|
|
90
|
+
const newContent = content.replace(pattern, replacement);
|
|
91
|
+
|
|
92
|
+
if (newContent === content) {
|
|
93
|
+
console.log('âšī¸ Pattern not found - cursor-agent code structure has changed.');
|
|
94
|
+
console.log(' This is OKAY! Newer versions might not need this patch.');
|
|
95
|
+
console.log(' If auto-approval doesn\'t work, cursor-agent team may have fixed it!');
|
|
96
|
+
return false;
|
|
97
|
+
}
|
|
98
|
+
|
|
99
|
+
writeFileSync(filePath, newContent, 'utf8');
|
|
100
|
+
return true;
|
|
101
|
+
}
|
|
102
|
+
|
|
103
|
+
function addApprovalKeyLogging(filePath) {
|
|
104
|
+
const content = readFileSync(filePath, 'utf8');
|
|
105
|
+
const lines = content.split('\n');
|
|
106
|
+
|
|
107
|
+
// Check if already added
|
|
108
|
+
if (lines.some(line => line.includes('đ APPROVAL KEY'))) {
|
|
109
|
+
console.log('â
Approval key logging already enabled!');
|
|
110
|
+
return false;
|
|
111
|
+
}
|
|
112
|
+
|
|
113
|
+
// Find the line with generateApprovalKey and add logging after it
|
|
114
|
+
let targetLineNum = -1;
|
|
115
|
+
for (let i = 0; i < lines.length; i++) {
|
|
116
|
+
if (lines[i].includes('const approvalKey = generateApprovalKey(serverName, server, this.cwd);')) {
|
|
117
|
+
targetLineNum = i;
|
|
118
|
+
break;
|
|
119
|
+
}
|
|
120
|
+
}
|
|
121
|
+
|
|
122
|
+
if (targetLineNum === -1) {
|
|
123
|
+
console.log('â ī¸ Could not find approval key generation line');
|
|
124
|
+
return false;
|
|
125
|
+
}
|
|
126
|
+
|
|
127
|
+
// Insert the console.log after the generateApprovalKey line
|
|
128
|
+
const indent = ' ';
|
|
129
|
+
const logLine = `${indent}console.log("đ APPROVAL KEY:", serverName, "=>", approvalKey);`;
|
|
130
|
+
lines.splice(targetLineNum + 1, 0, logLine);
|
|
131
|
+
|
|
132
|
+
writeFileSync(filePath, lines.join('\n'), 'utf8');
|
|
133
|
+
return true;
|
|
134
|
+
}
|
|
135
|
+
|
|
136
|
+
/**
 * Entry point: patches the locally installed cursor-agent so MCP tools
 * work non-interactively in CI/CD.
 *
 * Steps: locate the newest cursor-agent install, back up its index file,
 * then apply two patches (MCP tool auto-approval and approval-key logging).
 * Prints follow-up instructions when anything was changed.
 *
 * NOTE: the original source's emoji literals were mojibake (double-encoded
 * UTF-8, e.g. "â…" for a checkmark); they are restored to the intended
 * characters here.
 */
function main() {
  console.log('🔧 Patching cursor-agent for CI/CD MCP usage...\n');
  console.log('ℹ️ Note: This patch is temporary and may not be needed in future versions.\n');

  // Find cursor-agent (throws/exits inside the helper if not installed)
  const indexFile = findLatestCursorAgent();
  console.log(`📁 Found cursor-agent: ${indexFile}\n`);

  // Back up before modifying anything so the user can restore the original
  backupFile(indexFile);
  console.log();

  // Apply patches; each returns true only when it actually changed the file
  const patchedApproval = patchMcpApproval(indexFile);
  if (patchedApproval) {
    console.log('✅ Patched MCP tool auto-approval');
  }
  console.log();

  const patchedLogging = addApprovalKeyLogging(indexFile);
  if (patchedLogging) {
    console.log('✅ Added approval key logging');
  }
  console.log();

  if (patchedApproval || patchedLogging) {
    console.log('🎉 cursor-agent is now ready for CI/CD!');
    console.log('\nNext steps:');
    console.log('1. Run: cursor-agent mcp list');
    console.log('2. Copy the approval key shown');
    console.log('3. Add it to ~/.cursor/projects/YOUR_PROJECT/mcp-approvals.json');
    console.log('\n⚠️ If cursor-agent updates break this patch, MCP may work without it.');
  } else {
    console.log("ℹ️ No changes needed - already configured or cursor-agent version doesn't need patching!");
  }
}

main();
|
|
174
|
+
|
|
@@ -0,0 +1,115 @@
|
|
|
1
|
+
#!/bin/bash
# Complete CI/CD setup: verifies the toolchain (Node.js, Python 3), installs
# npm dependencies and cursor-agent, checks CURSOR_API_KEY, then delegates to
# the official Playwright MCP setup script.
set -e

echo "🚀 Complete CI/CD Setup (from scratch)"
echo "======================================="
echo ""

# Absolute paths so the script works regardless of the caller's CWD
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"

echo "📁 Project root: $PROJECT_ROOT"
echo ""

# Only Darwin/Linux get automatic cursor-agent installation below
OS="$(uname -s)"
echo "🖥️ Detected OS: $OS"
echo ""

# Step 1: Node.js is required for npm install and the MCP server
echo "Step 1: Checking Node.js..."
if command -v node &> /dev/null; then
  NODE_VERSION=$(node --version)
  echo "✅ Node.js installed: $NODE_VERSION"
else
  echo "❌ Node.js not found!"
  echo "   Install: https://nodejs.org/ or use nvm"
  exit 1
fi
echo ""

# Step 2: Python 3 must be present (used by downstream tooling)
echo "Step 2: Checking Python 3..."
if command -v python3 &> /dev/null; then
  PYTHON_VERSION=$(python3 --version)
  echo "✅ Python 3 installed: $PYTHON_VERSION"
else
  echo "❌ Python 3 not found!"
  exit 1
fi
echo ""

# Step 3: Install npm dependencies
echo "Step 3: Installing npm dependencies..."
cd "$PROJECT_ROOT"
if [ -f "package.json" ]; then
  # Report failure explicitly: with `set -e`, a fully-silenced npm install
  # would otherwise abort the script with no indication of what went wrong.
  if npm install --silent > /dev/null 2>&1; then
    echo "✅ npm dependencies installed"
  else
    echo "❌ npm install failed (re-run 'npm install' manually to see errors)"
    exit 1
  fi
else
  echo "❌ package.json not found"
  exit 1
fi
echo ""

# Step 4: Check/install cursor-agent
echo "Step 4: Checking cursor-agent..."
if command -v cursor-agent &> /dev/null; then
  CURSOR_VERSION=$(cursor-agent --version 2>&1 | head -1 || echo "unknown")
  echo "✅ cursor-agent installed: $CURSOR_VERSION"
else
  echo "⚠️ cursor-agent not found - installing..."

  if [ "$OS" = "Darwin" ] || [ "$OS" = "Linux" ]; then
    # Install cursor-agent via the official installer
    curl -fsSL https://cursor.com/install | bash

    # The installer drops the binary in ~/.local/bin; expose it for this session
    export PATH="$HOME/.local/bin:$PATH"

    if command -v cursor-agent &> /dev/null; then
      echo "✅ cursor-agent installed successfully"
    else
      echo "❌ cursor-agent installation failed"
      echo "   Manual install: curl https://cursor.com/install -fsS | bash"
      exit 1
    fi
  else
    echo "❌ Unsupported OS for automatic cursor-agent installation"
    echo "   Install manually: https://cursor.com/cli"
    exit 1
  fi
fi
echo ""

# Step 5: CURSOR_API_KEY is required in CI/CD, but only warn here so the
# rest of the setup can still run locally without it.
echo "Step 5: Checking CURSOR_API_KEY..."
if [ -z "$CURSOR_API_KEY" ]; then
  echo "⚠️ CURSOR_API_KEY not set"
  echo "   ⚠️ REQUIRED for CI/CD - cursor-agent will fail without it!"
  echo "   Set it as an environment variable in your CI/CD config"
  echo "   Get your key from: https://cursor.com/settings"
  echo ""
  echo "   Example (GitLab CI):"
  echo "   variables:"
  echo "     CURSOR_API_KEY: \$CURSOR_API_KEY"
  echo ""
else
  echo "✅ CURSOR_API_KEY is set"
fi
echo ""

# Step 6: Delegate the MCP-specific setup to the sibling script
echo "Step 6: Setting up official Playwright MCP..."
echo ""
bash "$SCRIPT_DIR/setup-official-playwright-mcp.sh"

echo ""
echo "======================================="
echo "✅ Complete CI/CD setup finished!"
echo "======================================="
echo ""
echo "Ready to use in CI/CD:"
echo "  cursor-agent -p 'Navigate to google.com' --mcp-server playwright-official"
echo ""
|
|
115
|
+
|
|
@@ -0,0 +1,173 @@
|
|
|
1
|
+
#!/bin/bash
# Configures the Zibby MCP Browser (forked Playwright MCP) for cursor-agent:
# locates/installs cursor-agent, resolves the MCP server entry point, writes
# ~/.cursor/mcp.json, patches cursor-agent, and pre-approves the server for
# the current workspace when the approval key can be extracted.

set -e

# Script directory, for resolving sibling scripts and workspace-relative paths
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

# Step 1: Locate cursor-agent (check known install locations, not just PATH)
CURSOR_AGENT_PATH=""
if command -v cursor-agent &> /dev/null; then
  CURSOR_AGENT_PATH=$(command -v cursor-agent)
elif [ -x "$HOME/.local/bin/cursor-agent" ]; then
  CURSOR_AGENT_PATH="$HOME/.local/bin/cursor-agent"
elif [ -x "$HOME/.cursor/bin/cursor-agent" ]; then
  CURSOR_AGENT_PATH="$HOME/.cursor/bin/cursor-agent"
fi

if [ -z "$CURSOR_AGENT_PATH" ]; then
  echo "⚠️ cursor-agent not found. Installing..."
  curl https://cursor.com/install -fsS | bash
  # Check again after install
  if [ -x "$HOME/.local/bin/cursor-agent" ]; then
    CURSOR_AGENT_PATH="$HOME/.local/bin/cursor-agent"
  elif [ -x "$HOME/.cursor/bin/cursor-agent" ]; then
    CURSOR_AGENT_PATH="$HOME/.cursor/bin/cursor-agent"
  fi
fi

# Always ensure these directories are in PATH for this script
export PATH="$HOME/.cursor/bin:$HOME/.local/bin:$PATH"

# Step 2: Find Node.js absolute paths (the Cursor GUI doesn't inherit shell PATH)
NODE_PATH=$(which node 2>/dev/null || echo "")
NPX_PATH=$(which npx 2>/dev/null || echo "")
NODE_BIN_DIR=$(dirname "$NODE_PATH" 2>/dev/null || echo "")

if [ -z "$NODE_PATH" ]; then
  echo "❌ node not found. Please install Node.js"
  exit 1
fi

echo "   Using Node: $NODE_PATH"

# Step 3: Resolve the Zibby MCP Browser entry point (stable IDs + event recording)
echo "Setting up Zibby MCP Browser..."

# Resolve @zibby/mcp-browser from node_modules (works in workspace, global, and npx installs)
ZIBBY_MCP_PKG=$("$NODE_PATH" -e "try{console.log(require.resolve('@zibby/mcp-browser/package.json',{paths:['$SCRIPT_DIR/..']}))}catch{}" 2>/dev/null)
if [ -n "$ZIBBY_MCP_PKG" ]; then
  ZIBBY_MCP_BIN="$(dirname "$ZIBBY_MCP_PKG")/bin/mcp-browser-zibby.js"
fi

# Fallback: workspace-relative path (dev only)
if [ -z "$ZIBBY_MCP_BIN" ] || [ ! -f "$ZIBBY_MCP_BIN" ]; then
  ZIBBY_MCP_DIR="$SCRIPT_DIR/../../mcps/browser"
  if [ -f "$ZIBBY_MCP_DIR/bin/mcp-browser-zibby.js" ]; then
    ZIBBY_MCP_BIN="$ZIBBY_MCP_DIR/bin/mcp-browser-zibby.js"
  fi
fi

# Final fallback: official @playwright/mcp (no stable IDs or event recording)
if [ -z "$ZIBBY_MCP_BIN" ] || [ ! -f "$ZIBBY_MCP_BIN" ]; then
  PLAYWRIGHT_MCP_PKG=$("$NODE_PATH" -e "try{console.log(require.resolve('@playwright/mcp/package.json',{paths:['$SCRIPT_DIR/..']}))}catch{}" 2>/dev/null)
  if [ -n "$PLAYWRIGHT_MCP_PKG" ]; then
    ZIBBY_MCP_BIN="$(dirname "$PLAYWRIGHT_MCP_PKG")/cli.js"
    echo "   ⚠️ Using @playwright/mcp (official) — stable IDs and event recording unavailable"
  fi
fi

if [ -z "$ZIBBY_MCP_BIN" ] || [ ! -f "$ZIBBY_MCP_BIN" ]; then
  echo "❌ No Playwright MCP found. Run: npm install @zibby/mcp-browser"
  exit 1
fi

echo "   ✅ Zibby MCP Browser ready"

# Install ffmpeg so Playwright can record video (best effort)
echo "🎬 Installing ffmpeg for video recording..."
if command -v playwright &> /dev/null; then
  playwright install ffmpeg > /dev/null 2>&1 && echo "   ✅ ffmpeg installed" || echo "   ⚠️ Could not install ffmpeg"
else
  "$NPX_PATH" --yes playwright install ffmpeg > /dev/null 2>&1 && echo "   ✅ ffmpeg installed" || echo "   ⚠️ Could not install ffmpeg (will disable video)"
fi

# Step 4: Write the MCP server configuration
echo "Configuring Zibby MCP Browser..."
mkdir -p ~/.cursor

# Default viewport size (overridable via env)
VIEWPORT_WIDTH="${ZIBBY_VIEWPORT_WIDTH:-1280}"
VIEWPORT_HEIGHT="${ZIBBY_VIEWPORT_HEIGHT:-720}"
VIEWPORT_SIZE="${VIEWPORT_WIDTH}x${VIEWPORT_HEIGHT}"

# Build MCP args.
# --save-video is stripped by the Zibby wrapper before forwarding to @playwright/mcp,
# but the wrapper reads the resolution from it to auto-start video recording.
MCP_ARGS="\"--save-video=${VIEWPORT_SIZE}\", \"--viewport-size=${VIEWPORT_SIZE}\", \"--output-dir=test-results\""

# Headless mode is opt-in (default is headed)
if [ "$ZIBBY_HEADLESS" = "1" ]; then
  MCP_ARGS="$MCP_ARGS, \"--headless\""
  echo "   Mode: Headless (hidden browser)"
else
  echo "   Mode: Headed (visible browser)"
fi

# Video recording is on by default; ZIBBY_VIDEO=off disables it
if [ "$ZIBBY_VIDEO" != "off" ]; then
  echo "   Video: Enabled (${VIEWPORT_SIZE})"
else
  MCP_ARGS="\"--viewport-size=${VIEWPORT_SIZE}\", \"--output-dir=test-results\""
  if [ "$ZIBBY_HEADLESS" = "1" ]; then
    MCP_ARGS="$MCP_ARGS, \"--headless\""
  fi
  echo "   Video: Disabled"
fi

# Cloud sync is handled by the CLI/Studio directly (old mcp-server package removed).
cat > ~/.cursor/mcp.json <<EOF
{
  "mcpServers": {
    "playwright-official": {
      "command": "$NODE_PATH",
      "args": ["$ZIBBY_MCP_BIN", $MCP_ARGS],
      "env": {
        "PATH": "$NODE_BIN_DIR:/usr/bin:/bin:/usr/sbin:/sbin"
      },
      "description": "Zibby MCP Browser - Forked Playwright MCP with stable ID support"
    }
  }
}
EOF
echo "   + Zibby MCP Browser (stable IDs + event recording)"

# Step 5: Patch cursor-agent (best effort — newer versions may not need it)
echo "Patching cursor-agent..."
node "$SCRIPT_DIR/patch-cursor-mcp.js" > /dev/null 2>&1 || echo "   (patch may not be needed for your version)"

# Step 6: Extract the approval key.
# NOTE: '|| true' is required on these pipelines — under `set -e`, a grep
# with no match (or a failing cursor-agent invocation) inside a command
# substitution would abort the whole script before the fallback branches
# below ever run.
echo "🔑 Getting approval key..."
APPROVAL_OUTPUT=$(cursor-agent mcp list 2>&1 || true)

APPROVAL_KEY_LINE=$(echo "$APPROVAL_OUTPUT" | grep "🔑 APPROVAL KEY.*playwright-official" | head -1 || true)
echo "$APPROVAL_KEY_LINE"

APPROVAL_KEY=$(echo "$APPROVAL_KEY_LINE" | sed -E 's/.*=> ([^ ]+).*/\1/' | head -1 || true)

if [ -z "$APPROVAL_KEY" ]; then
  APPROVAL_KEY=$(echo "$APPROVAL_OUTPUT" | grep -oE 'playwright-official-[a-f0-9]+' | head -1 || true)
fi

if [ -z "$APPROVAL_KEY" ]; then
  echo "⚠️ Could not auto-extract approval key"
  echo "   This is OK for development - approval key only needed for CI/CD"
  echo "   For CI/CD: Run 'cursor-agent mcp list' and save the key"
else
  # Step 7: Pre-approve the server for this workspace (only if we got the key)
  echo "Configuring approvals..."
  WORKSPACE_PATH="$PWD"
  # Cursor encodes a workspace path by dropping the leading '/' and
  # replacing the remaining separators with '-'
  WORKSPACE_ENCODED=$(echo "$WORKSPACE_PATH" | sed 's|^/||' | sed 's|/|-|g')
  APPROVAL_DIR="$HOME/.cursor/projects/$WORKSPACE_ENCODED"
  APPROVAL_FILE="$APPROVAL_DIR/mcp-approvals.json"

  mkdir -p "$APPROVAL_DIR"
  cat > "$APPROVAL_FILE" <<EOF
["$APPROVAL_KEY"]
EOF
  echo "Auto-approval configured for this workspace"
fi

echo ""
echo "Zibby MCP Browser ready!"
echo ""
|
|
@@ -0,0 +1,49 @@
|
|
|
1
|
+
#!/bin/bash

# Run Playwright tests with video recording
# Usage: ./scripts/test-with-video.sh [test-file] [headed]
#
# Examples:
#   ./scripts/test-with-video.sh tests/auth/login.spec.js
#   ./scripts/test-with-video.sh tests/auth/login.spec.js headed

TEST_FILE="${1:-tests/}"
HEADED_FLAG=""

if [ "$2" = "headed" ]; then
  HEADED_FLAG="--headed"
fi

echo "🎥 Running Playwright tests with video recording..."
echo "   Test: $TEST_FILE"
echo "   Mode: $([ -n "$HEADED_FLAG" ] && echo "Headed (visible)" || echo "Headless")"
echo ""

# Run tests (video is configured in playwright.config.js).
# Capture the exit status so we can still list the recorded videos on
# failure, but report the real result instead of always claiming success.
npx playwright test "$TEST_FILE" --project=chromium $HEADED_FLAG
TEST_STATUS=$?

echo ""
if [ $TEST_STATUS -eq 0 ]; then
  echo "✅ Tests complete!"
else
  echo "❌ Tests failed (exit code $TEST_STATUS)"
fi
echo ""
echo "📁 Videos saved in: test-results/"
echo ""

# List videos if any were created (glob errors are silenced; a non-zero ls
# status means no videos exist)
if ls test-results/*/video.webm > /dev/null 2>&1; then
  echo "🎬 Videos created:"
  ls -lh test-results/*/video.webm
  echo ""
  echo "To view videos:"
  echo "  1. Open Finder: open test-results/"
  echo "  2. Or play directly:"
  for video in test-results/*/video.webm; do
    echo "     open \"$video\""
  done
else
  echo "ℹ️ No videos found"
  echo ""
  echo "Video is configured in playwright.config.js:"
  echo "  - video: 'on' (always record)"
  echo "  - video: 'retain-on-failure' (only failed tests)"
fi

# Propagate the Playwright exit status so CI marks the job correctly
exit $TEST_STATUS
|
|
49
|
+
|