@zibby/core 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +147 -0
- package/package.json +94 -0
- package/src/agents/base.js +361 -0
- package/src/constants.js +47 -0
- package/src/enrichment/base.js +49 -0
- package/src/enrichment/enrichers/accessibility-enricher.js +197 -0
- package/src/enrichment/enrichers/dom-enricher.js +171 -0
- package/src/enrichment/enrichers/page-state-enricher.js +129 -0
- package/src/enrichment/enrichers/position-enricher.js +67 -0
- package/src/enrichment/index.js +96 -0
- package/src/enrichment/mcp-integration.js +149 -0
- package/src/enrichment/mcp-ref-enricher.js +78 -0
- package/src/enrichment/pipeline.js +192 -0
- package/src/enrichment/trace-text-enricher.js +115 -0
- package/src/framework/AGENTS.md +98 -0
- package/src/framework/agents/base.js +72 -0
- package/src/framework/agents/claude-strategy.js +278 -0
- package/src/framework/agents/cursor-strategy.js +459 -0
- package/src/framework/agents/index.js +105 -0
- package/src/framework/agents/utils/cursor-output-formatter.js +67 -0
- package/src/framework/agents/utils/openai-proxy-formatter.js +249 -0
- package/src/framework/code-generator.js +301 -0
- package/src/framework/constants.js +33 -0
- package/src/framework/context-loader.js +101 -0
- package/src/framework/function-bridge.js +78 -0
- package/src/framework/function-skill-registry.js +20 -0
- package/src/framework/graph-compiler.js +342 -0
- package/src/framework/graph.js +610 -0
- package/src/framework/index.js +28 -0
- package/src/framework/node-registry.js +163 -0
- package/src/framework/node.js +259 -0
- package/src/framework/output-parser.js +71 -0
- package/src/framework/skill-registry.js +55 -0
- package/src/framework/state-utils.js +52 -0
- package/src/framework/state.js +67 -0
- package/src/framework/tool-resolver.js +65 -0
- package/src/index.js +342 -0
- package/src/runtime/generation/base.js +46 -0
- package/src/runtime/generation/index.js +70 -0
- package/src/runtime/generation/mcp-ref-strategy.js +197 -0
- package/src/runtime/generation/stable-id-strategy.js +170 -0
- package/src/runtime/stable-id-runtime.js +248 -0
- package/src/runtime/verification/base.js +44 -0
- package/src/runtime/verification/index.js +67 -0
- package/src/runtime/verification/playwright-json-strategy.js +119 -0
- package/src/runtime/zibby-runtime.js +299 -0
- package/src/sync/index.js +2 -0
- package/src/sync/uploader.js +29 -0
- package/src/tools/run-playwright-test.js +158 -0
- package/src/utils/adf-converter.js +68 -0
- package/src/utils/ast-utils.js +37 -0
- package/src/utils/ci-setup.js +124 -0
- package/src/utils/cursor-utils.js +71 -0
- package/src/utils/logger.js +144 -0
- package/src/utils/mcp-config-writer.js +115 -0
- package/src/utils/node-schema-parser.js +522 -0
- package/src/utils/post-process-events.js +55 -0
- package/src/utils/result-handler.js +102 -0
- package/src/utils/ripple-effect.js +84 -0
- package/src/utils/selector-generator.js +239 -0
- package/src/utils/streaming-parser.js +387 -0
- package/src/utils/test-post-processor.js +211 -0
- package/src/utils/timeline.js +217 -0
- package/src/utils/trace-parser.js +325 -0
- package/src/utils/video-organizer.js +91 -0
- package/templates/browser-test-automation/README.md +114 -0
- package/templates/browser-test-automation/graph.js +54 -0
- package/templates/browser-test-automation/nodes/execute-live.js +250 -0
- package/templates/browser-test-automation/nodes/generate-script.js +77 -0
- package/templates/browser-test-automation/nodes/index.js +3 -0
- package/templates/browser-test-automation/nodes/preflight.js +59 -0
- package/templates/browser-test-automation/nodes/utils.js +154 -0
- package/templates/browser-test-automation/result-handler.js +286 -0
- package/templates/code-analysis/graph.js +72 -0
- package/templates/code-analysis/index.js +18 -0
- package/templates/code-analysis/nodes/analyze-ticket-node.js +204 -0
- package/templates/code-analysis/nodes/create-pr-node.js +175 -0
- package/templates/code-analysis/nodes/finalize-node.js +118 -0
- package/templates/code-analysis/nodes/generate-code-node.js +425 -0
- package/templates/code-analysis/nodes/generate-test-cases-node.js +376 -0
- package/templates/code-analysis/nodes/services/prMetaService.js +86 -0
- package/templates/code-analysis/nodes/setup-node.js +142 -0
- package/templates/code-analysis/prompts/analyze-ticket.md +181 -0
- package/templates/code-analysis/prompts/generate-code.md +33 -0
- package/templates/code-analysis/prompts/generate-test-cases.md +110 -0
- package/templates/code-analysis/state.js +40 -0
- package/templates/code-implementation/graph.js +35 -0
- package/templates/code-implementation/index.js +7 -0
- package/templates/code-implementation/state.js +14 -0
- package/templates/global-setup.js +56 -0
- package/templates/index.js +94 -0
- package/templates/register-nodes.js +24 -0
|
@@ -0,0 +1,249 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* OpenAI Proxy Formatter Utility
|
|
3
|
+
* Handles structured output post-processing via Zibby's OpenAI proxy
|
|
4
|
+
*
|
|
5
|
+
* Authentication Requirements:
|
|
6
|
+
* - Local/Dev: User must be logged in via `zibby login` (JWT token in ~/.zibby/config.json)
|
|
7
|
+
* - CI/CD: Must provide ZIBBY_USER_TOKEN env variable (Personal Access Token)
|
|
8
|
+
* - Project: Uses ZIBBY_API_KEY for project-scoped access
|
|
9
|
+
*/
|
|
10
|
+
|
|
11
|
+
import axios from 'axios';
|
|
12
|
+
import { homedir } from 'os';
|
|
13
|
+
import { join } from 'path';
|
|
14
|
+
import { existsSync, readFileSync } from 'fs';
|
|
15
|
+
import { zodToJsonSchema } from 'zod-to-json-schema';
|
|
16
|
+
import { logger } from '../../../utils/logger.js';
|
|
17
|
+
import { DEFAULT_MODELS, TIMEOUTS } from '../../../constants.js';
|
|
18
|
+
|
|
19
|
+
/**
 * Resolve the authentication token used to call the OpenAI proxy.
 * Precedence: OPENAI_PROXY_TOKEN (ECS per-execution) > ZIBBY_USER_TOKEN
 * (CI/CD Personal Access Token) > sessionToken from ~/.zibby/config.json
 * written by `zibby login`.
 * @returns {string|null} Token string, or null when no credential is found.
 */
function getUserToken() {
  const { OPENAI_PROXY_TOKEN, ZIBBY_USER_TOKEN } = process.env;

  // 1. ECS: per-execution token (generated by ecs-executor, validated by proxy Lambda)
  if (OPENAI_PROXY_TOKEN) {
    logger.debug('[Auth] Using OPENAI_PROXY_TOKEN (ECS execution)');
    return OPENAI_PROXY_TOKEN;
  }

  // 2. CI/CD: user's Personal Access Token (created in UI settings, set as env var)
  if (ZIBBY_USER_TOKEN) {
    logger.debug('[Auth] Using ZIBBY_USER_TOKEN (CI/CD PAT)');
    return ZIBBY_USER_TOKEN;
  }

  // 3. Local dev: zibby login session (~/.zibby/config.json)
  const sessionPath = join(homedir(), '.zibby', 'config.json');
  try {
    if (existsSync(sessionPath)) {
      const { sessionToken } = JSON.parse(readFileSync(sessionPath, 'utf-8'));
      if (sessionToken) {
        logger.debug('[Auth] Using session token from zibby login');
        return sessionToken;
      }
    }
  } catch (error) {
    // Best-effort: a malformed/unreadable config just means "not logged in".
    logger.debug(`[Auth] Could not read zibby login session: ${error.message}`);
  }

  return null;
}
|
|
53
|
+
|
|
54
|
+
/**
 * Resolve the OpenAI proxy base URL (always prod unless overridden).
 * An OPENAI_PROXY_URL override has any trailing `/v1` (with or without a
 * trailing slash) stripped so callers can paste the raw endpoint.
 * @returns {string} Full URL to OpenAI proxy endpoint
 */
function getProxyUrl() {
  const override = process.env.OPENAI_PROXY_URL;
  return override
    ? override.replace(/\/v1\/?$/, '')
    : 'https://api-prod.zibby.app/openai-proxy';
}
|
|
64
|
+
|
|
65
|
+
/**
 * Normalize a Zod-generated JSON schema in place for OpenAI's Structured
 * Outputs strict mode: objects get `additionalProperties: false` and every
 * property key listed in `required`; missing `type` fields are inferred;
 * the normalization recurses through properties and array items.
 * @param {Object} schema - Zod-generated JSON schema (mutated in place)
 */
function normalizeSchemaForOpenAI(schema) {
  if (typeof schema !== 'object' || schema === null) return;

  // Empty objects need type definition
  if (Object.keys(schema).length === 0) {
    schema.type = 'object';
    schema.additionalProperties = true;
    return;
  }

  // Infer type from structure if missing
  if (!schema.type) {
    if (schema.properties) {
      schema.type = 'object';
    } else if (schema.items) {
      schema.type = 'array';
    }
  }

  if (schema.type === 'object') {
    const props = schema.properties;
    if (props) {
      // Strict mode forbids open-ended objects: a property that uses
      // additionalProperties as a value schema with no fixed keys
      // (e.g. Record<string, number>) is replaced with an empty nullable object.
      for (const key of Object.keys(props)) {
        const prop = props[key];
        const isDynamicMap =
          prop.type === 'object' &&
          prop.additionalProperties &&
          prop.additionalProperties !== false &&
          (!prop.properties || Object.keys(prop.properties).length === 0);
        if (isDynamicMap) {
          props[key] = { type: ['object', 'null'] };
        }
      }

      // Strict mode: closed object, every key required.
      schema.additionalProperties = false;
      schema.required = Object.keys(props);
      for (const child of Object.values(props)) {
        normalizeSchemaForOpenAI(child);
      }
    } else if (!('additionalProperties' in schema)) {
      schema.additionalProperties = true;
    }
  }

  // Recursively normalize array items
  if (schema.type === 'array' && schema.items) {
    normalizeSchemaForOpenAI(schema.items);
  }
}
|
|
115
|
+
|
|
116
|
+
/**
 * Format raw AI output into structured JSON using OpenAI proxy.
 *
 * @param {string} rawText - Raw text output from AI agent
 * @param {ZodSchema} zodSchema - Zod schema for validation
 * @returns {Promise<Object>} { structured: Object, raw: string }
 * @throws {Error} If authentication fails or OpenAI request fails
 */
export async function formatWithOpenAIProxy(rawText, zodSchema) {
  logger.info('🔧 [OpenAI Proxy] Formatting structured output...');

  // 1. Authentication is mandatory — fail early with setup instructions.
  const authToken = getUserToken();
  if (!authToken) {
    throw new Error(
      'Authentication required for structured output processing.\n' +
      ' Local development: Run `zibby login`\n' +
      ' CI/CD: Set ZIBBY_USER_TOKEN environment variable (Personal Access Token from UI settings)'
    );
  }

  // 2. Resolve the proxy endpoint.
  const proxyUrl = getProxyUrl();
  logger.info(`🔗 Using OpenAI proxy: ${proxyUrl}`);

  // 3. Convert the Zod schema to a JSON schema, unwrap $ref definitions,
  //    and normalize it for strict mode.
  const fullSchema = zodToJsonSchema(zodSchema);
  let actualSchema = fullSchema;
  if (fullSchema.$ref && fullSchema.definitions) {
    const refKey = fullSchema.$ref.split('/').pop();
    actualSchema = fullSchema.definitions[refKey] || fullSchema;
    logger.debug(`Extracted schema from $ref: ${refKey}`);
  }
  delete actualSchema.$schema; // meta field is not accepted by the API
  normalizeSchemaForOpenAI(actualSchema);

  // 4. Build the prompt — truncate to fit model context (~120K tokens ≈ ~400K chars),
  //    keeping the most recent (last) portion of the text.
  const MAX_TEXT_CHARS = 400_000;
  let trimmedText = rawText;
  if (rawText.length > MAX_TEXT_CHARS) {
    logger.warn(`⚠️ [OpenAI Proxy] Raw text (${rawText.length} chars) exceeds limit, keeping last ${MAX_TEXT_CHARS} chars`);
    trimmedText = `... [truncated early content] ...\n${rawText.slice(-MAX_TEXT_CHARS)}`;
  }

  const formattingPrompt = `Extract and format the following information into structured JSON matching the schema.

RAW CONTENT:
${trimmedText}

Extract all relevant information and format it according to the schema. If any required fields are missing, do your best to infer them from the content.`;

  const requestBody = {
    model: DEFAULT_MODELS.OPENAI_POSTPROCESSING,
    messages: [
      { role: 'user', content: formattingPrompt }
    ],
    response_format: {
      type: 'json_schema',
      json_schema: {
        name: 'extract',
        schema: actualSchema,
        strict: true
      }
    }
  };

  logger.info(`📤 Sending to OpenAI proxy: model=${DEFAULT_MODELS.OPENAI_POSTPROCESSING}, schema keys=${Object.keys(actualSchema.properties || {}).join(', ')}`);
  logger.debug(` Schema size: ${JSON.stringify(actualSchema).length} chars`);
  logger.debug(` Prompt size: ${formattingPrompt.length} chars`);

  // 5. Call the Zibby OpenAI proxy. Header set depends on execution environment.
  const headers = { 'Content-Type': 'application/json' };
  if (process.env.OPENAI_PROXY_TOKEN) {
    // ECS: proxy Lambda validates x-proxy-token + x-execution-id against DynamoDB
    headers['x-proxy-token'] = authToken;
    headers['x-execution-id'] = process.env.EXECUTION_ID || '';
  } else {
    // Local/CI: user JWT or PAT in Authorization header
    headers['Authorization'] = `Bearer ${authToken}`;
    headers['x-api-key'] = process.env.ZIBBY_API_KEY || '';
    headers['x-execution-id'] = process.env.EXECUTION_ID || '';
  }

  try {
    const response = await axios.post(proxyUrl, requestBody, {
      headers,
      timeout: TIMEOUTS.OPENAI_REQUEST
    });

    const content = response.data?.choices?.[0]?.message?.content;
    if (!content) {
      throw new Error('OpenAI proxy returned empty response');
    }

    const structured = JSON.parse(content);
    logger.info('✅ Successfully formatted with OpenAI proxy');

    return { structured, raw: rawText };
  } catch (error) {
    // HTTP-level failures carry a response; everything else (timeouts,
    // empty-response/parse errors above) falls through to the generic path.
    if (error.response) {
      const { status, data: errorData } = error.response;

      logger.error(`❌ OpenAI proxy request failed: ${status}`);
      logger.error(` Status: ${status}`);
      logger.error(` Response: ${JSON.stringify(errorData, null, 2)}`);

      if (status === 401 || status === 403) {
        throw new Error(
          'Authentication failed for OpenAI proxy.\n' +
          ' Run `zibby login` or set ZIBBY_USER_TOKEN environment variable.\n' +
          ` Response: ${JSON.stringify(errorData)}`,
          { cause: error }
        );
      }

      throw new Error(`Failed to format Cursor output: ${errorData?.error?.message || 'Unknown error'}`, { cause: error });
    }

    logger.error(`❌ OpenAI proxy request failed: ${error.message}`);
    throw new Error(`Failed to format output: ${error.message}`, { cause: error });
  }
}
|
|
@@ -0,0 +1,301 @@
|
|
|
1
|
+
import { NODE_DEFAULT_TOOLS } from './tool-resolver.js';
|
|
2
|
+
import { getNodeTemplate } from './node-registry.js';
|
|
3
|
+
|
|
4
|
+
/**
 * Generate the full workflow module source from a visual-editor config.
 * @param {Object} config - { nodes, edges, nodeConfigs } from the editor
 * @param {Object} [meta] - { workflowType, projectId, version } header metadata
 * @returns {string} Complete JavaScript source for the workflow file
 */
export function generateWorkflowCode(config, meta = {}) {
  const { nodes, edges, nodeConfigs = {} } = config;

  // Split nodes into decision markers (compiled away) and executable nodes.
  const decisionNodeIds = new Set();
  const executableNodes = [];
  const nodeTypeMap = new Map();

  nodes.forEach((node) => {
    const nodeType = node.data?.nodeType || node.type;
    nodeTypeMap.set(node.id, nodeType);
    if (nodeType === 'decision') {
      decisionNodeIds.add(node.id);
    } else {
      executableNodes.push({ id: node.id, nodeType, label: node.data?.label || node.id });
    }
  });

  // A node with neither customCode nor executeCode falls back to a registered template.
  const usesRegisteredNodes = executableNodes.some((n) => {
    const cfg = nodeConfigs[n.id] || {};
    return !cfg.customCode && !cfg.executeCode;
  });

  const { toolsPerNode, toolIdsByVar } = collectToolBindings(executableNodes, nodeConfigs);
  const { simpleEdges, conditionalEdges } = collapseEdges(edges, decisionNodeIds);
  const entryNode = findEntryNode(executableNodes, edges, decisionNodeIds);
  const workflowType = meta.workflowType || 'workflow';

  // Assemble the file section by section; empty sections are dropped.
  const sections = [
    generateHeader(meta),
    generateImports(workflowType, { usesRegisteredNodes }),
    generateToolDeclarations(toolIdsByVar),
    generateConfigLoader(workflowType),
    generateNodeFunctions(executableNodes, nodeConfigs),
    generateBuildFunction(
      executableNodes, entryNode,
      simpleEdges, conditionalEdges, toolsPerNode, workflowType
    ),
  ];

  return sections.filter(Boolean).join('\n');
}
|
|
46
|
+
|
|
47
|
+
/**
 * Strip tool bindings from node configs before persisting them as JSON.
 * Tool lists are declared in the generated code itself; only the remaining
 * per-node settings are worth saving, and empty entries are dropped.
 * @param {Object} nodeConfigs - Per-node config objects keyed by node id
 * @returns {Object} Cleaned configs with `tools` removed
 */
export function generateNodeConfigsJson(nodeConfigs) {
  return Object.entries(nodeConfigs).reduce((acc, [nodeId, { tools: _tools, ...rest }]) => {
    if (Object.keys(rest).length > 0) {
      acc[nodeId] = rest;
    }
    return acc;
  }, {});
}
|
|
57
|
+
|
|
58
|
+
/**
 * Build the generated-file banner comment.
 * @param {Object} meta - { projectId, workflowType, version }
 * @returns {string} Header comment block ending with a blank line
 */
function generateHeader(meta) {
  const wfType = meta.workflowType || 'workflow';
  const projectPart = meta.projectId ? `Project: ${meta.projectId} | ` : '';
  return [
    `// Generated by Zibby Visual Workflow Editor`,
    `// ${projectPart}Type: ${wfType} | Version: ${meta.version ?? 0}`,
    `// Downloaded: ${new Date().toISOString()}`,
    `//`,
    `// Upload back: zibby workflow upload --project <id> --type ${wfType}`,
    '',
  ].join('\n');
}
|
|
70
|
+
|
|
71
|
+
/**
 * Build the import section of the generated workflow file.
 * @param {string} _workflowType - Unused; kept for call-site symmetry with the
 *   other section generators (renamed with `_` prefix to match the file's
 *   convention for unused params, see generateBuildFunction)
 * @param {Object} [options]
 * @param {boolean} [options.usesRegisteredNodes=true] - Whether any node relies
 *   on a registered template; adds the register-nodes side-effect import
 * @returns {string} Import statements joined with newlines
 */
function generateImports(_workflowType, { usesRegisteredNodes = true } = {}) {
  const lines = [
    `import '@zibby/skills';`,
    `import { WorkflowGraph } from '@zibby/core/framework/graph.js';`,
    `import { invokeAgent } from '@zibby/core';`,
    `import { getResolvedToolDefinitions } from '@zibby/core/framework/tool-resolver.js';`,
  ];

  // Template-based nodes require the node registry to be populated at import time.
  if (usesRegisteredNodes) {
    lines.push(`import '@zibby/core/templates/register-nodes.js';`);
  }

  lines.push(
    `import { readFileSync, writeFileSync, existsSync, mkdirSync } from 'fs';`,
    `import { join, dirname } from 'path';`,
    `import { fileURLToPath } from 'url';`,
    '',
  );
  return lines.join('\n');
}
|
|
91
|
+
|
|
92
|
+
/**
 * Emit one `const <var> = getResolvedToolDefinitions([...])` line per unique
 * tool set. Nodes sharing the same sorted tool list share one declaration.
 * @param {Map<string, string[]>} uniqueToolSets - Variable name -> sorted skill IDs
 * @returns {string} Declaration block, or '' when no node declares tools
 */
function generateToolDeclarations(uniqueToolSets) {
  if (uniqueToolSets.size === 0) return '';

  const lines = [
    `// ── Tool Bindings ────────────────────────────────────────────────────`,
    `// Each call resolves skill IDs → full schemas + MCP server configs`,
    `// from the skill registry (populated by @zibby/skills import above).`,
  ];

  uniqueToolSets.forEach((toolIds, varName) => {
    lines.push(`const ${varName} = getResolvedToolDefinitions(${JSON.stringify(toolIds)}); // ${toolIds.join(', ')}`);
  });

  lines.push('');
  return lines.join('\n');
}
|
|
109
|
+
|
|
110
|
+
/**
 * Emit the boilerplate that loads per-node configs from the sibling JSON file
 * (workflow-<type>.config.json next to the generated module).
 * @param {string} workflowType - Used to derive the config filename
 * @returns {string} Loader code joined with newlines
 */
function generateConfigLoader(workflowType) {
  return [
    `// ── Node Configs (extra instructions, runtime settings) ─────────────`,
    `const __filename = fileURLToPath(import.meta.url);`,
    `const __dirname = dirname(__filename);`,
    `const configPath = join(__dirname, 'workflow-${workflowType}.config.json');`,
    `const nodeConfigs = existsSync(configPath)`,
    ` ? JSON.parse(readFileSync(configPath, 'utf-8'))`,
    ` : {};`,
    '',
  ].join('\n');
}
|
|
123
|
+
|
|
124
|
+
|
|
125
|
+
/**
 * Emit one `<node>_execute` const per executable node.
 * Source priority: user customCode > registered template > passthrough stub.
 * @param {Array<{id: string, nodeType: string}>} executableNodes
 * @param {Object} nodeConfigs - Per-node configs (may carry customCode)
 * @returns {string} Node implementation section
 */
function generateNodeFunctions(executableNodes, nodeConfigs) {
  const lines = [
    `// ── Node Implementations ─────────────────────────────────────────────`,
    `// Each node's execute function is inlined below.`,
    `// Edit any function to customize behavior. The upload command detects`,
    `// changes via // @custom markers and persists them to the cloud.`,
    '',
  ];

  for (const node of executableNodes) {
    const varName = sanitizeVarName(node.id);
    const customCode = nodeConfigs[node.id]?.customCode;

    if (customCode) {
      // User-edited implementation round-trips verbatim, flagged for upload.
      lines.push(
        `// @custom — modified from default "${node.nodeType}" template`,
        `const ${varName}_execute = ${customCode};`,
      );
    } else {
      const template = getNodeTemplate(node.nodeType);
      if (template) {
        lines.push(
          `// Default "${node.nodeType}" implementation`,
          `const ${varName}_execute = ${template};`,
        );
      } else {
        lines.push(
          `// No template available for "${node.nodeType}" — using passthrough`,
          `const ${varName}_execute = async (state) => ({ success: true, output: {}, raw: null });`,
        );
      }
    }
    lines.push('');
  }

  return lines.join('\n');
}
|
|
156
|
+
|
|
157
|
+
/**
 * Emit the exported buildGraph() function plus the trailing exports section.
 * @param {Array<{id: string, nodeType: string}>} executableNodes
 * @param {string} entryNode - Id of the workflow entry node
 * @param {Array} simpleEdges - Direct node-to-node edges
 * @param {Array} conditionalEdges - { source, code } pairs from collapsed decisions
 * @param {Map<string, string>} toolsPerNode - Node id -> tool variable name
 * @param {string} [_workflowType] - Unused; kept for call-site symmetry
 * @returns {string} Graph-builder section of the generated file
 */
function generateBuildFunction(
  executableNodes, entryNode,
  simpleEdges, conditionalEdges, toolsPerNode, _workflowType = 'workflow'
) {
  const out = [
    `// ── Graph Builder ────────────────────────────────────────────────────`,
    `export function buildGraph(options = {}) {`,
    ` const graph = new WorkflowGraph(options);`,
    '',
    ` // Nodes`,
  ];

  for (const node of executableNodes) {
    const varName = sanitizeVarName(node.id);
    out.push(
      ` graph.addNode('${node.id}', { name: '${node.id}', execute: ${varName}_execute });`,
      ` graph.setNodeType('${node.id}', '${node.nodeType}');`,
    );
  }

  out.push('', ` // Entry point`, ` graph.setEntryPoint('${entryNode}');`, '');

  if (simpleEdges.length > 0 || conditionalEdges.length > 0) {
    out.push(` // Edges`);
  }

  for (const edge of simpleEdges) {
    out.push(` graph.addEdge('${edge.source}', '${edge.target}');`);
  }

  for (const cond of conditionalEdges) {
    // Re-indent multi-line conditional code to sit inside buildGraph.
    const indented = cond.code.split('\n').map((line, i) => (i === 0 ? line : ` ${line}`)).join('\n');
    out.push(` graph.addConditionalEdges('${cond.source}', ${indented});`);
  }

  const toolMapEntries = executableNodes
    .filter((node) => toolsPerNode.get(node.id))
    .map((node) => ` '${node.id}': ${toolsPerNode.get(node.id)},`);

  if (toolMapEntries.length > 0) {
    out.push('', ` graph.resolvedToolsMap = {`);
    for (const entry of toolMapEntries) {
      out.push(` ${entry}`);
    }
    out.push(` };`);
  }

  out.push('', ` return graph;`, `}`, '');
  out.push(
    `// ── Exports ──────────────────────────────────────────────────────────`,
    `export { nodeConfigs };`,
    '',
  );

  return out.join('\n');
}
|
|
221
|
+
|
|
222
|
+
/**
 * Determine each node's tool-binding variable and the set of unique tool lists.
 * User-configured tools win over the node type's defaults; tool lists are
 * sorted so nodes with the same tools share one declaration variable.
 * @param {Array<{id: string, nodeType: string}>} executableNodes
 * @param {Object} nodeConfigs - Per-node configs (may carry a `tools` array)
 * @returns {{toolsPerNode: Map<string,string>, toolIdsByVar: Map<string,string[]>}}
 */
function collectToolBindings(executableNodes, nodeConfigs) {
  const toolsPerNode = new Map();
  const toolIdsByVar = new Map();

  for (const node of executableNodes) {
    const userTools = nodeConfigs[node.id]?.tools;
    let toolIds;

    if (Array.isArray(userTools) && userTools.length > 0) {
      toolIds = [...userTools].sort();
    } else {
      const defaults = NODE_DEFAULT_TOOLS[node.nodeType];
      if (defaults?.length) {
        toolIds = [...defaults].sort();
      }
    }

    if (!toolIds) continue;

    // e.g. ['browser', 'jira'] -> browserAndjiraTools
    const varName = `${toolIds.map((id) => id.replace(/[^a-zA-Z0-9]/g, '')).join('And')}Tools`;
    toolsPerNode.set(node.id, varName);
    if (!toolIdsByVar.has(varName)) {
      toolIdsByVar.set(varName, toolIds);
    }
  }

  return { toolsPerNode, toolIdsByVar };
}
|
|
250
|
+
|
|
251
|
+
|
|
252
|
+
/**
 * Translate raw editor edges into graph edges, compiling decision nodes away.
 * An edge into a decision node becomes a single conditional edge from the
 * decision's predecessor, using the conditional code attached to one of the
 * decision's outgoing edges. Each decision is compiled at most once.
 * @param {Array} edges - Raw editor edges ({ source, target, data? })
 * @param {Set<string>} decisionNodeIds - Ids of decision nodes
 * @returns {{simpleEdges: Array, conditionalEdges: Array}}
 */
function collapseEdges(edges, decisionNodeIds) {
  const simpleEdges = [];
  const conditionalEdges = [];
  const processedDecisions = new Set();

  // Index outgoing edges by source node for O(1) lookup.
  const edgesBySource = new Map();
  for (const edge of edges) {
    const bucket = edgesBySource.get(edge.source);
    if (bucket) {
      bucket.push(edge);
    } else {
      edgesBySource.set(edge.source, [edge]);
    }
  }

  for (const edge of edges) {
    // Edges originating at a decision are folded into the conditional below.
    if (decisionNodeIds.has(edge.source)) continue;

    if (!decisionNodeIds.has(edge.target)) {
      simpleEdges.push({ source: edge.source, target: edge.target });
      continue;
    }

    if (processedDecisions.has(edge.target)) continue;
    processedDecisions.add(edge.target);

    const outgoing = edgesBySource.get(edge.target) || [];
    const edgeWithCode = outgoing.find((e) => e.data?.conditionalCode || e.conditionalCode);
    if (edgeWithCode) {
      conditionalEdges.push({
        source: edge.source,
        code: edgeWithCode.data?.conditionalCode || edgeWithCode.conditionalCode,
      });
    }
  }

  return { simpleEdges, conditionalEdges };
}
|
|
287
|
+
|
|
288
|
+
/**
 * Find the workflow entry node: the first executable node with no incoming
 * edge (edges that target decision nodes are ignored). Falls back to the
 * first executable node when every node has an incoming edge.
 * @param {Array<{id: string}>} executableNodes
 * @param {Array} edges - Raw editor edges
 * @param {Set<string>} decisionNodeIds
 * @returns {string|undefined} Entry node id
 */
function findEntryNode(executableNodes, edges, decisionNodeIds) {
  const hasIncoming = new Set(
    edges.filter((e) => !decisionNodeIds.has(e.target)).map((e) => e.target)
  );
  const entry = executableNodes.find((n) => !hasIncoming.has(n.id));
  return (entry ?? executableNodes[0])?.id;
}
|
|
298
|
+
|
|
299
|
+
/**
 * Convert a node id into a safe JavaScript identifier fragment:
 * every character outside [a-zA-Z0-9] becomes an underscore.
 * @param {string} nodeId
 * @returns {string}
 */
function sanitizeVarName(nodeId) {
  return nodeId
    .split('')
    .map((ch) => (/[a-zA-Z0-9]/.test(ch) ? ch : '_'))
    .join('');
}
|
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
/**
 * Framework constants: output paths, per-node artifact filenames, skill
 * identifiers, and CI environment probes.
 */

// Where workflow runs write their artifacts, relative to the project root.
export const DEFAULT_OUTPUT_BASE = '.zibby/output';
export const SESSIONS_DIR = 'sessions';
export const SESSION_INFO_FILE = '.session-info.json';

// Files written into each node's output directory.
export const RESULT_FILE = 'result.json';
export const RAW_OUTPUT_FILE = 'raw_stream_output.txt';
export const EVENTS_FILE = 'events.json';

// Agent-agnostic skill identifiers a node may declare. The concrete
// definitions (MCP server configs, tool schemas) live in @zibby/skills —
// import that package to register the built-ins, or call registerSkill()
// from ./skill-registry.js for custom skills.
export const SKILLS = {
  BROWSER: 'browser',
  JIRA: 'jira',
  GITHUB: 'github',
  SLACK: 'slack',
  MEMORY: 'memory',
};

// Environment variables probed to derive a session ID when running on CI.
export const CI_ENV_VARS = [
  'CI_JOB_ID',
  'GITHUB_RUN_ID',
  'CIRCLE_WORKFLOW_ID',
  'BUILD_ID'
];
|
|
@@ -0,0 +1,101 @@
|
|
|
1
|
+
import { existsSync, readFileSync } from 'fs';
|
|
2
|
+
import { join, dirname } from 'path';
|
|
3
|
+
|
|
4
|
+
/**
 * Loads contextual documentation (CONTEXT.md / AGENTS.md / configured files)
 * for a spec, cascading from the project root down to the spec's directory.
 */
export class ContextLoader {
  /**
   * Load all context for a spec file.
   * @param {string} specPath - Spec path relative to cwd (enables auto-discovery)
   * @param {string} cwd - Project root; discovery never walks above it
   * @param {Object} [config] - { filenames?: string[], discovery?: Record<string, string> }
   * @returns {Promise<Object>} Context keyed by lower-cased filename stem
   *   (e.g. 'CONTEXT.md' -> 'context') or by discovery key
   */
  static async loadContext(specPath, cwd, config = {}) {
    const context = {};

    // Context filenames to search for (default: CONTEXT.md, AGENTS.md)
    const filenames = config.filenames || ['CONTEXT.md', 'AGENTS.md'];

    // Auto-discover cascade: search from spec directory up to root
    if (specPath) {
      const specDir = dirname(join(cwd, specPath));

      for (const filename of filenames) {
        const content = await this.findAndMergeContextFiles(filename, specDir, cwd);
        if (content) {
          // Use filename without extension as key (e.g., CONTEXT.md -> context)
          const key = filename.replace(/\.[^.]+$/, '').toLowerCase();
          context[key] = content;
        }
      }
    }

    // Also load any explicitly configured discovery files
    const discovery = config.discovery || {};
    for (const [key, pathTemplate] of Object.entries(discovery)) {
      try {
        const resolvedPath = join(cwd, pathTemplate);
        if (existsSync(resolvedPath)) {
          const content = await this.loadFile(resolvedPath);
          context[key] = content;
        }
      } catch (err) {
        // Best-effort: a broken discovery entry must not sink the whole load.
        console.warn(`⚠️ Could not load context '${key}' from '${pathTemplate}': ${err.message}`);
      }
    }

    return context;
  }

  /**
   * Auto-discover: walk up from startDir to rootDir collecting every file
   * named `filename`, then merge them (root first, most specific last).
   * @param {string} filename - e.g. 'CONTEXT.md'
   * @param {string} startDir - Directory of the spec file
   * @param {string} rootDir - Upper bound of the walk
   * @returns {Promise<string|Object|null>} Merged content, or null if none found
   */
  static async findAndMergeContextFiles(filename, startDir, rootDir) {
    const contents = [];
    let currentDir = startDir;

    // NOTE(review): this is a string-prefix check — a sibling path like
    // `${rootDir}2` would also satisfy startsWith(rootDir); confirm callers
    // always pass a startDir inside rootDir.
    while (currentDir.startsWith(rootDir)) {
      const contextPath = join(currentDir, filename);

      if (existsSync(contextPath)) {
        try {
          const content = await this.loadFile(contextPath);
          contents.unshift(content); // Add to beginning (root context first)
        } catch (err) {
          // BUG FIX: message used `$(unknown)` — shell syntax, not a JS
          // interpolation — so the literal text "$(unknown)" was printed.
          // It now reports the filename being loaded.
          console.warn(`⚠️ Could not load ${filename} from ${contextPath}: ${err.message}`);
        }
      }

      // Move up one directory
      const parentDir = dirname(currentDir);
      if (parentDir === currentDir) break; // Reached filesystem root
      currentDir = parentDir;
    }

    // Merge all context files (root → specific)
    if (contents.length === 0) return null;

    // If all files are strings (markdown), concatenate with separator
    if (contents.every(c => typeof c === 'string')) {
      return contents.join('\n\n---\n\n');
    }

    // If objects (JSON/JS), shallow-merge; most specific (last) keys win
    if (contents.every(c => typeof c === 'object')) {
      return Object.assign({}, ...contents);
    }

    // Mixed types: return the most specific (last one)
    return contents[contents.length - 1];
  }

  /**
   * Load a single file, parsing by extension.
   * @param {string} filePath - Absolute path
   * @returns {Promise<string|Object>} Parsed JSON object, imported JS module
   *   default export, or raw text for everything else
   * @throws {Error} On read/parse/import failure
   */
  static async loadFile(filePath) {
    const content = readFileSync(filePath, 'utf-8');

    // JSON is parsed into an object
    if (filePath.endsWith('.json')) {
      return JSON.parse(content);
    }

    // JS/MJS is imported as an ES module; prefer its default export
    if (filePath.endsWith('.js') || filePath.endsWith('.mjs')) {
      const { pathToFileURL } = await import('url');
      const module = await import(pathToFileURL(filePath).href);
      return module.default || module;
    }

    // Return raw content (markdown, yaml, txt, etc.)
    return content;
  }
}
|