@synergenius/flow-weaver 0.4.3 → 0.5.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +3 -2
- package/README.md +2 -2
- package/dist/cli/flow-weaver.mjs +10 -1
- package/dist/cli/templates/shared/llm-types.d.ts +4 -4
- package/dist/cli/templates/shared/llm-types.js +5 -0
- package/dist/generator/code-utils.js +1 -0
- package/dist/generator/scope-function-generator.js +1 -0
- package/dist/generator/unified.js +3 -0
- package/package.json +1 -1
package/LICENSE
CHANGED
package/README.md
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
# @synergenius/flow-weaver
|
|
2
2
|
|
|
3
3
|
[](https://www.npmjs.com/package/@synergenius/flow-weaver)
|
|
4
|
-
[](./LICENSE)
|
|
5
5
|
[](https://nodejs.org)
|
|
6
6
|
|
|
7
7
|
**Workflow compiler for AI agents.** LLMs create, validate, iterate, and test workflows programmatically. Humans review them visually. Compiled output is standalone TypeScript with no runtime dependencies.
|
|
@@ -367,7 +367,7 @@ npm run docs # Generate API docs
|
|
|
367
367
|
|
|
368
368
|
## License
|
|
369
369
|
|
|
370
|
-
|
|
370
|
+
Licensed under the Flow Weaver Library License. See [LICENSE](./LICENSE) for full terms.
|
|
371
371
|
|
|
372
372
|
- **Free to use**: install, run, and compile workflows in any organization
|
|
373
373
|
- **Free to host internally** for organizations with 15 or fewer people
|
package/dist/cli/flow-weaver.mjs
CHANGED
|
@@ -27103,6 +27103,7 @@ function generateScopeFunctionClosure(scopeName, parentNodeId, parentNodeType, w
|
|
|
27103
27103
|
lines.push(` // Execute: ${child.id} (${child.nodeType})`);
|
|
27104
27104
|
lines.push(` scopedCtx.checkAborted('${child.id}');`);
|
|
27105
27105
|
lines.push(` const ${safeChildId}Idx = scopedCtx.addExecution('${child.id}');`);
|
|
27106
|
+
lines.push(` if (typeof globalThis !== 'undefined') (globalThis as any).__fw_current_node_id__ = '${child.id}';`);
|
|
27106
27107
|
lines.push(` scopedCtx.sendStatusChangedEvent({`);
|
|
27107
27108
|
lines.push(` nodeTypeName: '${child.nodeType}',`);
|
|
27108
27109
|
lines.push(` id: '${child.id}',`);
|
|
@@ -28372,6 +28373,7 @@ function generateBranchingNodeCode(instance, branchNode, workflow, allNodeTypes,
|
|
|
28372
28373
|
const functionName = branchNode.functionName;
|
|
28373
28374
|
lines.push(`${indent}${ctxVar}.checkAborted('${instanceId}');`);
|
|
28374
28375
|
lines.push(`${indent}${safeId}Idx = ${ctxVar}.addExecution('${instanceId}');`);
|
|
28376
|
+
lines.push(`${indent}if (typeof globalThis !== 'undefined') (globalThis as any).__fw_current_node_id__ = '${instanceId}';`);
|
|
28375
28377
|
lines.push(`${indent}${ctxVar}.sendStatusChangedEvent({`);
|
|
28376
28378
|
lines.push(`${indent} nodeTypeName: '${functionName}',`);
|
|
28377
28379
|
lines.push(`${indent} id: '${instanceId}',`);
|
|
@@ -28739,6 +28741,7 @@ function generatePullNodeWithContext(instance, nodeType, workflow, lines, indent
|
|
|
28739
28741
|
lines.push(`${indent} }`);
|
|
28740
28742
|
lines.push(`${indent} ${ctxVar}.checkAborted('${instanceId}');`);
|
|
28741
28743
|
lines.push(`${indent} ${safeId}Idx = ${ctxVar}.addExecution('${instanceId}');`);
|
|
28744
|
+
lines.push(`${indent} if (typeof globalThis !== 'undefined') (globalThis as any).__fw_current_node_id__ = '${instanceId}';`);
|
|
28742
28745
|
lines.push(`${indent} ${ctxVar}.sendStatusChangedEvent({`);
|
|
28743
28746
|
lines.push(`${indent} nodeTypeName: '${functionName}',`);
|
|
28744
28747
|
lines.push(`${indent} id: '${instanceId}',`);
|
|
@@ -28956,6 +28959,7 @@ function generateNodeCallWithContext(instance, nodeType, workflow, _availableVar
|
|
|
28956
28959
|
const varDecl = useConst ? "const " : "";
|
|
28957
28960
|
lines.push(`${indent}${ctxVar}.checkAborted('${instanceId}');`);
|
|
28958
28961
|
lines.push(`${indent}${varDecl}${safeId}Idx = ${ctxVar}.addExecution('${instanceId}');`);
|
|
28962
|
+
lines.push(`${indent}if (typeof globalThis !== 'undefined') (globalThis as any).__fw_current_node_id__ = '${instanceId}';`);
|
|
28959
28963
|
lines.push(`${indent}${ctxVar}.sendStatusChangedEvent({`);
|
|
28960
28964
|
lines.push(`${indent} nodeTypeName: '${functionName}',`);
|
|
28961
28965
|
lines.push(`${indent} id: '${instanceId}',`);
|
|
@@ -48732,6 +48736,7 @@ interface LLMResponse {
|
|
|
48732
48736
|
content: string | null;
|
|
48733
48737
|
toolCalls: LLMToolCall[];
|
|
48734
48738
|
finishReason: 'stop' | 'tool_calls' | 'length' | 'error';
|
|
48739
|
+
usage?: { promptTokens: number; completionTokens: number };
|
|
48735
48740
|
}
|
|
48736
48741
|
|
|
48737
48742
|
interface LLMToolCall {
|
|
@@ -48772,6 +48777,7 @@ interface LLMResponse {
|
|
|
48772
48777
|
content: string | null;
|
|
48773
48778
|
toolCalls: LLMToolCall[];
|
|
48774
48779
|
finishReason: 'stop' | 'tool_calls' | 'length' | 'error';
|
|
48780
|
+
usage?: { promptTokens: number; completionTokens: number };
|
|
48775
48781
|
}
|
|
48776
48782
|
|
|
48777
48783
|
interface LLMProvider {
|
|
@@ -48784,6 +48790,7 @@ var LLM_MOCK_PROVIDER = `const createMockProvider = (): LLMProvider => ({
|
|
|
48784
48790
|
content: \`[Mock response to: \${lastMessage.content.slice(0, 50)}...]\`,
|
|
48785
48791
|
toolCalls: [],
|
|
48786
48792
|
finishReason: 'stop',
|
|
48793
|
+
usage: { promptTokens: 10, completionTokens: 20 },
|
|
48787
48794
|
};
|
|
48788
48795
|
},
|
|
48789
48796
|
});
|
|
@@ -48803,6 +48810,7 @@ var LLM_MOCK_PROVIDER_WITH_TOOLS = `const createMockProvider = (): LLMProvider =
|
|
|
48803
48810
|
},
|
|
48804
48811
|
],
|
|
48805
48812
|
finishReason: 'tool_calls',
|
|
48813
|
+
usage: { promptTokens: 15, completionTokens: 30 },
|
|
48806
48814
|
};
|
|
48807
48815
|
}
|
|
48808
48816
|
|
|
@@ -48810,6 +48818,7 @@ var LLM_MOCK_PROVIDER_WITH_TOOLS = `const createMockProvider = (): LLMProvider =
|
|
|
48810
48818
|
content: '[Mock answer] ' + last.content,
|
|
48811
48819
|
toolCalls: [],
|
|
48812
48820
|
finishReason: 'stop',
|
|
48821
|
+
usage: { promptTokens: 10, completionTokens: 20 },
|
|
48813
48822
|
};
|
|
48814
48823
|
},
|
|
48815
48824
|
});
|
|
@@ -94420,7 +94429,7 @@ function displayInstalledPackage(pkg) {
|
|
|
94420
94429
|
}
|
|
94421
94430
|
|
|
94422
94431
|
// src/cli/index.ts
|
|
94423
|
-
var version2 = true ? "0.4.3" : "0.0.0-dev";
|
|
94432
|
+
var version2 = true ? "0.5.1" : "0.0.0-dev";
|
|
94424
94433
|
var program2 = new Command();
|
|
94425
94434
|
program2.name("flow-weaver").description("Flow Weaver Annotations - Compile and validate workflow files").version(version2, "-v, --version", "Output the current version");
|
|
94426
94435
|
program2.configureOutput({
|
|
@@ -5,11 +5,11 @@
|
|
|
5
5
|
* They provide the common type system that all AI workflows need.
|
|
6
6
|
*/
|
|
7
7
|
/** Core LLM types — embedded in all AI templates */
|
|
8
|
-
export declare const LLM_CORE_TYPES = "interface LLMMessage {\n role: 'system' | 'user' | 'assistant' | 'tool';\n content: string;\n toolCallId?: string;\n}\n\ninterface LLMResponse {\n content: string | null;\n toolCalls: LLMToolCall[];\n finishReason: 'stop' | 'tool_calls' | 'length' | 'error';\n}\n\ninterface LLMToolCall {\n id: string;\n name: string;\n arguments: Record<string, unknown>;\n}\n\ninterface LLMTool {\n name: string;\n description: string;\n parameters: {\n type: 'object';\n properties: Record<string, { type: string }>;\n required?: string[];\n };\n}\n\ninterface LLMProvider {\n chat(\n messages: LLMMessage[],\n options?: { tools?: LLMTool[]; systemPrompt?: string; model?: string; temperature?: number; maxTokens?: number }\n ): Promise<LLMResponse>;\n}";
|
|
8
|
+
export declare const LLM_CORE_TYPES = "interface LLMMessage {\n role: 'system' | 'user' | 'assistant' | 'tool';\n content: string;\n toolCallId?: string;\n}\n\ninterface LLMResponse {\n content: string | null;\n toolCalls: LLMToolCall[];\n finishReason: 'stop' | 'tool_calls' | 'length' | 'error';\n usage?: { promptTokens: number; completionTokens: number };\n}\n\ninterface LLMToolCall {\n id: string;\n name: string;\n arguments: Record<string, unknown>;\n}\n\ninterface LLMTool {\n name: string;\n description: string;\n parameters: {\n type: 'object';\n properties: Record<string, { type: string }>;\n required?: string[];\n };\n}\n\ninterface LLMProvider {\n chat(\n messages: LLMMessage[],\n options?: { tools?: LLMTool[]; systemPrompt?: string; model?: string; temperature?: number; maxTokens?: number }\n ): Promise<LLMResponse>;\n}";
|
|
9
9
|
/** Simplified LLM types — for templates that don't need tool calling */
|
|
10
|
-
export declare const LLM_SIMPLE_TYPES = "interface LLMMessage {\n role: 'system' | 'user' | 'assistant' | 'tool';\n content: string;\n toolCallId?: string;\n}\n\ninterface LLMToolCall {\n id: string;\n name: string;\n arguments: Record<string, unknown>;\n}\n\ninterface LLMResponse {\n content: string | null;\n toolCalls: LLMToolCall[];\n finishReason: 'stop' | 'tool_calls' | 'length' | 'error';\n}\n\ninterface LLMProvider {\n chat(messages: LLMMessage[], options?: { systemPrompt?: string; model?: string; temperature?: number; maxTokens?: number }): Promise<LLMResponse>;\n}";
|
|
10
|
+
export declare const LLM_SIMPLE_TYPES = "interface LLMMessage {\n role: 'system' | 'user' | 'assistant' | 'tool';\n content: string;\n toolCallId?: string;\n}\n\ninterface LLMToolCall {\n id: string;\n name: string;\n arguments: Record<string, unknown>;\n}\n\ninterface LLMResponse {\n content: string | null;\n toolCalls: LLMToolCall[];\n finishReason: 'stop' | 'tool_calls' | 'length' | 'error';\n usage?: { promptTokens: number; completionTokens: number };\n}\n\ninterface LLMProvider {\n chat(messages: LLMMessage[], options?: { systemPrompt?: string; model?: string; temperature?: number; maxTokens?: number }): Promise<LLMResponse>;\n}";
|
|
11
11
|
/** Mock provider factory code */
|
|
12
|
-
export declare const LLM_MOCK_PROVIDER = "const createMockProvider = (): LLMProvider => ({\n async chat(messages) {\n const lastMessage = messages[messages.length - 1];\n return {\n content: `[Mock response to: ${lastMessage.content.slice(0, 50)}...]`,\n toolCalls: [],\n finishReason: 'stop',\n };\n },\n});\n\nconst llmProvider: LLMProvider = (globalThis as unknown as { __fw_llm_provider__?: LLMProvider }).__fw_llm_provider__ ?? createMockProvider();";
|
|
12
|
+
export declare const LLM_MOCK_PROVIDER = "const createMockProvider = (): LLMProvider => ({\n async chat(messages) {\n const lastMessage = messages[messages.length - 1];\n return {\n content: `[Mock response to: ${lastMessage.content.slice(0, 50)}...]`,\n toolCalls: [],\n finishReason: 'stop',\n usage: { promptTokens: 10, completionTokens: 20 },\n };\n },\n});\n\nconst llmProvider: LLMProvider = (globalThis as unknown as { __fw_llm_provider__?: LLMProvider }).__fw_llm_provider__ ?? createMockProvider();";
|
|
13
13
|
/** Mock provider with tool calling support (for ai-agent) */
|
|
14
|
-
export declare const LLM_MOCK_PROVIDER_WITH_TOOLS = "const createMockProvider = (): LLMProvider => ({\n async chat(messages, options) {\n const last = messages[messages.length - 1];\n if (options?.tools && last.content.toLowerCase().includes('search')) {\n return {\n content: null,\n toolCalls: [\n {\n id: 'call_' + Date.now(),\n name: 'search',\n arguments: { query: last.content },\n },\n ],\n finishReason: 'tool_calls',\n };\n }\n\n return {\n content: '[Mock answer] ' + last.content,\n toolCalls: [],\n finishReason: 'stop',\n };\n },\n});\n\nconst llmProvider: LLMProvider = (globalThis as unknown as { __fw_llm_provider__?: LLMProvider }).__fw_llm_provider__ ?? createMockProvider();";
|
|
14
|
+
export declare const LLM_MOCK_PROVIDER_WITH_TOOLS = "const createMockProvider = (): LLMProvider => ({\n async chat(messages, options) {\n const last = messages[messages.length - 1];\n if (options?.tools && last.content.toLowerCase().includes('search')) {\n return {\n content: null,\n toolCalls: [\n {\n id: 'call_' + Date.now(),\n name: 'search',\n arguments: { query: last.content },\n },\n ],\n finishReason: 'tool_calls',\n usage: { promptTokens: 15, completionTokens: 30 },\n };\n }\n\n return {\n content: '[Mock answer] ' + last.content,\n toolCalls: [],\n finishReason: 'stop',\n usage: { promptTokens: 10, completionTokens: 20 },\n };\n },\n});\n\nconst llmProvider: LLMProvider = (globalThis as unknown as { __fw_llm_provider__?: LLMProvider }).__fw_llm_provider__ ?? createMockProvider();";
|
|
15
15
|
//# sourceMappingURL=llm-types.d.ts.map
|
|
@@ -15,6 +15,7 @@ interface LLMResponse {
|
|
|
15
15
|
content: string | null;
|
|
16
16
|
toolCalls: LLMToolCall[];
|
|
17
17
|
finishReason: 'stop' | 'tool_calls' | 'length' | 'error';
|
|
18
|
+
usage?: { promptTokens: number; completionTokens: number };
|
|
18
19
|
}
|
|
19
20
|
|
|
20
21
|
interface LLMToolCall {
|
|
@@ -56,6 +57,7 @@ interface LLMResponse {
|
|
|
56
57
|
content: string | null;
|
|
57
58
|
toolCalls: LLMToolCall[];
|
|
58
59
|
finishReason: 'stop' | 'tool_calls' | 'length' | 'error';
|
|
60
|
+
usage?: { promptTokens: number; completionTokens: number };
|
|
59
61
|
}
|
|
60
62
|
|
|
61
63
|
interface LLMProvider {
|
|
@@ -69,6 +71,7 @@ export const LLM_MOCK_PROVIDER = `const createMockProvider = (): LLMProvider =>
|
|
|
69
71
|
content: \`[Mock response to: \${lastMessage.content.slice(0, 50)}...]\`,
|
|
70
72
|
toolCalls: [],
|
|
71
73
|
finishReason: 'stop',
|
|
74
|
+
usage: { promptTokens: 10, completionTokens: 20 },
|
|
72
75
|
};
|
|
73
76
|
},
|
|
74
77
|
});
|
|
@@ -89,6 +92,7 @@ export const LLM_MOCK_PROVIDER_WITH_TOOLS = `const createMockProvider = (): LLMP
|
|
|
89
92
|
},
|
|
90
93
|
],
|
|
91
94
|
finishReason: 'tool_calls',
|
|
95
|
+
usage: { promptTokens: 15, completionTokens: 30 },
|
|
92
96
|
};
|
|
93
97
|
}
|
|
94
98
|
|
|
@@ -96,6 +100,7 @@ export const LLM_MOCK_PROVIDER_WITH_TOOLS = `const createMockProvider = (): LLMP
|
|
|
96
100
|
content: '[Mock answer] ' + last.content,
|
|
97
101
|
toolCalls: [],
|
|
98
102
|
finishReason: 'stop',
|
|
103
|
+
usage: { promptTokens: 10, completionTokens: 20 },
|
|
99
104
|
};
|
|
100
105
|
},
|
|
101
106
|
});
|
|
@@ -314,6 +314,7 @@ export function generateNodeWithExecutionContext(node, workflow, lines, isAsync,
|
|
|
314
314
|
const getCall = isAsync ? 'await ctx.getVariable' : 'ctx.getVariable';
|
|
315
315
|
const setCall = isAsync ? 'await ctx.setVariable' : 'ctx.setVariable';
|
|
316
316
|
lines.push(`${indent}const ${safeNodeName}Idx = ctx.addExecution('${nodeName}');`);
|
|
317
|
+
lines.push(`${indent}if (typeof globalThis !== 'undefined') (globalThis as any).__fw_current_node_id__ = '${nodeName}';`);
|
|
317
318
|
lines.push(`${indent}ctx.sendStatusChangedEvent({`);
|
|
318
319
|
lines.push(`${indent} nodeTypeName: '${nodeName}',`);
|
|
319
320
|
lines.push(`${indent} id: '${nodeName}',`);
|
|
@@ -184,6 +184,7 @@ export function generateScopeFunctionClosure(scopeName, parentNodeId, parentNode
|
|
|
184
184
|
lines.push(` // Execute: ${child.id} (${child.nodeType})`);
|
|
185
185
|
lines.push(` scopedCtx.checkAborted('${child.id}');`);
|
|
186
186
|
lines.push(` const ${safeChildId}Idx = scopedCtx.addExecution('${child.id}');`);
|
|
187
|
+
lines.push(` if (typeof globalThis !== 'undefined') (globalThis as any).__fw_current_node_id__ = '${child.id}';`);
|
|
187
188
|
lines.push(` scopedCtx.sendStatusChangedEvent({`);
|
|
188
189
|
lines.push(` nodeTypeName: '${child.nodeType}',`);
|
|
189
190
|
lines.push(` id: '${child.id}',`);
|
|
@@ -887,6 +887,7 @@ bundleMode = false, preDeclaredSuccessFlags = new Set(), forceTrackSuccess = fal
|
|
|
887
887
|
const functionName = branchNode.functionName;
|
|
888
888
|
lines.push(`${indent}${ctxVar}.checkAborted('${instanceId}');`);
|
|
889
889
|
lines.push(`${indent}${safeId}Idx = ${ctxVar}.addExecution('${instanceId}');`);
|
|
890
|
+
lines.push(`${indent}if (typeof globalThis !== 'undefined') (globalThis as any).__fw_current_node_id__ = '${instanceId}';`);
|
|
890
891
|
lines.push(`${indent}${ctxVar}.sendStatusChangedEvent({`);
|
|
891
892
|
lines.push(`${indent} nodeTypeName: '${functionName}',`);
|
|
892
893
|
lines.push(`${indent} id: '${instanceId}',`);
|
|
@@ -1197,6 +1198,7 @@ bundleMode = false) {
|
|
|
1197
1198
|
lines.push(`${indent} }`);
|
|
1198
1199
|
lines.push(`${indent} ${ctxVar}.checkAborted('${instanceId}');`);
|
|
1199
1200
|
lines.push(`${indent} ${safeId}Idx = ${ctxVar}.addExecution('${instanceId}');`);
|
|
1201
|
+
lines.push(`${indent} if (typeof globalThis !== 'undefined') (globalThis as any).__fw_current_node_id__ = '${instanceId}';`);
|
|
1200
1202
|
lines.push(`${indent} ${ctxVar}.sendStatusChangedEvent({`);
|
|
1201
1203
|
lines.push(`${indent} nodeTypeName: '${functionName}',`);
|
|
1202
1204
|
lines.push(`${indent} id: '${instanceId}',`);
|
|
@@ -1450,6 +1452,7 @@ branchingNodes = new Set() // Branching nodes set for port-aware STEP guards
|
|
|
1450
1452
|
const varDecl = useConst ? 'const ' : '';
|
|
1451
1453
|
lines.push(`${indent}${ctxVar}.checkAborted('${instanceId}');`);
|
|
1452
1454
|
lines.push(`${indent}${varDecl}${safeId}Idx = ${ctxVar}.addExecution('${instanceId}');`);
|
|
1455
|
+
lines.push(`${indent}if (typeof globalThis !== 'undefined') (globalThis as any).__fw_current_node_id__ = '${instanceId}';`);
|
|
1453
1456
|
lines.push(`${indent}${ctxVar}.sendStatusChangedEvent({`);
|
|
1454
1457
|
lines.push(`${indent} nodeTypeName: '${functionName}',`);
|
|
1455
1458
|
lines.push(`${indent} id: '${instanceId}',`);
|
package/package.json
CHANGED