@synergenius/flow-weaver 0.22.8 → 0.22.9

This diff shows the changes between publicly released versions of this package as published to a supported registry. It is provided for informational purposes only.
@@ -2,13 +2,15 @@
2
2
  * @synergenius/flow-weaver/agent
3
3
  *
4
4
  * Provider-agnostic agent loop with MCP bridge for tool execution.
5
- * Two built-in providers: Anthropic API (raw fetch) and Claude CLI.
5
+ * Built-in providers: Anthropic API, Claude CLI, OpenAI-compatible (GPT-4o, Groq, Ollama, etc).
6
6
  */
7
7
  export type { StreamEvent, AgentMessage, AgentProvider, ToolDefinition, ToolExecutor, ToolEvent, McpBridge, AgentLoopOptions, AgentLoopResult, StreamOptions, SpawnFn, ClaudeCliProviderOptions, CliSessionOptions, Logger, } from './types.js';
8
8
  export { runAgentLoop } from './agent-loop.js';
9
9
  export { AnthropicProvider, createAnthropicProvider } from './providers/anthropic.js';
10
10
  export type { AnthropicProviderOptions } from './providers/anthropic.js';
11
11
  export { ClaudeCliProvider, createClaudeCliProvider } from './providers/claude-cli.js';
12
+ export { OpenAICompatProvider, createOpenAICompatProvider } from './providers/openai-compat.js';
13
+ export type { OpenAICompatProviderOptions } from './providers/openai-compat.js';
12
14
  export { createMcpBridge } from './mcp-bridge.js';
13
15
  export { CliSession, getOrCreateCliSession, killCliSession, killAllCliSessions, } from './cli-session.js';
14
16
  export { buildSafeEnv, buildSafeSpawnOpts, MINIMAL_PATH, ENV_ALLOWLIST } from './env-allowlist.js';
@@ -2,13 +2,14 @@
2
2
  * @synergenius/flow-weaver/agent
3
3
  *
4
4
  * Provider-agnostic agent loop with MCP bridge for tool execution.
5
- * Two built-in providers: Anthropic API (raw fetch) and Claude CLI.
5
+ * Built-in providers: Anthropic API, Claude CLI, OpenAI-compatible (GPT-4o, Groq, Ollama, etc).
6
6
  */
7
7
  // Agent loop
8
8
  export { runAgentLoop } from './agent-loop.js';
9
9
  // Providers
10
10
  export { AnthropicProvider, createAnthropicProvider } from './providers/anthropic.js';
11
11
  export { ClaudeCliProvider, createClaudeCliProvider } from './providers/claude-cli.js';
12
+ export { OpenAICompatProvider, createOpenAICompatProvider } from './providers/openai-compat.js';
12
13
  // MCP bridge
13
14
  export { createMcpBridge } from './mcp-bridge.js';
14
15
  // CLI session (warm persistent sessions)
@@ -0,0 +1,25 @@
1
+ /**
2
+ * OpenAI-compatible API provider — works with any service that speaks
3
+ * the OpenAI chat completions API: OpenAI, Groq, Together, Ollama, etc.
4
+ *
5
+ * No SDK dependency. Uses only Node.js native fetch + SSE parsing.
6
+ * Converts OpenAI's delta format to the canonical StreamEvent union.
7
+ */
8
+ import type { AgentProvider, AgentMessage, ToolDefinition, StreamEvent, StreamOptions } from '../types.js';
9
/** Constructor options for the OpenAI-compatible streaming provider. */
export interface OpenAICompatProviderOptions {
    /** API key sent as `Authorization: Bearer <key>`. Required. */
    apiKey: string;
    /** Model identifier passed to the API (default: "gpt-4o"). */
    model?: string;
    /** Upper bound on completion tokens per request (default: 4096). */
    maxTokens?: number;
    /** Base URL for the API (default: https://api.openai.com). Include /v1 if needed. */
    baseUrl?: string;
}
16
/**
 * Streaming provider for any service that implements the OpenAI
 * chat-completions API (OpenAI, Groq, Together, Ollama, ...).
 * Uses native fetch plus SSE parsing — no SDK dependency.
 */
export declare class OpenAICompatProvider implements AgentProvider {
    private apiKey;
    private model;
    private maxTokens;
    private baseUrl;
    /** @throws Error when `options.apiKey` is missing. */
    constructor(options: OpenAICompatProviderOptions);
    /**
     * Stream one model turn, translating OpenAI's delta format into the
     * canonical StreamEvent union (text, tool-call, and usage events,
     * terminated by a message_stop event).
     */
    stream(messages: AgentMessage[], tools: ToolDefinition[], options?: StreamOptions): AsyncGenerator<StreamEvent>;
}
/** Convenience factory for OpenAICompatProvider. */
export declare function createOpenAICompatProvider(options: OpenAICompatProviderOptions): OpenAICompatProvider;
25
+ //# sourceMappingURL=openai-compat.d.ts.map
@@ -0,0 +1,204 @@
1
+ /**
2
+ * OpenAI-compatible API provider — works with any service that speaks
3
+ * the OpenAI chat completions API: OpenAI, Groq, Together, Ollama, etc.
4
+ *
5
+ * No SDK dependency. Uses only Node.js native fetch + SSE parsing.
6
+ * Converts OpenAI's delta format to the canonical StreamEvent union.
7
+ */
8
+ export class OpenAICompatProvider {
9
+ apiKey;
10
+ model;
11
+ maxTokens;
12
+ baseUrl;
13
+ constructor(options) {
14
+ if (!options.apiKey) {
15
+ throw new Error('OpenAICompatProvider requires an API key');
16
+ }
17
+ this.apiKey = options.apiKey;
18
+ this.model = options.model ?? 'gpt-4o';
19
+ this.maxTokens = options.maxTokens ?? 4096;
20
+ this.baseUrl = (options.baseUrl ?? 'https://api.openai.com').replace(/\/+$/, '');
21
+ }
22
+ async *stream(messages, tools, options) {
23
+ const model = options?.model ?? this.model;
24
+ const maxTokens = options?.maxTokens ?? this.maxTokens;
25
+ // Build OpenAI-format messages
26
+ const apiMessages = messages.map((m) => formatMessage(m));
27
+ // Build OpenAI-format tools
28
+ const apiTools = tools.length > 0
29
+ ? tools.map((t) => ({
30
+ type: 'function',
31
+ function: {
32
+ name: t.name,
33
+ description: t.description,
34
+ parameters: t.inputSchema,
35
+ },
36
+ }))
37
+ : undefined;
38
+ const body = {
39
+ model,
40
+ messages: [
41
+ ...(options?.systemPrompt ? [{ role: 'system', content: options.systemPrompt }] : []),
42
+ ...apiMessages,
43
+ ],
44
+ max_tokens: maxTokens,
45
+ stream: true,
46
+ };
47
+ if (apiTools && apiTools.length > 0) {
48
+ body.tools = apiTools;
49
+ }
50
+ // Determine the completions endpoint
51
+ const url = this.baseUrl.includes('/v1/')
52
+ ? `${this.baseUrl}chat/completions`
53
+ : `${this.baseUrl}/v1/chat/completions`;
54
+ const response = await fetch(url, {
55
+ method: 'POST',
56
+ headers: {
57
+ 'Content-Type': 'application/json',
58
+ Authorization: `Bearer ${this.apiKey}`,
59
+ },
60
+ body: JSON.stringify(body),
61
+ signal: options?.signal,
62
+ });
63
+ if (!response.ok) {
64
+ const errText = await response.text().catch(() => '');
65
+ yield { type: 'text_delta', text: `OpenAI API error ${response.status}: ${errText.slice(0, 300)}` };
66
+ yield { type: 'message_stop', finishReason: 'error' };
67
+ return;
68
+ }
69
+ if (!response.body) {
70
+ yield { type: 'message_stop', finishReason: 'error' };
71
+ return;
72
+ }
73
+ // Parse SSE stream
74
+ const reader = response.body.getReader();
75
+ const decoder = new TextDecoder();
76
+ let buffer = '';
77
+ let hasToolCalls = false;
78
+ // Track active tool calls (OpenAI sends incremental deltas by index)
79
+ const activeToolCalls = new Map();
80
+ try {
81
+ while (true) {
82
+ const { done, value } = await reader.read();
83
+ if (done)
84
+ break;
85
+ buffer += decoder.decode(value, { stream: true });
86
+ const lines = buffer.split('\n');
87
+ buffer = lines.pop() || '';
88
+ for (const line of lines) {
89
+ if (!line.startsWith('data: '))
90
+ continue;
91
+ const data = line.slice(6).trim();
92
+ if (data === '[DONE]') {
93
+ // Flush any pending tool calls
94
+ for (const [, tc] of activeToolCalls) {
95
+ let args = {};
96
+ try {
97
+ args = JSON.parse(tc.argsJson);
98
+ }
99
+ catch { /* malformed */ }
100
+ yield { type: 'tool_use_end', id: tc.id, arguments: args };
101
+ }
102
+ yield { type: 'message_stop', finishReason: hasToolCalls ? 'tool_calls' : 'stop' };
103
+ return;
104
+ }
105
+ let parsed;
106
+ try {
107
+ parsed = JSON.parse(data);
108
+ }
109
+ catch {
110
+ continue;
111
+ }
112
+ // Extract usage if present
113
+ if (parsed.usage) {
114
+ const usage = parsed.usage;
115
+ yield {
116
+ type: 'usage',
117
+ promptTokens: usage.prompt_tokens ?? 0,
118
+ completionTokens: usage.completion_tokens ?? 0,
119
+ };
120
+ }
121
+ const choices = parsed.choices;
122
+ if (!choices || choices.length === 0)
123
+ continue;
124
+ const choice = choices[0];
125
+ const delta = choice.delta;
126
+ if (!delta)
127
+ continue;
128
+ // Text content
129
+ if (delta.content && typeof delta.content === 'string') {
130
+ yield { type: 'text_delta', text: delta.content };
131
+ }
132
+ // Tool calls
133
+ const toolCalls = delta.tool_calls;
134
+ if (toolCalls) {
135
+ for (const tc of toolCalls) {
136
+ const index = tc.index ?? 0;
137
+ const fn = tc.function;
138
+ if (tc.id) {
139
+ // New tool call
140
+ hasToolCalls = true;
141
+ const name = fn?.name ? String(fn.name) : 'unknown';
142
+ activeToolCalls.set(index, { id: String(tc.id), name, argsJson: '' });
143
+ yield { type: 'tool_use_start', id: String(tc.id), name };
144
+ }
145
+ // Accumulate function arguments
146
+ if (fn?.arguments && typeof fn.arguments === 'string') {
147
+ const existing = activeToolCalls.get(index);
148
+ if (existing) {
149
+ existing.argsJson += fn.arguments;
150
+ yield { type: 'tool_use_delta', id: existing.id, partialJson: fn.arguments };
151
+ }
152
+ }
153
+ }
154
+ }
155
+ // Finish reason
156
+ if (choice.finish_reason === 'tool_calls') {
157
+ hasToolCalls = true;
158
+ }
159
+ }
160
+ }
161
+ }
162
+ finally {
163
+ reader.releaseLock();
164
+ }
165
+ // If we got here without [DONE], flush
166
+ for (const [, tc] of activeToolCalls) {
167
+ let args = {};
168
+ try {
169
+ args = JSON.parse(tc.argsJson);
170
+ }
171
+ catch { /* malformed */ }
172
+ yield { type: 'tool_use_end', id: tc.id, arguments: args };
173
+ }
174
+ yield { type: 'message_stop', finishReason: hasToolCalls ? 'tool_calls' : 'stop' };
175
+ }
176
+ }
177
+ function formatMessage(m) {
178
+ if (m.role === 'tool') {
179
+ return {
180
+ role: 'tool',
181
+ tool_call_id: m.toolCallId,
182
+ content: typeof m.content === 'string' ? m.content : JSON.stringify(m.content),
183
+ };
184
+ }
185
+ if (m.role === 'assistant' && m.toolCalls?.length) {
186
+ return {
187
+ role: 'assistant',
188
+ content: typeof m.content === 'string' && m.content ? m.content : null,
189
+ tool_calls: m.toolCalls.map((tc) => ({
190
+ id: tc.id,
191
+ type: 'function',
192
+ function: { name: tc.name, arguments: JSON.stringify(tc.arguments) },
193
+ })),
194
+ };
195
+ }
196
+ return {
197
+ role: m.role,
198
+ content: typeof m.content === 'string' ? m.content : JSON.stringify(m.content),
199
+ };
200
+ }
201
/**
 * Factory counterpart to createAnthropicProvider / createClaudeCliProvider:
 * constructs an OpenAICompatProvider from the given options.
 */
export const createOpenAICompatProvider = (options) => new OpenAICompatProvider(options);
204
+ //# sourceMappingURL=openai-compat.js.map
@@ -9886,7 +9886,7 @@ var VERSION;
9886
9886
  var init_generated_version = __esm({
9887
9887
  "src/generated-version.ts"() {
9888
9888
  "use strict";
9889
- VERSION = "0.22.8";
9889
+ VERSION = "0.22.9";
9890
9890
  }
9891
9891
  });
9892
9892
 
@@ -94939,7 +94939,7 @@ var {
94939
94939
  // src/cli/index.ts
94940
94940
  init_logger();
94941
94941
  init_error_utils();
94942
- var version2 = true ? "0.22.8" : "0.0.0-dev";
94942
+ var version2 = true ? "0.22.9" : "0.0.0-dev";
94943
94943
  var program2 = new Command();
94944
94944
  program2.name("fw").description("Flow Weaver Annotations - Compile and validate workflow files").option("-v, --version", "Output the current version").option("--no-color", "Disable colors").option("--color", "Force colors").on("option:version", () => {
94945
94945
  logger.banner(version2);
@@ -1,2 +1,2 @@
1
- export declare const VERSION = "0.22.8";
1
+ export declare const VERSION = "0.22.9";
2
2
  //# sourceMappingURL=generated-version.d.ts.map
@@ -1,3 +1,3 @@
1
1
  // Auto-generated by scripts/generate-version.ts — do not edit manually
2
- export const VERSION = '0.22.8';
2
+ export const VERSION = '0.22.9';
3
3
  //# sourceMappingURL=generated-version.js.map
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@synergenius/flow-weaver",
3
- "version": "0.22.8",
3
+ "version": "0.22.9",
4
4
  "description": "Deterministic workflow compiler for AI agents. Compiles to standalone TypeScript, no runtime dependencies.",
5
5
  "private": false,
6
6
  "type": "module",