@workglow/ai-provider 0.0.122 → 0.0.123

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. package/dist/provider-hf-transformers/common/HFT_JobRunFns.d.ts +2 -2
  2. package/dist/provider-hf-transformers/common/HFT_Pipeline.d.ts.map +1 -1
  3. package/dist/provider-hf-transformers/common/HFT_Streaming.d.ts +3 -2
  4. package/dist/provider-hf-transformers/common/HFT_Streaming.d.ts.map +1 -1
  5. package/dist/provider-hf-transformers/common/HFT_ToolCalling.d.ts.map +1 -1
  6. package/dist/provider-hf-transformers/runtime.js +39 -37
  7. package/dist/provider-hf-transformers/runtime.js.map +11 -11
  8. package/dist/provider-llamacpp/common/LlamaCpp_Runtime.d.ts +5 -8
  9. package/dist/provider-llamacpp/common/LlamaCpp_Runtime.d.ts.map +1 -1
  10. package/dist/provider-llamacpp/runtime.js.map +2 -2
  11. package/dist/provider-ollama/common/Ollama_JobRunFns.browser.d.ts +1 -1
  12. package/dist/provider-ollama/common/Ollama_JobRunFns.d.ts +1 -1
  13. package/dist/provider-openai/common/OpenAI_CountTokens.browser.d.ts +10 -0
  14. package/dist/provider-openai/common/OpenAI_CountTokens.browser.d.ts.map +1 -0
  15. package/dist/provider-openai/common/OpenAI_CountTokens.d.ts.map +1 -1
  16. package/dist/provider-openai/common/OpenAI_JobRunFns.browser.d.ts +12 -0
  17. package/dist/provider-openai/common/OpenAI_JobRunFns.browser.d.ts.map +1 -0
  18. package/dist/provider-openai/index.browser.d.ts +9 -0
  19. package/dist/provider-openai/index.browser.d.ts.map +1 -0
  20. package/dist/provider-openai/index.browser.js +105 -0
  21. package/dist/provider-openai/index.browser.js.map +13 -0
  22. package/dist/provider-openai/registerOpenAiInline.browser.d.ts +8 -0
  23. package/dist/provider-openai/registerOpenAiInline.browser.d.ts.map +1 -0
  24. package/dist/provider-openai/registerOpenAiWorker.browser.d.ts +7 -0
  25. package/dist/provider-openai/registerOpenAiWorker.browser.d.ts.map +1 -0
  26. package/dist/provider-openai/runtime.browser.d.ts +15 -0
  27. package/dist/provider-openai/runtime.browser.d.ts.map +1 -0
  28. package/dist/provider-openai/runtime.browser.js +647 -0
  29. package/dist/provider-openai/runtime.browser.js.map +25 -0
  30. package/dist/provider-openai/runtime.js.map +2 -2
  31. package/dist/provider-tf-mediapipe/common/TFMP_Client.d.ts.map +1 -1
  32. package/dist/provider-tf-mediapipe/common/TFMP_JobRunFns.d.ts +2 -2
  33. package/dist/provider-tf-mediapipe/runtime.js.map +2 -2
  34. package/package.json +21 -14
@@ -0,0 +1,647 @@
1
// Bundler shim for dynamic require: resolves to the host `require` when one
// exists; otherwise exposes a Proxy-wrapped fallback whose invocation throws.
var __require = /* @__PURE__ */ (function (fallback) {
  if (typeof require !== "undefined") return require;
  if (typeof Proxy === "undefined") return fallback;
  return new Proxy(fallback, {
    get(target, prop) {
      const impl = typeof require !== "undefined" ? require : target;
      return impl[prop];
    }
  });
})(function (x) {
  if (typeof require !== "undefined") return require.apply(this, arguments);
  throw Error('Dynamic require of "' + x + '" is not supported');
});
8
+
9
// src/provider-openai/common/OpenAI_Client.ts
// Cached OpenAI SDK constructor, populated on first successful dynamic import.
var _OpenAIClass;

/**
 * Dynamically imports the "openai" SDK and memoizes its default export.
 * Throws an install-hint error when the package is unavailable.
 */
async function loadOpenAISDK() {
  if (_OpenAIClass) return _OpenAIClass;
  try {
    const sdk = await import("openai");
    _OpenAIClass = sdk.default;
  } catch {
    throw new Error("openai is required for OpenAI tasks. Install it with: bun add openai");
  }
  return _OpenAIClass;
}
22
/**
 * Builds an OpenAI SDK client from a model's provider_config.
 * API key resolution order: provider_config.credential_key,
 * provider_config.api_key, then the OPENAI_API_KEY environment variable.
 * Throws when no key can be resolved.
 */
async function getClient(model) {
  const OpenAI = await loadOpenAISDK();
  const config = model?.provider_config;
  const envKey = typeof process !== "undefined" ? process.env?.OPENAI_API_KEY : undefined;
  const apiKey = config?.credential_key || config?.api_key || envKey;
  if (!apiKey) {
    throw new Error("Missing OpenAI API key: set provider_config.credential_key or the OPENAI_API_KEY environment variable.");
  }
  return new OpenAI({
    apiKey,
    baseURL: config?.base_url || undefined,
    organization: config?.organization || undefined,
    // This bundle targets browser/worker contexts, so opt in explicitly.
    dangerouslyAllowBrowser: true
  });
}
36
/**
 * Reads the required model name from provider_config.model_name,
 * throwing a descriptive error when it is missing or empty.
 */
function getModelName(model) {
  const name = model?.provider_config?.model_name;
  if (name) return name;
  throw new Error("Missing model name in provider_config.model_name.");
}
43
// src/provider-openai/common/OpenAI_Constants.ts
// Canonical provider identifier used by the model records and providers below.
var OPENAI = "OPENAI";
45
+
46
// src/provider-openai/common/OpenAI_ModelSearch.ts
// Static model list used when the live models endpoint cannot be reached.
// Every entry uses the model id as both label and value.
var OPENAI_FALLBACK = [
  "gpt-5.4",
  "gpt-5",
  "gpt-5-mini",
  "gpt-4o-mini",
  "gpt-4-turbo",
  "o3",
  "o3-mini",
  "o1",
  "o1-mini"
].map((id) => ({ label: id, value: id }));
58
/**
 * Fetches the account's model list from the OpenAI API. Each entry is
 * labelled "<id> <owner>"; entries whose id starts with "gpt" or "o1"
 * sort first, then alphabetical by id within each group.
 */
async function listOpenAiModels() {
  const client = await getClient(undefined);
  const models = [];
  for await (const m of client.models.list()) {
    models.push({ label: `${m.id} ${m.owned_by}`, value: m.id });
  }
  // 0 for gpt/o1-prefixed ids, 1 for everything else.
  const rank = (v) => (v.startsWith("gpt") || v.startsWith("o1") ? 0 : 1);
  models.sort((a, b) => rank(a.value) - rank(b.value) || a.value.localeCompare(b.value));
  return models;
}
73
/**
 * Converts {label, value} search hits into the provider's model-search
 * result shape, embedding a minimal model record per hit.
 */
function mapModelList(models) {
  return models.map((m) => {
    const record = {
      model_id: m.value,
      provider: OPENAI,
      title: m.value,
      description: "",
      tasks: [],
      provider_config: { model_name: m.value },
      metadata: {}
    };
    return {
      id: m.value,
      label: m.label,
      description: "",
      record,
      raw: m
    };
  });
}
90
/**
 * ModelSearch task: tries the live model listing and falls back to the
 * static OPENAI_FALLBACK list on any failure (e.g. missing API key).
 */
var OpenAI_ModelSearch = async () => {
  const models = await listOpenAiModels().catch(() => OPENAI_FALLBACK);
  return { results: mapModelList(models) };
};
99
+
100
+ // src/provider-openai/common/OpenAI_CountTokens.browser.ts
101
+ import { getLogger } from "@workglow/util/worker";
102
// Cached js-tiktoken module, populated on first successful dynamic import.
var _tiktoken;

/**
 * Dynamically imports js-tiktoken and memoizes the module.
 * Throws an install-hint error when the package is unavailable.
 */
async function loadTiktoken() {
  if (_tiktoken) return _tiktoken;
  try {
    _tiktoken = await import("js-tiktoken");
  } catch {
    throw new Error("js-tiktoken is required for OpenAI token counting in the browser. Install it with: bun add js-tiktoken");
  }
  return _tiktoken;
}
113
// Per-model encoder cache; models unknown to js-tiktoken share one
// cl100k_base encoder instance.
var _encoderCache = new Map();

/**
 * Returns a memoized tiktoken encoder for modelName. When
 * encodingForModel rejects the name, the cl100k_base encoding is used
 * (created once) and cached under the requested model name as well.
 */
async function getEncoder(modelName) {
  const tiktoken = await loadTiktoken();
  if (!_encoderCache.has(modelName)) {
    let encoder;
    try {
      encoder = tiktoken.encodingForModel(modelName);
    } catch {
      const fallback = "cl100k_base";
      if (!_encoderCache.has(fallback)) {
        _encoderCache.set(fallback, tiktoken.getEncoding(fallback));
      }
      encoder = _encoderCache.get(fallback);
    }
    _encoderCache.set(modelName, encoder);
  }
  return _encoderCache.get(modelName);
}
129
/**
 * CountTokens task: tokenizes input.text with the model's tiktoken encoder
 * and returns the token count. Array inputs are processed sequentially
 * (there is no batch tokenizer) and return an array of counts.
 *
 * Fix: the batch path previously discarded the caller's progress callback
 * and abort signal, fabricating a throwaway AbortController per item. Both
 * are now accepted as optional parameters (backward compatible — the task
 * runner already passes them positionally) and forwarded on recursion.
 */
var OpenAI_CountTokens = async (input, model, update_progress = () => {}, signal) => {
  if (Array.isArray(input.text)) {
    getLogger().warn("OpenAI_CountTokens: array input received; processing sequentially (no native batch support)");
    const texts = input.text;
    const counts = [];
    for (const item of texts) {
      const r = await OpenAI_CountTokens({ ...input, text: item }, model, update_progress, signal);
      counts.push(r.count);
    }
    return { count: counts };
  }
  const enc = await getEncoder(getModelName(model));
  const tokens = enc.encode(input.text);
  return { count: tokens.length };
};
144
/**
 * Reactive CountTokens wrapper: ignores the previous output and delegates
 * to OpenAI_CountTokens with a no-op progress callback and a fresh signal.
 */
var OpenAI_CountTokens_Reactive = async (input, _output, model) => {
  const noProgress = () => {};
  return OpenAI_CountTokens(input, model, noProgress, new AbortController().signal);
};
147
+
148
// src/provider-openai/common/OpenAI_ModelInfo.ts
/**
 * ModelInfo task: OpenAI models are remote API models, so they are never
 * local, cached, or loaded, have no local file sizes, and are usable from
 * both browser and node.
 */
var OpenAI_ModelInfo = async (input) => ({
  model: input.model,
  is_local: false,
  is_remote: true,
  supports_browser: true,
  supports_node: true,
  is_cached: false,
  is_loaded: false,
  file_sizes: null
});
161
+
162
+ // src/provider-openai/common/OpenAI_StructuredGeneration.ts
163
+ import { parsePartialJson } from "@workglow/util/worker";
164
/**
 * StructuredGeneration task: one-shot chat completion constrained by a
 * strict JSON schema. input.outputSchema takes precedence over the
 * task-level outputSchema. The response content is JSON-parsed ("{}" when
 * the model returns no content).
 */
var OpenAI_StructuredGeneration = async (input, model, update_progress, signal, outputSchema) => {
  update_progress(0, "Starting OpenAI structured generation");
  const client = await getClient(model);
  const modelName = getModelName(model);
  const schema = input.outputSchema ?? outputSchema;
  const request = {
    model: modelName,
    messages: [{ role: "user", content: input.prompt }],
    response_format: {
      type: "json_schema",
      json_schema: { name: "structured_output", schema, strict: true }
    },
    max_completion_tokens: input.maxTokens,
    temperature: input.temperature
  };
  const response = await client.chat.completions.create(request, { signal });
  const content = response.choices[0]?.message?.content ?? "{}";
  update_progress(100, "Completed OpenAI structured generation");
  return { object: JSON.parse(content) };
};
187
/**
 * Streaming StructuredGeneration: accumulates content deltas, emitting an
 * object-delta event whenever the buffer parses as (partial) JSON, then a
 * finish event with the fully parsed object (partial-parse fallback, {} as
 * last resort).
 */
var OpenAI_StructuredGeneration_Stream = async function* (input, model, signal, outputSchema) {
  const client = await getClient(model);
  const modelName = getModelName(model);
  const schema = input.outputSchema ?? outputSchema;
  const stream = await client.chat.completions.create({
    model: modelName,
    messages: [{ role: "user", content: input.prompt }],
    response_format: {
      type: "json_schema",
      json_schema: { name: "structured_output", schema, strict: true }
    },
    max_completion_tokens: input.maxTokens,
    temperature: input.temperature,
    stream: true
  }, { signal });
  let buffer = "";
  for await (const chunk of stream) {
    const delta = chunk.choices[0]?.delta?.content ?? "";
    if (!delta) continue;
    buffer += delta;
    const partial = parsePartialJson(buffer);
    if (partial !== undefined) {
      yield { type: "object-delta", port: "object", objectDelta: partial };
    }
  }
  let finalObject;
  try {
    finalObject = JSON.parse(buffer);
  } catch {
    finalObject = parsePartialJson(buffer) ?? {};
  }
  yield { type: "finish", data: { object: finalObject } };
};
225
+
226
+ // src/provider-openai/common/OpenAI_TextEmbedding.ts
227
+ import { getLogger as getLogger2 } from "@workglow/util/worker";
228
/**
 * TextEmbedding task: embeds a string (or string array) via the embeddings
 * API. Returns one Float32Array, or an array of them for array input.
 * The request is timed via the logger and progress is reported 0 → 100.
 */
var OpenAI_TextEmbedding = async (input, model, update_progress, signal) => {
  const logger = getLogger2();
  const nameForLog = model?.provider_config?.model_name;
  const timerLabel = `openai:TextEmbedding:${nameForLog}`;
  logger.time(timerLabel, { model: nameForLog });
  update_progress(0, "Starting OpenAI text embedding");
  const client = await getClient(model);
  const modelName = getModelName(model);
  const response = await client.embeddings.create({
    model: modelName,
    input: input.text
  }, { signal });
  update_progress(100, "Completed OpenAI text embedding");
  logger.timeEnd(timerLabel, { model: nameForLog });
  if (Array.isArray(input.text)) {
    return {
      vector: response.data.map((item) => new Float32Array(item.embedding))
    };
  }
  return { vector: new Float32Array(response.data[0].embedding) };
};
248
+
249
+ // src/provider-openai/common/OpenAI_TextGeneration.ts
250
+ import { getLogger as getLogger3 } from "@workglow/util/worker";
251
/**
 * TextGeneration task: single-turn chat completion. Array prompts are run
 * one at a time (no native batch endpoint) and returned as a text array.
 * Single-prompt requests are timed via the logger.
 */
var OpenAI_TextGeneration = async (input, model, update_progress, signal) => {
  if (Array.isArray(input.prompt)) {
    getLogger3().warn("OpenAI_TextGeneration: array input received; processing sequentially (no native batch support)");
    const results = [];
    for (const prompt of input.prompt) {
      const r = await OpenAI_TextGeneration({ ...input, prompt }, model, update_progress, signal);
      results.push(r.text);
    }
    return { text: results };
  }
  const logger = getLogger3();
  const nameForLog = model?.provider_config?.model_name;
  const timerLabel = `openai:TextGeneration:${nameForLog}`;
  logger.time(timerLabel, { model: nameForLog });
  update_progress(0, "Starting OpenAI text generation");
  const client = await getClient(model);
  const modelName = getModelName(model);
  const response = await client.chat.completions.create({
    model: modelName,
    messages: [{ role: "user", content: input.prompt }],
    max_completion_tokens: input.maxTokens,
    temperature: input.temperature,
    top_p: input.topP,
    frequency_penalty: input.frequencyPenalty,
    presence_penalty: input.presencePenalty
  }, { signal });
  update_progress(100, "Completed OpenAI text generation");
  logger.timeEnd(timerLabel, { model: nameForLog });
  return { text: response.choices[0]?.message?.content ?? "" };
};
281
/**
 * Streaming TextGeneration: yields a text-delta event per non-empty
 * content chunk, then a finish event with empty data.
 */
var OpenAI_TextGeneration_Stream = async function* (input, model, signal) {
  const client = await getClient(model);
  const modelName = getModelName(model);
  const stream = await client.chat.completions.create({
    model: modelName,
    messages: [{ role: "user", content: input.prompt }],
    max_completion_tokens: input.maxTokens,
    temperature: input.temperature,
    top_p: input.topP,
    frequency_penalty: input.frequencyPenalty,
    presence_penalty: input.presencePenalty,
    stream: true
  }, { signal });
  for await (const chunk of stream) {
    const piece = chunk.choices[0]?.delta?.content ?? "";
    if (piece) {
      yield { type: "text-delta", port: "text", textDelta: piece };
    }
  }
  yield { type: "finish", data: {} };
};
302
+
303
+ // src/provider-openai/common/OpenAI_TextRewriter.ts
304
+ import { getLogger as getLogger4 } from "@workglow/util/worker";
305
/**
 * TextRewriter task: rewrites input.text, using input.prompt as the system
 * instruction. Array inputs are processed sequentially into a text array.
 */
var OpenAI_TextRewriter = async (input, model, update_progress, signal) => {
  if (Array.isArray(input.text)) {
    getLogger4().warn("OpenAI_TextRewriter: array input received; processing sequentially (no native batch support)");
    const results = [];
    for (const text of input.text) {
      const r = await OpenAI_TextRewriter({ ...input, text }, model, update_progress, signal);
      results.push(r.text);
    }
    return { text: results };
  }
  update_progress(0, "Starting OpenAI text rewriting");
  const client = await getClient(model);
  const modelName = getModelName(model);
  const response = await client.chat.completions.create({
    model: modelName,
    messages: [
      { role: "system", content: input.prompt },
      { role: "user", content: input.text }
    ]
  }, { signal });
  update_progress(100, "Completed OpenAI text rewriting");
  return { text: response.choices[0]?.message?.content ?? "" };
};
329
/**
 * Streaming TextRewriter: same system/user message layout as the one-shot
 * variant; yields text-delta events then a finish event with empty data.
 */
var OpenAI_TextRewriter_Stream = async function* (input, model, signal) {
  const client = await getClient(model);
  const modelName = getModelName(model);
  const stream = await client.chat.completions.create({
    model: modelName,
    messages: [
      { role: "system", content: input.prompt },
      { role: "user", content: input.text }
    ],
    stream: true
  }, { signal });
  for await (const chunk of stream) {
    const piece = chunk.choices[0]?.delta?.content ?? "";
    if (piece) {
      yield { type: "text-delta", port: "text", textDelta: piece };
    }
  }
  yield { type: "finish", data: {} };
};
348
+
349
+ // src/provider-openai/common/OpenAI_TextSummary.ts
350
+ import { getLogger as getLogger5 } from "@workglow/util/worker";
351
/**
 * TextSummary task: summarizes input.text with a fixed system instruction.
 * Array inputs are processed sequentially into a text array.
 */
var OpenAI_TextSummary = async (input, model, update_progress, signal) => {
  if (Array.isArray(input.text)) {
    getLogger5().warn("OpenAI_TextSummary: array input received; processing sequentially (no native batch support)");
    const results = [];
    for (const text of input.text) {
      const r = await OpenAI_TextSummary({ ...input, text }, model, update_progress, signal);
      results.push(r.text);
    }
    return { text: results };
  }
  update_progress(0, "Starting OpenAI text summarization");
  const client = await getClient(model);
  const modelName = getModelName(model);
  const response = await client.chat.completions.create({
    model: modelName,
    messages: [
      { role: "system", content: "Summarize the following text concisely." },
      { role: "user", content: input.text }
    ]
  }, { signal });
  update_progress(100, "Completed OpenAI text summarization");
  return { text: response.choices[0]?.message?.content ?? "" };
};
375
/**
 * Streaming TextSummary: same fixed system instruction as the one-shot
 * variant; yields text-delta events then a finish event with empty data.
 */
var OpenAI_TextSummary_Stream = async function* (input, model, signal) {
  const client = await getClient(model);
  const modelName = getModelName(model);
  const stream = await client.chat.completions.create({
    model: modelName,
    messages: [
      { role: "system", content: "Summarize the following text concisely." },
      { role: "user", content: input.text }
    ],
    stream: true
  }, { signal });
  for await (const chunk of stream) {
    const piece = chunk.choices[0]?.delta?.content ?? "";
    if (piece) {
      yield { type: "text-delta", port: "text", textDelta: piece };
    }
  }
  yield { type: "finish", data: {} };
};
394
+
395
+ // src/provider-openai/common/OpenAI_ToolCalling.ts
396
+ import { buildToolDescription, filterValidToolCalls, toOpenAIMessages } from "@workglow/ai/worker";
397
+ import { getLogger as getLogger6, parsePartialJson as parsePartialJson2 } from "@workglow/util/worker";
398
/**
 * Maps the task-level toolChoice to OpenAI's tool_choice parameter:
 * falsy or "auto" → "auto"; "none" and "required" pass through; any other
 * string is treated as the name of a specific function to force.
 */
function mapOpenAIToolChoice(toolChoice) {
  if (!toolChoice) return "auto";
  switch (toolChoice) {
    case "auto":
      return "auto";
    case "none":
      return "none";
    case "required":
      return "required";
    default:
      return { type: "function", function: { name: toolChoice } };
  }
}
407
/**
 * ToolCalling task: chat completion with function tools attached. Returned
 * tool-call arguments are JSON-parsed, falling back to partial-JSON
 * parsing and finally {} for malformed payloads; calls are then validated
 * against the declared tools. Array prompts run sequentially.
 */
var OpenAI_ToolCalling = async (input, model, update_progress, signal) => {
  if (Array.isArray(input.prompt)) {
    getLogger6().warn("OpenAI_ToolCalling: array input received; processing sequentially (no native batch support)");
    const texts = [];
    const toolCallsList = [];
    for (const prompt of input.prompt) {
      const r = await OpenAI_ToolCalling({ ...input, prompt }, model, update_progress, signal);
      texts.push(r.text);
      toolCallsList.push(r.toolCalls);
    }
    return { text: texts, toolCalls: toolCallsList };
  }
  update_progress(0, "Starting OpenAI tool calling");
  const client = await getClient(model);
  const modelName = getModelName(model);
  const tools = input.tools.map((t) => ({
    type: "function",
    function: {
      name: t.name,
      description: buildToolDescription(t),
      parameters: t.inputSchema
    }
  }));
  const messages = toOpenAIMessages(input);
  const toolChoice = mapOpenAIToolChoice(input.toolChoice);
  const params = {
    model: modelName,
    messages,
    max_completion_tokens: input.maxTokens,
    temperature: input.temperature
  };
  if (toolChoice !== undefined) {
    params.tools = tools;
    params.tool_choice = toolChoice;
  }
  const response = await client.chat.completions.create(params, { signal });
  const message = response.choices[0]?.message;
  const text = message?.content ?? "";
  const toolCalls = [];
  for (const tc of message?.tool_calls ?? []) {
    // Skip non-function tool call entries.
    if (!("function" in tc)) continue;
    let args = {};
    const raw = tc.function.arguments;
    if (typeof raw === "string") {
      try {
        args = JSON.parse(raw);
      } catch {
        try {
          const partial = parsePartialJson2(raw);
          if (partial && typeof partial === "object") {
            args = partial;
          }
        } catch {
          args = {};
        }
      }
    }
    toolCalls.push({ id: tc.id, name: tc.function.name, input: args });
  }
  update_progress(100, "Completed OpenAI tool calling");
  return { text, toolCalls: filterValidToolCalls(toolCalls, input.tools) };
};
472
/**
 * Streaming ToolCalling: yields text-delta events for content chunks and,
 * after every batch of tool-call fragments, an object-delta snapshot of all
 * tool calls accumulated so far (arguments parsed permissively). The finish
 * event carries the full text plus the validated tool calls.
 */
var OpenAI_ToolCalling_Stream = async function* (input, model, signal) {
  const client = await getClient(model);
  const modelName = getModelName(model);
  const tools = input.tools.map((t) => ({
    type: "function",
    function: {
      name: t.name,
      description: buildToolDescription(t),
      parameters: t.inputSchema
    }
  }));
  const messages = toOpenAIMessages(input);
  const toolChoice = mapOpenAIToolChoice(input.toolChoice);
  const stream = await client.chat.completions.create({
    model: modelName,
    messages,
    max_completion_tokens: input.maxTokens,
    temperature: input.temperature,
    stream: true,
    ...toolChoice !== undefined ? { tools, tool_choice: toolChoice } : {}
  }, { signal });
  // Lenient argument parsing: strict JSON first, then partial JSON, then {}.
  const parseArgs = (raw) => {
    try {
      return JSON.parse(raw);
    } catch {
      return parsePartialJson2(raw) ?? {};
    }
  };
  let accumulatedText = "";
  // Keyed by the tool-call delta index; values accumulate id/name/arguments.
  const pending = new Map();
  for await (const chunk of stream) {
    const choice = chunk.choices[0];
    if (!choice) continue;
    const textDelta = choice.delta?.content ?? "";
    if (textDelta) {
      accumulatedText += textDelta;
      yield { type: "text-delta", port: "text", textDelta };
    }
    const tcDeltas = choice.delta?.tool_calls;
    if (!Array.isArray(tcDeltas)) continue;
    for (const tcDelta of tcDeltas) {
      let acc = pending.get(tcDelta.index);
      if (!acc) {
        acc = {
          id: tcDelta.id ?? "",
          name: tcDelta.function?.name ?? "",
          arguments: ""
        };
        pending.set(tcDelta.index, acc);
      }
      if (tcDelta.id) acc.id = tcDelta.id;
      if (tcDelta.function?.name) acc.name = tcDelta.function.name;
      if (tcDelta.function?.arguments) acc.arguments += tcDelta.function.arguments;
    }
    const snapshot = [...pending.values()].map((tc) => ({
      id: tc.id,
      name: tc.name,
      input: parseArgs(tc.arguments)
    }));
    yield { type: "object-delta", port: "toolCalls", objectDelta: snapshot };
  }
  const toolCalls = [...pending.values()].map((tc) => ({
    id: tc.id,
    name: tc.name,
    input: parseArgs(tc.arguments)
  }));
  yield {
    type: "finish",
    data: { text: accumulatedText, toolCalls: filterValidToolCalls(toolCalls, input.tools) }
  };
};
553
+
554
// src/provider-openai/common/OpenAI_JobRunFns.browser.ts
// Task-type → implementation lookup tables consumed by the providers below.

// One-shot (request/response) task functions.
var OPENAI_TASKS = {
  TextGenerationTask: OpenAI_TextGeneration,
  ModelInfoTask: OpenAI_ModelInfo,
  TextEmbeddingTask: OpenAI_TextEmbedding,
  TextRewriterTask: OpenAI_TextRewriter,
  TextSummaryTask: OpenAI_TextSummary,
  CountTokensTask: OpenAI_CountTokens,
  StructuredGenerationTask: OpenAI_StructuredGeneration,
  ToolCallingTask: OpenAI_ToolCalling,
  ModelSearchTask: OpenAI_ModelSearch
};

// Async-generator (streaming) task functions.
var OPENAI_STREAM_TASKS = {
  TextGenerationTask: OpenAI_TextGeneration_Stream,
  TextRewriterTask: OpenAI_TextRewriter_Stream,
  TextSummaryTask: OpenAI_TextSummary_Stream,
  StructuredGenerationTask: OpenAI_StructuredGeneration_Stream,
  ToolCallingTask: OpenAI_ToolCalling_Stream
};

// Reactive task functions (see OpenAI_CountTokens_Reactive above).
var OPENAI_REACTIVE_TASKS = {
  CountTokensTask: OpenAI_CountTokens_Reactive
};
576
+
577
+ // src/provider-openai/OpenAiQueuedProvider.ts
578
+ import {
579
+ QueuedAiProvider
580
+ } from "@workglow/ai";
581
/**
 * Queue-backed OpenAI provider: a remote (non-local), browser-capable
 * provider declaring every OpenAI task type implemented in this bundle.
 */
class OpenAiQueuedProvider extends QueuedAiProvider {
  name = OPENAI;
  isLocal = false;
  supportsBrowser = true;
  // Must stay in sync with the OPENAI_TASKS lookup table keys.
  taskTypes = [
    "TextGenerationTask",
    "TextEmbeddingTask",
    "TextRewriterTask",
    "TextSummaryTask",
    "CountTokensTask",
    "ModelInfoTask",
    "StructuredGenerationTask",
    "ToolCallingTask",
    "ModelSearchTask"
  ];
  constructor(tasks, streamTasks, reactiveTasks) {
    super(tasks, streamTasks, reactiveTasks);
  }
}
600
+
601
// src/provider-openai/registerOpenAiInline.browser.ts
/**
 * Registers an inline (same-thread) OpenAI provider wired to all task
 * tables; options default to an empty object.
 */
async function registerOpenAiInline(options) {
  const provider = new OpenAiQueuedProvider(OPENAI_TASKS, OPENAI_STREAM_TASKS, OPENAI_REACTIVE_TASKS);
  await provider.register(options ?? {});
}
605
+ // src/provider-openai/registerOpenAiWorker.browser.ts
606
+ import { getLogger as getLogger7, globalServiceRegistry, WORKER_SERVER } from "@workglow/util/worker";
607
+
608
+ // src/provider-openai/OpenAiProvider.ts
609
+ import {
610
+ AiProvider
611
+ } from "@workglow/ai/worker";
612
/**
 * Worker-side OpenAI provider: remote (non-local), browser-capable, and
 * declaring the same task types as OpenAiQueuedProvider.
 */
class OpenAiProvider extends AiProvider {
  name = OPENAI;
  isLocal = false;
  supportsBrowser = true;
  // Must stay in sync with the OPENAI_TASKS lookup table keys.
  taskTypes = [
    "TextGenerationTask",
    "TextEmbeddingTask",
    "TextRewriterTask",
    "TextSummaryTask",
    "CountTokensTask",
    "ModelInfoTask",
    "StructuredGenerationTask",
    "ToolCallingTask",
    "ModelSearchTask"
  ];
  constructor(tasks, streamTasks, reactiveTasks) {
    super(tasks, streamTasks, reactiveTasks);
  }
}
631
+
632
// src/provider-openai/registerOpenAiWorker.browser.ts
/**
 * Worker-side registration: attaches the OpenAI provider to the global
 * worker server, signals readiness, then logs completion.
 */
async function registerOpenAiWorker() {
  const workerServer = globalServiceRegistry.get(WORKER_SERVER);
  const provider = new OpenAiProvider(OPENAI_TASKS, OPENAI_STREAM_TASKS, OPENAI_REACTIVE_TASKS);
  provider.registerOnWorkerServer(workerServer);
  workerServer.sendReady();
  getLogger7().info("OpenAI worker job run functions registered");
}
639
+ export {
640
+ registerOpenAiWorker,
641
+ registerOpenAiInline,
642
+ loadOpenAISDK,
643
+ getModelName,
644
+ getClient
645
+ };
646
+
647
+ //# debugId=866D6CD3BDFA2E1164756E2164756E21