@workglow/ai-provider 0.0.122 → 0.0.123
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/provider-hf-transformers/common/HFT_JobRunFns.d.ts +2 -2
- package/dist/provider-hf-transformers/common/HFT_Pipeline.d.ts.map +1 -1
- package/dist/provider-hf-transformers/common/HFT_Streaming.d.ts +3 -2
- package/dist/provider-hf-transformers/common/HFT_Streaming.d.ts.map +1 -1
- package/dist/provider-hf-transformers/common/HFT_ToolCalling.d.ts.map +1 -1
- package/dist/provider-hf-transformers/runtime.js +39 -37
- package/dist/provider-hf-transformers/runtime.js.map +11 -11
- package/dist/provider-llamacpp/common/LlamaCpp_Runtime.d.ts +5 -8
- package/dist/provider-llamacpp/common/LlamaCpp_Runtime.d.ts.map +1 -1
- package/dist/provider-llamacpp/runtime.js.map +2 -2
- package/dist/provider-ollama/common/Ollama_JobRunFns.browser.d.ts +1 -1
- package/dist/provider-ollama/common/Ollama_JobRunFns.d.ts +1 -1
- package/dist/provider-openai/common/OpenAI_CountTokens.browser.d.ts +10 -0
- package/dist/provider-openai/common/OpenAI_CountTokens.browser.d.ts.map +1 -0
- package/dist/provider-openai/common/OpenAI_CountTokens.d.ts.map +1 -1
- package/dist/provider-openai/common/OpenAI_JobRunFns.browser.d.ts +12 -0
- package/dist/provider-openai/common/OpenAI_JobRunFns.browser.d.ts.map +1 -0
- package/dist/provider-openai/index.browser.d.ts +9 -0
- package/dist/provider-openai/index.browser.d.ts.map +1 -0
- package/dist/provider-openai/index.browser.js +105 -0
- package/dist/provider-openai/index.browser.js.map +13 -0
- package/dist/provider-openai/registerOpenAiInline.browser.d.ts +8 -0
- package/dist/provider-openai/registerOpenAiInline.browser.d.ts.map +1 -0
- package/dist/provider-openai/registerOpenAiWorker.browser.d.ts +7 -0
- package/dist/provider-openai/registerOpenAiWorker.browser.d.ts.map +1 -0
- package/dist/provider-openai/runtime.browser.d.ts +15 -0
- package/dist/provider-openai/runtime.browser.d.ts.map +1 -0
- package/dist/provider-openai/runtime.browser.js +647 -0
- package/dist/provider-openai/runtime.browser.js.map +25 -0
- package/dist/provider-openai/runtime.js.map +2 -2
- package/dist/provider-tf-mediapipe/common/TFMP_Client.d.ts.map +1 -1
- package/dist/provider-tf-mediapipe/common/TFMP_JobRunFns.d.ts +2 -2
- package/dist/provider-tf-mediapipe/runtime.js.map +2 -2
- package/package.json +21 -14

package/dist/provider-hf-transformers/common/HFT_JobRunFns.d.ts

@@ -309,7 +309,7 @@ export declare const HFT_TASKS: {
  };
  text: string | string[];
  }, {
- vector: import("@workglow/util").TypedArray[] | import("@workglow/util").TypedArray;
+ vector: import("@workglow/util/schema").TypedArray[] | import("@workglow/util/schema").TypedArray;
  }, {
  description?: string | undefined;
  metadata?: {
@@ -1172,7 +1172,7 @@ export declare const HFT_TASKS: {
  title?: string | undefined;
  };
  }, {
- vector: import("@workglow/util").TypedArray;
+ vector: import("@workglow/util/schema").TypedArray;
  }, {
  description?: string | undefined;
  metadata?: {

package/dist/provider-hf-transformers/common/HFT_Pipeline.d.ts.map

@@ -1 +1 @@
-
{"version":3,"file":"HFT_Pipeline.d.ts","sourceRoot":"","sources":["../../../src/provider-hf-transformers/common/HFT_Pipeline.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAEH,OAAO,KAAK,EAAE,sBAAsB,EAAgB,MAAM,2BAA2B,CAAC;AAEtF,OAAO,KAAK,EAAE,6BAA6B,EAAE,MAAM,mBAAmB,CAAC;AAKvE;;;GAGG;AACH,wBAAgB,cAAc,CAAC,GAAG,EAAE,MAAM,GAAG,IAAI,CAKhD;AAED,wBAAsB,mBAAmB,
+
{"version":3,"file":"HFT_Pipeline.d.ts","sourceRoot":"","sources":["../../../src/provider-hf-transformers/common/HFT_Pipeline.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAEH,OAAO,KAAK,EAAE,sBAAsB,EAAgB,MAAM,2BAA2B,CAAC;AAEtF,OAAO,KAAK,EAAE,6BAA6B,EAAE,MAAM,mBAAmB,CAAC;AAKvE;;;GAGG;AACH,wBAAgB,cAAc,CAAC,GAAG,EAAE,MAAM,GAAG,IAAI,CAKhD;AAED,wBAAsB,mBAAmB,wDAexC;AA0BD;;GAEG;AACH,wBAAgB,kBAAkB,IAAI,IAAI,CAEzC;AAED,wBAAgB,iBAAiB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAE3D;AAED,wBAAgB,oBAAoB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAE9D;AAOD;;;GAGG;AACH,wBAAgB,mBAAmB,CAAC,KAAK,EAAE,6BAA6B,GAAG,MAAM,CAIhF;AAED;;;;;GAKG;AACH,wBAAsB,WAAW,CAC/B,KAAK,EAAE,6BAA6B,EACpC,UAAU,EAAE,CAAC,QAAQ,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE,GAAG,KAAK,IAAI,EACvE,OAAO,GAAE,sBAA2B,EACpC,MAAM,CAAC,EAAE,WAAW,EACpB,gBAAgB,GAAE,MAAW,GAC5B,OAAO,CAAC,GAAG,CAAC,CA6Bd"}

package/dist/provider-hf-transformers/common/HFT_Streaming.d.ts

@@ -3,6 +3,7 @@
   * Copyright 2025 Steven Roussey <sroussey@gmail.com>
   * SPDX-License-Identifier: Apache-2.0
   */
+ import type { TextStreamer } from "@huggingface/transformers";
  import type { StreamEvent } from "@workglow/task-graph";
  export type StreamEventQueue<T> = {
  push: (event: T) => void;
@@ -16,9 +17,9 @@ export declare function createStreamEventQueue<T>(): StreamEventQueue<T>;
   * The pipeline runs to completion and updates the queue; the caller
   * consumes the queue as an AsyncIterable<StreamEvent>.
   */
- export declare function createStreamingTextStreamer(tokenizer: any, queue: StreamEventQueue<StreamEvent<any>>,
+ export declare function createStreamingTextStreamer(tokenizer: any, queue: StreamEventQueue<StreamEvent<any>>, textStreamer: typeof TextStreamer): TextStreamer;
  /**
   * Create a text streamer for a given tokenizer and update progress function
   */
- export declare function createTextStreamer(tokenizer: any, updateProgress: (progress: number, message?: string, details?: any) => void,
+ export declare function createTextStreamer(tokenizer: any, updateProgress: (progress: number, message?: string, details?: any) => void, textStreamer: typeof TextStreamer): TextStreamer;
  //# sourceMappingURL=HFT_Streaming.d.ts.map
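
The new declarations show the shape of the change: the TextStreamer class from @huggingface/transformers is now injected by the caller as a third argument instead of being imported inside HFT_Streaming. A minimal sketch of a call site under the new signature (the import path for createTextStreamer is assumed for illustration, not a documented entry point of @workglow/ai-provider):

import { AutoTokenizer, TextStreamer } from "@huggingface/transformers";
// Hypothetical import path; the helper is compiled into dist/provider-hf-transformers/common/HFT_Streaming.
import { createTextStreamer } from "@workglow/ai-provider";

const tokenizer = await AutoTokenizer.from_pretrained("Xenova/gpt2");
// The streamer class is passed in explicitly, so HFT_Streaming itself no longer
// needs an eager runtime import of @huggingface/transformers.
const streamer = createTextStreamer(
  tokenizer,
  (progress, message) => console.log(progress, message),
  TextStreamer
);
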

package/dist/provider-hf-transformers/common/HFT_Streaming.d.ts.map

@@ -1 +1 @@
-
{"version":3,"file":"HFT_Streaming.d.ts","sourceRoot":"","sources":["../../../src/provider-hf-transformers/common/HFT_Streaming.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAEH,OAAO,KAAK,EAAE,WAAW,EAAE,MAAM,sBAAsB,CAAC;AAExD,MAAM,MAAM,gBAAgB,CAAC,CAAC,IAAI;IAChC,IAAI,EAAE,CAAC,KAAK,EAAE,CAAC,KAAK,IAAI,CAAC;IACzB,IAAI,EAAE,MAAM,IAAI,CAAC;IACjB,KAAK,EAAE,CAAC,GAAG,EAAE,KAAK,KAAK,IAAI,CAAC;IAC5B,QAAQ,EAAE,aAAa,CAAC,CAAC,CAAC,CAAC;CAC5B,CAAC;AAEF,wBAAgB,sBAAsB,CAAC,CAAC,KAAK,gBAAgB,CAAC,CAAC,CAAC,CAsD/D;AAED;;;;GAIG;AACH,wBAAgB,2BAA2B,CACzC,SAAS,EAAE,GAAG,EACd,KAAK,EAAE,gBAAgB,CAAC,WAAW,CAAC,GAAG,CAAC,CAAC,EACzC,YAAY,EAAE,
+
{"version":3,"file":"HFT_Streaming.d.ts","sourceRoot":"","sources":["../../../src/provider-hf-transformers/common/HFT_Streaming.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAEH,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,2BAA2B,CAAC;AAC9D,OAAO,KAAK,EAAE,WAAW,EAAE,MAAM,sBAAsB,CAAC;AAExD,MAAM,MAAM,gBAAgB,CAAC,CAAC,IAAI;IAChC,IAAI,EAAE,CAAC,KAAK,EAAE,CAAC,KAAK,IAAI,CAAC;IACzB,IAAI,EAAE,MAAM,IAAI,CAAC;IACjB,KAAK,EAAE,CAAC,GAAG,EAAE,KAAK,KAAK,IAAI,CAAC;IAC5B,QAAQ,EAAE,aAAa,CAAC,CAAC,CAAC,CAAC;CAC5B,CAAC;AAEF,wBAAgB,sBAAsB,CAAC,CAAC,KAAK,gBAAgB,CAAC,CAAC,CAAC,CAsD/D;AAED;;;;GAIG;AACH,wBAAgB,2BAA2B,CACzC,SAAS,EAAE,GAAG,EACd,KAAK,EAAE,gBAAgB,CAAC,WAAW,CAAC,GAAG,CAAC,CAAC,EACzC,YAAY,EAAE,OAAO,YAAY,gBASlC;AAED;;GAEG;AACH,wBAAgB,kBAAkB,CAChC,SAAS,EAAE,GAAG,EACd,cAAc,EAAE,CAAC,QAAQ,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE,MAAM,EAAE,OAAO,CAAC,EAAE,GAAG,KAAK,IAAI,EAC3E,YAAY,EAAE,OAAO,YAAY,gBAalC"}

package/dist/provider-hf-transformers/common/HFT_ToolCalling.d.ts.map

@@ -1 +1 @@
-
{"version":3,"file":"HFT_ToolCalling.d.ts","sourceRoot":"","sources":["../../../src/provider-hf-transformers/common/HFT_ToolCalling.ts"],"names":[],"mappings":"AAAA;;;;GAIG;
+
{"version":3,"file":"HFT_ToolCalling.d.ts","sourceRoot":"","sources":["../../../src/provider-hf-transformers/common/HFT_ToolCalling.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAQH,OAAO,KAAK,EACV,eAAe,EACf,kBAAkB,EAClB,oBAAoB,EACpB,qBAAqB,EAEtB,MAAM,cAAc,CAAC;AAEtB,OAAO,KAAK,EAAE,6BAA6B,EAAE,MAAM,mBAAmB,CAAC;AA2DvE,eAAO,MAAM,eAAe,EAAE,eAAe,CAC3C,oBAAoB,EACpB,qBAAqB,EACrB,6BAA6B,CAwF9B,CAAC;AAEF,eAAO,MAAM,sBAAsB,EAAE,kBAAkB,CACrD,oBAAoB,EACpB,qBAAqB,EACrB,6BAA6B,CAkF9B,CAAC"}

package/dist/provider-hf-transformers/runtime.js

@@ -1044,9 +1044,8 @@ function createStreamEventQueue() {
  };
  return { push, done, error, iterable };
  }
- function createStreamingTextStreamer(tokenizer, queue,
-
- return new TextStreamer(tokenizer, {
+ function createStreamingTextStreamer(tokenizer, queue, textStreamer) {
+ return new textStreamer(tokenizer, {
  skip_prompt: true,
  decode_kwargs: { skip_special_tokens: true },
  callback_function: (text) => {
@@ -1054,10 +1053,9 @@ function createStreamingTextStreamer(tokenizer, queue, transformers) {
  }
  });
  }
- function createTextStreamer(tokenizer, updateProgress,
- const { TextStreamer } = transformers;
+ function createTextStreamer(tokenizer, updateProgress, textStreamer) {
  let count = 0;
- return new
+ return new textStreamer(tokenizer, {
  skip_prompt: true,
  decode_kwargs: { skip_special_tokens: true },
  callback_function: (text) => {
@@ -1116,14 +1114,14 @@ function extractJsonFromText(text) {
  }
  var HFT_StructuredGeneration = async (input, model, onProgress, signal) => {
  const generateText = await getPipeline(model, onProgress, {}, signal);
- const
+ const { TextStreamer } = await loadTransformersSDK();
  const prompt = buildStructuredGenerationPrompt(input);
  const messages = [{ role: "user", content: prompt }];
  const formattedPrompt = generateText.tokenizer.apply_chat_template(messages, {
  tokenize: false,
  add_generation_prompt: true
  });
- const streamer = createTextStreamer(generateText.tokenizer, onProgress,
+ const streamer = createTextStreamer(generateText.tokenizer, onProgress, TextStreamer);
  let results = await generateText(formattedPrompt, {
  max_new_tokens: input.maxTokens ?? 1024,
  temperature: input.temperature ?? undefined,
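
The pattern repeated throughout the runtime below is that TextStreamer is resolved at call time via loadTransformersSDK() and threaded into createTextStreamer / createStreamingTextStreamer. loadTransformersSDK itself is not part of this diff; a sketch of what such a lazy loader typically looks like, under that assumption:

// Assumed shape only; the real loadTransformersSDK implementation is not shown in this diff.
let sdkPromise: Promise<typeof import("@huggingface/transformers")> | undefined;

export function loadTransformersSDK() {
  // A dynamic import keeps @huggingface/transformers out of the eager module graph
  // and is resolved once, then reused by every job-run function.
  sdkPromise ??= import("@huggingface/transformers");
  return sdkPromise;
}
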
@@ -1140,7 +1138,7 @@ var HFT_StructuredGeneration = async (input, model, onProgress, signal) => {
  var HFT_StructuredGeneration_Stream = async function* (input, model, signal) {
  const noopProgress = () => {};
  const generateText = await getPipeline(model, noopProgress, {}, signal);
- const
+ const { TextStreamer } = await loadTransformersSDK();
  const prompt = buildStructuredGenerationPrompt(input);
  const messages = [{ role: "user", content: prompt }];
  const formattedPrompt = generateText.tokenizer.apply_chat_template(messages, {
@@ -1148,7 +1146,7 @@ var HFT_StructuredGeneration_Stream = async function* (input, model, signal) {
  add_generation_prompt: true
  });
  const queue = createStreamEventQueue();
- const streamer = createStreamingTextStreamer(generateText.tokenizer, queue,
+ const streamer = createStreamingTextStreamer(generateText.tokenizer, queue, TextStreamer);
  let fullText = "";
  const originalPush = queue.push;
  queue.push = (event) => {
@@ -1320,12 +1318,12 @@ var HFT_TextGeneration = async (input, model, onProgress, signal) => {
  logger.time(timerLabel, { model: model?.provider_config.model_path });
  const isArrayInput = Array.isArray(input.prompt);
  const generateText = await getPipeline(model, onProgress, {}, signal);
- const
+ const { TextStreamer } = await loadTransformersSDK();
  logger.debug("HFT TextGeneration: pipeline ready, generating text", {
  model: model?.provider_config.model_path,
  promptLength: isArrayInput ? input.prompt.length : input.prompt?.length
  });
- const streamer = isArrayInput ? undefined : createTextStreamer(generateText.tokenizer, onProgress,
+ const streamer = isArrayInput ? undefined : createTextStreamer(generateText.tokenizer, onProgress, TextStreamer);
  let results = await generateText(input.prompt, {
  ...streamer ? { streamer } : {}
  });
@@ -1350,9 +1348,9 @@ var HFT_TextGeneration = async (input, model, onProgress, signal) => {
  var HFT_TextGeneration_Stream = async function* (input, model, signal) {
  const noopProgress = () => {};
  const generateText = await getPipeline(model, noopProgress, {}, signal);
- const
+ const { TextStreamer } = await loadTransformersSDK();
  const queue = createStreamEventQueue();
- const streamer = createStreamingTextStreamer(generateText.tokenizer, queue,
+ const streamer = createStreamingTextStreamer(generateText.tokenizer, queue, TextStreamer);
  const pipelinePromise = generateText(input.prompt, {
  streamer
  }).then(() => queue.done(), (err) => queue.error(err));
@@ -1455,8 +1453,8 @@ var HFT_TextQuestionAnswer = async (input, model, onProgress, signal) => {
  }
  return { text: answers };
  }
- const
- const streamer = createTextStreamer(generateAnswer.tokenizer, onProgress,
+ const { TextStreamer } = await loadTransformersSDK();
+ const streamer = createTextStreamer(generateAnswer.tokenizer, onProgress, TextStreamer);
  const result = await generateAnswer(input.question, input.context, {
  streamer
  });
@@ -1473,9 +1471,9 @@ var HFT_TextQuestionAnswer = async (input, model, onProgress, signal) => {
  var HFT_TextQuestionAnswer_Stream = async function* (input, model, signal) {
  const noopProgress = () => {};
  const generateAnswer = await getPipeline(model, noopProgress, {}, signal);
- const
+ const { TextStreamer } = await loadTransformersSDK();
  const queue = createStreamEventQueue();
- const streamer = createStreamingTextStreamer(generateAnswer.tokenizer, queue,
+ const streamer = createStreamingTextStreamer(generateAnswer.tokenizer, queue, TextStreamer);
  let pipelineResult;
  const pipelinePromise = generateAnswer(input.question, input.context, {
  streamer
@@ -1501,8 +1499,8 @@ init_HFT_Pipeline();
  var HFT_TextRewriter = async (input, model, onProgress, signal) => {
  const isArrayInput = Array.isArray(input.text);
  const generateText = await getPipeline(model, onProgress, {}, signal);
- const
- const streamer = isArrayInput ? undefined : createTextStreamer(generateText.tokenizer, onProgress,
+ const { TextStreamer } = await loadTransformersSDK();
+ const streamer = isArrayInput ? undefined : createTextStreamer(generateText.tokenizer, onProgress, TextStreamer);
  if (isArrayInput) {
  const texts = input.text;
  const promptedTexts = texts.map((t) => (input.prompt ? input.prompt + `
@@ -1538,9 +1536,9 @@ var HFT_TextRewriter = async (input, model, onProgress, signal) => {
  var HFT_TextRewriter_Stream = async function* (input, model, signal) {
  const noopProgress = () => {};
  const generateText = await getPipeline(model, noopProgress, {}, signal);
- const
+ const { TextStreamer } = await loadTransformersSDK();
  const queue = createStreamEventQueue();
- const streamer = createStreamingTextStreamer(generateText.tokenizer, queue,
+ const streamer = createStreamingTextStreamer(generateText.tokenizer, queue, TextStreamer);
  const promptedText = (input.prompt ? input.prompt + `
  ` : "") + input.text;
  const pipelinePromise = generateText(promptedText, {
@@ -1556,8 +1554,8 @@ init_HFT_Pipeline();
  var HFT_TextSummary = async (input, model, onProgress, signal) => {
  const isArrayInput = Array.isArray(input.text);
  const generateSummary = await getPipeline(model, onProgress, {}, signal);
- const
- const streamer = isArrayInput ? undefined : createTextStreamer(generateSummary.tokenizer, onProgress,
+ const { TextStreamer } = await loadTransformersSDK();
+ const streamer = isArrayInput ? undefined : createTextStreamer(generateSummary.tokenizer, onProgress, TextStreamer);
  const result = await generateSummary(input.text, {
  ...streamer ? { streamer } : {}
  });
@@ -1580,9 +1578,9 @@ var HFT_TextSummary = async (input, model, onProgress, signal) => {
  var HFT_TextSummary_Stream = async function* (input, model, signal) {
  const noopProgress = () => {};
  const generateSummary = await getPipeline(model, noopProgress, {}, signal);
- const
+ const { TextStreamer } = await loadTransformersSDK();
  const queue = createStreamEventQueue();
- const streamer = createStreamingTextStreamer(generateSummary.tokenizer, queue,
+ const streamer = createStreamingTextStreamer(generateSummary.tokenizer, queue, TextStreamer);
  const pipelinePromise = generateSummary(input.text, {
  streamer
  }).then(() => queue.done(), (err) => queue.error(err));
@@ -1596,8 +1594,8 @@ init_HFT_Pipeline();
  var HFT_TextTranslation = async (input, model, onProgress, signal) => {
  const isArrayInput = Array.isArray(input.text);
  const translate = await getPipeline(model, onProgress, {}, signal);
- const
- const streamer = isArrayInput ? undefined : createTextStreamer(translate.tokenizer, onProgress,
+ const { TextStreamer } = await loadTransformersSDK();
+ const streamer = isArrayInput ? undefined : createTextStreamer(translate.tokenizer, onProgress, TextStreamer);
  const result = await translate(input.text, {
  src_lang: input.source_lang,
  tgt_lang: input.target_lang,
@@ -1619,9 +1617,9 @@ var HFT_TextTranslation = async (input, model, onProgress, signal) => {
  var HFT_TextTranslation_Stream = async function* (input, model, signal) {
  const noopProgress = () => {};
  const translate = await getPipeline(model, noopProgress, {}, signal);
- const
+ const { TextStreamer } = await loadTransformersSDK();
  const queue = createStreamEventQueue();
- const streamer = createStreamingTextStreamer(translate.tokenizer, queue,
+ const streamer = createStreamingTextStreamer(translate.tokenizer, queue, TextStreamer);
  const pipelinePromise = translate(input.text, {
  src_lang: input.source_lang,
  tgt_lang: input.target_lang,
@@ -1634,7 +1632,11 @@ var HFT_TextTranslation_Stream = async function* (input, model, signal) {

  // src/provider-hf-transformers/common/HFT_ToolCalling.ts
  init_HFT_Pipeline();
- import {
+ import {
+ buildToolDescription,
+ filterValidToolCalls,
+ toTextFlatMessages
+ } from "@workglow/ai/worker";
  function mapHFTTools(tools) {
  return tools.map((t) => ({
  type: "function",
@@ -1670,7 +1672,7 @@ ${requiredInstruction}` };
  var HFT_ToolCalling = async (input, model, onProgress, signal) => {
  const isArrayInput = Array.isArray(input.prompt);
  const generateText = await getPipeline(model, onProgress, {}, signal);
- const
+ const { TextStreamer } = await loadTransformersSDK();
  if (isArrayInput) {
  const prompts = input.prompt;
  const texts = [];
@@ -1684,7 +1686,7 @@ var HFT_ToolCalling = async (input, model, onProgress, signal) => {
  tokenize: false,
  add_generation_prompt: true
  });
- const streamer2 = createTextStreamer(generateText.tokenizer, onProgress,
+ const streamer2 = createTextStreamer(generateText.tokenizer, onProgress, TextStreamer);
  let results2 = await generateText(prompt2, {
  max_new_tokens: input.maxTokens ?? 1024,
  temperature: input.temperature ?? undefined,
@@ -1708,7 +1710,7 @@ var HFT_ToolCalling = async (input, model, onProgress, signal) => {
  tokenize: false,
  add_generation_prompt: true
  });
- const streamer = createTextStreamer(generateText.tokenizer, onProgress,
+ const streamer = createTextStreamer(generateText.tokenizer, onProgress, TextStreamer);
  let results = await generateText(prompt, {
  max_new_tokens: input.maxTokens ?? 1024,
  temperature: input.temperature ?? undefined,
@@ -1728,7 +1730,7 @@ var HFT_ToolCalling = async (input, model, onProgress, signal) => {
  var HFT_ToolCalling_Stream = async function* (input, model, signal) {
  const noopProgress = () => {};
  const generateText = await getPipeline(model, noopProgress, {}, signal);
- const
+ const { TextStreamer } = await loadTransformersSDK();
  const messages = toTextFlatMessages(input);
  const tools = resolveHFTToolsAndMessages(input, messages);
  const prompt = generateText.tokenizer.apply_chat_template(messages, {
@@ -1738,7 +1740,7 @@ var HFT_ToolCalling_Stream = async function* (input, model, signal) {
  });
  const innerQueue = createStreamEventQueue();
  const outerQueue = createStreamEventQueue();
- const streamer = createStreamingTextStreamer(generateText.tokenizer, innerQueue,
+ const streamer = createStreamingTextStreamer(generateText.tokenizer, innerQueue, TextStreamer);
  let fullText = "";
  const filter = createToolCallMarkupFilter((text) => {
  outerQueue.push({ type: "text-delta", port: "text", textDelta: text });
@@ -2002,4 +2004,4 @@ export {
  HF_TRANSFORMERS_ONNX
  };

- //# debugId=
+ //# debugId=8397F27EBE0B0C0B64756E2164756E21