@workglow/ai-provider 0.0.105 → 0.0.107
This diff covers the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registries.
- package/README.md +39 -13
- package/dist/{HFT_JobRunFns-bpedvh0r.js → HFT_JobRunFns-c40ex37f.js} +12 -4
- package/dist/{HFT_JobRunFns-bpedvh0r.js.map → HFT_JobRunFns-c40ex37f.js.map} +1 -1
- package/dist/anthropic/AnthropicProvider.d.ts +1 -1
- package/dist/anthropic/AnthropicProvider.d.ts.map +1 -1
- package/dist/anthropic/common/Anthropic_JobRunFns.d.ts +3 -1
- package/dist/anthropic/common/Anthropic_JobRunFns.d.ts.map +1 -1
- package/dist/anthropic/common/Anthropic_ModelSchema.d.ts.map +1 -1
- package/dist/anthropic/index.js +142 -4
- package/dist/anthropic/index.js.map +3 -3
- package/dist/google-gemini/GoogleGeminiProvider.d.ts +1 -1
- package/dist/google-gemini/GoogleGeminiProvider.d.ts.map +1 -1
- package/dist/google-gemini/common/Gemini_JobRunFns.d.ts +3 -1
- package/dist/google-gemini/common/Gemini_JobRunFns.d.ts.map +1 -1
- package/dist/google-gemini/common/Gemini_ModelSchema.d.ts.map +1 -1
- package/dist/google-gemini/index.js +116 -4
- package/dist/google-gemini/index.js.map +3 -3
- package/dist/hf-transformers/HuggingFaceTransformersProvider.d.ts +5 -5
- package/dist/hf-transformers/HuggingFaceTransformersProvider.d.ts.map +1 -1
- package/dist/hf-transformers/common/HFT_JobRunFns.d.ts +113 -3
- package/dist/hf-transformers/common/HFT_JobRunFns.d.ts.map +1 -1
- package/dist/hf-transformers/index.js +16 -8
- package/dist/hf-transformers/index.js.map +2 -2
- package/dist/{index-w496eeda.js → index-14pbwsc9.js} +3 -2
- package/dist/{index-w496eeda.js.map → index-14pbwsc9.js.map} +3 -3
- package/dist/{index-3345vk2h.js → index-4fr8p4gy.js} +303 -5
- package/dist/index-4fr8p4gy.js.map +10 -0
- package/dist/{index-gjc388y4.js → index-5hjgs1z7.js} +3 -2
- package/dist/{index-gjc388y4.js.map → index-5hjgs1z7.js.map} +4 -4
- package/dist/{index-c5z3v9gn.js → index-5qjdc78z.js} +3 -2
- package/dist/{index-c5z3v9gn.js.map → index-5qjdc78z.js.map} +4 -4
- package/dist/{index-t54dgrfj.js → index-aef54vq3.js} +3 -2
- package/dist/{index-t54dgrfj.js.map → index-aef54vq3.js.map} +4 -4
- package/dist/index-cejxxqcz.js +55 -0
- package/dist/index-cejxxqcz.js.map +10 -0
- package/dist/{index-79rqre58.js → index-drcnh4z5.js} +3 -2
- package/dist/{index-79rqre58.js.map → index-drcnh4z5.js.map} +4 -4
- package/dist/{index-cfd8ne0j.js → index-xc6m9mcp.js} +3 -2
- package/dist/{index-cfd8ne0j.js.map → index-xc6m9mcp.js.map} +3 -3
- package/dist/{index-5e3yw7mb.js → index-zqq3kw0n.js} +55 -54
- package/dist/index-zqq3kw0n.js.map +11 -0
- package/dist/index.js +12 -12
- package/dist/index.js.map +1 -1
- package/dist/provider-hf-inference/HfInferenceProvider.d.ts +1 -1
- package/dist/provider-hf-inference/HfInferenceProvider.d.ts.map +1 -1
- package/dist/provider-hf-inference/common/HFI_JobRunFns.d.ts +3 -1
- package/dist/provider-hf-inference/common/HFI_JobRunFns.d.ts.map +1 -1
- package/dist/provider-hf-inference/common/HFI_ModelSchema.d.ts.map +1 -1
- package/dist/provider-hf-inference/index.js +165 -5
- package/dist/provider-hf-inference/index.js.map +3 -3
- package/dist/provider-llamacpp/LlamaCppProvider.d.ts +1 -1
- package/dist/provider-llamacpp/LlamaCppProvider.d.ts.map +1 -1
- package/dist/provider-llamacpp/common/LlamaCpp_JobRunFns.d.ts +3 -1
- package/dist/provider-llamacpp/common/LlamaCpp_JobRunFns.d.ts.map +1 -1
- package/dist/provider-llamacpp/index.js +138 -4
- package/dist/provider-llamacpp/index.js.map +3 -3
- package/dist/provider-ollama/OllamaProvider.d.ts +1 -1
- package/dist/provider-ollama/OllamaProvider.d.ts.map +1 -1
- package/dist/provider-ollama/common/Ollama_JobRunFns.browser.d.ts +3 -1
- package/dist/provider-ollama/common/Ollama_JobRunFns.browser.d.ts.map +1 -1
- package/dist/provider-ollama/common/Ollama_JobRunFns.d.ts +3 -1
- package/dist/provider-ollama/common/Ollama_JobRunFns.d.ts.map +1 -1
- package/dist/provider-ollama/index.browser.js +122 -4
- package/dist/provider-ollama/index.browser.js.map +4 -4
- package/dist/provider-ollama/index.js +121 -4
- package/dist/provider-ollama/index.js.map +3 -3
- package/dist/provider-openai/OpenAiProvider.d.ts +1 -1
- package/dist/provider-openai/OpenAiProvider.d.ts.map +1 -1
- package/dist/provider-openai/common/OpenAI_JobRunFns.d.ts +3 -1
- package/dist/provider-openai/common/OpenAI_JobRunFns.d.ts.map +1 -1
- package/dist/provider-openai/common/OpenAI_ModelSchema.d.ts.map +1 -1
- package/dist/provider-openai/index.js +162 -4
- package/dist/provider-openai/index.js.map +3 -3
- package/package.json +11 -11
- package/dist/index-3345vk2h.js.map +0 -10
- package/dist/index-5e3yw7mb.js.map +0 -11
- package/dist/index-m0r2hvfz.js +0 -57
- package/dist/index-m0r2hvfz.js.map +0 -10
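The headline change in this release is a new ToolCallingTask threaded through every provider (Anthropic, Google Gemini, HF Inference, HF Transformers, llama.cpp, Ollama, OpenAI), which accounts for the uniform churn across the JobRunFns modules, provider d.ts files, and the renamed content-hashed chunks. Only the provider-hf-inference and provider-llamacpp hunks are reproduced below. Read off the compiled code, the task's input and output look roughly like this sketch; the canonical `ToolCallingTaskInput`/`ToolCallingTaskOutput`/`ToolDefinition` types live in `@workglow/ai`, so any field not actually touched by the bundle is an assumption:

```ts
// Shape sketch inferred from the compiled hunks in this diff; not the
// authoritative @workglow/ai definitions.
interface ToolDefinition {
  name: string;
  description?: string;
  inputSchema: Record<string, unknown>; // JSON Schema for the tool's arguments
}

interface ToolCallingTaskInput {
  prompt: string;
  systemPrompt?: string;
  tools: ToolDefinition[];
  toolChoice?: "auto" | "none" | "required"; // specific tool names fall back to "auto" on HF Inference
  maxTokens?: number;
  temperature?: number;
}

interface ToolCallingTaskOutput {
  text: string;
  // Keyed by call id: provider-assigned when present, else synthesized as `call_${index}`.
  toolCalls: Record<string, { id: string; name: string; input: Record<string, unknown> }>;
}
```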
package/dist/provider-hf-inference/index.js

```diff
@@ -4,12 +4,13 @@ import {
   HfInferenceModelRecordSchema,
   HfInferenceModelSchema,
   HfInferenceProvider
-} from "../index-
+} from "../index-aef54vq3.js";
 import {
   __require
 } from "../index-6j5pq722.js";
 // src/provider-hf-inference/common/HFI_JobRunFns.ts
-import {
+import { buildToolDescription, filterValidToolCalls } from "@workglow/ai";
+import { getLogger, parsePartialJson } from "@workglow/util";
 var _sdk;
 async function loadHfInferenceSDK() {
   if (!_sdk) {
```
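The `loadHfInferenceSDK` call behind these imports keeps `@huggingface/inference` out of the static import graph. The package's sourcemap embeds the loader's source; trimmed to its core, it is a memoized dynamic import:

```ts
// From the sourcesContent embedded in index.js.map (trimmed): the SDK is an
// optional dependency, imported on first use and cached in module scope.
let _sdk: typeof import("@huggingface/inference") | undefined;

async function loadHfInferenceSDK() {
  if (!_sdk) {
    try {
      _sdk = await import("@huggingface/inference");
    } catch {
      throw new Error(
        "@huggingface/inference is required for Hugging Face Inference tasks. " +
          "Install it with: bun add @huggingface/inference"
      );
    }
  }
  return _sdk;
}
```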
```diff
@@ -180,16 +181,173 @@ var HFI_TextSummary_Stream = async function* (input, model, signal) {
   }
   yield { type: "finish", data: {} };
 };
+function mapHFIToolChoice(toolChoice) {
+  if (!toolChoice || toolChoice === "auto")
+    return "auto";
+  if (toolChoice === "none")
+    return "none";
+  if (toolChoice === "required")
+    return "required";
+  return "auto";
+}
+var HFI_ToolCalling = async (input, model, update_progress, signal) => {
+  update_progress(0, "Starting HF Inference tool calling");
+  const client = await getClient(model);
+  const modelName = getModelName(model);
+  const provider = getProvider(model);
+  const tools = input.tools.map((t) => ({
+    type: "function",
+    function: {
+      name: t.name,
+      description: buildToolDescription(t),
+      parameters: t.inputSchema
+    }
+  }));
+  const messages = [];
+  if (input.systemPrompt) {
+    messages.push({ role: "system", content: input.systemPrompt });
+  }
+  messages.push({ role: "user", content: input.prompt });
+  const toolChoice = mapHFIToolChoice(input.toolChoice);
+  const params = {
+    model: modelName,
+    messages,
+    max_tokens: input.maxTokens,
+    temperature: input.temperature,
+    provider
+  };
+  if (toolChoice !== "none") {
+    params.tools = tools;
+    params.tool_choice = toolChoice;
+  }
+  const response = await client.chatCompletion(params, { signal });
+  const text = response.choices[0]?.message?.content ?? "";
+  const toolCalls = {};
+  let callIndex = 0;
+  (response.choices[0]?.message?.tool_calls ?? []).forEach((tc) => {
+    let parsedInput = {};
+    const rawArgs = tc.function?.arguments;
+    if (typeof rawArgs === "string") {
+      try {
+        parsedInput = JSON.parse(rawArgs);
+      } catch {
+        const partial = parsePartialJson(rawArgs);
+        parsedInput = partial ?? {};
+      }
+    } else if (rawArgs != null) {
+      parsedInput = rawArgs;
+    }
+    const id = tc.id ?? `call_${callIndex}`;
+    callIndex++;
+    toolCalls[id] = { id, name: tc.function.name, input: parsedInput };
+  });
+  update_progress(100, "Completed HF Inference tool calling");
+  return { text, toolCalls: filterValidToolCalls(toolCalls, input.tools) };
+};
+var HFI_ToolCalling_Stream = async function* (input, model, signal) {
+  const client = await getClient(model);
+  const modelName = getModelName(model);
+  const provider = getProvider(model);
+  const tools = input.tools.map((t) => ({
+    type: "function",
+    function: {
+      name: t.name,
+      description: buildToolDescription(t),
+      parameters: t.inputSchema
+    }
+  }));
+  const messages = [];
+  if (input.systemPrompt) {
+    messages.push({ role: "system", content: input.systemPrompt });
+  }
+  messages.push({ role: "user", content: input.prompt });
+  const toolChoice = mapHFIToolChoice(input.toolChoice);
+  const params = {
+    model: modelName,
+    messages,
+    max_tokens: input.maxTokens,
+    temperature: input.temperature,
+    provider
+  };
+  if (toolChoice !== "none") {
+    params.tools = tools;
+    params.tool_choice = toolChoice;
+  }
+  const stream = client.chatCompletionStream(params, { signal });
+  let accumulatedText = "";
+  const toolCallAccumulator = new Map;
+  for await (const chunk of stream) {
+    const choice = chunk.choices[0];
+    if (!choice)
+      continue;
+    const contentDelta = choice.delta?.content ?? "";
+    if (contentDelta) {
+      accumulatedText += contentDelta;
+      yield { type: "text-delta", port: "text", textDelta: contentDelta };
+    }
+    const tcDeltas = choice.delta?.tool_calls;
+    if (Array.isArray(tcDeltas)) {
+      for (const tcDelta of tcDeltas) {
+        const idx = tcDelta.index;
+        if (!toolCallAccumulator.has(idx)) {
+          toolCallAccumulator.set(idx, {
+            id: tcDelta.id ?? "",
+            name: tcDelta.function?.name ?? "",
+            arguments: ""
+          });
+        }
+        const acc = toolCallAccumulator.get(idx);
+        if (tcDelta.id)
+          acc.id = tcDelta.id;
+        if (tcDelta.function?.name)
+          acc.name = tcDelta.function.name;
+        if (tcDelta.function?.arguments)
+          acc.arguments += tcDelta.function.arguments;
+      }
+      const snapshotObject = {};
+      Array.from(toolCallAccumulator.entries()).forEach(([idx, tc]) => {
+        let parsedInput;
+        try {
+          parsedInput = JSON.parse(tc.arguments);
+        } catch {
+          const partial = parsePartialJson(tc.arguments);
+          parsedInput = partial ?? {};
+        }
+        const key = tc.id || String(idx);
+        snapshotObject[key] = { id: tc.id, name: tc.name, input: parsedInput };
+      });
+      yield { type: "object-delta", port: "toolCalls", objectDelta: snapshotObject };
+    }
+  }
+  const toolCalls = {};
+  Array.from(toolCallAccumulator.entries()).forEach(([idx, tc]) => {
+    let finalInput;
+    try {
+      finalInput = JSON.parse(tc.arguments);
+    } catch {
+      finalInput = parsePartialJson(tc.arguments) ?? {};
+    }
+    const key = tc.id || String(idx);
+    toolCalls[key] = { id: tc.id, name: tc.name, input: finalInput };
+  });
+  const validToolCalls = filterValidToolCalls(toolCalls, input.tools);
+  yield {
+    type: "finish",
+    data: { text: accumulatedText, toolCalls: validToolCalls }
+  };
+};
 var HFI_TASKS = {
   TextGenerationTask: HFI_TextGeneration,
   TextEmbeddingTask: HFI_TextEmbedding,
   TextRewriterTask: HFI_TextRewriter,
-  TextSummaryTask: HFI_TextSummary
+  TextSummaryTask: HFI_TextSummary,
+  ToolCallingTask: HFI_ToolCalling
 };
 var HFI_STREAM_TASKS = {
   TextGenerationTask: HFI_TextGeneration_Stream,
   TextRewriterTask: HFI_TextRewriter_Stream,
-  TextSummaryTask: HFI_TextSummary_Stream
+  TextSummaryTask: HFI_TextSummary_Stream,
+  ToolCallingTask: HFI_ToolCalling_Stream
 };
 // src/provider-hf-inference/HFI_Worker.ts
 import { globalServiceRegistry, WORKER_SERVER } from "@workglow/util";
```
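A minimal sketch of driving the new non-streaming run function directly, matching the `(input, model, update_progress, signal)` signature above. The import subpath and model id are assumptions; per the `getClient` source embedded in the package's sourcemap, the API key resolves from `provider_config.credential_key`, `provider_config.api_key`, or the `HF_TOKEN` environment variable:

```ts
import { HFI_ToolCalling } from "@workglow/ai-provider/provider-hf-inference"; // subpath assumed from the dist layout

// Hypothetical model config; only provider_config.model_name is required,
// and the API key falls back to process.env.HF_TOKEN.
const model: any = {
  provider_config: { model_name: "meta-llama/Llama-3.1-8B-Instruct" },
};

const { text, toolCalls } = await HFI_ToolCalling(
  {
    prompt: "What is the weather in Paris?",
    tools: [
      {
        name: "get_weather",
        description: "Look up the current weather for a city",
        inputSchema: {
          type: "object",
          properties: { city: { type: "string" } },
          required: ["city"],
        },
      },
    ],
    toolChoice: "auto",
  } as any,
  model,
  (progress: number, message?: string) => console.log(progress, message), // update_progress
  new AbortController().signal
);
```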
```diff
@@ -206,6 +364,8 @@ export {
   HfInferenceModelConfigSchema,
   HF_INFERENCE,
   HFI_WORKER_JOBRUN_REGISTER,
+  HFI_ToolCalling_Stream,
+  HFI_ToolCalling,
   HFI_TextSummary_Stream,
   HFI_TextSummary,
   HFI_TextRewriter_Stream,
```
```diff
@@ -217,4 +377,4 @@ export {
   HFI_STREAM_TASKS
 };
 
-//# debugId=
+//# debugId=ED879DD8B5C5316464756E2164756E21
```
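On the streaming side, `HFI_ToolCalling_Stream` re-parses each tool call's accumulated argument string on every chunk (`JSON.parse`, falling back to `parsePartialJson`) and yields the complete `toolCalls` object as an `object-delta`, so snapshots replace rather than patch. A minimal consumer sketch, with event shapes copied from the yields above and everything else assumed:

```ts
// Event shapes as emitted by HFI_ToolCalling_Stream in this bundle.
type ToolCallSnapshot = Record<string, { id: string; name: string; input: unknown }>;
type ToolCallingStreamEvent =
  | { type: "text-delta"; port: "text"; textDelta: string }
  | { type: "object-delta"; port: "toolCalls"; objectDelta: ToolCallSnapshot }
  | { type: "finish"; data: { text: string; toolCalls: ToolCallSnapshot } };

async function consume(events: AsyncIterable<ToolCallingStreamEvent>) {
  let text = "";
  let toolCalls: ToolCallSnapshot = {};
  for await (const ev of events) {
    if (ev.type === "text-delta") {
      text += ev.textDelta; // text deltas append
    } else if (ev.type === "object-delta") {
      toolCalls = ev.objectDelta; // each event is a full snapshot, not a patch
    } else if (ev.type === "finish") {
      return ev.data; // final text plus the validated tool calls
    }
  }
  return { text, toolCalls };
}
```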
package/dist/provider-hf-inference/index.js.map

[3 lines changed: "sourcesContent" now embeds the updated HFI_JobRunFns.ts source (adding mapHFIToolChoice, HFI_ToolCalling, HFI_ToolCalling_Stream, and the ToolCallingTask registry entries shown above in compiled form), "mappings" was regenerated, and "debugId" is now "ED879DD8B5C5316464756E2164756E21"; the multi-kilobyte embedded source and VLQ payloads are omitted here.]
package/dist/provider-llamacpp/LlamaCppProvider.d.ts

```diff
@@ -32,7 +32,7 @@ import type { LlamaCppModelConfig } from "./common/LlamaCpp_ModelSchema";
  */
 export declare class LlamaCppProvider extends AiProvider<LlamaCppModelConfig> {
     readonly name = "LOCAL_LLAMACPP";
-    readonly taskTypes: readonly ["DownloadModelTask", "UnloadModelTask", "CountTokensTask", "TextGenerationTask", "TextEmbeddingTask", "TextRewriterTask", "TextSummaryTask"];
+    readonly taskTypes: readonly ["DownloadModelTask", "UnloadModelTask", "CountTokensTask", "TextGenerationTask", "TextEmbeddingTask", "TextRewriterTask", "TextSummaryTask", "ToolCallingTask"];
     constructor(tasks?: Record<string, AiProviderRunFn<any, any, LlamaCppModelConfig>>, streamTasks?: Record<string, AiProviderStreamFn<any, any, LlamaCppModelConfig>>, reactiveTasks?: Record<string, AiProviderReactiveRunFn<any, any, LlamaCppModelConfig>>);
 }
 //# sourceMappingURL=LlamaCppProvider.d.ts.map
```
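The widened `taskTypes` tuple is how the provider advertises tool-calling support; wiring it up is a matter of passing the (now extended) registries to the constructor declared above. A sketch, with the import subpath assumed:

```ts
import {
  LlamaCppProvider,
  LLAMACPP_TASKS,          // now includes ToolCallingTask
  LLAMACPP_STREAM_TASKS,   // now includes ToolCallingTask (streaming)
  LLAMACPP_REACTIVE_TASKS, // CountTokensTask only
} from "@workglow/ai-provider/provider-llamacpp"; // subpath assumed from the dist layout

// All three registries are optional per the constructor signature above.
const provider = new LlamaCppProvider(
  LLAMACPP_TASKS,
  LLAMACPP_STREAM_TASKS,
  LLAMACPP_REACTIVE_TASKS
);
```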
package/dist/provider-llamacpp/LlamaCppProvider.d.ts.map

[1 line changed: the declaration map was regenerated for the widened taskTypes tuple; the VLQ "mappings" payload is omitted here.]
package/dist/provider-llamacpp/common/LlamaCpp_JobRunFns.d.ts

```diff
@@ -3,7 +3,7 @@
  * Copyright 2025 Steven Roussey <sroussey@gmail.com>
  * SPDX-License-Identifier: Apache-2.0
  */
-import type { AiProviderReactiveRunFn, AiProviderRunFn, AiProviderStreamFn, CountTokensTaskInput, CountTokensTaskOutput, DownloadModelTaskRunInput, DownloadModelTaskRunOutput, TextEmbeddingTaskInput, TextEmbeddingTaskOutput, TextGenerationTaskInput, TextGenerationTaskOutput, TextRewriterTaskInput, TextRewriterTaskOutput, TextSummaryTaskInput, TextSummaryTaskOutput, UnloadModelTaskRunInput, UnloadModelTaskRunOutput } from "@workglow/ai";
+import type { AiProviderReactiveRunFn, AiProviderRunFn, AiProviderStreamFn, CountTokensTaskInput, CountTokensTaskOutput, DownloadModelTaskRunInput, DownloadModelTaskRunOutput, TextEmbeddingTaskInput, TextEmbeddingTaskOutput, TextGenerationTaskInput, TextGenerationTaskOutput, TextRewriterTaskInput, TextRewriterTaskOutput, TextSummaryTaskInput, TextSummaryTaskOutput, ToolCallingTaskInput, ToolCallingTaskOutput, UnloadModelTaskRunInput, UnloadModelTaskRunOutput } from "@workglow/ai";
 import type { LlamaCppModelConfig } from "./LlamaCpp_ModelSchema";
 export declare const LlamaCpp_Download: AiProviderRunFn<DownloadModelTaskRunInput, DownloadModelTaskRunOutput, LlamaCppModelConfig>;
 export declare const LlamaCpp_Unload: AiProviderRunFn<UnloadModelTaskRunInput, UnloadModelTaskRunOutput, LlamaCppModelConfig>;
```
```diff
@@ -17,6 +17,8 @@ export declare const LlamaCpp_TextSummary_Stream: AiProviderStreamFn<TextSummary
 export declare function disposeLlamaCppResources(): Promise<void>;
 export declare const LlamaCpp_CountTokens: AiProviderRunFn<CountTokensTaskInput, CountTokensTaskOutput, LlamaCppModelConfig>;
 export declare const LlamaCpp_CountTokens_Reactive: AiProviderReactiveRunFn<CountTokensTaskInput, CountTokensTaskOutput, LlamaCppModelConfig>;
+export declare const LlamaCpp_ToolCalling: AiProviderRunFn<ToolCallingTaskInput, ToolCallingTaskOutput, LlamaCppModelConfig>;
+export declare const LlamaCpp_ToolCalling_Stream: AiProviderStreamFn<ToolCallingTaskInput, ToolCallingTaskOutput, LlamaCppModelConfig>;
 export declare const LLAMACPP_TASKS: Record<string, AiProviderRunFn<any, any, LlamaCppModelConfig>>;
 export declare const LLAMACPP_STREAM_TASKS: Record<string, AiProviderStreamFn<any, any, LlamaCppModelConfig>>;
 export declare const LLAMACPP_REACTIVE_TASKS: Record<string, AiProviderReactiveRunFn<any, any, LlamaCppModelConfig>>;
```
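For orientation, the two generic signatures these declarations instantiate can be sketched from how every run function in this diff is declared and called; the canonical definitions live in `@workglow/ai`:

```ts
import type { StreamEvent } from "@workglow/task-graph";

// Signature sketches, inferred from usage in this diff; not the
// authoritative @workglow/ai definitions.
type AiProviderRunFn<In, Out, Model> = (
  input: In,
  model: Model | undefined,
  update_progress: (progress: number, message?: string) => void,
  signal: AbortSignal
) => Promise<Out>;

type AiProviderStreamFn<In, Out, Model> = (
  input: In,
  model: Model | undefined,
  signal: AbortSignal
) => AsyncIterable<StreamEvent<Out>>;
```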
package/dist/provider-llamacpp/common/LlamaCpp_JobRunFns.d.ts.map

[1 line changed: the declaration map was regenerated for the new ToolCallingTaskInput/ToolCallingTaskOutput imports and the LlamaCpp_ToolCalling/LlamaCpp_ToolCalling_Stream declarations; the VLQ "mappings" payload is omitted here.]
package/dist/provider-llamacpp/index.js

```diff
@@ -5,11 +5,12 @@ import {
   LlamaCppModelRecordSchema,
   LlamaCppModelSchema,
   LlamaCppProvider
-} from "../index-
+} from "../index-xc6m9mcp.js";
 import {
   __require
 } from "../index-6j5pq722.js";
 // src/provider-llamacpp/common/LlamaCpp_JobRunFns.ts
+import { filterValidToolCalls } from "@workglow/ai";
 var _sdk;
 async function loadSdk() {
   if (!_sdk) {
```
```diff
@@ -327,6 +328,135 @@ var LlamaCpp_CountTokens = async (input, model, onProgress, signal) => {
 var LlamaCpp_CountTokens_Reactive = async (input, _output, model) => {
   return LlamaCpp_CountTokens(input, model, () => {}, new AbortController().signal);
 };
+function buildLlamaCppFunctions(tools, capturedCalls) {
+  const { defineChatSessionFunction } = _sdk;
+  const functions = {};
+  for (const tool of tools) {
+    const toolName = tool.name;
+    functions[toolName] = defineChatSessionFunction({
+      description: tool.description,
+      params: tool.inputSchema,
+      handler(params) {
+        capturedCalls.push({ name: toolName, input: params ?? {} });
+        return "OK";
+      }
+    });
+  }
+  return functions;
+}
+var LlamaCpp_ToolCalling = async (input, model, update_progress, signal) => {
+  if (!model)
+    throw new Error("Model config is required for ToolCallingTask.");
+  await loadSdk();
+  update_progress(0, "Loading model");
+  const context = await getOrCreateTextContext(model);
+  const capturedCalls = [];
+  const functions = input.toolChoice === "none" ? undefined : buildLlamaCppFunctions(input.tools, capturedCalls);
+  update_progress(10, "Running tool calling");
+  const sequence = context.getSequence();
+  const { LlamaChatSession } = _sdk;
+  const session = new LlamaChatSession({
+    contextSequence: sequence,
+    ...input.systemPrompt && { systemPrompt: input.systemPrompt }
+  });
+  try {
+    const text = await session.prompt(input.prompt, {
+      signal,
+      ...functions && { functions },
+      ...input.temperature !== undefined && { temperature: input.temperature },
+      ...input.maxTokens !== undefined && { maxTokens: input.maxTokens }
+    });
+    const toolCalls = {};
+    capturedCalls.forEach((call, index) => {
+      const id = `call_${index}`;
+      toolCalls[id] = { id, name: call.name, input: call.input };
+    });
+    update_progress(100, "Tool calling complete");
+    return { text, toolCalls: filterValidToolCalls(toolCalls, input.tools) };
+  } finally {
+    sequence.dispose();
+  }
+};
+var LlamaCpp_ToolCalling_Stream = async function* (input, model, signal) {
+  if (!model)
+    throw new Error("Model config is required for ToolCallingTask.");
+  await loadSdk();
+  const context = await getOrCreateTextContext(model);
+  const capturedCalls = [];
+  const functions = input.toolChoice === "none" ? undefined : buildLlamaCppFunctions(input.tools, capturedCalls);
+  const sequence = context.getSequence();
+  const { LlamaChatSession } = _sdk;
+  const session = new LlamaChatSession({
+    contextSequence: sequence,
+    ...input.systemPrompt && { systemPrompt: input.systemPrompt }
+  });
+  const queue = [];
+  let isComplete = false;
+  let completionError;
+  let resolveWait = null;
+  const notifyWaiter = () => {
+    resolveWait?.();
+    resolveWait = null;
+  };
+  let accumulatedText = "";
+  const promptPromise = session.prompt(input.prompt, {
+    signal,
+    ...functions && { functions },
+    onTextChunk: (chunk) => {
+      queue.push(chunk);
+      notifyWaiter();
+    },
+    ...input.temperature !== undefined && { temperature: input.temperature },
+    ...input.maxTokens !== undefined && { maxTokens: input.maxTokens }
+  }).then(() => {
+    isComplete = true;
+    notifyWaiter();
+  }).catch((err) => {
+    completionError = err;
+    isComplete = true;
+    notifyWaiter();
+  });
+  try {
+    while (true) {
+      while (queue.length > 0) {
+        const chunk = queue.shift();
+        accumulatedText += chunk;
+        yield { type: "text-delta", port: "text", textDelta: chunk };
+      }
+      if (isComplete)
+        break;
+      await new Promise((r) => {
+        resolveWait = r;
+      });
+    }
+    while (queue.length > 0) {
+      const chunk = queue.shift();
+      accumulatedText += chunk;
+      yield { type: "text-delta", port: "text", textDelta: chunk };
+    }
+  } finally {
+    await promptPromise.catch(() => {});
+    sequence.dispose();
+  }
+  if (completionError) {
+    if (!signal.aborted)
+      throw completionError;
+    return;
+  }
+  const toolCalls = {};
+  capturedCalls.forEach((call, index) => {
+    const id = `call_${index}`;
+    toolCalls[id] = { id, name: call.name, input: call.input };
+  });
+  const validToolCalls = filterValidToolCalls(toolCalls, input.tools);
+  if (Object.keys(validToolCalls).length > 0) {
+    yield { type: "object-delta", port: "toolCalls", objectDelta: { ...validToolCalls } };
+  }
+  yield {
+    type: "finish",
+    data: { text: accumulatedText, toolCalls: validToolCalls }
+  };
+};
 var LLAMACPP_TASKS = {
   DownloadModelTask: LlamaCpp_Download,
   UnloadModelTask: LlamaCpp_Unload,
```
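Unlike the HTTP providers, node-llama-cpp reports tool use through callbacks: the `defineChatSessionFunction` handlers fire during generation (capturing the call and returning "OK"), and `onTextChunk` delivers text synchronously. `LlamaCpp_ToolCalling_Stream` bridges that into an async generator with the queue/`isComplete`/`resolveWait` trio above. The same pattern in isolation, all names hypothetical:

```ts
// Standalone sketch of the callback-to-async-iterable bridge used above.
function callbackToAsyncIterable(
  run: (emit: (chunk: string) => void) => Promise<void>
): AsyncIterable<string> {
  const queue: string[] = [];
  let done = false;
  let error: unknown;
  let wake: (() => void) | null = null;
  const notify = () => {
    wake?.();
    wake = null;
  };

  run((chunk) => {
    queue.push(chunk); // producer: buffer the chunk...
    notify();          // ...and wake the consumer if it is parked
  })
    .then(() => {
      done = true;
      notify();
    })
    .catch((err) => {
      error = err;
      done = true;
      notify();
    });

  return {
    async *[Symbol.asyncIterator]() {
      while (true) {
        while (queue.length > 0) yield queue.shift()!; // drain buffered chunks
        if (done) break;
        await new Promise<void>((r) => {
          wake = r; // park until the next chunk or completion
        });
      }
      if (error) throw error;
    },
  };
}
```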
```diff
@@ -334,12 +464,14 @@ var LLAMACPP_TASKS = {
   TextGenerationTask: LlamaCpp_TextGeneration,
   TextEmbeddingTask: LlamaCpp_TextEmbedding,
   TextRewriterTask: LlamaCpp_TextRewriter,
-  TextSummaryTask: LlamaCpp_TextSummary
+  TextSummaryTask: LlamaCpp_TextSummary,
+  ToolCallingTask: LlamaCpp_ToolCalling
 };
 var LLAMACPP_STREAM_TASKS = {
   TextGenerationTask: LlamaCpp_TextGeneration_Stream,
   TextRewriterTask: LlamaCpp_TextRewriter_Stream,
-  TextSummaryTask: LlamaCpp_TextSummary_Stream
+  TextSummaryTask: LlamaCpp_TextSummary_Stream,
+  ToolCallingTask: LlamaCpp_ToolCalling_Stream
 };
 var LLAMACPP_REACTIVE_TASKS = {
   CountTokensTask: LlamaCpp_CountTokens_Reactive
```
```diff
@@ -355,6 +487,8 @@ function LLAMACPP_WORKER_JOBRUN_REGISTER() {
 export {
   disposeLlamaCppResources,
   LlamaCpp_Unload,
+  LlamaCpp_ToolCalling_Stream,
+  LlamaCpp_ToolCalling,
   LlamaCpp_TextSummary_Stream,
   LlamaCpp_TextSummary,
   LlamaCpp_TextRewriter_Stream,
```
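These exports feed the worker registration path. The HF Inference variant is embedded verbatim in the package's sourcemap and shows the pattern; `LLAMACPP_WORKER_JOBRUN_REGISTER`, named in the hunk header above, presumably mirrors it with the llama.cpp registries:

```ts
// HFI_Worker.ts, as embedded in the package's sourcemap.
import { globalServiceRegistry, WORKER_SERVER } from "@workglow/util";
import { HFI_STREAM_TASKS, HFI_TASKS } from "./common/HFI_JobRunFns";
import { HfInferenceProvider } from "./HfInferenceProvider";

export function HFI_WORKER_JOBRUN_REGISTER() {
  const workerServer = globalServiceRegistry.get(WORKER_SERVER);
  new HfInferenceProvider(HFI_TASKS, HFI_STREAM_TASKS).registerOnWorkerServer(workerServer);
  workerServer.sendReady();
  console.log("HFI_WORKER_JOBRUN registered");
}
```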
```diff
@@ -377,4 +511,4 @@ export {
   LLAMACPP_DEFAULT_MODELS_DIR
 };
 
-//# debugId=
+//# debugId=4EF99828E101695D64756E2164756E21
```