@workglow/ai-provider 0.0.97 → 0.0.99

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (74)
  1. package/dist/HFT_JobRunFns-ajak33q5.js +60 -0
  2. package/dist/HFT_JobRunFns-ajak33q5.js.map +9 -0
  3. package/dist/anthropic/index.js +11 -92
  4. package/dist/anthropic/index.js.map +4 -7
  5. package/dist/google-gemini/index.js +11 -103
  6. package/dist/google-gemini/index.js.map +4 -7
  7. package/dist/hf-transformers/common/HFT_JobRunFns.d.ts.map +1 -1
  8. package/dist/hf-transformers/index.js +46 -1080
  9. package/dist/hf-transformers/index.js.map +4 -9
  10. package/dist/index-2pvfsjyx.js +857 -0
  11. package/dist/index-2pvfsjyx.js.map +10 -0
  12. package/dist/index-34rcjm0y.js +78 -0
  13. package/dist/index-34rcjm0y.js.map +12 -0
  14. package/dist/index-9w5hs4zc.js +73 -0
  15. package/dist/index-9w5hs4zc.js.map +12 -0
  16. package/dist/index-jd3bbc2x.js +11 -0
  17. package/dist/index-jd3bbc2x.js.map +9 -0
  18. package/dist/index-kctzvn3c.js +75 -0
  19. package/dist/index-kctzvn3c.js.map +12 -0
  20. package/dist/index-m0r2hvfz.js +57 -0
  21. package/dist/index-m0r2hvfz.js.map +10 -0
  22. package/dist/index-rmbnv0rw.js +169 -0
  23. package/dist/index-rmbnv0rw.js.map +11 -0
  24. package/dist/index-tn88s0ke.js +71 -0
  25. package/dist/index-tn88s0ke.js.map +12 -0
  26. package/dist/index-wnmajxg6.js +86 -0
  27. package/dist/index-wnmajxg6.js.map +12 -0
  28. package/dist/index-xyavfrb8.js +97 -0
  29. package/dist/index-xyavfrb8.js.map +12 -0
  30. package/dist/index.browser-jd3bbc2x.js +11 -0
  31. package/dist/index.browser-jd3bbc2x.js.map +9 -0
  32. package/dist/index.d.ts +6 -0
  33. package/dist/index.d.ts.map +1 -1
  34. package/dist/index.js +80 -1369
  35. package/dist/index.js.map +3 -20
  36. package/dist/provider-hf-inference/HFI_Worker.d.ts +7 -0
  37. package/dist/provider-hf-inference/HFI_Worker.d.ts.map +1 -0
  38. package/dist/provider-hf-inference/HfInferenceProvider.d.ts +40 -0
  39. package/dist/provider-hf-inference/HfInferenceProvider.d.ts.map +1 -0
  40. package/dist/provider-hf-inference/common/HFI_Constants.d.ts +7 -0
  41. package/dist/provider-hf-inference/common/HFI_Constants.d.ts.map +1 -0
  42. package/dist/provider-hf-inference/common/HFI_JobRunFns.d.ts +17 -0
  43. package/dist/provider-hf-inference/common/HFI_JobRunFns.d.ts.map +1 -0
  44. package/dist/provider-hf-inference/common/HFI_ModelSchema.d.ts +146 -0
  45. package/dist/provider-hf-inference/common/HFI_ModelSchema.d.ts.map +1 -0
  46. package/dist/provider-hf-inference/index.d.ts +11 -0
  47. package/dist/provider-hf-inference/index.d.ts.map +1 -0
  48. package/dist/provider-hf-inference/index.js +209 -0
  49. package/dist/provider-hf-inference/index.js.map +11 -0
  50. package/dist/provider-llamacpp/LlamaCppProvider.d.ts +38 -0
  51. package/dist/provider-llamacpp/LlamaCppProvider.d.ts.map +1 -0
  52. package/dist/provider-llamacpp/LlamaCpp_Worker.d.ts +7 -0
  53. package/dist/provider-llamacpp/LlamaCpp_Worker.d.ts.map +1 -0
  54. package/dist/provider-llamacpp/common/LlamaCpp_Constants.d.ts +8 -0
  55. package/dist/provider-llamacpp/common/LlamaCpp_Constants.d.ts.map +1 -0
  56. package/dist/provider-llamacpp/common/LlamaCpp_JobRunFns.d.ts +20 -0
  57. package/dist/provider-llamacpp/common/LlamaCpp_JobRunFns.d.ts.map +1 -0
  58. package/dist/provider-llamacpp/common/LlamaCpp_ModelSchema.d.ts +209 -0
  59. package/dist/provider-llamacpp/common/LlamaCpp_ModelSchema.d.ts.map +1 -0
  60. package/dist/provider-llamacpp/index.d.ts +11 -0
  61. package/dist/provider-llamacpp/index.d.ts.map +1 -0
  62. package/dist/provider-llamacpp/index.js +365 -0
  63. package/dist/provider-llamacpp/index.js.map +11 -0
  64. package/dist/provider-ollama/index.browser.js +4 -8
  65. package/dist/provider-ollama/index.browser.js.map +2 -2
  66. package/dist/provider-ollama/index.js +12 -88
  67. package/dist/provider-ollama/index.js.map +4 -7
  68. package/dist/provider-openai/index.js +11 -95
  69. package/dist/provider-openai/index.js.map +4 -7
  70. package/dist/tf-mediapipe/index.js +4 -8
  71. package/dist/tf-mediapipe/index.js.map +2 -2
  72. package/package.json +39 -18
  73. package/dist/ggml/server.d.ts +0 -2
  74. package/dist/ggml/server.d.ts.map +0 -1
@@ -0,0 +1,857 @@
1
+ import {
2
+ HTF_CACHE_NAME
3
+ } from "./index-m0r2hvfz.js";
4
+ import {
5
+ __require
6
+ } from "./index-jd3bbc2x.js";
7
+
8
// src/hf-transformers/common/HFT_JobRunFns.ts
var _transformersSdk;

/**
 * Lazily loads the optional `@sroussey/transformers` SDK, memoizing the
 * module namespace so the dynamic import runs at most once per process.
 * @returns {Promise<object>} the loaded SDK module namespace.
 * @throws {Error} when the package is not installed.
 */
async function loadTransformersSDK() {
  if (_transformersSdk) {
    return _transformersSdk;
  }
  try {
    _transformersSdk = await import("@sroussey/transformers");
  } catch {
    throw new Error("@sroussey/transformers is required for HuggingFace Transformers tasks. Install it with: bun add @sroussey/transformers");
  }
  return _transformersSdk;
}
20
/** Cache of ready pipeline instances, keyed by getPipelineCacheKey(). */
var pipelines = new Map();
/** In-flight pipeline loads, same keys, so concurrent callers share one load. */
var pipelineLoadPromises = new Map();

/** Drops every cached pipeline instance (does not touch the HTTP/model cache). */
function clearPipelineCache() {
  pipelines.clear();
}

/**
 * Builds the cache key for a model's pipeline from its provider_config:
 * `<model_path>:<pipeline>:<dtype>:<device>` with dtype defaulting to "q8"
 * and device to "" when falsy.
 */
function getPipelineCacheKey(model) {
  const cfg = model.provider_config;
  const dtype = cfg.dtype || "q8";
  const device = cfg.device || "";
  return [cfg.model_path, cfg.pipeline, dtype, device].join(":");
}
30
/**
 * Returns a (possibly cached) transformers.js pipeline for `model`.
 * Reuses a finished instance when one exists, joins any in-flight load for
 * the same cache key, and otherwise starts a fresh load that other callers
 * can piggy-back on.
 * @param model model record whose provider_config describes the pipeline.
 * @param onProgress progress callback `(percent, message, details)`.
 * @param options extra pipeline options (may carry `abort_signal`).
 * @param progressScaleMax percent value that "download complete" maps to.
 */
var getPipeline = async (model, onProgress, options = {}, progressScaleMax = 10) => {
  const cacheKey = getPipelineCacheKey(model);
  if (pipelines.has(cacheKey)) {
    return pipelines.get(cacheKey);
  }
  // Join a load that is already running for this key, if any.
  const inFlight = pipelineLoadPromises.get(cacheKey);
  if (inFlight) {
    await inFlight;
    const cached = pipelines.get(cacheKey);
    if (cached) {
      return cached;
    }
  }
  // Start a fresh load and publish it so concurrent callers can share it.
  const loadPromise = doGetPipeline(model, onProgress, options, progressScaleMax, cacheKey).finally(() => {
    pipelineLoadPromises.delete(cacheKey);
  });
  pipelineLoadPromises.set(cacheKey, loadPromise);
  return loadPromise;
};
48
/**
 * Actually loads a transformers.js pipeline for `model`, reporting download
 * progress through `onProgress` and honoring `options.abort_signal`.
 *
 * Progress is an estimate: the real file list is unknown up front, so unseen
 * files are guessed (a few tiny configs, one medium file, one or two large
 * weight shards) and refined as real sizes arrive. Per-file events are
 * throttled to one per THROTTLE_MS, except a file's first and last event.
 * Events are only emitted once a "substantial" (>= 1 MiB) file has been seen,
 * so cache-hit loads stay quiet.
 *
 * On success the pipeline instance is stored in `pipelines` under `cacheKey`.
 * @throws {Error} when aborted before, during, or right after creation.
 */
var doGetPipeline = async (model, onProgress, options, progressScaleMax, cacheKey) => {
  const fileSizes = new Map();
  const fileProgress = new Map();
  const fileCompleted = new Set();
  const fileFirstSent = new Set();
  const fileLastSent = new Set();
  const fileLastEventTime = new Map();
  const pendingProgressByFile = new Map();
  let throttleTimer = null;
  const THROTTLE_MS = 160;
  // Guesses for files we have not seen yet (sizes in bytes).
  const estimatedTinyFiles = 3;
  const estimatedMediumFiles = 1;
  const estimatedTinySize = 1024;
  const estimatedMediumSize = 20971520;
  const estimatedLargeSize = 1073741824;

  // Overall progress in [0, progressScaleMax], computed from files seen so
  // far plus size guesses for files that have not started downloading yet.
  // (This logic was previously duplicated in both progress branches.)
  const computeOverallProgress = () => {
    const tinyThreshold = 102400;
    const mediumThreshold = 104857600;
    let actualLoadedSize = 0;
    let actualTotalSize = 0;
    let seenTinyCount = 0;
    let seenMediumCount = 0;
    let seenLargeCount = 0;
    for (const [trackedFile, size] of fileSizes.entries()) {
      actualTotalSize += size;
      const progress = fileProgress.get(trackedFile) || 0;
      actualLoadedSize += (size * progress) / 100;
      if (size < tinyThreshold) {
        seenTinyCount++;
      } else if (size < mediumThreshold) {
        seenMediumCount++;
      } else {
        seenLargeCount++;
      }
    }
    const unseenTinyFiles = Math.max(0, estimatedTinyFiles - seenTinyCount);
    const unseenMediumFiles = Math.max(0, estimatedMediumFiles - seenMediumCount);
    // Once one large shard is seen, assume a second may follow; else assume one.
    const estimatedLargeFiles = seenLargeCount > 0 ? 2 : 1;
    const unseenLargeFiles = Math.max(0, estimatedLargeFiles - seenLargeCount);
    const adjustedTotalSize =
      actualTotalSize +
      unseenTinyFiles * estimatedTinySize +
      unseenMediumFiles * estimatedMediumSize +
      unseenLargeFiles * estimatedLargeSize;
    const rawProgress = adjustedTotalSize > 0 ? (actualLoadedSize / adjustedTotalSize) * 100 : 0;
    return (rawProgress * progressScaleMax) / 100;
  };

  // Emits one progress event, throttling intermediate events per file.
  // First/last events for a file always go out immediately.
  const sendProgress = (overallProgress, file, fileProgressValue, isFirst, isLast) => {
    const now = Date.now();
    const lastTime = fileLastEventTime.get(file) || 0;
    const timeSinceLastEvent = now - lastTime;
    const shouldThrottle = !isFirst && !isLast && timeSinceLastEvent < THROTTLE_MS;
    if (shouldThrottle) {
      // Remember the most recent value; a single timer flushes all pending files.
      pendingProgressByFile.set(file, {
        progress: overallProgress,
        file,
        fileProgress: fileProgressValue
      });
      if (!throttleTimer) {
        const timeRemaining = Math.max(1, THROTTLE_MS - timeSinceLastEvent);
        throttleTimer = setTimeout(() => {
          for (const [pendingFile, pending] of pendingProgressByFile.entries()) {
            onProgress(Math.round(pending.progress), "Downloading model", {
              file: pendingFile,
              progress: pending.fileProgress
            });
            fileLastEventTime.set(pendingFile, Date.now());
          }
          pendingProgressByFile.clear();
          throttleTimer = null;
        }, timeRemaining);
      }
      return;
    }
    onProgress(Math.round(overallProgress), "Downloading model", {
      file,
      progress: fileProgressValue
    });
    fileLastEventTime.set(file, now);
    pendingProgressByFile.delete(file);
    if (throttleTimer && pendingProgressByFile.size === 0) {
      clearTimeout(throttleTimer);
      throttleTimer = null;
    }
  };

  let hasSeenSubstantialFile = false;
  const substantialFileThreshold = 1048576;
  const abortSignal = options.abort_signal;

  const progressCallback = (status) => {
    if (abortSignal?.aborted) {
      return;
    }
    if (status.status === "progress") {
      const file = status.file;
      const fileTotal = status.total;
      const fileProgressValue = status.progress;
      if (!fileSizes.has(file)) {
        fileSizes.set(file, fileTotal);
        fileProgress.set(file, 0);
        if (fileTotal >= substantialFileThreshold) {
          hasSeenSubstantialFile = true;
        }
      }
      fileProgress.set(file, fileProgressValue);
      const isComplete = fileProgressValue >= 100;
      if (isComplete && !fileCompleted.has(file)) {
        fileCompleted.add(file);
        fileProgress.set(file, 100);
      }
      const overallProgress = computeOverallProgress();
      const isFirst = !fileFirstSent.has(file);
      const isLast = isComplete && !fileLastSent.has(file);
      if (isFirst) {
        fileFirstSent.add(file);
      }
      if (isLast) {
        fileLastSent.add(file);
      }
      if (hasSeenSubstantialFile) {
        sendProgress(overallProgress, file, fileProgressValue, isFirst, isLast);
      }
    } else if (status.status === "done" || status.status === "download") {
      const file = status.file;
      const fileSize = fileSizes.get(file) || 0;
      if (fileSize >= substantialFileThreshold) {
        hasSeenSubstantialFile = true;
      }
      if (!fileCompleted.has(file)) {
        fileCompleted.add(file);
        fileProgress.set(file, 100);
        const overallProgress = computeOverallProgress();
        if (!fileLastSent.has(file)) {
          fileLastSent.add(file);
          if (hasSeenSubstantialFile) {
            sendProgress(overallProgress, file, 100, false, true);
          }
        }
      }
    }
  };

  const pipelineOptions = {
    dtype: model.provider_config.dtype || "q8",
    ...(model.provider_config.use_external_data_format
      ? { useExternalDataFormat: model.provider_config.use_external_data_format }
      : {}),
    ...(model.provider_config.device ? { device: model.provider_config.device } : {}),
    ...options,
    progress_callback: progressCallback
  };
  if (abortSignal?.aborted) {
    throw new Error("Operation aborted before pipeline creation");
  }
  const pipelineType = model.provider_config.pipeline;
  let abortPromise = null;
  if (abortSignal) {
    abortPromise = new Promise((_, reject) => {
      const handleAbort = () => {
        reject(new Error("Pipeline download aborted"));
      };
      if (abortSignal.aborted) {
        handleAbort();
      } else {
        abortSignal.addEventListener("abort", handleAbort, { once: true });
      }
    });
    // If abort fires after the race below has already settled, this rejection
    // would otherwise be unhandled; swallow it here (the race still sees it).
    abortPromise.catch(() => {});
  }
  const { pipeline } = await loadTransformersSDK();
  const pipelinePromise = pipeline(pipelineType, model.provider_config.model_path, pipelineOptions);
  try {
    const result = await (abortSignal ? Promise.race([pipelinePromise, abortPromise]) : pipelinePromise);
    if (abortSignal?.aborted) {
      throw new Error("Operation aborted after pipeline creation");
    }
    pipelines.set(cacheKey, result);
    return result;
  } catch (error) {
    if (abortSignal?.aborted) {
      throw new Error("Pipeline download aborted");
    }
    throw error;
  }
};
257
/**
 * Warms the pipeline for `model` (downloading weights if needed), scaling
 * download progress across the full 0-100 range.
 * @returns `{ model }` echoing the requested model name.
 */
var HFT_Download = async (input, model, onProgress, signal) => {
  await getPipeline(model, onProgress, { abort_signal: signal }, 100);
  return { model: input.model };
};
263
/**
 * Unloads a model: evicts its live pipeline instance (if any) and deletes
 * its entries from the transformers model cache.
 * @returns `{ model }` echoing the requested model name.
 */
var HFT_Unload = async (input, model, onProgress, signal) => {
  const cacheKey = getPipelineCacheKey(model);
  // Map.delete reports whether an instance was actually cached.
  if (pipelines.delete(cacheKey)) {
    onProgress(50, "Pipeline removed from memory");
  }
  await deleteModelCache(model.provider_config.model_path);
  onProgress(100, "Model cache deleted");
  return { model: input.model };
};
276
/**
 * Deletes every Cache API entry whose URL pathname lives under
 * `/<model_path>/` in the shared transformers cache (HTF_CACHE_NAME).
 * Failures on individual entries are logged and skipped.
 * @returns {Promise<number>} count of entries actually deleted (previously
 *   computed but discarded; now returned for callers that want it).
 */
var deleteModelCache = async (model_path) => {
  const cache = await caches.open(HTF_CACHE_NAME);
  const keys = await cache.keys();
  const prefix = `/${model_path}/`;
  const requestsToDelete = keys.filter((request) => new URL(request.url).pathname.startsWith(prefix));
  let deletedCount = 0;
  for (const request of requestsToDelete) {
    try {
      // Some cache implementations match by Request, others only by URL string.
      const deleted = (await cache.delete(request)) || (await cache.delete(request.url));
      if (deleted) {
        deletedCount++;
      }
    } catch (error) {
      console.error(`Failed to delete cache entry: ${request.url}`, error);
    }
  }
  return deletedCount;
};
304
/**
 * Produces embedding vector(s) for `input.text` (string or string[]) with a
 * feature-extraction pipeline, validating vector dimensions against
 * `provider_config.native_dimensions`.
 * @returns `{ vector }` — one typed array, or an array of them for batch input.
 * @throws {Error} on batch-size or dimension mismatch.
 */
var HFT_TextEmbedding = async (input, model, onProgress, signal) => {
  const generateEmbedding = await getPipeline(model, onProgress, {
    abort_signal: signal
  });
  const hfVector = await generateEmbedding(input.text, {
    pooling: model?.provider_config.pooling || "mean",
    normalize: model?.provider_config.normalize,
    ...(signal ? { abort_signal: signal } : {})
  });
  const embeddingDim = model?.provider_config.native_dimensions;
  // Batched input: tensor is [numTexts, vectorDim]; split into per-text rows.
  if (Array.isArray(input.text) && hfVector.dims.length > 1) {
    const [numTexts, vectorDim] = hfVector.dims;
    if (numTexts !== input.text.length) {
      throw new Error(`HuggingFace Embedding tensor batch size does not match input array length: ${numTexts} != ${input.text.length}`);
    }
    if (vectorDim !== embeddingDim) {
      throw new Error(`HuggingFace Embedding vector dimension does not match model dimensions: ${vectorDim} != ${embeddingDim}`);
    }
    const vectors = [];
    for (let i = 0; i < numTexts; i++) {
      vectors.push(hfVector[i].data);
    }
    return { vector: vectors };
  }
  // Single input: the flat tensor size must equal the model's dimensions.
  if (hfVector.size !== embeddingDim) {
    console.warn(`HuggingFace Embedding vector length does not match model dimensions v${hfVector.size} != m${embeddingDim}`, input, hfVector);
    throw new Error(`HuggingFace Embedding vector length does not match model dimensions v${hfVector.size} != m${embeddingDim}`);
  }
  return { vector: hfVector.data };
};
332
/**
 * Classifies `input.text`. For zero-shot pipelines, `input.candidateLabels`
 * supplies the label set; otherwise a plain text-classification pipeline is
 * used with optional `input.maxCategories` as `top_k`.
 * @returns `{ categories: [{ label, score }, ...] }`
 * @throws {Error} when zero-shot is selected without candidate labels.
 */
var HFT_TextClassification = async (input, model, onProgress, signal) => {
  if (model?.provider_config?.pipeline === "zero-shot-classification") {
    if (!Array.isArray(input.candidateLabels) || input.candidateLabels.length === 0) {
      throw new Error("Zero-shot text classification requires candidate labels");
    }
    const zeroShotClassifier = await getPipeline(model, onProgress, {
      abort_signal: signal
    });
    const zsResult = await zeroShotClassifier(input.text, input.candidateLabels, {});
    // Zero-shot output carries parallel labels/scores arrays.
    const categories = zsResult.labels.map((label, idx) => ({
      label,
      score: zsResult.scores[idx]
    }));
    return { categories };
  }
  const classify = await getPipeline(model, onProgress, {
    abort_signal: signal
  });
  const result = await classify(input.text, {
    top_k: input.maxCategories || undefined,
    ...(signal ? { abort_signal: signal } : {})
  });
  // top_k queries return a nested array; use the first batch entry then.
  const raw = Array.isArray(result[0]) ? result[0] : result;
  return {
    categories: raw.map((category) => ({
      label: category.label,
      score: category.score
    }))
  };
};
370
/**
 * Detects the language(s) of `input.text` with a text-classification
 * pipeline; `input.maxLanguages` limits results via `top_k`.
 * @returns `{ languages: [{ language, score }, ...] }`
 */
var HFT_TextLanguageDetection = async (input, model, onProgress, signal) => {
  const classify = await getPipeline(model, onProgress, {
    abort_signal: signal
  });
  const result = await classify(input.text, {
    top_k: input.maxLanguages || undefined,
    ...(signal ? { abort_signal: signal } : {})
  });
  // top_k queries return a nested array; use the first batch entry then.
  const raw = Array.isArray(result[0]) ? result[0] : result;
  return {
    languages: raw.map((category) => ({
      language: category.label,
      score: category.score
    }))
  };
};
393
/**
 * Runs named-entity recognition on `input.text`; `input.blockList` maps to
 * the pipeline's `ignore_labels`.
 * @returns `{ entities: [{ entity, score, word }, ...] }`
 */
var HFT_TextNamedEntityRecognition = async (input, model, onProgress, signal) => {
  const recognizer = await getPipeline(model, onProgress, {
    abort_signal: signal
  });
  const results = await recognizer(input.text, {
    ignore_labels: input.blockList,
    ...(signal ? { abort_signal: signal } : {})
  });
  const entities = Array.isArray(results) ? results : [results];
  return {
    entities: entities.map(({ entity, score, word }) => ({ entity, score, word }))
  };
};
415
/**
 * Fills the mask token(s) in `input.text` with a fill-mask pipeline.
 * @returns `{ predictions: [{ entity, score, sequence }, ...] }` where
 *   `entity` is the predicted token string.
 */
var HFT_TextFillMask = async (input, model, onProgress, signal) => {
  const unmasker = await getPipeline(model, onProgress, {
    abort_signal: signal
  });
  const results = await unmasker(input.text);
  const predictions = Array.isArray(results) ? results : [results];
  return {
    predictions: predictions.map((prediction) => ({
      entity: prediction.token_str,
      score: prediction.score,
      sequence: prediction.sequence
    }))
  };
};
434
/**
 * Generates text from `input.prompt`, streaming token progress through
 * `onProgress` via a TextStreamer.
 * @returns `{ text }` — the generated text (last turn's content for
 *   chat-style pipelines that return message arrays).
 */
var HFT_TextGeneration = async (input, model, onProgress, signal) => {
  const generateText = await getPipeline(model, onProgress, {
    abort_signal: signal
  });
  const streamer = createTextStreamer(generateText.tokenizer, onProgress, signal);
  const raw = await generateText(input.prompt, {
    streamer,
    ...(signal ? { abort_signal: signal } : {})
  });
  const results = Array.isArray(raw) ? raw : [raw];
  let text = results[0]?.generated_text;
  // Chat-style pipelines return a message array; take the final turn's content.
  if (Array.isArray(text)) {
    text = text[text.length - 1]?.content;
  }
  return { text };
};
454
/**
 * Translates `input.text` from `input.source_lang` to `input.target_lang`,
 * streaming token progress through `onProgress`.
 * @returns `{ text, target_lang }`
 */
var HFT_TextTranslation = async (input, model, onProgress, signal) => {
  const translate = await getPipeline(model, onProgress, {
    abort_signal: signal
  });
  // Fix: forward the abort signal to the streamer (it was dropped here,
  // unlike in HFT_TextGeneration) so streaming stops on cancellation.
  const streamer = createTextStreamer(translate.tokenizer, onProgress, signal);
  const result = await translate(input.text, {
    src_lang: input.source_lang,
    tgt_lang: input.target_lang,
    streamer,
    ...(signal ? { abort_signal: signal } : {})
  });
  const translatedText = Array.isArray(result)
    ? result[0]?.translation_text || ""
    : result?.translation_text || "";
  return {
    text: translatedText,
    target_lang: input.target_lang
  };
};
471
/**
 * Rewrites `input.text` using an optional leading `input.prompt`
 * (joined with a newline), streaming token progress through `onProgress`.
 * @returns `{ text }`
 * @throws {Error} when the model echoes the prompt+text back unchanged.
 */
var HFT_TextRewriter = async (input, model, onProgress, signal) => {
  const generateText = await getPipeline(model, onProgress, {
    abort_signal: signal
  });
  // Fix: forward the abort signal to the streamer (previously omitted here,
  // unlike in HFT_TextGeneration) so streaming stops on cancellation.
  const streamer = createTextStreamer(generateText.tokenizer, onProgress, signal);
  const promptedText = (input.prompt ? input.prompt + "\n" : "") + input.text;
  let results = await generateText(promptedText, {
    streamer,
    ...(signal ? { abort_signal: signal } : {})
  });
  if (!Array.isArray(results)) {
    results = [results];
  }
  let text = results[0]?.generated_text;
  // Chat-style pipelines return a message array; take the final turn's content.
  if (Array.isArray(text)) {
    text = text[text.length - 1]?.content;
  }
  // An output identical to the input means nothing new was generated.
  if (text === promptedText) {
    throw new Error("Rewriter failed to generate new text");
  }
  return { text };
};
496
/**
 * Summarizes `input.text`, streaming token progress through `onProgress`.
 * @returns `{ text }` — the summary, or "" when the pipeline returns none.
 */
var HFT_TextSummary = async (input, model, onProgress, signal) => {
  const generateSummary = await getPipeline(model, onProgress, {
    abort_signal: signal
  });
  // Fix: forward the abort signal to the streamer (previously omitted here,
  // unlike in HFT_TextGeneration) so streaming stops on cancellation.
  const streamer = createTextStreamer(generateSummary.tokenizer, onProgress, signal);
  const result = await generateSummary(input.text, {
    streamer,
    ...(signal ? { abort_signal: signal } : {})
  });
  const summaryText = Array.isArray(result)
    ? result[0]?.summary_text || ""
    : result?.summary_text || "";
  return { text: summaryText };
};
515
/**
 * Answers `input.question` given `input.context` with a question-answering
 * pipeline, streaming token progress through `onProgress`.
 * @returns `{ text }` — the answer, or "" when the pipeline returns none.
 */
var HFT_TextQuestionAnswer = async (input, model, onProgress, signal) => {
  const generateAnswer = await getPipeline(model, onProgress, {
    abort_signal: signal
  });
  // Fix: forward the abort signal to the streamer (previously omitted here,
  // unlike in HFT_TextGeneration) so streaming stops on cancellation.
  const streamer = createTextStreamer(generateAnswer.tokenizer, onProgress, signal);
  const result = await generateAnswer(input.question, input.context, {
    streamer,
    ...(signal ? { abort_signal: signal } : {})
  });
  const answerText = Array.isArray(result)
    ? result[0]?.answer || ""
    : result?.answer || "";
  return { text: answerText };
};
534
/**
 * Segments `input.image`, honoring `input.threshold` / `input.maskThreshold`.
 * @returns `{ masks: [{ label, score, mask }, ...] }`
 */
var HFT_ImageSegmentation = async (input, model, onProgress, signal) => {
  const segmenter = await getPipeline(model, onProgress, {
    abort_signal: signal
  });
  const result = await segmenter(input.image, {
    threshold: input.threshold,
    mask_threshold: input.maskThreshold,
    ...(signal ? { abort_signal: signal } : {})
  });
  const masks = Array.isArray(result) ? result : [result];
  // NOTE(review): mask pixel data is not serialized — each mask is an empty
  // object placeholder; confirm whether callers expect actual mask data.
  const processedMasks = masks.map((mask) => ({
    label: mask.label || "",
    score: mask.score || 0,
    mask: {}
  }));
  return { masks: processedMasks };
};
553
/**
 * Generates a caption for `input.image`; `input.maxTokens` caps generation.
 * @returns `{ text }` — the caption, or "" when none is produced.
 */
var HFT_ImageToText = async (input, model, onProgress, signal) => {
  const captioner = await getPipeline(model, onProgress, {
    abort_signal: signal
  });
  const result = await captioner(input.image, {
    max_new_tokens: input.maxTokens,
    ...(signal ? { abort_signal: signal } : {})
  });
  const first = Array.isArray(result) ? result[0] : result;
  return { text: first?.generated_text || "" };
};
566
/**
 * Removes the background from `input.image`.
 * @returns `{ image }` — the result encoded as base64 ("" when unsupported).
 */
var HFT_BackgroundRemoval = async (input, model, onProgress, signal) => {
  const remover = await getPipeline(model, onProgress, {
    abort_signal: signal
  });
  const result = await remover(input.image, {
    ...(signal ? { abort_signal: signal } : {})
  });
  const resultImage = Array.isArray(result) ? result[0] : result;
  return { image: imageToBase64(resultImage) };
};
578
/**
 * Computes an embedding vector for `input.image`. The abort signal is only
 * applied to the pipeline download, not to inference itself.
 * @returns `{ vector }`
 */
var HFT_ImageEmbedding = async (input, model, onProgress, signal) => {
  const embedder = await getPipeline(model, onProgress, {
    abort_signal: signal
  });
  const embedding = await embedder(input.image);
  return { vector: embedding.data };
};
587
/**
 * Classifies `input.image`. Zero-shot pipelines require `input.categories`;
 * otherwise `input.maxCategories` limits results via `top_k`.
 * @returns `{ categories: [{ label, score }, ...] }`
 * @throws {Error} when zero-shot is selected without categories.
 */
var HFT_ImageClassification = async (input, model, onProgress, signal) => {
  // Normalizes single-or-array pipeline output into label/score pairs.
  const toCategories = (raw) =>
    (Array.isArray(raw) ? raw : [raw]).map(({ label, score }) => ({ label, score }));
  if (model?.provider_config?.pipeline === "zero-shot-image-classification") {
    if (!Array.isArray(input.categories) || input.categories.length === 0) {
      console.warn("Zero-shot image classification requires categories", input);
      throw new Error("Zero-shot image classification requires categories");
    }
    const zeroShotClassifier = await getPipeline(model, onProgress, {
      abort_signal: signal
    });
    const zsResult = await zeroShotClassifier(input.image, input.categories, {});
    return { categories: toCategories(zsResult) };
  }
  const classifier = await getPipeline(model, onProgress, {
    abort_signal: signal
  });
  const result = await classifier(input.image, {
    top_k: input.maxCategories,
    ...(signal ? { abort_signal: signal } : {})
  });
  return { categories: toCategories(result) };
};
620
/**
 * Detects objects in `input.image`. Zero-shot pipelines require
 * `input.labels`; `input.threshold` filters low-confidence detections.
 * @returns `{ detections: [{ label, score, box }, ...] }`
 * @throws {Error} when zero-shot is selected without labels.
 */
var HFT_ObjectDetection = async (input, model, onProgress, signal) => {
  // Normalizes single-or-array pipeline output into detection records.
  const toDetections = (raw) =>
    (Array.isArray(raw) ? raw : [raw]).map(({ label, score, box }) => ({ label, score, box }));
  if (model?.provider_config?.pipeline === "zero-shot-object-detection") {
    if (!input.labels || !Array.isArray(input.labels) || input.labels.length === 0) {
      throw new Error("Zero-shot object detection requires labels");
    }
    const zeroShotDetector = await getPipeline(model, onProgress, {
      abort_signal: signal
    });
    const zsResult = await zeroShotDetector(input.image, Array.from(input.labels), {
      threshold: input.threshold
    });
    return { detections: toDetections(zsResult) };
  }
  const detector = await getPipeline(model, onProgress, {
    abort_signal: signal
  });
  const result = await detector(input.image, {
    threshold: input.threshold,
    ...(signal ? { abort_signal: signal } : {})
  });
  return { detections: toDetections(result) };
};
656
/** Best-effort base64 encoding of an image object; "" when toBase64 is absent or returns a falsy value. */
function imageToBase64(image) {
  const encoded = image.toBase64?.();
  return encoded || "";
}
659
/**
 * Builds a TextStreamer that turns decoded-token callbacks into progress
 * events. Total token count is unknown, so progress follows the asymptotic
 * curve 100 * (1 - e^(-0.05 * n)), capped at 100.
 * Requires the transformers SDK to already be loaded (via getPipeline).
 */
function createTextStreamer(tokenizer, updateProgress, signal) {
  const { TextStreamer } = _transformersSdk;
  let tokenCount = 0;
  const reportChunk = (text) => {
    tokenCount += 1;
    const curve = 100 * (1 - Math.exp(-0.05 * tokenCount));
    const progress = Math.round(Math.min(curve, 100));
    updateProgress(progress, "Generating", { text, progress });
  };
  return new TextStreamer(tokenizer, {
    skip_prompt: true,
    decode_kwargs: { skip_special_tokens: true },
    callback_function: reportChunk,
    ...(signal ? { abort_signal: signal } : {})
  });
}
674
/**
 * Async event queue bridging push-style producer callbacks to an async
 * iterable consumer. Events pushed before the consumer asks are buffered;
 * a waiting consumer is woken immediately. `done()` ends iteration;
 * `error(e)` wakes a pending read as done and makes subsequent reads reject.
 * Single-consumer: at most one read may be pending at a time.
 */
function createStreamEventQueue() {
  const buffer = [];
  let waiting = null;
  let finished = false;
  let failure = null;

  // Delivers `result` to the pending consumer if there is one.
  const handOff = (result) => {
    if (!waiting) {
      return false;
    }
    const wake = waiting;
    waiting = null;
    wake(result);
    return true;
  };

  const push = (event) => {
    if (!handOff({ value: event, done: false })) {
      buffer.push(event);
    }
  };

  const done = () => {
    finished = true;
    handOff({ value: undefined, done: true });
  };

  const error = (e) => {
    failure = e;
    handOff({ value: undefined, done: true });
  };

  const iterable = {
    [Symbol.asyncIterator]() {
      return {
        next() {
          // Error wins over buffered events, matching the producer contract.
          if (failure) {
            return Promise.reject(failure);
          }
          if (buffer.length > 0) {
            return Promise.resolve({ value: buffer.shift(), done: false });
          }
          if (finished) {
            return Promise.resolve({ value: undefined, done: true });
          }
          return new Promise((resolveNext) => {
            waiting = resolveNext;
          });
        }
      };
    }
  };

  return { push, done, error, iterable };
}
725
/**
 * Builds a TextStreamer that forwards each decoded text chunk into a stream
 * event queue as a `text-delta` event on the "text" port.
 * Requires the transformers SDK to already be loaded (via getPipeline).
 */
function createStreamingTextStreamer(tokenizer, queue, signal) {
  const { TextStreamer } = _transformersSdk;
  const streamerOptions = {
    skip_prompt: true,
    decode_kwargs: { skip_special_tokens: true },
    callback_function: (text) => {
      queue.push({ type: "text-delta", port: "text", textDelta: text });
    }
  };
  if (signal) {
    streamerOptions.abort_signal = signal;
  }
  return new TextStreamer(tokenizer, streamerOptions);
}
736
/**
 * Streaming text generation: yields `text-delta` events as tokens decode,
 * then a final `finish` event. Download progress is suppressed.
 */
var HFT_TextGeneration_Stream = async function* (input, model, signal) {
  const ignoreProgress = () => {};
  const generateText = await getPipeline(model, ignoreProgress, {
    abort_signal: signal
  });
  const events = createStreamEventQueue();
  const streamer = createStreamingTextStreamer(generateText.tokenizer, events, signal);
  // Route completion/failure into the queue so the yield loop terminates.
  const pipelineDone = generateText(input.prompt, {
    streamer,
    ...(signal ? { abort_signal: signal } : {})
  }).then(
    () => events.done(),
    (err) => events.error(err)
  );
  yield* events.iterable;
  await pipelineDone;
  yield { type: "finish", data: {} };
};
751
/**
 * Streaming rewrite: yields `text-delta` events as tokens decode, then a
 * final `finish` event. `input.prompt` (if any) is prepended to `input.text`
 * with a newline. Download progress is suppressed.
 */
var HFT_TextRewriter_Stream = async function* (input, model, signal) {
  const noopProgress = () => {};
  const generateText = await getPipeline(model, noopProgress, {
    abort_signal: signal
  });
  const queue = createStreamEventQueue();
  // Fix: forward the abort signal to the streamer (previously dropped here,
  // unlike HFT_TextGeneration_Stream) so streaming stops on cancellation.
  const streamer = createStreamingTextStreamer(generateText.tokenizer, queue, signal);
  const promptedText = (input.prompt ? input.prompt + "\n" : "") + input.text;
  // Route completion/failure into the queue so the yield loop terminates.
  const pipelinePromise = generateText(promptedText, {
    streamer,
    ...(signal ? { abort_signal: signal } : {})
  }).then(() => queue.done(), (err) => queue.error(err));
  yield* queue.iterable;
  await pipelinePromise;
  yield { type: "finish", data: {} };
};
768
/**
 * Streaming summarization: yields `text-delta` events as tokens decode,
 * then a final `finish` event. Download progress is suppressed.
 */
var HFT_TextSummary_Stream = async function* (input, model, signal) {
  const noopProgress = () => {};
  const generateSummary = await getPipeline(model, noopProgress, {
    abort_signal: signal
  });
  const queue = createStreamEventQueue();
  // Fix: forward the abort signal to the streamer (previously dropped here,
  // unlike HFT_TextGeneration_Stream) so streaming stops on cancellation.
  const streamer = createStreamingTextStreamer(generateSummary.tokenizer, queue, signal);
  // Route completion/failure into the queue so the yield loop terminates.
  const pipelinePromise = generateSummary(input.text, {
    streamer,
    ...(signal ? { abort_signal: signal } : {})
  }).then(() => queue.done(), (err) => queue.error(err));
  yield* queue.iterable;
  await pipelinePromise;
  yield { type: "finish", data: {} };
};
783
// Streaming question answering. Streams decoded output as "text-delta"
// events, then emits a "finish" event carrying the final answer text
// extracted from the pipeline result.
//
// Fix: the AbortSignal is now forwarded to createStreamingTextStreamer as
// well (previously only the pipeline call received it), matching
// HFT_TextGeneration_Stream so token decoding also halts on cancellation.
var HFT_TextQuestionAnswer_Stream = async function* (input, model, signal) {
  const noopProgress = () => {};
  const generateAnswer = await getPipeline(model, noopProgress, {
    abort_signal: signal
  });
  const queue = createStreamEventQueue();
  const streamer = createStreamingTextStreamer(generateAnswer.tokenizer, queue, signal);
  let pipelineResult;
  // Capture the result for the finish event; route failures into the queue
  // so the consumer observes them while draining the iterable below.
  const pipelinePromise = generateAnswer(input.question, input.context, {
    streamer,
    ...signal ? { abort_signal: signal } : {}
  }).then((result) => {
    pipelineResult = result;
    queue.done();
  }, (err) => queue.error(err));
  yield* queue.iterable;
  await pipelinePromise;
  // QA pipelines may return a single result object or an array of results;
  // pull the answer text out of either shape, defaulting to "".
  let answerText = "";
  if (pipelineResult !== undefined) {
    if (Array.isArray(pipelineResult)) {
      answerText = pipelineResult[0]?.answer ?? "";
    } else {
      answerText = pipelineResult?.answer ?? "";
    }
  }
  yield { type: "finish", data: { text: answerText } };
};
810
// Streaming translation. Streams the translated text as "text-delta" events
// and ends with a "finish" event that echoes the target language.
//
// Fix: the AbortSignal is now forwarded to createStreamingTextStreamer as
// well (previously only the pipeline call received it), matching
// HFT_TextGeneration_Stream so token decoding also halts on cancellation.
var HFT_TextTranslation_Stream = async function* (input, model, signal) {
  const noopProgress = () => {};
  const translate = await getPipeline(model, noopProgress, {
    abort_signal: signal
  });
  const queue = createStreamEventQueue();
  const streamer = createStreamingTextStreamer(translate.tokenizer, queue, signal);
  // Resolve the queue on success, route failures into it so the consumer
  // observes errors while draining the iterable below.
  const pipelinePromise = translate(input.text, {
    src_lang: input.source_lang,
    tgt_lang: input.target_lang,
    streamer,
    ...signal ? { abort_signal: signal } : {}
  }).then(() => queue.done(), (err) => queue.error(err));
  yield* queue.iterable;
  await pipelinePromise;
  yield { type: "finish", data: { target_lang: input.target_lang } };
};
827
// Registry mapping job-runner task type identifiers to their non-streaming
// HF Transformers run functions. Keys are the task names dispatched by the
// job runner; values are the async run functions defined above in this file.
var HFT_TASKS = {
  DownloadModelTask: HFT_Download,
  UnloadModelTask: HFT_Unload,
  TextEmbeddingTask: HFT_TextEmbedding,
  TextGenerationTask: HFT_TextGeneration,
  TextQuestionAnswerTask: HFT_TextQuestionAnswer,
  TextLanguageDetectionTask: HFT_TextLanguageDetection,
  TextClassificationTask: HFT_TextClassification,
  TextFillMaskTask: HFT_TextFillMask,
  TextNamedEntityRecognitionTask: HFT_TextNamedEntityRecognition,
  TextRewriterTask: HFT_TextRewriter,
  TextSummaryTask: HFT_TextSummary,
  TextTranslationTask: HFT_TextTranslation,
  ImageSegmentationTask: HFT_ImageSegmentation,
  ImageToTextTask: HFT_ImageToText,
  BackgroundRemovalTask: HFT_BackgroundRemoval,
  ImageEmbeddingTask: HFT_ImageEmbedding,
  ImageClassificationTask: HFT_ImageClassification,
  ObjectDetectionTask: HFT_ObjectDetection
};
847
// Registry of streaming task variants (async generators yielding
// "text-delta" events and a final "finish" event). Only the text-producing
// tasks defined above have streaming implementations.
var HFT_STREAM_TASKS = {
  TextGenerationTask: HFT_TextGeneration_Stream,
  TextRewriterTask: HFT_TextRewriter_Stream,
  TextSummaryTask: HFT_TextSummary_Stream,
  TextQuestionAnswerTask: HFT_TextQuestionAnswer_Stream,
  TextTranslationTask: HFT_TextTranslation_Stream
};
854
+
855
+ export { clearPipelineCache, HFT_Download, HFT_Unload, HFT_TextEmbedding, HFT_TextClassification, HFT_TextLanguageDetection, HFT_TextNamedEntityRecognition, HFT_TextFillMask, HFT_TextGeneration, HFT_TextTranslation, HFT_TextRewriter, HFT_TextSummary, HFT_TextQuestionAnswer, HFT_ImageSegmentation, HFT_ImageToText, HFT_BackgroundRemoval, HFT_ImageEmbedding, HFT_ImageClassification, HFT_ObjectDetection, HFT_TextGeneration_Stream, HFT_TextRewriter_Stream, HFT_TextSummary_Stream, HFT_TextQuestionAnswer_Stream, HFT_TextTranslation_Stream, HFT_TASKS, HFT_STREAM_TASKS };
856
+
857
+ //# debugId=4AF2ECA21E0D4F3464756E2164756E21