@workglow/test 0.0.81 → 0.0.83
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/browser.js +38 -38
- package/dist/browser.js.map +4 -4
- package/dist/bun.js +38 -38
- package/dist/bun.js.map +4 -4
- package/dist/node.js +38 -38
- package/dist/node.js.map +4 -4
- package/package.json +19 -19
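
Every hunk below is the same one-line change, repeated once per bundled sample model: the record key providerConfig becomes provider_config in the registration helpers registerMediaPipeTfJsLocalModels and registerHuggingfaceLocalModels, and the sourcemap debugId at the tail of each bundle is now populated. A minimal sketch of one record after the rename, with values copied from the first hunk and imports taken from the bundled sources embedded in browser.js.map (assumed to be part of the package's public API):

import { getGlobalModelRepository } from "@workglow/ai";
import { TENSORFLOW_MEDIAPIPE, type TFMPModelRecord } from "@workglow/ai-provider";

// The Universal Sentence Encoder sample record as 0.0.83 registers it;
// 0.0.81 spelled the same field `providerConfig`.
const sample: TFMPModelRecord = {
  model_id: "media-pipe:Universal Sentence Encoder",
  title: "Universal Sentence Encoder",
  description: "Universal Sentence Encoder",
  tasks: ["TextEmbeddingTask"],
  provider: TENSORFLOW_MEDIAPIPE,
  provider_config: {
    taskEngine: "text",
    pipeline: "text-embedder",
    modelPath:
      "https://storage.googleapis.com/mediapipe-tasks/text_embedder/universal_sentence_encoder.tflite",
  },
  metadata: {},
};

await getGlobalModelRepository().addModel(sample);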
package/dist/browser.js
CHANGED
@@ -50,7 +50,7 @@ async function registerMediaPipeTfJsLocalModels() {
  50   50 |       description: "Universal Sentence Encoder",
  51   51 |       tasks: ["TextEmbeddingTask"],
  52   52 |       provider: TENSORFLOW_MEDIAPIPE,
  53      | -     providerConfig: {
       53 | +     provider_config: {
  54   54 |         taskEngine: "text",
  55   55 |         pipeline: "text-embedder",
  56   56 |         modelPath: "https://storage.googleapis.com/mediapipe-tasks/text_embedder/universal_sentence_encoder.tflite"

@@ -63,7 +63,7 @@ async function registerMediaPipeTfJsLocalModels() {
  63   63 |       description: "BERT-based text classification model",
  64   64 |       tasks: ["TextClassificationTask"],
  65   65 |       provider: TENSORFLOW_MEDIAPIPE,
  66      | -     providerConfig: {
       66 | +     provider_config: {
  67   67 |         taskEngine: "text",
  68   68 |         pipeline: "text-classifier",
  69   69 |         modelPath: "https://storage.googleapis.com/mediapipe-models/text_classifier/bert_classifier/float32/1/bert_classifier.tflite"

@@ -76,7 +76,7 @@ async function registerMediaPipeTfJsLocalModels() {
  76   76 |       description: "Language detection model",
  77   77 |       tasks: ["TextLanguageDetectionTask"],
  78   78 |       provider: TENSORFLOW_MEDIAPIPE,
  79      | -     providerConfig: {
       79 | +     provider_config: {
  80   80 |         taskEngine: "text",
  81   81 |         pipeline: "text-language-detector",
  82   82 |         modelPath: "https://storage.googleapis.com/mediapipe-models/language_detector/language_detector/float32/1/language_detector.tflite"

@@ -89,7 +89,7 @@ async function registerMediaPipeTfJsLocalModels() {
  89   89 |       description: "Lightweight image classification model",
  90   90 |       tasks: ["ImageClassificationTask"],
  91   91 |       provider: TENSORFLOW_MEDIAPIPE,
  92      | -     providerConfig: {
       92 | +     provider_config: {
  93   93 |         taskEngine: "vision",
  94   94 |         pipeline: "vision-image-classifier",
  95   95 |         modelPath: "https://storage.googleapis.com/mediapipe-models/image_classifier/efficientnet_lite0/float32/1/efficientnet_lite0.tflite"

@@ -102,7 +102,7 @@ async function registerMediaPipeTfJsLocalModels() {
 102  102 |       description: "Lightweight image embedding model",
 103  103 |       tasks: ["ImageEmbeddingTask"],
 104  104 |       provider: TENSORFLOW_MEDIAPIPE,
 105      | -     providerConfig: {
      105 | +     provider_config: {
 106  106 |         taskEngine: "vision",
 107  107 |         pipeline: "vision-image-embedder",
 108  108 |         modelPath: "https://storage.googleapis.com/mediapipe-models/image_embedder/mobilenet_v3_small/float32/1/mobilenet_v3_small.tflite"

@@ -115,7 +115,7 @@ async function registerMediaPipeTfJsLocalModels() {
 115  115 |       description: "Lightweight object detection model",
 116  116 |       tasks: ["ObjectDetectionTask"],
 117  117 |       provider: TENSORFLOW_MEDIAPIPE,
 118      | -     providerConfig: {
      118 | +     provider_config: {
 119  119 |         taskEngine: "vision",
 120  120 |         pipeline: "vision-object-detector",
 121  121 |         modelPath: "https://storage.googleapis.com/mediapipe-models/object_detector/efficientdet_lite0/float32/1/efficientdet_lite0.tflite"

@@ -128,7 +128,7 @@ async function registerMediaPipeTfJsLocalModels() {
 128  128 |       description: "Image segmentation model",
 129  129 |       tasks: ["ImageSegmentationTask"],
 130  130 |       provider: TENSORFLOW_MEDIAPIPE,
 131      | -     providerConfig: {
      131 | +     provider_config: {
 132  132 |         taskEngine: "vision",
 133  133 |         pipeline: "vision-image-segmenter",
 134  134 |         modelPath: "https://storage.googleapis.com/mediapipe-models/image_segmenter/deeplab_v3/float32/1/deeplab_v3.tflite"

@@ -141,7 +141,7 @@ async function registerMediaPipeTfJsLocalModels() {
 141  141 |       description: "Audio event classification model",
 142  142 |       tasks: ["AudioClassificationTask"],
 143  143 |       provider: TENSORFLOW_MEDIAPIPE,
 144      | -     providerConfig: {
      144 | +     provider_config: {
 145  145 |         taskEngine: "audio",
 146  146 |         pipeline: "audio-classifier",
 147  147 |         modelPath: "https://storage.googleapis.com/mediapipe-models/audio_classifier/yamnet/float32/1/yamnet.tflite"

@@ -154,7 +154,7 @@ async function registerMediaPipeTfJsLocalModels() {
 154  154 |       description: "Recognizes hand gestures (thumbs up, victory, etc.)",
 155  155 |       tasks: ["GestureRecognizerTask"],
 156  156 |       provider: TENSORFLOW_MEDIAPIPE,
 157      | -     providerConfig: {
      157 | +     provider_config: {
 158  158 |         taskEngine: "vision",
 159  159 |         pipeline: "vision-gesture-recognizer",
 160  160 |         modelPath: "https://storage.googleapis.com/mediapipe-models/gesture_recognizer/gesture_recognizer/float16/1/gesture_recognizer.task"

@@ -167,7 +167,7 @@ async function registerMediaPipeTfJsLocalModels() {
 167  167 |       description: "Detects 21 hand landmarks",
 168  168 |       tasks: ["HandLandmarkerTask"],
 169  169 |       provider: TENSORFLOW_MEDIAPIPE,
 170      | -     providerConfig: {
      170 | +     provider_config: {
 171  171 |         taskEngine: "vision",
 172  172 |         pipeline: "vision-hand-landmarker",
 173  173 |         modelPath: "https://storage.googleapis.com/mediapipe-models/hand_landmarker/hand_landmarker/float16/1/hand_landmarker.task"

@@ -180,7 +180,7 @@ async function registerMediaPipeTfJsLocalModels() {
 180  180 |       description: "Detects faces with bounding boxes and keypoints",
 181  181 |       tasks: ["FaceDetectorTask"],
 182  182 |       provider: TENSORFLOW_MEDIAPIPE,
 183      | -     providerConfig: {
      183 | +     provider_config: {
 184  184 |         taskEngine: "vision",
 185  185 |         pipeline: "vision-face-detector",
 186  186 |         modelPath: "https://storage.googleapis.com/mediapipe-models/face_detector/blaze_face_short_range/float16/1/blaze_face_short_range.tflite"

@@ -193,7 +193,7 @@ async function registerMediaPipeTfJsLocalModels() {
 193  193 |       description: "Detects 478 facial landmarks with blendshapes",
 194  194 |       tasks: ["FaceLandmarkerTask"],
 195  195 |       provider: TENSORFLOW_MEDIAPIPE,
 196      | -     providerConfig: {
      196 | +     provider_config: {
 197  197 |         taskEngine: "vision",
 198  198 |         pipeline: "vision-face-landmarker",
 199  199 |         modelPath: "https://storage.googleapis.com/mediapipe-models/face_landmarker/face_landmarker/float16/1/face_landmarker.task"

@@ -206,7 +206,7 @@ async function registerMediaPipeTfJsLocalModels() {
 206  206 |       description: "Detects 33 body pose landmarks",
 207  207 |       tasks: ["PoseLandmarkerTask"],
 208  208 |       provider: TENSORFLOW_MEDIAPIPE,
 209      | -     providerConfig: {
      209 | +     provider_config: {
 210  210 |         taskEngine: "vision",
 211  211 |         pipeline: "vision-pose-landmarker",
 212  212 |         modelPath: "https://storage.googleapis.com/mediapipe-models/pose_landmarker/pose_landmarker_lite/float16/1/pose_landmarker_lite.task"

@@ -229,7 +229,7 @@ async function registerHuggingfaceLocalModels() {
 229  229 |       description: "Xenova/all-MiniLM-L6-v2",
 230  230 |       tasks: ["TextEmbeddingTask"],
 231  231 |       provider: HF_TRANSFORMERS_ONNX,
 232      | -     providerConfig: {
      232 | +     provider_config: {
 233  233 |         pipeline: "feature-extraction",
 234  234 |         modelPath: "Xenova/all-MiniLM-L6-v2",
 235  235 |         device: "webgpu",

@@ -243,7 +243,7 @@ async function registerHuggingfaceLocalModels() {
 243  243 |       description: "Xenova/bge-base-en-v1.5",
 244  244 |       tasks: ["TextEmbeddingTask"],
 245  245 |       provider: HF_TRANSFORMERS_ONNX,
 246      | -     providerConfig: {
      246 | +     provider_config: {
 247  247 |         pipeline: "feature-extraction",
 248  248 |         modelPath: "Xenova/bge-base-en-v1.5",
 249  249 |         device: "webgpu",

@@ -257,7 +257,7 @@ async function registerHuggingfaceLocalModels() {
 257  257 |       description: "Xenova/gte-small",
 258  258 |       tasks: ["TextEmbeddingTask"],
 259  259 |       provider: HF_TRANSFORMERS_ONNX,
 260      | -     providerConfig: {
      260 | +     provider_config: {
 261  261 |         pipeline: "feature-extraction",
 262  262 |         modelPath: "Xenova/gte-small",
 263  263 |         device: "webgpu",

@@ -271,7 +271,7 @@ async function registerHuggingfaceLocalModels() {
 271  271 |       description: "onnx-community/bert_uncased_L-2_H-128_A-2-ONNX",
 272  272 |       tasks: ["TextEmbeddingTask"],
 273  273 |       provider: HF_TRANSFORMERS_ONNX,
 274      | -     providerConfig: {
      274 | +     provider_config: {
 275  275 |         pipeline: "feature-extraction",
 276  276 |         modelPath: "onnx-community/bert_uncased_L-2_H-128_A-2-ONNX",
 277  277 |         device: "webgpu",

@@ -285,7 +285,7 @@ async function registerHuggingfaceLocalModels() {
 285  285 |       description: "Xenova/paraphrase-albert-base-v2",
 286  286 |       tasks: ["TextEmbeddingTask"],
 287  287 |       provider: HF_TRANSFORMERS_ONNX,
 288      | -     providerConfig: {
      288 | +     provider_config: {
 289  289 |         pipeline: "feature-extraction",
 290  290 |         modelPath: "Xenova/paraphrase-albert-base-v2",
 291  291 |         device: "webgpu",

@@ -299,7 +299,7 @@ async function registerHuggingfaceLocalModels() {
 299  299 |       description: "Xenova/distilbert-base-uncased-distilled-squad quantized to 8bit",
 300  300 |       tasks: ["TextQuestionAnsweringTask"],
 301  301 |       provider: HF_TRANSFORMERS_ONNX,
 302      | -     providerConfig: {
      302 | +     provider_config: {
 303  303 |         pipeline: "question-answering",
 304  304 |         modelPath: "Xenova/distilbert-base-uncased-distilled-squad"
 305  305 |       },

@@ -311,7 +311,7 @@ async function registerHuggingfaceLocalModels() {
 311  311 |       description: "Xenova/gpt2 quantized to 8bit",
 312  312 |       tasks: ["TextGenerationTask"],
 313  313 |       provider: HF_TRANSFORMERS_ONNX,
 314      | -     providerConfig: {
      314 | +     provider_config: {
 315  315 |         pipeline: "text-generation",
 316  316 |         modelPath: "Xenova/gpt2",
 317  317 |         dType: "q8"

@@ -324,7 +324,7 @@ async function registerHuggingfaceLocalModels() {
 324  324 |       description: "Xenova/Phi-3-mini-4k-instruct quantized to q4f16",
 325  325 |       tasks: ["TextGenerationTask"],
 326  326 |       provider: HF_TRANSFORMERS_ONNX,
 327      | -     providerConfig: {
      327 | +     provider_config: {
 328  328 |         pipeline: "text-generation",
 329  329 |         modelPath: "Xenova/Phi-3-mini-4k-instruct",
 330  330 |         device: "webgpu",

@@ -339,7 +339,7 @@ async function registerHuggingfaceLocalModels() {
 339  339 |       description: "Xenova/distilgpt2 quantized to 8bit",
 340  340 |       tasks: ["TextGenerationTask"],
 341  341 |       provider: HF_TRANSFORMERS_ONNX,
 342      | -     providerConfig: {
      342 | +     provider_config: {
 343  343 |         pipeline: "text-generation",
 344  344 |         modelPath: "Xenova/distilgpt2",
 345  345 |         dType: "q8"

@@ -352,7 +352,7 @@ async function registerHuggingfaceLocalModels() {
 352  352 |       description: "Xenova/LaMini-Flan-T5-783M quantized to 8bit",
 353  353 |       tasks: ["TextGenerationTask", "TextRewriterTask"],
 354  354 |       provider: HF_TRANSFORMERS_ONNX,
 355      | -     providerConfig: {
      355 | +     provider_config: {
 356  356 |         pipeline: "text2text-generation",
 357  357 |         modelPath: "Xenova/LaMini-Flan-T5-783M"
 358  358 |       },

@@ -364,7 +364,7 @@ async function registerHuggingfaceLocalModels() {
 364  364 |       description: "Xenova/LaMini-Flan-T5-783M quantized to 8bit",
 365  365 |       tasks: ["TextGenerationTask", "TextRewriterTask"],
 366  366 |       provider: HF_TRANSFORMERS_ONNX,
 367      | -     providerConfig: {
      367 | +     provider_config: {
 368  368 |         pipeline: "text2text-generation",
 369  369 |         modelPath: "Xenova/LaMini-Flan-T5-783M",
 370  370 |         dType: "q8"

@@ -377,7 +377,7 @@ async function registerHuggingfaceLocalModels() {
 377  377 |       description: "Falconsai/text_summarization quantized to 8bit",
 378  378 |       tasks: ["TextSummaryTask"],
 379  379 |       provider: HF_TRANSFORMERS_ONNX,
 380      | -     providerConfig: {
      380 | +     provider_config: {
 381  381 |         pipeline: "summarization",
 382  382 |         modelPath: "Falconsai/text_summarization",
 383  383 |         dType: "q8"

@@ -390,7 +390,7 @@ async function registerHuggingfaceLocalModels() {
 390  390 |       description: "Xenova/nllb-200-distilled-600M quantized to 8bit",
 391  391 |       tasks: ["TextTranslationTask"],
 392  392 |       provider: HF_TRANSFORMERS_ONNX,
 393      | -     providerConfig: {
      393 | +     provider_config: {
 394  394 |         pipeline: "translation",
 395  395 |         modelPath: "Xenova/nllb-200-distilled-600M",
 396  396 |         languageStyle: "FLORES-200",

@@ -404,7 +404,7 @@ async function registerHuggingfaceLocalModels() {
 404  404 |       description: "Xenova/m2m100_418M quantized to 8bit",
 405  405 |       tasks: ["TextTranslationTask"],
 406  406 |       provider: HF_TRANSFORMERS_ONNX,
 407      | -     providerConfig: {
      407 | +     provider_config: {
 408  408 |         pipeline: "translation",
 409  409 |         modelPath: "Xenova/m2m100_418M",
 410  410 |         languageStyle: "ISO-639",

@@ -418,7 +418,7 @@ async function registerHuggingfaceLocalModels() {
 418  418 |       description: "Xenova/m2m100_418M quantized to 8bit",
 419  419 |       tasks: ["TextTranslationTask"],
 420  420 |       provider: HF_TRANSFORMERS_ONNX,
 421      | -     providerConfig: {
      421 | +     provider_config: {
 422  422 |         pipeline: "translation",
 423  423 |         modelPath: "Xenova/m2m100_418M",
 424  424 |         languageStyle: "ISO-639",

@@ -432,7 +432,7 @@ async function registerHuggingfaceLocalModels() {
 432  432 |       description: "Xenova/mbart-large-50-many-to-many-mmt quantized to 8bit",
 433  433 |       tasks: ["TextTranslationTask"],
 434  434 |       provider: HF_TRANSFORMERS_ONNX,
 435      | -     providerConfig: {
      435 | +     provider_config: {
 436  436 |         pipeline: "translation",
 437  437 |         modelPath: "Xenova/mbart-large-50-many-to-many-mmt",
 438  438 |         languageStyle: "ISO-639_ISO-3166-1-alpha-2",

@@ -446,7 +446,7 @@ async function registerHuggingfaceLocalModels() {
 446  446 |       description: "Vision Transformer for image classification",
 447  447 |       tasks: ["ImageClassificationTask"],
 448  448 |       provider: HF_TRANSFORMERS_ONNX,
 449      | -     providerConfig: {
      449 | +     provider_config: {
 450  450 |         pipeline: "image-classification",
 451  451 |         modelPath: "Xenova/vit-base-patch16-224",
 452  452 |         dType: "q8"

@@ -459,7 +459,7 @@ async function registerHuggingfaceLocalModels() {
 459  459 |       description: "CLIP model for zero-shot image classification and embeddings",
 460  460 |       tasks: ["ImageClassificationTask", "ImageEmbeddingTask"],
 461  461 |       provider: HF_TRANSFORMERS_ONNX,
 462      | -     providerConfig: {
      462 | +     provider_config: {
 463  463 |         pipeline: "zero-shot-image-classification",
 464  464 |         modelPath: "Xenova/clip-vit-base-patch32",
 465  465 |         dType: "q8"

@@ -472,7 +472,7 @@ async function registerHuggingfaceLocalModels() {
 472  472 |       description: "Object detection model",
 473  473 |       tasks: ["ObjectDetectionTask"],
 474  474 |       provider: HF_TRANSFORMERS_ONNX,
 475      | -     providerConfig: {
      475 | +     provider_config: {
 476  476 |         pipeline: "object-detection",
 477  477 |         modelPath: "Xenova/detr-resnet-50",
 478  478 |         dType: "q8"

@@ -485,7 +485,7 @@ async function registerHuggingfaceLocalModels() {
 485  485 |       description: "Zero-shot object detection model",
 486  486 |       tasks: ["ObjectDetectionTask"],
 487  487 |       provider: HF_TRANSFORMERS_ONNX,
 488      | -     providerConfig: {
      488 | +     provider_config: {
 489  489 |         pipeline: "zero-shot-object-detection",
 490  490 |         modelPath: "Xenova/owlvit-base-patch32",
 491  491 |         dType: "q8"

@@ -498,7 +498,7 @@ async function registerHuggingfaceLocalModels() {
 498  498 |       description: "Image segmentation model trained on ADE20K dataset",
 499  499 |       tasks: ["ImageSegmentationTask"],
 500  500 |       provider: HF_TRANSFORMERS_ONNX,
 501      | -     providerConfig: {
      501 | +     provider_config: {
 502  502 |         pipeline: "image-segmentation",
 503  503 |         modelPath: "Xenova/segformer-b0-finetuned-ade-512-512",
 504  504 |         dType: "q8"

@@ -511,7 +511,7 @@ async function registerHuggingfaceLocalModels() {
 511  511 |       description: "Image to text captioning model",
 512  512 |       tasks: ["ImageToTextTask"],
 513  513 |       provider: HF_TRANSFORMERS_ONNX,
 514      | -     providerConfig: {
      514 | +     provider_config: {
 515  515 |         pipeline: "image-to-text",
 516  516 |         modelPath: "Xenova/vit-gpt2-image-captioning",
 517  517 |         dType: "q8"

@@ -524,7 +524,7 @@ async function registerHuggingfaceLocalModels() {
 524  524 |       description: "Background removal model",
 525  525 |       tasks: ["BackgroundRemovalTask"],
 526  526 |       provider: HF_TRANSFORMERS_ONNX,
 527      | -     providerConfig: {
      527 | +     provider_config: {
 528  528 |         pipeline: "background-removal",
 529  529 |         modelPath: "Xenova/modnet",
 530  530 |         dType: "q8"

@@ -537,7 +537,7 @@ async function registerHuggingfaceLocalModels() {
 537  537 |       description: "Zero-shot text classification model",
 538  538 |       tasks: ["TextClassificationTask"],
 539  539 |       provider: HF_TRANSFORMERS_ONNX,
 540      | -     providerConfig: {
      540 | +     provider_config: {
 541  541 |         pipeline: "zero-shot-classification",
 542  542 |         modelPath: "Xenova/mobilebert-uncased-mnli",
 543  543 |         dType: "q8"

@@ -634,4 +634,4 @@ export {
 634  634 |   IDB_TASK_GRAPH_REPOSITORY
 635  635 | };
 636  636 | 
 637      | - //# debugId=
      637 | + //# debugId=1189E79CE0A34BA564756E2164756E21
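The final hunk is the only non-rename change to the bundle: 0.0.83 stamps the trailing //# debugId= comment with 1189E79CE0A34BA564756E2164756E21, the same value written to the "debugId" field of browser.js.map below (the 0.0.81 value is cut off in this rendering). A small sketch, assuming Node and the installed package layout, of how a symbolication tool might check that bundle and map agree; the regex is illustrative, not part of the package:

import { readFileSync } from "node:fs";

// Pull the debugId comment from the tail of the published bundle.
const bundle = readFileSync("node_modules/@workglow/test/dist/browser.js", "utf8");
const bundleId = bundle.match(/\/\/# debugId=([0-9A-Fa-f]+)/)?.[1];

// The sourcemap carries the same id in its top-level "debugId" field.
const map = JSON.parse(
  readFileSync("node_modules/@workglow/test/dist/browser.js.map", "utf8")
);
console.log(bundleId === map.debugId); // true for 0.0.83
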
package/dist/browser.js.map
CHANGED
@@ -5,12 +5,12 @@
   5    5 |
"/**\n * @license\n * Copyright 2025 Steven Roussey <sroussey@gmail.com>\n * SPDX-License-Identifier: Apache-2.0\n */\n\nimport { InMemoryTabularRepository } from \"@workglow/storage\";\nimport {\n TaskGraphPrimaryKeyNames,\n TaskGraphSchema,\n TaskGraphTabularRepository,\n} from \"@workglow/task-graph\";\nimport { createServiceToken } from \"@workglow/util\";\n\nexport const MEMORY_TASK_GRAPH_REPOSITORY = createServiceToken<TaskGraphTabularRepository>(\n \"taskgraph.taskGraphRepository.inMemory\"\n);\n\n/**\n * In-memory implementation of a task graph repository.\n * Provides storage and retrieval for task graphs.\n */\nexport class InMemoryTaskGraphRepository extends TaskGraphTabularRepository {\n constructor() {\n super({\n tabularRepository: new InMemoryTabularRepository(TaskGraphSchema, TaskGraphPrimaryKeyNames),\n });\n }\n}\n",
   6    6 |
"/**\n * @license\n * Copyright 2025 Steven Roussey <sroussey@gmail.com>\n * SPDX-License-Identifier: Apache-2.0\n */\n\nimport { InMemoryTabularRepository } from \"@workglow/storage\";\nimport {\n TaskOutputPrimaryKeyNames,\n TaskOutputSchema,\n TaskOutputTabularRepository,\n} from \"@workglow/task-graph\";\nimport { createServiceToken } from \"@workglow/util\";\n\nexport const MEMORY_TASK_OUTPUT_REPOSITORY = createServiceToken<InMemoryTaskOutputRepository>(\n \"taskgraph.taskOutputRepository.inMemory\"\n);\n\n/**\n * In-memory implementation of a task output repository.\n * Provides storage and retrieval for task outputs.\n */\nexport class InMemoryTaskOutputRepository extends TaskOutputTabularRepository {\n constructor() {\n super({\n tabularRepository: new InMemoryTabularRepository(\n TaskOutputSchema,\n TaskOutputPrimaryKeyNames,\n [\"createdAt\"]\n ),\n });\n }\n}\n",
   7    7 |
"/**\n * @license\n * Copyright 2025 Steven Roussey <sroussey@gmail.com>\n * SPDX-License-Identifier: Apache-2.0\n */\n\nimport { AiJob, AiJobInput } from \"@workglow/ai\";\nimport { TENSORFLOW_MEDIAPIPE } from \"@workglow/ai-provider\";\nimport { ConcurrencyLimiter, JobQueueClient, JobQueueServer } from \"@workglow/job-queue\";\nimport { InMemoryQueueStorage } from \"@workglow/storage\";\nimport { getTaskQueueRegistry, TaskInput, TaskOutput } from \"@workglow/task-graph\";\nexport * from \"./MediaPipeModelSamples\";\nexport * from \"./ONNXModelSamples\";\n\nexport async function register_HFT_InMemoryQueue(): Promise<void> {\n const queueName = \"HF_TRANSFORMERS_ONNX\";\n const storage = new InMemoryQueueStorage<AiJobInput<TaskInput>, TaskOutput>(queueName);\n await storage.setupDatabase();\n\n const server = new JobQueueServer<AiJobInput<TaskInput>, TaskOutput>(\n AiJob<AiJobInput<TaskInput>, TaskOutput>,\n {\n storage,\n queueName,\n limiter: new ConcurrencyLimiter(1, 10),\n }\n );\n\n const client = new JobQueueClient<AiJobInput<TaskInput>, TaskOutput>({\n storage,\n queueName,\n });\n\n client.attach(server);\n\n getTaskQueueRegistry().registerQueue({ server, client, storage });\n await server.start();\n}\n\nexport async function register_TFMP_InMemoryQueue(): Promise<void> {\n const queueName = TENSORFLOW_MEDIAPIPE;\n const storage = new InMemoryQueueStorage<AiJobInput<TaskInput>, TaskOutput>(queueName);\n await storage.setupDatabase();\n\n const server = new JobQueueServer<AiJobInput<TaskInput>, TaskOutput>(\n AiJob<AiJobInput<TaskInput>, TaskOutput>,\n {\n storage,\n queueName,\n limiter: new ConcurrencyLimiter(1, 10),\n }\n );\n\n const client = new JobQueueClient<AiJobInput<TaskInput>, TaskOutput>({\n storage,\n queueName,\n });\n\n client.attach(server);\n\n getTaskQueueRegistry().registerQueue({ server, client, storage });\n await server.start();\n}\n",
   8      | -
"import { getGlobalModelRepository } from \"@workglow/ai\";\nimport { TENSORFLOW_MEDIAPIPE, type TFMPModelRecord } from \"@workglow/ai-provider\";\n\nexport async function registerMediaPipeTfJsLocalModels(): Promise<void> {\n const models: TFMPModelRecord[] = [\n // Text Models\n {\n model_id: \"media-pipe:Universal Sentence Encoder\",\n title: \"Universal Sentence Encoder\",\n description: \"Universal Sentence Encoder\",\n tasks: [\"TextEmbeddingTask\"],\n provider: TENSORFLOW_MEDIAPIPE,\n
   9      | -
"import { getGlobalModelRepository } from \"@workglow/ai\";\nimport { HF_TRANSFORMERS_ONNX, HfTransformersOnnxModelRecord } from \"@workglow/ai-provider\";\n\nexport async function registerHuggingfaceLocalModels(): Promise<void> {\n const onnxModels: HfTransformersOnnxModelRecord[] = [\n {\n model_id: \"onnx:Xenova/all-MiniLM-L6-v2:q8\",\n title: \"All MiniLM L6 V2 384D\",\n description: \"Xenova/all-MiniLM-L6-v2\",\n tasks: [\"TextEmbeddingTask\"],\n provider: HF_TRANSFORMERS_ONNX,\n providerConfig: {\n pipeline: \"feature-extraction\",\n modelPath: \"Xenova/all-MiniLM-L6-v2\",\n device: \"webgpu\",\n nativeDimensions: 384,\n },\n metadata: {},\n },\n {\n model_id: \"onnx:Xenova/bge-base-en-v1.5:q8\",\n title: \"BGE Base English V1.5 768D\",\n description: \"Xenova/bge-base-en-v1.5\",\n tasks: [\"TextEmbeddingTask\"],\n provider: HF_TRANSFORMERS_ONNX,\n providerConfig: {\n pipeline: \"feature-extraction\",\n modelPath: \"Xenova/bge-base-en-v1.5\",\n device: \"webgpu\",\n nativeDimensions: 768,\n },\n metadata: {},\n },\n {\n model_id: \"onnx:Xenova/gte-small:q8\",\n title: \"GTE Small 384D\",\n description: \"Xenova/gte-small\",\n tasks: [\"TextEmbeddingTask\"],\n provider: HF_TRANSFORMERS_ONNX,\n providerConfig: {\n pipeline: \"feature-extraction\",\n modelPath: \"Xenova/gte-small\",\n device: \"webgpu\",\n nativeDimensions: 384,\n },\n metadata: {},\n },\n {\n model_id: \"onnx:onnx-community/bert_uncased_L-2_H-128_A-2-ONNX:q8\",\n title: \"BERT Uncased 128D\",\n description: \"onnx-community/bert_uncased_L-2_H-128_A-2-ONNX\",\n tasks: [\"TextEmbeddingTask\"],\n provider: HF_TRANSFORMERS_ONNX,\n providerConfig: {\n pipeline: \"feature-extraction\",\n modelPath: \"onnx-community/bert_uncased_L-2_H-128_A-2-ONNX\",\n device: \"webgpu\",\n nativeDimensions: 128,\n },\n metadata: {},\n },\n {\n model_id: \"onnx:Xenova/paraphrase-albert-base-v2:q8\",\n title: \"Paraphrase ALBERT Base V2 768D\",\n description: \"Xenova/paraphrase-albert-base-v2\",\n tasks: [\"TextEmbeddingTask\"],\n provider: HF_TRANSFORMERS_ONNX,\n providerConfig: {\n pipeline: \"feature-extraction\",\n modelPath: \"Xenova/paraphrase-albert-base-v2\",\n device: \"webgpu\",\n nativeDimensions: 768,\n },\n metadata: {},\n },\n {\n model_id: \"onnx:Xenova/distilbert-base-uncased-distilled-squad:q8\",\n title: \"distilbert-base-uncased-distilled-squad\",\n description: \"Xenova/distilbert-base-uncased-distilled-squad quantized to 8bit\",\n tasks: [\"TextQuestionAnsweringTask\"],\n provider: HF_TRANSFORMERS_ONNX,\n providerConfig: {\n pipeline: \"question-answering\",\n modelPath: \"Xenova/distilbert-base-uncased-distilled-squad\",\n },\n metadata: {},\n },\n {\n model_id: \"onnx:Xenova/gpt2:q8\",\n title: \"gpt2\",\n description: \"Xenova/gpt2 quantized to 8bit\",\n tasks: [\"TextGenerationTask\"],\n provider: HF_TRANSFORMERS_ONNX,\n providerConfig: {\n pipeline: \"text-generation\",\n modelPath: \"Xenova/gpt2\",\n dType: \"q8\",\n },\n metadata: {},\n },\n {\n model_id: \"onnx:Xenova/Phi-3-mini-4k-instruct:q4f16\",\n title: \"Phi-3-mini-4k-instruct:q4f16\",\n description: \"Xenova/Phi-3-mini-4k-instruct quantized to q4f16\",\n tasks: [\"TextGenerationTask\"],\n provider: HF_TRANSFORMERS_ONNX,\n providerConfig: {\n pipeline: \"text-generation\",\n modelPath: \"Xenova/Phi-3-mini-4k-instruct\",\n device: \"webgpu\",\n dType: \"q4f16\",\n useExternalDataFormat: true,\n },\n metadata: {},\n },\n {\n model_id: \"onnx:Xenova/distilgpt2:q8\",\n title: \"distilgpt2\",\n description: \"Xenova/distilgpt2 quantized to 8bit\",\n tasks: 
[\"TextGenerationTask\"],\n provider: HF_TRANSFORMERS_ONNX,\n providerConfig: {\n pipeline: \"text-generation\",\n modelPath: \"Xenova/distilgpt2\",\n dType: \"q8\",\n },\n metadata: {},\n },\n {\n model_id: \"onnx:Xenova/LaMini-Flan-T5-783M:q8\",\n title: \"LaMini-Flan-T5-783M\",\n description: \"Xenova/LaMini-Flan-T5-783M quantized to 8bit\",\n tasks: [\"TextGenerationTask\", \"TextRewriterTask\"],\n provider: HF_TRANSFORMERS_ONNX,\n providerConfig: {\n pipeline: \"text2text-generation\",\n modelPath: \"Xenova/LaMini-Flan-T5-783M\",\n },\n metadata: {},\n },\n {\n model_id: \"onnx:Xenova/LaMini-Flan-T5-783M:q8\",\n title: \"LaMini-Flan-T5-783M\",\n description: \"Xenova/LaMini-Flan-T5-783M quantized to 8bit\",\n tasks: [\"TextGenerationTask\", \"TextRewriterTask\"],\n provider: HF_TRANSFORMERS_ONNX,\n providerConfig: {\n pipeline: \"text2text-generation\",\n modelPath: \"Xenova/LaMini-Flan-T5-783M\",\n dType: \"q8\",\n },\n metadata: {},\n },\n {\n model_id: \"onnx:Falconsai/text_summarization:q8\",\n title: \"text_summarization\",\n description: \"Falconsai/text_summarization quantized to 8bit\",\n tasks: [\"TextSummaryTask\"],\n provider: HF_TRANSFORMERS_ONNX,\n providerConfig: {\n pipeline: \"summarization\",\n modelPath: \"Falconsai/text_summarization\",\n dType: \"q8\",\n },\n metadata: {},\n },\n {\n model_id: \"onnx:Xenova/nllb-200-distilled-600M:q8\",\n title: \"nllb-200-distilled-600M\",\n description: \"Xenova/nllb-200-distilled-600M quantized to 8bit\",\n tasks: [\"TextTranslationTask\"],\n provider: HF_TRANSFORMERS_ONNX,\n providerConfig: {\n pipeline: \"translation\",\n modelPath: \"Xenova/nllb-200-distilled-600M\",\n languageStyle: \"FLORES-200\",\n dType: \"q8\",\n },\n metadata: {},\n },\n {\n model_id: \"onnx:Xenova/m2m100_418M:q8\",\n title: \"m2m100_418M\",\n description: \"Xenova/m2m100_418M quantized to 8bit\",\n tasks: [\"TextTranslationTask\"],\n provider: HF_TRANSFORMERS_ONNX,\n providerConfig: {\n pipeline: \"translation\",\n modelPath: \"Xenova/m2m100_418M\",\n languageStyle: \"ISO-639\",\n dType: \"q8\",\n },\n metadata: {},\n },\n {\n model_id: \"onnx:Xenova/m2m100_418M:q8\",\n title: \"m2m100_418M\",\n description: \"Xenova/m2m100_418M quantized to 8bit\",\n tasks: [\"TextTranslationTask\"],\n provider: HF_TRANSFORMERS_ONNX,\n providerConfig: {\n pipeline: \"translation\",\n modelPath: \"Xenova/m2m100_418M\",\n languageStyle: \"ISO-639\",\n dType: \"q8\",\n },\n metadata: {},\n },\n {\n model_id: \"onnx:Xenova/mbart-large-50-many-to-many-mmt:q8\",\n title: \"mbart-large-50-many-to-many-mmt\",\n description: \"Xenova/mbart-large-50-many-to-many-mmt quantized to 8bit\",\n tasks: [\"TextTranslationTask\"],\n provider: HF_TRANSFORMERS_ONNX,\n providerConfig: {\n pipeline: \"translation\",\n modelPath: \"Xenova/mbart-large-50-many-to-many-mmt\",\n languageStyle: \"ISO-639_ISO-3166-1-alpha-2\",\n dType: \"q8\",\n },\n metadata: {},\n },\n // Vision Models\n {\n model_id: \"onnx:Xenova/vit-base-patch16-224:q8\",\n title: \"ViT Base Patch16 224\",\n description: \"Vision Transformer for image classification\",\n tasks: [\"ImageClassificationTask\"],\n provider: HF_TRANSFORMERS_ONNX,\n providerConfig: {\n pipeline: \"image-classification\",\n modelPath: \"Xenova/vit-base-patch16-224\",\n dType: \"q8\",\n },\n metadata: {},\n },\n {\n model_id: \"onnx:Xenova/clip-vit-base-patch32:q8\",\n title: \"CLIP ViT Base Patch32\",\n description: \"CLIP model for zero-shot image classification and embeddings\",\n tasks: [\"ImageClassificationTask\", \"ImageEmbeddingTask\"],\n 
provider: HF_TRANSFORMERS_ONNX,\n providerConfig: {\n pipeline: \"zero-shot-image-classification\",\n modelPath: \"Xenova/clip-vit-base-patch32\",\n dType: \"q8\",\n },\n metadata: {},\n },\n {\n model_id: \"onnx:Xenova/detr-resnet-50:q8\",\n title: \"DETR ResNet-50\",\n description: \"Object detection model\",\n tasks: [\"ObjectDetectionTask\"],\n provider: HF_TRANSFORMERS_ONNX,\n providerConfig: {\n pipeline: \"object-detection\",\n modelPath: \"Xenova/detr-resnet-50\",\n dType: \"q8\",\n },\n metadata: {},\n },\n {\n model_id: \"onnx:Xenova/owlvit-base-patch32:q8\",\n title: \"OWL-ViT Base Patch32\",\n description: \"Zero-shot object detection model\",\n tasks: [\"ObjectDetectionTask\"],\n provider: HF_TRANSFORMERS_ONNX,\n providerConfig: {\n pipeline: \"zero-shot-object-detection\",\n modelPath: \"Xenova/owlvit-base-patch32\",\n dType: \"q8\",\n },\n metadata: {},\n },\n {\n model_id: \"onnx:Xenova/segformer-b0-finetuned-ade-512-512:q8\",\n title: \"Segformer B0 ADE\",\n description: \"Image segmentation model trained on ADE20K dataset\",\n tasks: [\"ImageSegmentationTask\"],\n provider: HF_TRANSFORMERS_ONNX,\n providerConfig: {\n pipeline: \"image-segmentation\",\n modelPath: \"Xenova/segformer-b0-finetuned-ade-512-512\",\n dType: \"q8\",\n },\n metadata: {},\n },\n {\n model_id: \"onnx:Xenova/vit-gpt2-image-captioning:q8\",\n title: \"ViT GPT2 Image Captioning\",\n description: \"Image to text captioning model\",\n tasks: [\"ImageToTextTask\"],\n provider: HF_TRANSFORMERS_ONNX,\n providerConfig: {\n pipeline: \"image-to-text\",\n modelPath: \"Xenova/vit-gpt2-image-captioning\",\n dType: \"q8\",\n },\n metadata: {},\n },\n {\n model_id: \"onnx:Xenova/modnet:q8\",\n title: \"MODNet Background Removal\",\n description: \"Background removal model\",\n tasks: [\"BackgroundRemovalTask\"],\n provider: HF_TRANSFORMERS_ONNX,\n providerConfig: {\n pipeline: \"background-removal\",\n modelPath: \"Xenova/modnet\",\n dType: \"q8\",\n },\n metadata: {},\n },\n {\n model_id: \"onnx:Xenova/mobilebert-uncased-mnli:q8\",\n title: \"MobileBERT MNLI\",\n description: \"Zero-shot text classification model\",\n tasks: [\"TextClassificationTask\"],\n provider: HF_TRANSFORMERS_ONNX,\n providerConfig: {\n pipeline: \"zero-shot-classification\",\n modelPath: \"Xenova/mobilebert-uncased-mnli\",\n dType: \"q8\",\n },\n metadata: {},\n },\n ];\n\n for (const model of onnxModels) {\n await getGlobalModelRepository().addModel(model);\n }\n}\n",
        8 | +
"import { getGlobalModelRepository } from \"@workglow/ai\";\nimport { TENSORFLOW_MEDIAPIPE, type TFMPModelRecord } from \"@workglow/ai-provider\";\n\nexport async function registerMediaPipeTfJsLocalModels(): Promise<void> {\n const models: TFMPModelRecord[] = [\n // Text Models\n {\n model_id: \"media-pipe:Universal Sentence Encoder\",\n title: \"Universal Sentence Encoder\",\n description: \"Universal Sentence Encoder\",\n tasks: [\"TextEmbeddingTask\"],\n provider: TENSORFLOW_MEDIAPIPE,\n provider_config: {\n taskEngine: \"text\",\n pipeline: \"text-embedder\",\n modelPath:\n \"https://storage.googleapis.com/mediapipe-tasks/text_embedder/universal_sentence_encoder.tflite\",\n },\n metadata: {},\n },\n {\n model_id: \"media-pipe:BERT Text Classifier\",\n title: \"BERT Text Classifier\",\n description: \"BERT-based text classification model\",\n tasks: [\"TextClassificationTask\"],\n provider: TENSORFLOW_MEDIAPIPE,\n provider_config: {\n taskEngine: \"text\",\n pipeline: \"text-classifier\",\n modelPath:\n \"https://storage.googleapis.com/mediapipe-models/text_classifier/bert_classifier/float32/1/bert_classifier.tflite\",\n },\n metadata: {},\n },\n {\n model_id: \"media-pipe:Language Detector\",\n title: \"Language Detector\",\n description: \"Language detection model\",\n tasks: [\"TextLanguageDetectionTask\"],\n provider: TENSORFLOW_MEDIAPIPE,\n provider_config: {\n taskEngine: \"text\",\n pipeline: \"text-language-detector\",\n modelPath:\n \"https://storage.googleapis.com/mediapipe-models/language_detector/language_detector/float32/1/language_detector.tflite\",\n },\n metadata: {},\n },\n // Vision Models\n {\n model_id: \"media-pipe:EfficientNet Lite0 Image Classifier\",\n title: \"EfficientNet Lite0\",\n description: \"Lightweight image classification model\",\n tasks: [\"ImageClassificationTask\"],\n provider: TENSORFLOW_MEDIAPIPE,\n provider_config: {\n taskEngine: \"vision\",\n pipeline: \"vision-image-classifier\",\n modelPath:\n \"https://storage.googleapis.com/mediapipe-models/image_classifier/efficientnet_lite0/float32/1/efficientnet_lite0.tflite\",\n },\n metadata: {},\n },\n {\n model_id: \"media-pipe:MobileNet V3 Image Embedder\",\n title: \"MobileNet V3 Small\",\n description: \"Lightweight image embedding model\",\n tasks: [\"ImageEmbeddingTask\"],\n provider: TENSORFLOW_MEDIAPIPE,\n provider_config: {\n taskEngine: \"vision\",\n pipeline: \"vision-image-embedder\",\n modelPath:\n \"https://storage.googleapis.com/mediapipe-models/image_embedder/mobilenet_v3_small/float32/1/mobilenet_v3_small.tflite\",\n },\n metadata: {},\n },\n {\n model_id: \"media-pipe:EfficientDet Lite0 Object Detector\",\n title: \"EfficientDet Lite0\",\n description: \"Lightweight object detection model\",\n tasks: [\"ObjectDetectionTask\"],\n provider: TENSORFLOW_MEDIAPIPE,\n provider_config: {\n taskEngine: \"vision\",\n pipeline: \"vision-object-detector\",\n modelPath:\n \"https://storage.googleapis.com/mediapipe-models/object_detector/efficientdet_lite0/float32/1/efficientdet_lite0.tflite\",\n },\n metadata: {},\n },\n {\n model_id: \"media-pipe:DeepLab V3 Image Segmenter\",\n title: \"DeepLab V3\",\n description: \"Image segmentation model\",\n tasks: [\"ImageSegmentationTask\"],\n provider: TENSORFLOW_MEDIAPIPE,\n provider_config: {\n taskEngine: \"vision\",\n pipeline: \"vision-image-segmenter\",\n modelPath:\n \"https://storage.googleapis.com/mediapipe-models/image_segmenter/deeplab_v3/float32/1/deeplab_v3.tflite\",\n },\n metadata: {},\n },\n // Audio Models\n {\n model_id: 
\"media-pipe:YAMNet Audio Classifier\",\n title: \"YAMNet\",\n description: \"Audio event classification model\",\n tasks: [\"AudioClassificationTask\"],\n provider: TENSORFLOW_MEDIAPIPE,\n provider_config: {\n taskEngine: \"audio\",\n pipeline: \"audio-classifier\",\n modelPath:\n \"https://storage.googleapis.com/mediapipe-models/audio_classifier/yamnet/float32/1/yamnet.tflite\",\n },\n metadata: {},\n },\n // New Vision Tasks\n {\n model_id: \"media-pipe:Gesture Recognizer\",\n title: \"Gesture Recognizer\",\n description: \"Recognizes hand gestures (thumbs up, victory, etc.)\",\n tasks: [\"GestureRecognizerTask\"],\n provider: TENSORFLOW_MEDIAPIPE,\n provider_config: {\n taskEngine: \"vision\",\n pipeline: \"vision-gesture-recognizer\",\n modelPath:\n \"https://storage.googleapis.com/mediapipe-models/gesture_recognizer/gesture_recognizer/float16/1/gesture_recognizer.task\",\n },\n metadata: {},\n },\n {\n model_id: \"media-pipe:Hand Landmarker\",\n title: \"Hand Landmarker\",\n description: \"Detects 21 hand landmarks\",\n tasks: [\"HandLandmarkerTask\"],\n provider: TENSORFLOW_MEDIAPIPE,\n provider_config: {\n taskEngine: \"vision\",\n pipeline: \"vision-hand-landmarker\",\n modelPath:\n \"https://storage.googleapis.com/mediapipe-models/hand_landmarker/hand_landmarker/float16/1/hand_landmarker.task\",\n },\n metadata: {},\n },\n {\n model_id: \"media-pipe:Face Detector\",\n title: \"Face Detector\",\n description: \"Detects faces with bounding boxes and keypoints\",\n tasks: [\"FaceDetectorTask\"],\n provider: TENSORFLOW_MEDIAPIPE,\n provider_config: {\n taskEngine: \"vision\",\n pipeline: \"vision-face-detector\",\n modelPath:\n \"https://storage.googleapis.com/mediapipe-models/face_detector/blaze_face_short_range/float16/1/blaze_face_short_range.tflite\",\n },\n metadata: {},\n },\n {\n model_id: \"media-pipe:Face Landmarker\",\n title: \"Face Landmarker\",\n description: \"Detects 478 facial landmarks with blendshapes\",\n tasks: [\"FaceLandmarkerTask\"],\n provider: TENSORFLOW_MEDIAPIPE,\n provider_config: {\n taskEngine: \"vision\",\n pipeline: \"vision-face-landmarker\",\n modelPath:\n \"https://storage.googleapis.com/mediapipe-models/face_landmarker/face_landmarker/float16/1/face_landmarker.task\",\n },\n metadata: {},\n },\n {\n model_id: \"media-pipe:Pose Landmarker\",\n title: \"Pose Landmarker\",\n description: \"Detects 33 body pose landmarks\",\n tasks: [\"PoseLandmarkerTask\"],\n provider: TENSORFLOW_MEDIAPIPE,\n provider_config: {\n taskEngine: \"vision\",\n pipeline: \"vision-pose-landmarker\",\n modelPath:\n \"https://storage.googleapis.com/mediapipe-models/pose_landmarker/pose_landmarker_lite/float16/1/pose_landmarker_lite.task\",\n },\n metadata: {},\n },\n ];\n\n for (const model of models) {\n await getGlobalModelRepository().addModel(model);\n }\n}\n",
        9 | +
"import { getGlobalModelRepository } from \"@workglow/ai\";\nimport { HF_TRANSFORMERS_ONNX, HfTransformersOnnxModelRecord } from \"@workglow/ai-provider\";\n\nexport async function registerHuggingfaceLocalModels(): Promise<void> {\n const onnxModels: HfTransformersOnnxModelRecord[] = [\n {\n model_id: \"onnx:Xenova/all-MiniLM-L6-v2:q8\",\n title: \"All MiniLM L6 V2 384D\",\n description: \"Xenova/all-MiniLM-L6-v2\",\n tasks: [\"TextEmbeddingTask\"],\n provider: HF_TRANSFORMERS_ONNX,\n provider_config: {\n pipeline: \"feature-extraction\",\n modelPath: \"Xenova/all-MiniLM-L6-v2\",\n device: \"webgpu\",\n nativeDimensions: 384,\n },\n metadata: {},\n },\n {\n model_id: \"onnx:Xenova/bge-base-en-v1.5:q8\",\n title: \"BGE Base English V1.5 768D\",\n description: \"Xenova/bge-base-en-v1.5\",\n tasks: [\"TextEmbeddingTask\"],\n provider: HF_TRANSFORMERS_ONNX,\n provider_config: {\n pipeline: \"feature-extraction\",\n modelPath: \"Xenova/bge-base-en-v1.5\",\n device: \"webgpu\",\n nativeDimensions: 768,\n },\n metadata: {},\n },\n {\n model_id: \"onnx:Xenova/gte-small:q8\",\n title: \"GTE Small 384D\",\n description: \"Xenova/gte-small\",\n tasks: [\"TextEmbeddingTask\"],\n provider: HF_TRANSFORMERS_ONNX,\n provider_config: {\n pipeline: \"feature-extraction\",\n modelPath: \"Xenova/gte-small\",\n device: \"webgpu\",\n nativeDimensions: 384,\n },\n metadata: {},\n },\n {\n model_id: \"onnx:onnx-community/bert_uncased_L-2_H-128_A-2-ONNX:q8\",\n title: \"BERT Uncased 128D\",\n description: \"onnx-community/bert_uncased_L-2_H-128_A-2-ONNX\",\n tasks: [\"TextEmbeddingTask\"],\n provider: HF_TRANSFORMERS_ONNX,\n provider_config: {\n pipeline: \"feature-extraction\",\n modelPath: \"onnx-community/bert_uncased_L-2_H-128_A-2-ONNX\",\n device: \"webgpu\",\n nativeDimensions: 128,\n },\n metadata: {},\n },\n {\n model_id: \"onnx:Xenova/paraphrase-albert-base-v2:q8\",\n title: \"Paraphrase ALBERT Base V2 768D\",\n description: \"Xenova/paraphrase-albert-base-v2\",\n tasks: [\"TextEmbeddingTask\"],\n provider: HF_TRANSFORMERS_ONNX,\n provider_config: {\n pipeline: \"feature-extraction\",\n modelPath: \"Xenova/paraphrase-albert-base-v2\",\n device: \"webgpu\",\n nativeDimensions: 768,\n },\n metadata: {},\n },\n {\n model_id: \"onnx:Xenova/distilbert-base-uncased-distilled-squad:q8\",\n title: \"distilbert-base-uncased-distilled-squad\",\n description: \"Xenova/distilbert-base-uncased-distilled-squad quantized to 8bit\",\n tasks: [\"TextQuestionAnsweringTask\"],\n provider: HF_TRANSFORMERS_ONNX,\n provider_config: {\n pipeline: \"question-answering\",\n modelPath: \"Xenova/distilbert-base-uncased-distilled-squad\",\n },\n metadata: {},\n },\n {\n model_id: \"onnx:Xenova/gpt2:q8\",\n title: \"gpt2\",\n description: \"Xenova/gpt2 quantized to 8bit\",\n tasks: [\"TextGenerationTask\"],\n provider: HF_TRANSFORMERS_ONNX,\n provider_config: {\n pipeline: \"text-generation\",\n modelPath: \"Xenova/gpt2\",\n dType: \"q8\",\n },\n metadata: {},\n },\n {\n model_id: \"onnx:Xenova/Phi-3-mini-4k-instruct:q4f16\",\n title: \"Phi-3-mini-4k-instruct:q4f16\",\n description: \"Xenova/Phi-3-mini-4k-instruct quantized to q4f16\",\n tasks: [\"TextGenerationTask\"],\n provider: HF_TRANSFORMERS_ONNX,\n provider_config: {\n pipeline: \"text-generation\",\n modelPath: \"Xenova/Phi-3-mini-4k-instruct\",\n device: \"webgpu\",\n dType: \"q4f16\",\n useExternalDataFormat: true,\n },\n metadata: {},\n },\n {\n model_id: \"onnx:Xenova/distilgpt2:q8\",\n title: \"distilgpt2\",\n description: \"Xenova/distilgpt2 quantized to 8bit\",\n 
tasks: [\"TextGenerationTask\"],\n provider: HF_TRANSFORMERS_ONNX,\n provider_config: {\n pipeline: \"text-generation\",\n modelPath: \"Xenova/distilgpt2\",\n dType: \"q8\",\n },\n metadata: {},\n },\n {\n model_id: \"onnx:Xenova/LaMini-Flan-T5-783M:q8\",\n title: \"LaMini-Flan-T5-783M\",\n description: \"Xenova/LaMini-Flan-T5-783M quantized to 8bit\",\n tasks: [\"TextGenerationTask\", \"TextRewriterTask\"],\n provider: HF_TRANSFORMERS_ONNX,\n provider_config: {\n pipeline: \"text2text-generation\",\n modelPath: \"Xenova/LaMini-Flan-T5-783M\",\n },\n metadata: {},\n },\n {\n model_id: \"onnx:Xenova/LaMini-Flan-T5-783M:q8\",\n title: \"LaMini-Flan-T5-783M\",\n description: \"Xenova/LaMini-Flan-T5-783M quantized to 8bit\",\n tasks: [\"TextGenerationTask\", \"TextRewriterTask\"],\n provider: HF_TRANSFORMERS_ONNX,\n provider_config: {\n pipeline: \"text2text-generation\",\n modelPath: \"Xenova/LaMini-Flan-T5-783M\",\n dType: \"q8\",\n },\n metadata: {},\n },\n {\n model_id: \"onnx:Falconsai/text_summarization:q8\",\n title: \"text_summarization\",\n description: \"Falconsai/text_summarization quantized to 8bit\",\n tasks: [\"TextSummaryTask\"],\n provider: HF_TRANSFORMERS_ONNX,\n provider_config: {\n pipeline: \"summarization\",\n modelPath: \"Falconsai/text_summarization\",\n dType: \"q8\",\n },\n metadata: {},\n },\n {\n model_id: \"onnx:Xenova/nllb-200-distilled-600M:q8\",\n title: \"nllb-200-distilled-600M\",\n description: \"Xenova/nllb-200-distilled-600M quantized to 8bit\",\n tasks: [\"TextTranslationTask\"],\n provider: HF_TRANSFORMERS_ONNX,\n provider_config: {\n pipeline: \"translation\",\n modelPath: \"Xenova/nllb-200-distilled-600M\",\n languageStyle: \"FLORES-200\",\n dType: \"q8\",\n },\n metadata: {},\n },\n {\n model_id: \"onnx:Xenova/m2m100_418M:q8\",\n title: \"m2m100_418M\",\n description: \"Xenova/m2m100_418M quantized to 8bit\",\n tasks: [\"TextTranslationTask\"],\n provider: HF_TRANSFORMERS_ONNX,\n provider_config: {\n pipeline: \"translation\",\n modelPath: \"Xenova/m2m100_418M\",\n languageStyle: \"ISO-639\",\n dType: \"q8\",\n },\n metadata: {},\n },\n {\n model_id: \"onnx:Xenova/m2m100_418M:q8\",\n title: \"m2m100_418M\",\n description: \"Xenova/m2m100_418M quantized to 8bit\",\n tasks: [\"TextTranslationTask\"],\n provider: HF_TRANSFORMERS_ONNX,\n provider_config: {\n pipeline: \"translation\",\n modelPath: \"Xenova/m2m100_418M\",\n languageStyle: \"ISO-639\",\n dType: \"q8\",\n },\n metadata: {},\n },\n {\n model_id: \"onnx:Xenova/mbart-large-50-many-to-many-mmt:q8\",\n title: \"mbart-large-50-many-to-many-mmt\",\n description: \"Xenova/mbart-large-50-many-to-many-mmt quantized to 8bit\",\n tasks: [\"TextTranslationTask\"],\n provider: HF_TRANSFORMERS_ONNX,\n provider_config: {\n pipeline: \"translation\",\n modelPath: \"Xenova/mbart-large-50-many-to-many-mmt\",\n languageStyle: \"ISO-639_ISO-3166-1-alpha-2\",\n dType: \"q8\",\n },\n metadata: {},\n },\n // Vision Models\n {\n model_id: \"onnx:Xenova/vit-base-patch16-224:q8\",\n title: \"ViT Base Patch16 224\",\n description: \"Vision Transformer for image classification\",\n tasks: [\"ImageClassificationTask\"],\n provider: HF_TRANSFORMERS_ONNX,\n provider_config: {\n pipeline: \"image-classification\",\n modelPath: \"Xenova/vit-base-patch16-224\",\n dType: \"q8\",\n },\n metadata: {},\n },\n {\n model_id: \"onnx:Xenova/clip-vit-base-patch32:q8\",\n title: \"CLIP ViT Base Patch32\",\n description: \"CLIP model for zero-shot image classification and embeddings\",\n tasks: [\"ImageClassificationTask\", 
\"ImageEmbeddingTask\"],\n provider: HF_TRANSFORMERS_ONNX,\n provider_config: {\n pipeline: \"zero-shot-image-classification\",\n modelPath: \"Xenova/clip-vit-base-patch32\",\n dType: \"q8\",\n },\n metadata: {},\n },\n {\n model_id: \"onnx:Xenova/detr-resnet-50:q8\",\n title: \"DETR ResNet-50\",\n description: \"Object detection model\",\n tasks: [\"ObjectDetectionTask\"],\n provider: HF_TRANSFORMERS_ONNX,\n provider_config: {\n pipeline: \"object-detection\",\n modelPath: \"Xenova/detr-resnet-50\",\n dType: \"q8\",\n },\n metadata: {},\n },\n {\n model_id: \"onnx:Xenova/owlvit-base-patch32:q8\",\n title: \"OWL-ViT Base Patch32\",\n description: \"Zero-shot object detection model\",\n tasks: [\"ObjectDetectionTask\"],\n provider: HF_TRANSFORMERS_ONNX,\n provider_config: {\n pipeline: \"zero-shot-object-detection\",\n modelPath: \"Xenova/owlvit-base-patch32\",\n dType: \"q8\",\n },\n metadata: {},\n },\n {\n model_id: \"onnx:Xenova/segformer-b0-finetuned-ade-512-512:q8\",\n title: \"Segformer B0 ADE\",\n description: \"Image segmentation model trained on ADE20K dataset\",\n tasks: [\"ImageSegmentationTask\"],\n provider: HF_TRANSFORMERS_ONNX,\n provider_config: {\n pipeline: \"image-segmentation\",\n modelPath: \"Xenova/segformer-b0-finetuned-ade-512-512\",\n dType: \"q8\",\n },\n metadata: {},\n },\n {\n model_id: \"onnx:Xenova/vit-gpt2-image-captioning:q8\",\n title: \"ViT GPT2 Image Captioning\",\n description: \"Image to text captioning model\",\n tasks: [\"ImageToTextTask\"],\n provider: HF_TRANSFORMERS_ONNX,\n provider_config: {\n pipeline: \"image-to-text\",\n modelPath: \"Xenova/vit-gpt2-image-captioning\",\n dType: \"q8\",\n },\n metadata: {},\n },\n {\n model_id: \"onnx:Xenova/modnet:q8\",\n title: \"MODNet Background Removal\",\n description: \"Background removal model\",\n tasks: [\"BackgroundRemovalTask\"],\n provider: HF_TRANSFORMERS_ONNX,\n provider_config: {\n pipeline: \"background-removal\",\n modelPath: \"Xenova/modnet\",\n dType: \"q8\",\n },\n metadata: {},\n },\n {\n model_id: \"onnx:Xenova/mobilebert-uncased-mnli:q8\",\n title: \"MobileBERT MNLI\",\n description: \"Zero-shot text classification model\",\n tasks: [\"TextClassificationTask\"],\n provider: HF_TRANSFORMERS_ONNX,\n provider_config: {\n pipeline: \"zero-shot-classification\",\n modelPath: \"Xenova/mobilebert-uncased-mnli\",\n dType: \"q8\",\n },\n metadata: {},\n },\n ];\n\n for (const model of onnxModels) {\n await getGlobalModelRepository().addModel(model);\n }\n}\n",
  10   10 |
"/**\n * @license\n * Copyright 2025 Steven Roussey <sroussey@gmail.com>\n * SPDX-License-Identifier: Apache-2.0\n */\n\nimport { IndexedDbTabularRepository } from \"@workglow/storage\";\nimport {\n TaskGraphPrimaryKeyNames,\n TaskGraphSchema,\n TaskGraphTabularRepository,\n} from \"@workglow/task-graph\";\nimport { createServiceToken } from \"@workglow/util\";\n\nexport const IDB_TASK_GRAPH_REPOSITORY = createServiceToken<TaskGraphTabularRepository>(\n \"taskgraph.taskGraphRepository.indexedDb\"\n);\n\n/**\n * IndexedDB implementation of a task graph repository.\n * Provides storage and retrieval for task graphs using IndexedDB.\n */\nexport class IndexedDbTaskGraphRepository extends TaskGraphTabularRepository {\n constructor(table: string = \"task_graphs\") {\n super({\n tabularRepository: new IndexedDbTabularRepository(\n table,\n TaskGraphSchema,\n TaskGraphPrimaryKeyNames\n ),\n });\n }\n}\n",
  11   11 |
"/**\n * @license\n * Copyright 2025 Steven Roussey <sroussey@gmail.com>\n * SPDX-License-Identifier: Apache-2.0\n */\n\nimport { IndexedDbTabularRepository } from \"@workglow/storage\";\nimport {\n TaskOutputPrimaryKeyNames,\n TaskOutputSchema,\n TaskOutputTabularRepository,\n} from \"@workglow/task-graph\";\nimport { createServiceToken } from \"@workglow/util\";\n\nexport const IDB_TASK_OUTPUT_REPOSITORY = createServiceToken<IndexedDbTaskOutputRepository>(\n \"taskgraph.taskOutputRepository.indexedDb\"\n);\n\n/**\n * IndexedDB implementation of a task output repository.\n * Provides storage and retrieval for task outputs using IndexedDB.\n */\nexport class IndexedDbTaskOutputRepository extends TaskOutputTabularRepository {\n constructor(table: string = \"task_outputs\") {\n super({\n tabularRepository: new IndexedDbTabularRepository(\n table,\n TaskOutputSchema,\n TaskOutputPrimaryKeyNames,\n [\"createdAt\"]\n ),\n });\n }\n}\n"
  12   12 |
],
  13      | -
"mappings": ";AAMA;AACA;AAAA;AAAA;AAAA;AAAA;AAKA;AAEO,IAAM,+BAA+B,mBAC1C,wCACF;AAAA;AAMO,MAAM,oCAAoC,2BAA2B;AAAA,EAC1E,WAAW,GAAG;AAAA,IACZ,MAAM;AAAA,MACJ,mBAAmB,IAAI,0BAA0B,iBAAiB,wBAAwB;AAAA,IAC5F,CAAC;AAAA;AAEL;;ACtBA,sCAAS;AACT;AAAA;AAAA;AAAA;AAAA;AAKA,+BAAS;AAEF,IAAM,gCAAgC,oBAC3C,yCACF;AAAA;AAMO,MAAM,qCAAqC,4BAA4B;AAAA,EAC5E,WAAW,GAAG;AAAA,IACZ,MAAM;AAAA,MACJ,mBAAmB,IAAI,2BACrB,kBACA,2BACA,CAAC,WAAW,CACd;AAAA,IACF,CAAC;AAAA;AAEL;;AC1BA;AACA,iCAAS;AACT;AACA;AACA;;;ACVA;AACA;AAEA,eAAsB,gCAAgC,GAAkB;AAAA,EACtE,MAAM,SAA4B;AAAA,IAEhC;AAAA,MACE,UAAU;AAAA,MACV,OAAO;AAAA,MACP,aAAa;AAAA,MACb,OAAO,CAAC,mBAAmB;AAAA,MAC3B,UAAU;AAAA,MACV,
  14      | -
"debugId": "
       13 | +
"mappings": ";AAMA;AACA;AAAA;AAAA;AAAA;AAAA;AAKA;AAEO,IAAM,+BAA+B,mBAC1C,wCACF;AAAA;AAMO,MAAM,oCAAoC,2BAA2B;AAAA,EAC1E,WAAW,GAAG;AAAA,IACZ,MAAM;AAAA,MACJ,mBAAmB,IAAI,0BAA0B,iBAAiB,wBAAwB;AAAA,IAC5F,CAAC;AAAA;AAEL;;ACtBA,sCAAS;AACT;AAAA;AAAA;AAAA;AAAA;AAKA,+BAAS;AAEF,IAAM,gCAAgC,oBAC3C,yCACF;AAAA;AAMO,MAAM,qCAAqC,4BAA4B;AAAA,EAC5E,WAAW,GAAG;AAAA,IACZ,MAAM;AAAA,MACJ,mBAAmB,IAAI,2BACrB,kBACA,2BACA,CAAC,WAAW,CACd;AAAA,IACF,CAAC;AAAA;AAEL;;AC1BA;AACA,iCAAS;AACT;AACA;AACA;;;ACVA;AACA;AAEA,eAAsB,gCAAgC,GAAkB;AAAA,EACtE,MAAM,SAA4B;AAAA,IAEhC;AAAA,MACE,UAAU;AAAA,MACV,OAAO;AAAA,MACP,aAAa;AAAA,MACb,OAAO,CAAC,mBAAmB;AAAA,MAC3B,UAAU;AAAA,MACV,iBAAiB;AAAA,QACf,YAAY;AAAA,QACZ,UAAU;AAAA,QACV,WACE;AAAA,MACJ;AAAA,MACA,UAAU,CAAC;AAAA,IACb;AAAA,IACA;AAAA,MACE,UAAU;AAAA,MACV,OAAO;AAAA,MACP,aAAa;AAAA,MACb,OAAO,CAAC,wBAAwB;AAAA,MAChC,UAAU;AAAA,MACV,iBAAiB;AAAA,QACf,YAAY;AAAA,QACZ,UAAU;AAAA,QACV,WACE;AAAA,MACJ;AAAA,MACA,UAAU,CAAC;AAAA,IACb;AAAA,IACA;AAAA,MACE,UAAU;AAAA,MACV,OAAO;AAAA,MACP,aAAa;AAAA,MACb,OAAO,CAAC,2BAA2B;AAAA,MACnC,UAAU;AAAA,MACV,iBAAiB;AAAA,QACf,YAAY;AAAA,QACZ,UAAU;AAAA,QACV,WACE;AAAA,MACJ;AAAA,MACA,UAAU,CAAC;AAAA,IACb;AAAA,IAEA;AAAA,MACE,UAAU;AAAA,MACV,OAAO;AAAA,MACP,aAAa;AAAA,MACb,OAAO,CAAC,yBAAyB;AAAA,MACjC,UAAU;AAAA,MACV,iBAAiB;AAAA,QACf,YAAY;AAAA,QACZ,UAAU;AAAA,QACV,WACE;AAAA,MACJ;AAAA,MACA,UAAU,CAAC;AAAA,IACb;AAAA,IACA;AAAA,MACE,UAAU;AAAA,MACV,OAAO;AAAA,MACP,aAAa;AAAA,MACb,OAAO,CAAC,oBAAoB;AAAA,MAC5B,UAAU;AAAA,MACV,iBAAiB;AAAA,QACf,YAAY;AAAA,QACZ,UAAU;AAAA,QACV,WACE;AAAA,MACJ;AAAA,MACA,UAAU,CAAC;AAAA,IACb;AAAA,IACA;AAAA,MACE,UAAU;AAAA,MACV,OAAO;AAAA,MACP,aAAa;AAAA,MACb,OAAO,CAAC,qBAAqB;AAAA,MAC7B,UAAU;AAAA,MACV,iBAAiB;AAAA,QACf,YAAY;AAAA,QACZ,UAAU;AAAA,QACV,WACE;AAAA,MACJ;AAAA,MACA,UAAU,CAAC;AAAA,IACb;AAAA,IACA;AAAA,MACE,UAAU;AAAA,MACV,OAAO;AAAA,MACP,aAAa;AAAA,MACb,OAAO,CAAC,uBAAuB;AAAA,MAC/B,UAAU;AAAA,MACV,iBAAiB;AAAA,QACf,YAAY;AAAA,QACZ,UAAU;AAAA,QACV,WACE;AAAA,MACJ;AAAA,MACA,UAAU,CAAC;AAAA,IACb;AAAA,IAEA;AAAA,MACE,UAAU;AAAA,MACV,OAAO;AAAA,MACP,aAAa;AAAA,MACb,OAAO,CAAC,yBAAyB;AAAA,MACjC,UAAU;AAAA,MACV,iBAAiB;AAAA,QACf,YAAY;AAAA,QACZ,UAAU;AAAA,QACV,WACE;AAAA,MACJ;AAAA,MACA,UAAU,CAAC;AAAA,IACb;AAAA,IAEA;AAAA,MACE,UAAU;AAAA,MACV,OAAO;AAAA,MACP,aAAa;AAAA,MACb,OAAO,CAAC,uBAAuB;AAAA,MAC/B,UAAU;AAAA,MACV,iBAAiB;AAAA,QACf,YAAY;AAAA,QACZ,UAAU;AAAA,QACV,WACE;AAAA,MACJ;AAAA,MACA,UAAU,CAAC;AAAA,IACb;AAAA,IACA;AAAA,MACE,UAAU;AAAA,MACV,OAAO;AAAA,MACP,aAAa;AAAA,MACb,OAAO,CAAC,oBAAoB;AAAA,MAC5B,UAAU;AAAA,MACV,iBAAiB;AAAA,QACf,YAAY;AAAA,QACZ,UAAU;AAAA,QACV,WACE;AAAA,MACJ;AAAA,MACA,UAAU,CAAC;AAAA,IACb;AAAA,IACA;AAAA,MACE,UAAU;AAAA,MACV,OAAO;AAAA,MACP,aAAa;AAAA,MACb,OAAO,CAAC,kBAAkB;AAAA,MAC1B,UAAU;AAAA,MACV,iBAAiB;AAAA,QACf,YAAY;AAAA,QACZ,UAAU;AAAA,QACV,WACE;AAAA,MACJ;AAAA,MACA,UAAU,CAAC;AAAA,IACb;AAAA,IACA;AAAA,MACE,UAAU;AAAA,MACV,OAAO;AAAA,MACP,aAAa;AAAA,MACb,OAAO,CAAC,oBAAoB;AAAA,MAC5B,UAAU;AAAA,MACV,iBAAiB;AAAA,QACf,YAAY;AAAA,QACZ,UAAU;AAAA,QACV,WACE;AAAA,MACJ;AAAA,MACA,UAAU,CAAC;AAAA,IACb;AAAA,IACA;AAAA,MACE,UAAU;AAAA,MACV,OAAO;AAAA,MACP,aAAa;AAAA,MACb,OAAO,CAAC,oBAAoB;AAAA,MAC5B,UAAU;AAAA,MACV,iBAAiB;AAAA,QACf,YAAY;AAAA,QACZ,UAAU;AAAA,QACV,WACE;AAAA,MACJ;AAAA,MACA,UAAU,CAAC;AAAA,IACb;AAAA,EACF;AAAA,EAEA,WAAW,SAAS,QAAQ;AAAA,IAC1B,MAAM,yBAAyB,EAAE,SAAS,KAAK;AAAA,EACjD;AAAA;;ACnMF,qCAAS;AACT;AAEA,eAAsB,8BAA8B,GAAkB;AAAA,EACpE,MAAM,aAA8C;AAAA,IAClD;AAAA,MACE,UAAU;AAAA,MACV,OAAO;AAAA,MACP,aAAa;AAAA,MACb,OAAO,CAAC,mBAAmB;AAAA,MAC3B,UAAU;AAAA,MACV,iBAAiB;AAAA,QACf,UAAU;AAAA,QACV,WAAW;AAAA,QACX,QAAQ;AAAA,QACR,kBAAkB;AAAA,MACpB;AAAA,MACA,UAAU,CAAC;AAAA,IACb;AAAA,IACA;AAAA,MACE,UAAU;AAAA,MACV,OAAO;AAAA,MACP,aAA
a;AAAA,MACb,OAAO,CAAC,mBAAmB;AAAA,MAC3B,UAAU;AAAA,MACV,iBAAiB;AAAA,QACf,UAAU;AAAA,QACV,WAAW;AAAA,QACX,QAAQ;AAAA,QACR,kBAAkB;AAAA,MACpB;AAAA,MACA,UAAU,CAAC;AAAA,IACb;AAAA,IACA;AAAA,MACE,UAAU;AAAA,MACV,OAAO;AAAA,MACP,aAAa;AAAA,MACb,OAAO,CAAC,mBAAmB;AAAA,MAC3B,UAAU;AAAA,MACV,iBAAiB;AAAA,QACf,UAAU;AAAA,QACV,WAAW;AAAA,QACX,QAAQ;AAAA,QACR,kBAAkB;AAAA,MACpB;AAAA,MACA,UAAU,CAAC;AAAA,IACb;AAAA,IACA;AAAA,MACE,UAAU;AAAA,MACV,OAAO;AAAA,MACP,aAAa;AAAA,MACb,OAAO,CAAC,mBAAmB;AAAA,MAC3B,UAAU;AAAA,MACV,iBAAiB;AAAA,QACf,UAAU;AAAA,QACV,WAAW;AAAA,QACX,QAAQ;AAAA,QACR,kBAAkB;AAAA,MACpB;AAAA,MACA,UAAU,CAAC;AAAA,IACb;AAAA,IACA;AAAA,MACE,UAAU;AAAA,MACV,OAAO;AAAA,MACP,aAAa;AAAA,MACb,OAAO,CAAC,mBAAmB;AAAA,MAC3B,UAAU;AAAA,MACV,iBAAiB;AAAA,QACf,UAAU;AAAA,QACV,WAAW;AAAA,QACX,QAAQ;AAAA,QACR,kBAAkB;AAAA,MACpB;AAAA,MACA,UAAU,CAAC;AAAA,IACb;AAAA,IACA;AAAA,MACE,UAAU;AAAA,MACV,OAAO;AAAA,MACP,aAAa;AAAA,MACb,OAAO,CAAC,2BAA2B;AAAA,MACnC,UAAU;AAAA,MACV,iBAAiB;AAAA,QACf,UAAU;AAAA,QACV,WAAW;AAAA,MACb;AAAA,MACA,UAAU,CAAC;AAAA,IACb;AAAA,IACA;AAAA,MACE,UAAU;AAAA,MACV,OAAO;AAAA,MACP,aAAa;AAAA,MACb,OAAO,CAAC,oBAAoB;AAAA,MAC5B,UAAU;AAAA,MACV,iBAAiB;AAAA,QACf,UAAU;AAAA,QACV,WAAW;AAAA,QACX,OAAO;AAAA,MACT;AAAA,MACA,UAAU,CAAC;AAAA,IACb;AAAA,IACA;AAAA,MACE,UAAU;AAAA,MACV,OAAO;AAAA,MACP,aAAa;AAAA,MACb,OAAO,CAAC,oBAAoB;AAAA,MAC5B,UAAU;AAAA,MACV,iBAAiB;AAAA,QACf,UAAU;AAAA,QACV,WAAW;AAAA,QACX,QAAQ;AAAA,QACR,OAAO;AAAA,QACP,uBAAuB;AAAA,MACzB;AAAA,MACA,UAAU,CAAC;AAAA,IACb;AAAA,IACA;AAAA,MACE,UAAU;AAAA,MACV,OAAO;AAAA,MACP,aAAa;AAAA,MACb,OAAO,CAAC,oBAAoB;AAAA,MAC5B,UAAU;AAAA,MACV,iBAAiB;AAAA,QACf,UAAU;AAAA,QACV,WAAW;AAAA,QACX,OAAO;AAAA,MACT;AAAA,MACA,UAAU,CAAC;AAAA,IACb;AAAA,IACA;AAAA,MACE,UAAU;AAAA,MACV,OAAO;AAAA,MACP,aAAa;AAAA,MACb,OAAO,CAAC,sBAAsB,kBAAkB;AAAA,MAChD,UAAU;AAAA,MACV,iBAAiB;AAAA,QACf,UAAU;AAAA,QACV,WAAW;AAAA,MACb;AAAA,MACA,UAAU,CAAC;AAAA,IACb;AAAA,IACA;AAAA,MACE,UAAU;AAAA,MACV,OAAO;AAAA,MACP,aAAa;AAAA,MACb,OAAO,CAAC,sBAAsB,kBAAkB;AAAA,MAChD,UAAU;AAAA,MACV,iBAAiB;AAAA,QACf,UAAU;AAAA,QACV,WAAW;AAAA,QACX,OAAO;AAAA,MACT;AAAA,MACA,UAAU,CAAC;AAAA,IACb;AAAA,IACA;AAAA,MACE,UAAU;AAAA,MACV,OAAO;AAAA,MACP,aAAa;AAAA,MACb,OAAO,CAAC,iBAAiB;AAAA,MACzB,UAAU;AAAA,MACV,iBAAiB;AAAA,QACf,UAAU;AAAA,QACV,WAAW;AAAA,QACX,OAAO;AAAA,MACT;AAAA,MACA,UAAU,CAAC;AAAA,IACb;AAAA,IACA;AAAA,MACE,UAAU;AAAA,MACV,OAAO;AAAA,MACP,aAAa;AAAA,MACb,OAAO,CAAC,qBAAqB;AAAA,MAC7B,UAAU;AAAA,MACV,iBAAiB;AAAA,QACf,UAAU;AAAA,QACV,WAAW;AAAA,QACX,eAAe;AAAA,QACf,OAAO;AAAA,MACT;AAAA,MACA,UAAU,CAAC;AAAA,IACb;AAAA,IACA;AAAA,MACE,UAAU;AAAA,MACV,OAAO;AAAA,MACP,aAAa;AAAA,MACb,OAAO,CAAC,qBAAqB;AAAA,MAC7B,UAAU;AAAA,MACV,iBAAiB;AAAA,QACf,UAAU;AAAA,QACV,WAAW;AAAA,QACX,eAAe;AAAA,QACf,OAAO;AAAA,MACT;AAAA,MACA,UAAU,CAAC;AAAA,IACb;AAAA,IACA;AAAA,MACE,UAAU;AAAA,MACV,OAAO;AAAA,MACP,aAAa;AAAA,MACb,OAAO,CAAC,qBAAqB;AAAA,MAC7B,UAAU;AAAA,MACV,iBAAiB;AAAA,QACf,UAAU;AAAA,QACV,WAAW;AAAA,QACX,eAAe;AAAA,QACf,OAAO;AAAA,MACT;AAAA,MACA,UAAU,CAAC;AAAA,IACb;AAAA,IACA;AAAA,MACE,UAAU;AAAA,MACV,OAAO;AAAA,MACP,aAAa;AAAA,MACb,OAAO,CAAC,qBAAqB;AAAA,MAC7B,UAAU;AAAA,MACV,iBAAiB;AAAA,QACf,UAAU;AAAA,QACV,WAAW;AAAA,QACX,eAAe;AAAA,QACf,OAAO;AAAA,MACT;AAAA,MACA,UAAU,CAAC;AAAA,IACb;AAAA,IAEA;AAAA,MACE,UAAU;AAAA,MACV,OAAO;AAAA,MACP,aAAa;AAAA,MACb,OAAO,CAAC,yBAAyB;AAAA,MACjC,UAAU;AAAA,MACV,iBAAiB;AAAA,QACf,UAAU;AAAA,QACV,WAAW;AAAA,QACX,OAAO;AAAA,MACT;AAAA,MACA,UAAU,CAAC;AAAA,IACb;AAAA,IACA;AAAA,MACE,UAAU;AAAA,MACV,OAAO;AAAA,MACP,aAAa;AAAA,MACb,OAAO,CAAC,2BAA2B,oBAAoB;AAAA,MACvD,UAAU;AAAA,MACV,iBAAiB;AAAA,QACf,UAAU;AAAA,QACV,WAAW;AAAA,QACX,OAAO;AAAA,MACT;AAAA,MACA,UAAU,CAAC;AAAA,IACb;AAAA,IACA;AAAA,MACE,UAAU;AAAA,MACV,OAAO;AAAA,MACP,aA
Aa;AAAA,MACb,OAAO,CAAC,qBAAqB;AAAA,MAC7B,UAAU;AAAA,MACV,iBAAiB;AAAA,QACf,UAAU;AAAA,QACV,WAAW;AAAA,QACX,OAAO;AAAA,MACT;AAAA,MACA,UAAU,CAAC;AAAA,IACb;AAAA,IACA;AAAA,MACE,UAAU;AAAA,MACV,OAAO;AAAA,MACP,aAAa;AAAA,MACb,OAAO,CAAC,qBAAqB;AAAA,MAC7B,UAAU;AAAA,MACV,iBAAiB;AAAA,QACf,UAAU;AAAA,QACV,WAAW;AAAA,QACX,OAAO;AAAA,MACT;AAAA,MACA,UAAU,CAAC;AAAA,IACb;AAAA,IACA;AAAA,MACE,UAAU;AAAA,MACV,OAAO;AAAA,MACP,aAAa;AAAA,MACb,OAAO,CAAC,uBAAuB;AAAA,MAC/B,UAAU;AAAA,MACV,iBAAiB;AAAA,QACf,UAAU;AAAA,QACV,WAAW;AAAA,QACX,OAAO;AAAA,MACT;AAAA,MACA,UAAU,CAAC;AAAA,IACb;AAAA,IACA;AAAA,MACE,UAAU;AAAA,MACV,OAAO;AAAA,MACP,aAAa;AAAA,MACb,OAAO,CAAC,iBAAiB;AAAA,MACzB,UAAU;AAAA,MACV,iBAAiB;AAAA,QACf,UAAU;AAAA,QACV,WAAW;AAAA,QACX,OAAO;AAAA,MACT;AAAA,MACA,UAAU,CAAC;AAAA,IACb;AAAA,IACA;AAAA,MACE,UAAU;AAAA,MACV,OAAO;AAAA,MACP,aAAa;AAAA,MACb,OAAO,CAAC,uBAAuB;AAAA,MAC/B,UAAU;AAAA,MACV,iBAAiB;AAAA,QACf,UAAU;AAAA,QACV,WAAW;AAAA,QACX,OAAO;AAAA,MACT;AAAA,MACA,UAAU,CAAC;AAAA,IACb;AAAA,IACA;AAAA,MACE,UAAU;AAAA,MACV,OAAO;AAAA,MACP,aAAa;AAAA,MACb,OAAO,CAAC,wBAAwB;AAAA,MAChC,UAAU;AAAA,MACV,iBAAiB;AAAA,QACf,UAAU;AAAA,QACV,WAAW;AAAA,QACX,OAAO;AAAA,MACT;AAAA,MACA,UAAU,CAAC;AAAA,IACb;AAAA,EACF;AAAA,EAEA,WAAW,SAAS,YAAY;AAAA,IAC9B,MAAM,0BAAyB,EAAE,SAAS,KAAK;AAAA,EACjD;AAAA;;;AF7TF,eAAsB,0BAA0B,GAAkB;AAAA,EAChE,MAAM,YAAY;AAAA,EAClB,MAAM,UAAU,IAAI,qBAAwD,SAAS;AAAA,EACrF,MAAM,QAAQ,cAAc;AAAA,EAE5B,MAAM,SAAS,IAAI,eACjB,OACA;AAAA,IACE;AAAA,IACA;AAAA,IACA,SAAS,IAAI,mBAAmB,GAAG,EAAE;AAAA,EACvC,CACF;AAAA,EAEA,MAAM,SAAS,IAAI,eAAkD;AAAA,IACnE;AAAA,IACA;AAAA,EACF,CAAC;AAAA,EAED,OAAO,OAAO,MAAM;AAAA,EAEpB,qBAAqB,EAAE,cAAc,EAAE,QAAQ,QAAQ,QAAQ,CAAC;AAAA,EAChE,MAAM,OAAO,MAAM;AAAA;AAGrB,eAAsB,2BAA2B,GAAkB;AAAA,EACjE,MAAM,YAAY;AAAA,EAClB,MAAM,UAAU,IAAI,qBAAwD,SAAS;AAAA,EACrF,MAAM,QAAQ,cAAc;AAAA,EAE5B,MAAM,SAAS,IAAI,eACjB,OACA;AAAA,IACE;AAAA,IACA;AAAA,IACA,SAAS,IAAI,mBAAmB,GAAG,EAAE;AAAA,EACvC,CACF;AAAA,EAEA,MAAM,SAAS,IAAI,eAAkD;AAAA,IACnE;AAAA,IACA;AAAA,EACF,CAAC;AAAA,EAED,OAAO,OAAO,MAAM;AAAA,EAEpB,qBAAqB,EAAE,cAAc,EAAE,QAAQ,QAAQ,QAAQ,CAAC;AAAA,EAChE,MAAM,OAAO,MAAM;AAAA;;AGvDrB;AACA;AAAA,8BACE;AAAA,qBACA;AAAA,gCACA;AAAA;AAEF,+BAAS;AAEF,IAAM,4BAA4B,oBACvC,yCACF;AAAA;AAMO,MAAM,qCAAqC,4BAA2B;AAAA,EAC3E,WAAW,CAAC,QAAgB,eAAe;AAAA,IACzC,MAAM;AAAA,MACJ,mBAAmB,IAAI,2BACrB,OACA,kBACA,yBACF;AAAA,IACF,CAAC;AAAA;AAEL;;AC1BA,uCAAS;AACT;AAAA,+BACE;AAAA,sBACA;AAAA,iCACA;AAAA;AAEF,+BAAS;AAEF,IAAM,6BAA6B,oBACxC,0CACF;AAAA;AAMO,MAAM,sCAAsC,6BAA4B;AAAA,EAC7E,WAAW,CAAC,QAAgB,gBAAgB;AAAA,IAC1C,MAAM;AAAA,MACJ,mBAAmB,IAAI,4BACrB,OACA,mBACA,4BACA,CAAC,WAAW,CACd;AAAA,IACF,CAAC;AAAA;AAEL;",
       14 | +
"debugId": "1189E79CE0A34BA564756E2164756E21",
  15   15 |
"names": []
  16   16 |
}