@workglow/test 0.0.92 → 0.0.93

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/node.js CHANGED
@@ -32,566 +32,6 @@ class InMemoryTaskOutputRepository extends TaskOutputTabularRepository {
  });
  }
  }
- // src/samples/index.ts
- import {
-   HuggingFaceTransformersProvider,
-   TensorFlowMediaPipeProvider
- } from "@workglow/ai-provider";
- import { HFT_TASKS } from "@workglow/ai-provider/hf-transformers";
- import { TFMP_TASKS } from "@workglow/ai-provider/tf-mediapipe";
-
- // src/samples/MediaPipeModelSamples.ts
- import { getGlobalModelRepository } from "@workglow/ai";
- import { TENSORFLOW_MEDIAPIPE } from "@workglow/ai-provider";
- async function registerMediaPipeTfJsLocalModels() {
-   const models = [
-     {
-       model_id: "media-pipe:Universal Sentence Encoder",
-       title: "Universal Sentence Encoder",
-       description: "Universal Sentence Encoder",
-       tasks: ["TextEmbeddingTask"],
-       provider: TENSORFLOW_MEDIAPIPE,
-       provider_config: {
-         task_engine: "text",
-         pipeline: "text-embedder",
-         model_path: "https://storage.googleapis.com/mediapipe-tasks/text_embedder/universal_sentence_encoder.tflite"
-       },
-       metadata: {}
-     },
-     {
-       model_id: "media-pipe:BERT Text Classifier",
-       title: "BERT Text Classifier",
-       description: "BERT-based text classification model",
-       tasks: ["TextClassificationTask"],
-       provider: TENSORFLOW_MEDIAPIPE,
-       provider_config: {
-         task_engine: "text",
-         pipeline: "text-classifier",
-         model_path: "https://storage.googleapis.com/mediapipe-models/text_classifier/bert_classifier/float32/1/bert_classifier.tflite"
-       },
-       metadata: {}
-     },
-     {
-       model_id: "media-pipe:Language Detector",
-       title: "Language Detector",
-       description: "Language detection model",
-       tasks: ["TextLanguageDetectionTask"],
-       provider: TENSORFLOW_MEDIAPIPE,
-       provider_config: {
-         task_engine: "text",
-         pipeline: "text-language-detector",
-         model_path: "https://storage.googleapis.com/mediapipe-models/language_detector/language_detector/float32/1/language_detector.tflite"
-       },
-       metadata: {}
-     },
-     {
-       model_id: "media-pipe:EfficientNet Lite0 Image Classifier",
-       title: "EfficientNet Lite0",
-       description: "Lightweight image classification model",
-       tasks: ["ImageClassificationTask"],
-       provider: TENSORFLOW_MEDIAPIPE,
-       provider_config: {
-         task_engine: "vision",
-         pipeline: "vision-image-classifier",
-         model_path: "https://storage.googleapis.com/mediapipe-models/image_classifier/efficientnet_lite0/float32/1/efficientnet_lite0.tflite"
-       },
-       metadata: {}
-     },
-     {
-       model_id: "media-pipe:MobileNet V3 Image Embedder",
-       title: "MobileNet V3 Small",
-       description: "Lightweight image embedding model",
-       tasks: ["ImageEmbeddingTask"],
-       provider: TENSORFLOW_MEDIAPIPE,
-       provider_config: {
-         task_engine: "vision",
-         pipeline: "vision-image-embedder",
-         model_path: "https://storage.googleapis.com/mediapipe-models/image_embedder/mobilenet_v3_small/float32/1/mobilenet_v3_small.tflite"
-       },
-       metadata: {}
-     },
-     {
-       model_id: "media-pipe:EfficientDet Lite0 Object Detector",
-       title: "EfficientDet Lite0",
-       description: "Lightweight object detection model",
-       tasks: ["ObjectDetectionTask"],
-       provider: TENSORFLOW_MEDIAPIPE,
-       provider_config: {
-         task_engine: "vision",
-         pipeline: "vision-object-detector",
-         model_path: "https://storage.googleapis.com/mediapipe-models/object_detector/efficientdet_lite0/float32/1/efficientdet_lite0.tflite"
-       },
-       metadata: {}
-     },
-     {
-       model_id: "media-pipe:DeepLab V3 Image Segmenter",
-       title: "DeepLab V3",
-       description: "Image segmentation model",
-       tasks: ["ImageSegmentationTask"],
-       provider: TENSORFLOW_MEDIAPIPE,
-       provider_config: {
-         task_engine: "vision",
-         pipeline: "vision-image-segmenter",
-         model_path: "https://storage.googleapis.com/mediapipe-models/image_segmenter/deeplab_v3/float32/1/deeplab_v3.tflite"
-       },
-       metadata: {}
-     },
-     {
-       model_id: "media-pipe:YAMNet Audio Classifier",
-       title: "YAMNet",
-       description: "Audio event classification model",
-       tasks: ["AudioClassificationTask"],
-       provider: TENSORFLOW_MEDIAPIPE,
-       provider_config: {
-         task_engine: "audio",
-         pipeline: "audio-classifier",
-         model_path: "https://storage.googleapis.com/mediapipe-models/audio_classifier/yamnet/float32/1/yamnet.tflite"
-       },
-       metadata: {}
-     },
-     {
-       model_id: "media-pipe:Gesture Recognizer",
-       title: "Gesture Recognizer",
-       description: "Recognizes hand gestures (thumbs up, victory, etc.)",
-       tasks: ["GestureRecognizerTask"],
-       provider: TENSORFLOW_MEDIAPIPE,
-       provider_config: {
-         task_engine: "vision",
-         pipeline: "vision-gesture-recognizer",
-         model_path: "https://storage.googleapis.com/mediapipe-models/gesture_recognizer/gesture_recognizer/float16/1/gesture_recognizer.task"
-       },
-       metadata: {}
-     },
-     {
-       model_id: "media-pipe:Hand Landmarker",
-       title: "Hand Landmarker",
-       description: "Detects 21 hand landmarks",
-       tasks: ["HandLandmarkerTask"],
-       provider: TENSORFLOW_MEDIAPIPE,
-       provider_config: {
-         task_engine: "vision",
-         pipeline: "vision-hand-landmarker",
-         model_path: "https://storage.googleapis.com/mediapipe-models/hand_landmarker/hand_landmarker/float16/1/hand_landmarker.task"
-       },
-       metadata: {}
-     },
-     {
-       model_id: "media-pipe:Face Detector",
-       title: "Face Detector",
-       description: "Detects faces with bounding boxes and keypoints",
-       tasks: ["FaceDetectorTask"],
-       provider: TENSORFLOW_MEDIAPIPE,
-       provider_config: {
-         task_engine: "vision",
-         pipeline: "vision-face-detector",
-         model_path: "https://storage.googleapis.com/mediapipe-models/face_detector/blaze_face_short_range/float16/1/blaze_face_short_range.tflite"
-       },
-       metadata: {}
-     },
-     {
-       model_id: "media-pipe:Face Landmarker",
-       title: "Face Landmarker",
-       description: "Detects 478 facial landmarks with blendshapes",
-       tasks: ["FaceLandmarkerTask"],
-       provider: TENSORFLOW_MEDIAPIPE,
-       provider_config: {
-         task_engine: "vision",
-         pipeline: "vision-face-landmarker",
-         model_path: "https://storage.googleapis.com/mediapipe-models/face_landmarker/face_landmarker/float16/1/face_landmarker.task"
-       },
-       metadata: {}
-     },
-     {
-       model_id: "media-pipe:Pose Landmarker",
-       title: "Pose Landmarker",
-       description: "Detects 33 body pose landmarks",
-       tasks: ["PoseLandmarkerTask"],
-       provider: TENSORFLOW_MEDIAPIPE,
-       provider_config: {
-         task_engine: "vision",
-         pipeline: "vision-pose-landmarker",
-         model_path: "https://storage.googleapis.com/mediapipe-models/pose_landmarker/pose_landmarker_lite/float16/1/pose_landmarker_lite.task"
-       },
-       metadata: {}
-     }
-   ];
-   for (const model of models) {
-     await getGlobalModelRepository().addModel(model);
-   }
- }
- // src/samples/ONNXModelSamples.ts
- import { getGlobalModelRepository as getGlobalModelRepository2 } from "@workglow/ai";
- import { HF_TRANSFORMERS_ONNX } from "@workglow/ai-provider";
- async function registerHuggingfaceLocalModels() {
-   const onnxModels = [
-     {
-       model_id: "onnx:Xenova/all-MiniLM-L6-v2:q8",
-       title: "All MiniLM L6 V2 384D",
-       description: "Xenova/all-MiniLM-L6-v2",
-       tasks: ["TextEmbeddingTask"],
-       provider: HF_TRANSFORMERS_ONNX,
-       provider_config: {
-         pipeline: "feature-extraction",
-         model_path: "Xenova/all-MiniLM-L6-v2",
-         native_dimensions: 384
-       },
-       metadata: {}
-     },
-     {
-       model_id: "onnx:Xenova/bge-base-en-v1.5:q8",
-       title: "BGE Base English V1.5 768D",
-       description: "Xenova/bge-base-en-v1.5",
-       tasks: ["TextEmbeddingTask"],
-       provider: HF_TRANSFORMERS_ONNX,
-       provider_config: {
-         pipeline: "feature-extraction",
-         model_path: "Xenova/bge-base-en-v1.5",
-         native_dimensions: 768
-       },
-       metadata: {}
-     },
-     {
-       model_id: "onnx:Xenova/gte-small:q8",
-       title: "GTE Small 384D",
-       description: "Xenova/gte-small",
-       tasks: ["TextEmbeddingTask"],
-       provider: HF_TRANSFORMERS_ONNX,
-       provider_config: {
-         pipeline: "feature-extraction",
-         model_path: "Xenova/gte-small",
-         native_dimensions: 384
-       },
-       metadata: {}
-     },
-     {
-       model_id: "onnx:onnx-community/bert_uncased_L-2_H-128_A-2-ONNX:q8",
-       title: "BERT Uncased 128D",
-       description: "onnx-community/bert_uncased_L-2_H-128_A-2-ONNX",
-       tasks: ["TextEmbeddingTask"],
-       provider: HF_TRANSFORMERS_ONNX,
-       provider_config: {
-         pipeline: "feature-extraction",
-         model_path: "onnx-community/bert_uncased_L-2_H-128_A-2-ONNX",
-         native_dimensions: 128
-       },
-       metadata: {}
-     },
-     {
-       model_id: "onnx:Xenova/paraphrase-albert-base-v2:q8",
-       title: "Paraphrase ALBERT Base V2 768D",
-       description: "Xenova/paraphrase-albert-base-v2",
-       tasks: ["TextEmbeddingTask"],
-       provider: HF_TRANSFORMERS_ONNX,
-       provider_config: {
-         pipeline: "feature-extraction",
-         model_path: "Xenova/paraphrase-albert-base-v2",
-         native_dimensions: 768
-       },
-       metadata: {}
-     },
-     {
-       model_id: "onnx:Qwen3-Embedding-0.6B:auto",
-       title: "Qwen3 Embedding 0.6B",
-       description: "onnx-community/Qwen3-Embedding-0.6B-ONNX",
-       tasks: ["TextEmbeddingTask"],
-       provider: HF_TRANSFORMERS_ONNX,
-       provider_config: {
-         pipeline: "feature-extraction",
-         model_path: "onnx-community/Qwen3-Embedding-0.6B-ONNX",
-         native_dimensions: 1024,
-         mrl: true
-       },
-       metadata: {}
-     },
-     {
-       model_id: "onnx:onnx-community/NeuroBERT-NER-ONNX:q8",
-       title: "NeuroBERT NER",
-       description: "onnx-community/NeuroBERT-NER-ONNX",
-       tasks: ["TextNamedEntityRecognitionTask"],
-       provider: HF_TRANSFORMERS_ONNX,
-       provider_config: {
-         pipeline: "token-classification",
-         model_path: "onnx-community/NeuroBERT-NER-ONNX"
-       },
-       metadata: {}
-     },
-     {
-       model_id: "onnx:Xenova/distilbert-base-uncased-distilled-squad:q8",
-       title: "distilbert-base-uncased-distilled-squad",
-       description: "Xenova/distilbert-base-uncased-distilled-squad quantized to 8bit",
-       tasks: ["TextQuestionAnswerTask"],
-       provider: HF_TRANSFORMERS_ONNX,
-       provider_config: {
-         pipeline: "question-answering",
-         model_path: "Xenova/distilbert-base-uncased-distilled-squad"
-       },
-       metadata: {}
-     },
-     {
-       model_id: "onnx:onnx-community/ModernBERT-finetuned-squad-ONNX",
-       title: "ModernBERT-finetuned-squad-ONNX",
-       description: "onnx-community/ModernBERT-finetuned-squad-ONNX quantized to int8",
-       tasks: ["TextQuestionAnswerTask"],
-       provider: HF_TRANSFORMERS_ONNX,
-       provider_config: {
-         pipeline: "question-answering",
-         model_path: "onnx-community/ModernBERT-finetuned-squad-ONNX",
-         dtype: "int8"
-       },
-       metadata: {}
-     },
-     {
-       model_id: "onnx:Xenova/gpt2:q8",
-       title: "gpt2",
-       description: "Xenova/gpt2 quantized to 8bit",
-       tasks: ["TextGenerationTask"],
-       provider: HF_TRANSFORMERS_ONNX,
-       provider_config: {
-         pipeline: "text-generation",
-         model_path: "Xenova/gpt2",
-         dtype: "q8"
-       },
-       metadata: {}
-     },
-     {
-       model_id: "onnx:Xenova/Phi-3-mini-4k-instruct:q4f16",
-       title: "Phi-3-mini-4k-instruct:q4f16",
-       description: "Xenova/Phi-3-mini-4k-instruct quantized to q4f16",
-       tasks: ["TextGenerationTask"],
-       provider: HF_TRANSFORMERS_ONNX,
-       provider_config: {
-         pipeline: "text-generation",
-         model_path: "Xenova/Phi-3-mini-4k-instruct",
-         dtype: "q4f16",
-         use_external_data_format: true
-       },
-       metadata: {}
-     },
-     {
-       model_id: "onnx:Xenova/distilgpt2:q8",
-       title: "distilgpt2",
-       description: "Xenova/distilgpt2 quantized to 8bit",
-       tasks: ["TextGenerationTask"],
-       provider: HF_TRANSFORMERS_ONNX,
-       provider_config: {
-         pipeline: "text-generation",
-         model_path: "Xenova/distilgpt2",
-         dtype: "q8"
-       },
-       metadata: {}
-     },
-     {
-       model_id: "onnx:Xenova/LaMini-Flan-T5-783M:q8",
-       title: "LaMini-Flan-T5-783M",
-       description: "Xenova/LaMini-Flan-T5-783M quantized to 8bit",
-       tasks: ["TextGenerationTask", "TextRewriterTask"],
-       provider: HF_TRANSFORMERS_ONNX,
-       provider_config: {
-         pipeline: "text2text-generation",
-         model_path: "Xenova/LaMini-Flan-T5-783M",
-         dtype: "q8"
-       },
-       metadata: {}
-     },
-     {
-       model_id: "onnx:Falconsai/text_summarization:fp32",
-       title: "text_summarization",
-       description: "Falconsai/text_summarization",
-       tasks: ["TextSummaryTask"],
-       provider: HF_TRANSFORMERS_ONNX,
-       provider_config: {
-         pipeline: "summarization",
-         model_path: "Falconsai/text_summarization",
-         dtype: "fp32"
-       },
-       metadata: {}
-     },
-     {
-       model_id: "onnx:Xenova/nllb-200-distilled-600M:q8",
-       title: "nllb-200-distilled-600M",
-       description: "Xenova/nllb-200-distilled-600M quantized to 8bit",
-       tasks: ["TextTranslationTask"],
-       provider: HF_TRANSFORMERS_ONNX,
-       provider_config: {
-         pipeline: "translation",
-         model_path: "Xenova/nllb-200-distilled-600M",
-         language_style: "FLORES-200",
-         dtype: "q8"
-       },
-       metadata: {}
-     },
-     {
-       model_id: "onnx:Xenova/m2m100_418M:q8",
-       title: "m2m100_418M",
-       description: "Xenova/m2m100_418M quantized to 8bit",
-       tasks: ["TextTranslationTask"],
-       provider: HF_TRANSFORMERS_ONNX,
-       provider_config: {
-         pipeline: "translation",
-         model_path: "Xenova/m2m100_418M",
-         language_style: "ISO-639",
-         dtype: "q8"
-       },
-       metadata: {}
-     },
-     {
-       model_id: "onnx:Xenova/m2m100_418M:q8",
-       title: "m2m100_418M",
-       description: "Xenova/m2m100_418M quantized to 8bit",
-       tasks: ["TextTranslationTask"],
-       provider: HF_TRANSFORMERS_ONNX,
-       provider_config: {
-         pipeline: "translation",
-         model_path: "Xenova/m2m100_418M",
-         language_style: "ISO-639",
-         dtype: "q8"
-       },
-       metadata: {}
-     },
-     {
-       model_id: "onnx:Xenova/mbart-large-50-many-to-many-mmt:q8",
-       title: "mbart-large-50-many-to-many-mmt",
-       description: "Xenova/mbart-large-50-many-to-many-mmt quantized to 8bit",
-       tasks: ["TextTranslationTask"],
-       provider: HF_TRANSFORMERS_ONNX,
-       provider_config: {
-         pipeline: "translation",
-         model_path: "Xenova/mbart-large-50-many-to-many-mmt",
-         language_style: "ISO-639_ISO-3166-1-alpha-2",
-         dtype: "q8"
-       },
-       metadata: {}
-     },
-     {
-       model_id: "onnx:Xenova/vit-base-patch16-224:q8",
-       title: "ViT Base Patch16 224",
-       description: "Vision Transformer for image classification",
-       tasks: ["ImageClassificationTask"],
-       provider: HF_TRANSFORMERS_ONNX,
-       provider_config: {
-         pipeline: "image-classification",
-         model_path: "Xenova/vit-base-patch16-224",
-         dtype: "q8"
-       },
-       metadata: {}
-     },
-     {
-       model_id: "onnx:Xenova/clip-vit-base-patch32:q8",
-       title: "CLIP ViT Base Patch32",
-       description: "CLIP model for zero-shot image classification and embeddings",
-       tasks: ["ImageClassificationTask", "ImageEmbeddingTask"],
-       provider: HF_TRANSFORMERS_ONNX,
-       provider_config: {
-         pipeline: "zero-shot-image-classification",
-         model_path: "Xenova/clip-vit-base-patch32",
-         dtype: "q8"
-       },
-       metadata: {}
-     },
-     {
-       model_id: "onnx:Xenova/detr-resnet-50:q8",
-       title: "DETR ResNet-50",
-       description: "Object detection model",
-       tasks: ["ObjectDetectionTask"],
-       provider: HF_TRANSFORMERS_ONNX,
-       provider_config: {
-         pipeline: "object-detection",
-         model_path: "Xenova/detr-resnet-50",
-         dtype: "q8"
-       },
-       metadata: {}
-     },
-     {
-       model_id: "onnx:Xenova/owlvit-base-patch32:q8",
-       title: "OWL-ViT Base Patch32",
-       description: "Zero-shot object detection model",
-       tasks: ["ObjectDetectionTask"],
-       provider: HF_TRANSFORMERS_ONNX,
-       provider_config: {
-         pipeline: "zero-shot-object-detection",
-         model_path: "Xenova/owlvit-base-patch32",
-         dtype: "q8"
-       },
-       metadata: {}
-     },
-     {
-       model_id: "onnx:Xenova/segformer-b0-finetuned-ade-512-512:q8",
-       title: "Segformer B0 ADE",
-       description: "Image segmentation model trained on ADE20K dataset",
-       tasks: ["ImageSegmentationTask"],
-       provider: HF_TRANSFORMERS_ONNX,
-       provider_config: {
-         pipeline: "image-segmentation",
-         model_path: "Xenova/segformer-b0-finetuned-ade-512-512",
-         dtype: "q8"
-       },
-       metadata: {}
-     },
-     {
-       model_id: "onnx:Xenova/vit-gpt2-image-captioning:q8",
-       title: "ViT GPT2 Image Captioning",
-       description: "Image to text captioning model",
-       tasks: ["ImageToTextTask"],
-       provider: HF_TRANSFORMERS_ONNX,
-       provider_config: {
-         pipeline: "image-to-text",
-         model_path: "Xenova/vit-gpt2-image-captioning",
-         dtype: "q8"
-       },
-       metadata: {}
-     },
-     {
-       model_id: "onnx:Xenova/modnet:q8",
-       title: "MODNet Background Removal",
-       description: "Background removal model",
-       tasks: ["BackgroundRemovalTask"],
-       provider: HF_TRANSFORMERS_ONNX,
-       provider_config: {
-         pipeline: "background-removal",
-         model_path: "Xenova/modnet",
-         dtype: "q8"
-       },
-       metadata: {}
-     },
-     {
-       model_id: "onnx:Xenova/mobilebert-uncased-mnli:q8",
-       title: "MobileBERT MNLI",
-       description: "Zero-shot text classification model",
-       tasks: ["TextClassificationTask"],
-       provider: HF_TRANSFORMERS_ONNX,
-       provider_config: {
-         pipeline: "zero-shot-classification",
-         model_path: "Xenova/mobilebert-uncased-mnli",
-         dtype: "q8"
-       },
-       metadata: {}
-     },
-     {
-       model_id: "onnx:Xenova/bge-reranker-base:q8",
-       title: "BGE Reranker Base",
-       description: "Cross-encoder reranker model for relevance scoring",
-       tasks: ["TextClassificationTask", "RerankerTask"],
-       provider: HF_TRANSFORMERS_ONNX,
-       provider_config: {
-         pipeline: "text-classification",
-         model_path: "Xenova/bge-reranker-base",
-         dtype: "q8"
-       },
-       metadata: {}
-     }
-   ];
-   for (const model of onnxModels) {
-     await getGlobalModelRepository2().addModel(model);
-   }
- }
-
- // src/samples/index.ts
- async function register_HFT_InMemoryQueue() {
-   await new HuggingFaceTransformersProvider(HFT_TASKS).register({ mode: "inline" });
- }
- async function register_TFMP_InMemoryQueue() {
-   await new TensorFlowMediaPipeProvider(TFMP_TASKS).register({ mode: "inline" });
- }
  // src/binding/FsFolderTaskGraphRepository.ts
  import { FsFolderTabularStorage } from "@workglow/storage";
  import {
@@ -729,10 +169,6 @@ class SqliteTaskOutputRepository extends TaskOutputTabularRepository5 {
  }
  }
  export {
- register_TFMP_InMemoryQueue,
- register_HFT_InMemoryQueue,
- registerMediaPipeTfJsLocalModels,
- registerHuggingfaceLocalModels,
  SqliteTaskOutputRepository,
  SqliteTaskGraphRepository,
  SQLITE_TASK_OUTPUT_REPOSITORY,
@@ -755,4 +191,4 @@ export {
  FS_FOLDER_TASK_GRAPH_REPOSITORY
  };

- //# debugId=A492DE3C008ECA7464756E2164756E21
+ //# debugId=E54A8018F73E78C164756E2164756E21