@workglow/ai-provider 0.0.102 → 0.0.103
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/{HFT_JobRunFns-aap9x58c.js → HFT_JobRunFns-66fave8m.js} +3 -3
- package/dist/anthropic/AnthropicProvider.d.ts +1 -1
- package/dist/anthropic/AnthropicProvider.d.ts.map +1 -1
- package/dist/anthropic/common/Anthropic_JobRunFns.d.ts +3 -1
- package/dist/anthropic/common/Anthropic_JobRunFns.d.ts.map +1 -1
- package/dist/anthropic/index.js +74 -7
- package/dist/anthropic/index.js.map +4 -4
- package/dist/google-gemini/GoogleGeminiProvider.d.ts +1 -1
- package/dist/google-gemini/GoogleGeminiProvider.d.ts.map +1 -1
- package/dist/google-gemini/common/Gemini_JobRunFns.d.ts +3 -1
- package/dist/google-gemini/common/Gemini_JobRunFns.d.ts.map +1 -1
- package/dist/google-gemini/index.js +75 -7
- package/dist/google-gemini/index.js.map +4 -4
- package/dist/hf-transformers/common/HFT_JobRunFns.d.ts.map +1 -1
- package/dist/hf-transformers/index.js +10 -10
- package/dist/hf-transformers/index.js.map +3 -3
- package/dist/index-3tvpdt0s.js.map +2 -2
- package/dist/{index-jd3bbc2x.js → index-6j5pq722.js} +1 -1
- package/dist/{index-jd3bbc2x.js.map → index-6j5pq722.js.map} +1 -1
- package/dist/index-795ethaq.js +54 -0
- package/dist/index-795ethaq.js.map +10 -0
- package/dist/{index-8jqhbz1h.js → index-7et44e16.js} +41 -4
- package/dist/index-7et44e16.js.map +10 -0
- package/dist/{index-236gqvq1.js → index-edjqamf9.js} +3 -2
- package/dist/{index-236gqvq1.js.map → index-edjqamf9.js.map} +3 -3
- package/dist/{index-fgp2zg78.js → index-p88ezt14.js} +3 -2
- package/dist/{index-fgp2zg78.js.map → index-p88ezt14.js.map} +3 -3
- package/dist/{index-h5kwbbzq.js → index-qy5ksm4w.js} +3 -2
- package/dist/{index-h5kwbbzq.js.map → index-qy5ksm4w.js.map} +3 -3
- package/dist/{index-b9310x5k.js → index-zqq3kw0n.js} +55 -54
- package/dist/index-zqq3kw0n.js.map +11 -0
- package/dist/{index.browser-jd3bbc2x.js → index.browser-6j5pq722.js} +1 -1
- package/dist/{index.browser-jd3bbc2x.js.map → index.browser-6j5pq722.js.map} +1 -1
- package/dist/index.js +10 -10
- package/dist/index.js.map +1 -1
- package/dist/provider-hf-inference/common/HFI_JobRunFns.d.ts.map +1 -1
- package/dist/provider-hf-inference/common/HFI_ModelSchema.d.ts.map +1 -1
- package/dist/provider-hf-inference/index.js +14 -4
- package/dist/provider-hf-inference/index.js.map +4 -4
- package/dist/provider-llamacpp/index.js +4 -4
- package/dist/provider-llamacpp/index.js.map +3 -3
- package/dist/provider-ollama/index.browser.js +4 -4
- package/dist/provider-ollama/index.browser.js.map +3 -3
- package/dist/provider-ollama/index.js +4 -4
- package/dist/provider-ollama/index.js.map +3 -3
- package/dist/provider-openai/OpenAiProvider.d.ts +1 -1
- package/dist/provider-openai/OpenAiProvider.d.ts.map +1 -1
- package/dist/provider-openai/common/OpenAI_JobRunFns.d.ts +3 -1
- package/dist/provider-openai/common/OpenAI_JobRunFns.d.ts.map +1 -1
- package/dist/provider-openai/index.js +81 -7
- package/dist/provider-openai/index.js.map +4 -4
- package/dist/tf-mediapipe/index.js +4 -4
- package/dist/tf-mediapipe/index.js.map +3 -3
- package/package.json +17 -17
- package/dist/index-8jqhbz1h.js.map +0 -10
- package/dist/index-b9310x5k.js.map +0 -11
- package/dist/index-m0r2hvfz.js +0 -57
- package/dist/index-m0r2hvfz.js.map +0 -10
- /package/dist/{HFT_JobRunFns-aap9x58c.js.map → HFT_JobRunFns-66fave8m.js.map} +0 -0
@@ -4,9 +4,9 @@
"sourcesContent": [
"/**\n * @license\n * Copyright 2025 Steven Roussey <sroussey@gmail.com>\n * SPDX-License-Identifier: Apache-2.0\n */\n\nexport const OPENAI = \"OPENAI\";\n",
"/**\n * @license\n * Copyright 2025 Steven Roussey <sroussey@gmail.com>\n * SPDX-License-Identifier: Apache-2.0\n */\n\nimport { ModelConfigSchema, ModelRecordSchema } from \"@workglow/ai\";\nimport { DataPortSchemaObject, FromSchema } from \"@workglow/util\";\nimport { OPENAI } from \"./OpenAI_Constants\";\n\nexport const OpenAiModelSchema = {\n type: \"object\",\n properties: {\n provider: {\n const: OPENAI,\n description: \"Discriminator: OpenAI cloud provider.\",\n },\n provider_config: {\n type: \"object\",\n description: \"OpenAI-specific configuration.\",\n properties: {\n model_name: {\n type: \"string\",\n description: \"The OpenAI model identifier (e.g., 'gpt-4o', 'text-embedding-3-small').\",\n },\n api_key: {\n type: \"string\",\n description: \"OpenAI API key. Falls back to default API key if not set.\",\n },\n base_url: {\n type: \"string\",\n description: \"Base URL for the OpenAI API. Useful for Azure OpenAI or proxy servers.\",\n default: \"https://api.openai.com/v1\",\n },\n organization: {\n type: \"string\",\n description: \"OpenAI organization ID (optional).\",\n },\n },\n required: [\"model_name\"],\n additionalProperties: false,\n },\n },\n required: [\"provider\", \"provider_config\"],\n additionalProperties: true,\n} as const satisfies DataPortSchemaObject;\n\nexport const OpenAiModelRecordSchema = {\n type: \"object\",\n properties: {\n ...ModelRecordSchema.properties,\n ...OpenAiModelSchema.properties,\n },\n required: [...ModelRecordSchema.required, ...OpenAiModelSchema.required],\n additionalProperties: false,\n} as const satisfies DataPortSchemaObject;\n\nexport type OpenAiModelRecord = FromSchema<typeof OpenAiModelRecordSchema>;\n\nexport const OpenAiModelConfigSchema = {\n type: \"object\",\n properties: {\n ...ModelConfigSchema.properties,\n ...OpenAiModelSchema.properties,\n },\n required: [...ModelConfigSchema.required, ...OpenAiModelSchema.required],\n additionalProperties: false,\n} as const satisfies DataPortSchemaObject;\n\nexport type OpenAiModelConfig = FromSchema<typeof OpenAiModelConfigSchema>;\n",
-
"/**\n * @license\n * Copyright 2025 Steven Roussey <sroussey@gmail.com>\n * SPDX-License-Identifier: Apache-2.0\n */\n\nimport {\n AiProvider,\n type AiProviderReactiveRunFn,\n type AiProviderRunFn,\n type AiProviderStreamFn,\n} from \"@workglow/ai\";\nimport { OPENAI } from \"./common/OpenAI_Constants\";\nimport type { OpenAiModelConfig } from \"./common/OpenAI_ModelSchema\";\n\n/**\n * AI provider for OpenAI cloud models.\n *\n * Supports text generation, text embedding, text rewriting, and text summarization\n * via the OpenAI API using the `openai` SDK.\n *\n * Task run functions are injected via the constructor so that the `openai` SDK\n * is only imported where actually needed (inline mode, worker server), not on\n * the main thread in worker mode.\n *\n * @example\n * ```typescript\n * // Worker mode (main thread) -- lightweight, no SDK import:\n * await new OpenAiProvider().register({\n * mode: \"worker\",\n * worker: new Worker(new URL(\"./worker_openai.ts\", import.meta.url), { type: \"module\" }),\n * });\n *\n * // Inline mode -- caller provides the tasks:\n * import { OPENAI_TASKS } from \"@workglow/ai-provider/openai\";\n * await new OpenAiProvider(OPENAI_TASKS).register({ mode: \"inline\" });\n *\n * // Worker side -- caller provides the tasks:\n * import { OPENAI_TASKS } from \"@workglow/ai-provider/openai\";\n * new OpenAiProvider(OPENAI_TASKS).registerOnWorkerServer(workerServer);\n * ```\n */\nexport class OpenAiProvider extends AiProvider<OpenAiModelConfig> {\n readonly name = OPENAI;\n\n readonly taskTypes = [\n \"TextGenerationTask\",\n \"TextEmbeddingTask\",\n \"TextRewriterTask\",\n \"TextSummaryTask\",\n \"CountTokensTask\",\n ] as const;\n\n constructor(\n tasks?: Record<string, AiProviderRunFn<any, any, OpenAiModelConfig>>,\n streamTasks?: Record<string, AiProviderStreamFn<any, any, OpenAiModelConfig>>,\n reactiveTasks?: Record<string, AiProviderReactiveRunFn<any, any, OpenAiModelConfig>>\n ) {\n super(tasks, streamTasks, reactiveTasks);\n }\n}\n"
+
"/**\n * @license\n * Copyright 2025 Steven Roussey <sroussey@gmail.com>\n * SPDX-License-Identifier: Apache-2.0\n */\n\nimport {\n AiProvider,\n type AiProviderReactiveRunFn,\n type AiProviderRunFn,\n type AiProviderStreamFn,\n} from \"@workglow/ai\";\nimport { OPENAI } from \"./common/OpenAI_Constants\";\nimport type { OpenAiModelConfig } from \"./common/OpenAI_ModelSchema\";\n\n/**\n * AI provider for OpenAI cloud models.\n *\n * Supports text generation, text embedding, text rewriting, and text summarization\n * via the OpenAI API using the `openai` SDK.\n *\n * Task run functions are injected via the constructor so that the `openai` SDK\n * is only imported where actually needed (inline mode, worker server), not on\n * the main thread in worker mode.\n *\n * @example\n * ```typescript\n * // Worker mode (main thread) -- lightweight, no SDK import:\n * await new OpenAiProvider().register({\n * mode: \"worker\",\n * worker: new Worker(new URL(\"./worker_openai.ts\", import.meta.url), { type: \"module\" }),\n * });\n *\n * // Inline mode -- caller provides the tasks:\n * import { OPENAI_TASKS } from \"@workglow/ai-provider/openai\";\n * await new OpenAiProvider(OPENAI_TASKS).register({ mode: \"inline\" });\n *\n * // Worker side -- caller provides the tasks:\n * import { OPENAI_TASKS } from \"@workglow/ai-provider/openai\";\n * new OpenAiProvider(OPENAI_TASKS).registerOnWorkerServer(workerServer);\n * ```\n */\nexport class OpenAiProvider extends AiProvider<OpenAiModelConfig> {\n readonly name = OPENAI;\n\n readonly taskTypes = [\n \"TextGenerationTask\",\n \"TextEmbeddingTask\",\n \"TextRewriterTask\",\n \"TextSummaryTask\",\n \"CountTokensTask\",\n \"StructuredGenerationTask\",\n ] as const;\n\n constructor(\n tasks?: Record<string, AiProviderRunFn<any, any, OpenAiModelConfig>>,\n streamTasks?: Record<string, AiProviderStreamFn<any, any, OpenAiModelConfig>>,\n reactiveTasks?: Record<string, AiProviderReactiveRunFn<any, any, OpenAiModelConfig>>\n ) {\n super(tasks, streamTasks, reactiveTasks);\n }\n}\n"
],
-
"mappings": ";AAMO,IAAM,SAAS;;;ACAtB;AAIO,IAAM,oBAAoB;AAAA,EAC/B,MAAM;AAAA,EACN,YAAY;AAAA,IACV,UAAU;AAAA,MACR,OAAO;AAAA,MACP,aAAa;AAAA,IACf;AAAA,IACA,iBAAiB;AAAA,MACf,MAAM;AAAA,MACN,aAAa;AAAA,MACb,YAAY;AAAA,QACV,YAAY;AAAA,UACV,MAAM;AAAA,UACN,aAAa;AAAA,QACf;AAAA,QACA,SAAS;AAAA,UACP,MAAM;AAAA,UACN,aAAa;AAAA,QACf;AAAA,QACA,UAAU;AAAA,UACR,MAAM;AAAA,UACN,aAAa;AAAA,UACb,SAAS;AAAA,QACX;AAAA,QACA,cAAc;AAAA,UACZ,MAAM;AAAA,UACN,aAAa;AAAA,QACf;AAAA,MACF;AAAA,MACA,UAAU,CAAC,YAAY;AAAA,MACvB,sBAAsB;AAAA,IACxB;AAAA,EACF;AAAA,EACA,UAAU,CAAC,YAAY,iBAAiB;AAAA,EACxC,sBAAsB;AACxB;AAEO,IAAM,0BAA0B;AAAA,EACrC,MAAM;AAAA,EACN,YAAY;AAAA,OACP,kBAAkB;AAAA,OAClB,kBAAkB;AAAA,EACvB;AAAA,EACA,UAAU,CAAC,GAAG,kBAAkB,UAAU,GAAG,kBAAkB,QAAQ;AAAA,EACvE,sBAAsB;AACxB;AAIO,IAAM,0BAA0B;AAAA,EACrC,MAAM;AAAA,EACN,YAAY;AAAA,OACP,kBAAkB;AAAA,OAClB,kBAAkB;AAAA,EACvB;AAAA,EACA,UAAU,CAAC,GAAG,kBAAkB,UAAU,GAAG,kBAAkB,QAAQ;AAAA,EACvE,sBAAsB;AACxB;;;AC7DA;AAAA;AAAA;AAoCO,MAAM,uBAAuB,WAA8B;AAAA,EACvD,OAAO;AAAA,EAEP,YAAY;AAAA,IACnB;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF;AAAA,EAEA,WAAW,CACT,OACA,aACA,eACA;AAAA,IACA,MAAM,OAAO,aAAa,aAAa;AAAA;AAE3C;",
-"debugId": "
+
"mappings": ";AAMO,IAAM,SAAS;;;ACAtB;AAIO,IAAM,oBAAoB;AAAA,EAC/B,MAAM;AAAA,EACN,YAAY;AAAA,IACV,UAAU;AAAA,MACR,OAAO;AAAA,MACP,aAAa;AAAA,IACf;AAAA,IACA,iBAAiB;AAAA,MACf,MAAM;AAAA,MACN,aAAa;AAAA,MACb,YAAY;AAAA,QACV,YAAY;AAAA,UACV,MAAM;AAAA,UACN,aAAa;AAAA,QACf;AAAA,QACA,SAAS;AAAA,UACP,MAAM;AAAA,UACN,aAAa;AAAA,QACf;AAAA,QACA,UAAU;AAAA,UACR,MAAM;AAAA,UACN,aAAa;AAAA,UACb,SAAS;AAAA,QACX;AAAA,QACA,cAAc;AAAA,UACZ,MAAM;AAAA,UACN,aAAa;AAAA,QACf;AAAA,MACF;AAAA,MACA,UAAU,CAAC,YAAY;AAAA,MACvB,sBAAsB;AAAA,IACxB;AAAA,EACF;AAAA,EACA,UAAU,CAAC,YAAY,iBAAiB;AAAA,EACxC,sBAAsB;AACxB;AAEO,IAAM,0BAA0B;AAAA,EACrC,MAAM;AAAA,EACN,YAAY;AAAA,OACP,kBAAkB;AAAA,OAClB,kBAAkB;AAAA,EACvB;AAAA,EACA,UAAU,CAAC,GAAG,kBAAkB,UAAU,GAAG,kBAAkB,QAAQ;AAAA,EACvE,sBAAsB;AACxB;AAIO,IAAM,0BAA0B;AAAA,EACrC,MAAM;AAAA,EACN,YAAY;AAAA,OACP,kBAAkB;AAAA,OAClB,kBAAkB;AAAA,EACvB;AAAA,EACA,UAAU,CAAC,GAAG,kBAAkB,UAAU,GAAG,kBAAkB,QAAQ;AAAA,EACvE,sBAAsB;AACxB;;;AC7DA;AAAA;AAAA;AAoCO,MAAM,uBAAuB,WAA8B;AAAA,EACvD,OAAO;AAAA,EAEP,YAAY;AAAA,IACnB;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF;AAAA,EAEA,WAAW,CACT,OACA,aACA,eACA;AAAA,IACA,MAAM,OAAO,aAAa,aAAa;AAAA;AAE3C;",
+"debugId": "CF0D989CCE26DA1C64756E2164756E21",
"names": []
}
@@ -77,7 +77,8 @@ class GoogleGeminiProvider extends AiProvider {
"TextGenerationTask",
"TextEmbeddingTask",
"TextRewriterTask",
-"TextSummaryTask"
+"TextSummaryTask",
+"StructuredGenerationTask"
];
constructor(tasks, streamTasks, reactiveTasks) {
super(tasks, streamTasks, reactiveTasks);
@@ -86,4 +87,4 @@ class GoogleGeminiProvider extends AiProvider {

export { GOOGLE_GEMINI, GeminiModelSchema, GeminiModelRecordSchema, GeminiModelConfigSchema, GoogleGeminiProvider };

-//# debugId=
+//# debugId=76CE2B0E054479C164756E2164756E21
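The hunk above adds "StructuredGenerationTask" to GoogleGeminiProvider's taskTypes; the OpenAI provider source earlier in this diff gains the same entry. A hypothetical TypeScript sketch of how a consumer could gate on the new task type — only the class name, the subpath import, and the task-type string come from this diff, the rest is illustrative:

import { GoogleGeminiProvider } from "@workglow/ai-provider/google-gemini";

// taskTypes is a readonly tuple on the provider; widen it for the membership check.
const provider = new GoogleGeminiProvider();
const supportsStructured = (provider.taskTypes as readonly string[]).includes(
  "StructuredGenerationTask"
);
// True from 0.0.103 onward, so structured-output jobs can be routed to this provider.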
@@ -4,9 +4,9 @@
"sourcesContent": [
"/**\n * @license\n * Copyright 2025 Steven Roussey <sroussey@gmail.com>\n * SPDX-License-Identifier: Apache-2.0\n */\n\nexport const GOOGLE_GEMINI = \"GOOGLE_GEMINI\";\n",
"/**\n * @license\n * Copyright 2025 Steven Roussey <sroussey@gmail.com>\n * SPDX-License-Identifier: Apache-2.0\n */\n\nimport { ModelConfigSchema, ModelRecordSchema } from \"@workglow/ai\";\nimport { DataPortSchemaObject, FromSchema } from \"@workglow/util\";\nimport { GOOGLE_GEMINI } from \"./Gemini_Constants\";\n\nexport const GeminiModelSchema = {\n type: \"object\",\n properties: {\n provider: {\n const: GOOGLE_GEMINI,\n description: \"Discriminator: Google Gemini cloud provider.\",\n },\n provider_config: {\n type: \"object\",\n description: \"Google Gemini-specific configuration.\",\n properties: {\n model_name: {\n type: \"string\",\n description:\n \"The Gemini model identifier (e.g., 'gemini-2.0-flash', 'text-embedding-004').\",\n },\n api_key: {\n type: \"string\",\n description: \"Google AI API key. Falls back to default API key if not set.\",\n },\n embedding_task_type: {\n oneOf: [\n { type: \"null\" },\n {\n type: \"string\",\n enum: [\n \"RETRIEVAL_QUERY\",\n \"RETRIEVAL_DOCUMENT\",\n \"SEMANTIC_SIMILARITY\",\n \"CLASSIFICATION\",\n \"CLUSTERING\",\n ],\n },\n ],\n description: \"Task type hint for embedding models.\",\n default: null,\n },\n },\n required: [\"model_name\"],\n additionalProperties: false,\n },\n },\n required: [\"provider\", \"provider_config\"],\n additionalProperties: true,\n} as const satisfies DataPortSchemaObject;\n\nexport const GeminiModelRecordSchema = {\n type: \"object\",\n properties: {\n ...ModelRecordSchema.properties,\n ...GeminiModelSchema.properties,\n },\n required: [...ModelRecordSchema.required, ...GeminiModelSchema.required],\n additionalProperties: false,\n} as const satisfies DataPortSchemaObject;\n\nexport type GeminiModelRecord = FromSchema<typeof GeminiModelRecordSchema>;\n\nexport const GeminiModelConfigSchema = {\n type: \"object\",\n properties: {\n ...ModelConfigSchema.properties,\n ...GeminiModelSchema.properties,\n },\n required: [...ModelConfigSchema.required, ...GeminiModelSchema.required],\n additionalProperties: false,\n} as const satisfies DataPortSchemaObject;\n\nexport type GeminiModelConfig = FromSchema<typeof GeminiModelConfigSchema>;\n",
-
"/**\n * @license\n * Copyright 2025 Steven Roussey <sroussey@gmail.com>\n * SPDX-License-Identifier: Apache-2.0\n */\n\nimport {\n AiProvider,\n type AiProviderReactiveRunFn,\n type AiProviderRunFn,\n type AiProviderStreamFn,\n} from \"@workglow/ai\";\nimport { GOOGLE_GEMINI } from \"./common/Gemini_Constants\";\nimport type { GeminiModelConfig } from \"./common/Gemini_ModelSchema\";\n\n/**\n * AI provider for Google Gemini cloud models.\n *\n * Supports text generation, text embedding, text rewriting, and text summarization\n * via the Google Generative AI API using the `@google/generative-ai` SDK.\n *\n * Task run functions are injected via the constructor so that the SDK\n * is only imported where actually needed (inline mode, worker server), not on\n * the main thread in worker mode.\n *\n * @example\n * ```typescript\n * // Worker mode (main thread) -- lightweight, no SDK import:\n * await new GoogleGeminiProvider().register({\n * mode: \"worker\",\n * worker: new Worker(new URL(\"./worker_gemini.ts\", import.meta.url), { type: \"module\" }),\n * });\n *\n * // Inline mode -- caller provides the tasks:\n * import { GEMINI_TASKS } from \"@workglow/ai-provider/google-gemini\";\n * await new GoogleGeminiProvider(GEMINI_TASKS).register({ mode: \"inline\" });\n * ```\n */\nexport class GoogleGeminiProvider extends AiProvider<GeminiModelConfig> {\n readonly name = GOOGLE_GEMINI;\n\n readonly taskTypes = [\n \"CountTokensTask\",\n \"TextGenerationTask\",\n \"TextEmbeddingTask\",\n \"TextRewriterTask\",\n \"TextSummaryTask\",\n ] as const;\n\n constructor(\n tasks?: Record<string, AiProviderRunFn<any, any, GeminiModelConfig>>,\n streamTasks?: Record<string, AiProviderStreamFn<any, any, GeminiModelConfig>>,\n reactiveTasks?: Record<string, AiProviderReactiveRunFn<any, any, GeminiModelConfig>>\n ) {\n super(tasks, streamTasks, reactiveTasks);\n }\n}\n"
+
"/**\n * @license\n * Copyright 2025 Steven Roussey <sroussey@gmail.com>\n * SPDX-License-Identifier: Apache-2.0\n */\n\nimport {\n AiProvider,\n type AiProviderReactiveRunFn,\n type AiProviderRunFn,\n type AiProviderStreamFn,\n} from \"@workglow/ai\";\nimport { GOOGLE_GEMINI } from \"./common/Gemini_Constants\";\nimport type { GeminiModelConfig } from \"./common/Gemini_ModelSchema\";\n\n/**\n * AI provider for Google Gemini cloud models.\n *\n * Supports text generation, text embedding, text rewriting, and text summarization\n * via the Google Generative AI API using the `@google/generative-ai` SDK.\n *\n * Task run functions are injected via the constructor so that the SDK\n * is only imported where actually needed (inline mode, worker server), not on\n * the main thread in worker mode.\n *\n * @example\n * ```typescript\n * // Worker mode (main thread) -- lightweight, no SDK import:\n * await new GoogleGeminiProvider().register({\n * mode: \"worker\",\n * worker: new Worker(new URL(\"./worker_gemini.ts\", import.meta.url), { type: \"module\" }),\n * });\n *\n * // Inline mode -- caller provides the tasks:\n * import { GEMINI_TASKS } from \"@workglow/ai-provider/google-gemini\";\n * await new GoogleGeminiProvider(GEMINI_TASKS).register({ mode: \"inline\" });\n * ```\n */\nexport class GoogleGeminiProvider extends AiProvider<GeminiModelConfig> {\n readonly name = GOOGLE_GEMINI;\n\n readonly taskTypes = [\n \"CountTokensTask\",\n \"TextGenerationTask\",\n \"TextEmbeddingTask\",\n \"TextRewriterTask\",\n \"TextSummaryTask\",\n \"StructuredGenerationTask\",\n ] as const;\n\n constructor(\n tasks?: Record<string, AiProviderRunFn<any, any, GeminiModelConfig>>,\n streamTasks?: Record<string, AiProviderStreamFn<any, any, GeminiModelConfig>>,\n reactiveTasks?: Record<string, AiProviderReactiveRunFn<any, any, GeminiModelConfig>>\n ) {\n super(tasks, streamTasks, reactiveTasks);\n }\n}\n"
],
-
"mappings": ";AAMO,IAAM,gBAAgB;;;ACA7B;AAIO,IAAM,oBAAoB;AAAA,EAC/B,MAAM;AAAA,EACN,YAAY;AAAA,IACV,UAAU;AAAA,MACR,OAAO;AAAA,MACP,aAAa;AAAA,IACf;AAAA,IACA,iBAAiB;AAAA,MACf,MAAM;AAAA,MACN,aAAa;AAAA,MACb,YAAY;AAAA,QACV,YAAY;AAAA,UACV,MAAM;AAAA,UACN,aACE;AAAA,QACJ;AAAA,QACA,SAAS;AAAA,UACP,MAAM;AAAA,UACN,aAAa;AAAA,QACf;AAAA,QACA,qBAAqB;AAAA,UACnB,OAAO;AAAA,YACL,EAAE,MAAM,OAAO;AAAA,YACf;AAAA,cACE,MAAM;AAAA,cACN,MAAM;AAAA,gBACJ;AAAA,gBACA;AAAA,gBACA;AAAA,gBACA;AAAA,gBACA;AAAA,cACF;AAAA,YACF;AAAA,UACF;AAAA,UACA,aAAa;AAAA,UACb,SAAS;AAAA,QACX;AAAA,MACF;AAAA,MACA,UAAU,CAAC,YAAY;AAAA,MACvB,sBAAsB;AAAA,IACxB;AAAA,EACF;AAAA,EACA,UAAU,CAAC,YAAY,iBAAiB;AAAA,EACxC,sBAAsB;AACxB;AAEO,IAAM,0BAA0B;AAAA,EACrC,MAAM;AAAA,EACN,YAAY;AAAA,OACP,kBAAkB;AAAA,OAClB,kBAAkB;AAAA,EACvB;AAAA,EACA,UAAU,CAAC,GAAG,kBAAkB,UAAU,GAAG,kBAAkB,QAAQ;AAAA,EACvE,sBAAsB;AACxB;AAIO,IAAM,0BAA0B;AAAA,EACrC,MAAM;AAAA,EACN,YAAY;AAAA,OACP,kBAAkB;AAAA,OAClB,kBAAkB;AAAA,EACvB;AAAA,EACA,UAAU,CAAC,GAAG,kBAAkB,UAAU,GAAG,kBAAkB,QAAQ;AAAA,EACvE,sBAAsB;AACxB;;;ACtEA;AAAA;AAAA;AAgCO,MAAM,6BAA6B,WAA8B;AAAA,EAC7D,OAAO;AAAA,EAEP,YAAY;AAAA,IACnB;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF;AAAA,EAEA,WAAW,CACT,OACA,aACA,eACA;AAAA,IACA,MAAM,OAAO,aAAa,aAAa;AAAA;AAE3C;",
-"debugId": "
+
"mappings": ";AAMO,IAAM,gBAAgB;;;ACA7B;AAIO,IAAM,oBAAoB;AAAA,EAC/B,MAAM;AAAA,EACN,YAAY;AAAA,IACV,UAAU;AAAA,MACR,OAAO;AAAA,MACP,aAAa;AAAA,IACf;AAAA,IACA,iBAAiB;AAAA,MACf,MAAM;AAAA,MACN,aAAa;AAAA,MACb,YAAY;AAAA,QACV,YAAY;AAAA,UACV,MAAM;AAAA,UACN,aACE;AAAA,QACJ;AAAA,QACA,SAAS;AAAA,UACP,MAAM;AAAA,UACN,aAAa;AAAA,QACf;AAAA,QACA,qBAAqB;AAAA,UACnB,OAAO;AAAA,YACL,EAAE,MAAM,OAAO;AAAA,YACf;AAAA,cACE,MAAM;AAAA,cACN,MAAM;AAAA,gBACJ;AAAA,gBACA;AAAA,gBACA;AAAA,gBACA;AAAA,gBACA;AAAA,cACF;AAAA,YACF;AAAA,UACF;AAAA,UACA,aAAa;AAAA,UACb,SAAS;AAAA,QACX;AAAA,MACF;AAAA,MACA,UAAU,CAAC,YAAY;AAAA,MACvB,sBAAsB;AAAA,IACxB;AAAA,EACF;AAAA,EACA,UAAU,CAAC,YAAY,iBAAiB;AAAA,EACxC,sBAAsB;AACxB;AAEO,IAAM,0BAA0B;AAAA,EACrC,MAAM;AAAA,EACN,YAAY;AAAA,OACP,kBAAkB;AAAA,OAClB,kBAAkB;AAAA,EACvB;AAAA,EACA,UAAU,CAAC,GAAG,kBAAkB,UAAU,GAAG,kBAAkB,QAAQ;AAAA,EACvE,sBAAsB;AACxB;AAIO,IAAM,0BAA0B;AAAA,EACrC,MAAM;AAAA,EACN,YAAY;AAAA,OACP,kBAAkB;AAAA,OAClB,kBAAkB;AAAA,EACvB;AAAA,EACA,UAAU,CAAC,GAAG,kBAAkB,UAAU,GAAG,kBAAkB,QAAQ;AAAA,EACvE,sBAAsB;AACxB;;;ACtEA;AAAA;AAAA;AAgCO,MAAM,6BAA6B,WAA8B;AAAA,EAC7D,OAAO;AAAA,EAEP,YAAY;AAAA,IACnB;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF;AAAA,EAEA,WAAW,CACT,OACA,aACA,eACA;AAAA,IACA,MAAM,OAAO,aAAa,aAAa;AAAA;AAE3C;",
+"debugId": "76CE2B0E054479C164756E2164756E21",
"names": []
}
@@ -1,11 +1,56 @@
-
-
-
-
-
-
-
-
+// src/hf-transformers/common/HFT_Constants.ts
+var HF_TRANSFORMERS_ONNX = "HF_TRANSFORMERS_ONNX";
+var HTF_CACHE_NAME = "transformers-cache";
+var QuantizationDataType = {
+auto: "auto",
+fp32: "fp32",
+fp16: "fp16",
+q8: "q8",
+int8: "int8",
+uint8: "uint8",
+q4: "q4",
+bnb4: "bnb4",
+q4f16: "q4f16"
+};
+var TextPipelineUseCase = {
+"fill-mask": "fill-mask",
+"token-classification": "token-classification",
+"text-generation": "text-generation",
+"text2text-generation": "text2text-generation",
+"text-classification": "text-classification",
+summarization: "summarization",
+translation: "translation",
+"feature-extraction": "feature-extraction",
+"zero-shot-classification": "zero-shot-classification",
+"question-answering": "question-answering"
+};
+var VisionPipelineUseCase = {
+"background-removal": "background-removal",
+"image-segmentation": "image-segmentation",
+"depth-estimation": "depth-estimation",
+"image-classification": "image-classification",
+"image-to-image": "image-to-image",
+"object-detection": "object-detection",
+"image-feature-extraction": "image-feature-extraction"
+};
+var AudioPipelineUseCase = {
+"audio-classification": "audio-classification",
+"automatic-speech-recognition": "automatic-speech-recognition",
+"text-to-speech": "text-to-speech"
+};
+var MultimodalPipelineUseCase = {
+"document-question-answering": "document-question-answering",
+"image-to-text": "image-to-text",
+"zero-shot-audio-classification": "zero-shot-audio-classification",
+"zero-shot-image-classification": "zero-shot-image-classification",
+"zero-shot-object-detection": "zero-shot-object-detection"
+};
+var PipelineUseCase = {
+...TextPipelineUseCase,
+...VisionPipelineUseCase,
+...AudioPipelineUseCase,
+...MultimodalPipelineUseCase
+};

// src/hf-transformers/common/HFT_ModelSchema.ts
import { ModelConfigSchema, ModelRecordSchema } from "@workglow/ai";
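The const maps added above are compiled from src/hf-transformers/common/HFT_Constants.ts, whose TypeScript source ships verbatim in the new source map later in this diff. The pattern used there pairs each string-literal union with a same-named const object so the values are also available at runtime (HFT_ModelSchema.ts feeds them to Object.values for its enum lists):

export type QuantizationDataType =
  | "auto" | "fp32" | "fp16" | "q8" | "int8" | "uint8" | "q4" | "bnb4" | "q4f16";

// Same-named value: usable with Object.values(...) at runtime while the type stays a plain union.
export const QuantizationDataType = {
  auto: "auto", fp32: "fp32", fp16: "fp16", q8: "q8", int8: "int8",
  uint8: "uint8", q4: "q4", bnb4: "bnb4", q4f16: "q4f16",
} as const satisfies Record<QuantizationDataType, QuantizationDataType>;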
@@ -121,50 +166,6 @@ var HfTransformersOnnxModelConfigSchema = {
additionalProperties: false
};

-
-import {
-AiProvider
-} from "@workglow/ai";
-class HuggingFaceTransformersProvider extends AiProvider {
-name = HF_TRANSFORMERS_ONNX;
-taskTypes = [
-"DownloadModelTask",
-"UnloadModelTask",
-"CountTokensTask",
-"TextEmbeddingTask",
-"TextGenerationTask",
-"TextQuestionAnswerTask",
-"TextLanguageDetectionTask",
-"TextClassificationTask",
-"TextFillMaskTask",
-"TextNamedEntityRecognitionTask",
-"TextRewriterTask",
-"TextSummaryTask",
-"TextTranslationTask",
-"ImageSegmentationTask",
-"ImageToTextTask",
-"BackgroundRemovalTask",
-"ImageEmbeddingTask",
-"ImageClassificationTask",
-"ObjectDetectionTask"
-];
-constructor(tasks, streamTasks, reactiveTasks) {
-super(tasks, streamTasks, reactiveTasks);
-}
-async onInitialize(options) {
-if (options.mode === "inline") {
-const { env } = await import("@sroussey/transformers");
-env.backends.onnx.wasm.proxy = true;
-}
-}
-async dispose() {
-if (this.tasks) {
-const { clearPipelineCache } = await import("./HFT_JobRunFns-aap9x58c.js");
-clearPipelineCache();
-}
-}
-}
-
-export { HfTransformersOnnxModelSchema, HfTransformersOnnxModelRecordSchema, HfTransformersOnnxModelConfigSchema, HuggingFaceTransformersProvider };
+export { HF_TRANSFORMERS_ONNX, HTF_CACHE_NAME, QuantizationDataType, TextPipelineUseCase, VisionPipelineUseCase, AudioPipelineUseCase, MultimodalPipelineUseCase, PipelineUseCase, HfTransformersOnnxModelSchema, HfTransformersOnnxModelRecordSchema, HfTransformersOnnxModelConfigSchema };

-//# debugId=
+//# debugId=633B1866FC05294D64756E2164756E21
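With the provider class removed here, this chunk now exports only the hf-transformers constants and schemas; package/dist/index.js below pulls HuggingFaceTransformersProvider from the new index-795ethaq.js chunk instead. A hedged sketch of using the exported const maps for runtime validation — it assumes the root entry of @workglow/ai-provider re-exports them, which the import list in index.js below suggests but this diff does not show directly:

import { PipelineUseCase, QuantizationDataType } from "@workglow/ai-provider";

// Each map is keyed by its own values, so `in` acts as a runtime membership test.
const dtypeOk = "q4f16" in QuantizationDataType;          // true
const pipelineOk = "text-generation" in PipelineUseCase;  // true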
@@ -0,0 +1,11 @@
+{
+"version": 3,
+"sources": ["../src/hf-transformers/common/HFT_Constants.ts", "../src/hf-transformers/common/HFT_ModelSchema.ts"],
+"sourcesContent": [
+
"/**\n * @license\n * Copyright 2025 Steven Roussey <sroussey@gmail.com>\n * SPDX-License-Identifier: Apache-2.0\n */\n\nexport const HF_TRANSFORMERS_ONNX = \"HF_TRANSFORMERS_ONNX\";\nexport const HTF_CACHE_NAME = \"transformers-cache\";\n\nexport type QuantizationDataType =\n | \"auto\" // Auto-detect based on environment\n | \"fp32\"\n | \"fp16\"\n | \"q8\"\n | \"int8\"\n | \"uint8\"\n | \"q4\"\n | \"bnb4\"\n | \"q4f16\"; // fp16 model with int4 block weight quantization\n\nexport const QuantizationDataType = {\n auto: \"auto\",\n fp32: \"fp32\",\n fp16: \"fp16\",\n q8: \"q8\",\n int8: \"int8\",\n uint8: \"uint8\",\n q4: \"q4\",\n bnb4: \"bnb4\",\n q4f16: \"q4f16\",\n} as const satisfies Record<QuantizationDataType, QuantizationDataType>;\n\nexport type TextPipelineUseCase =\n | \"fill-mask\" // https://huggingface.co/tasks/fill-mask\n | \"token-classification\" // https://huggingface.co/tasks/token-classification\n | \"text-generation\" // https://huggingface.co/tasks/text-generation#completion-generation-models\n | \"text2text-generation\" // https://huggingface.co/tasks/text-generation#text-to-text-generation-models\n | \"text-classification\" // https://huggingface.co/tasks/text-classification\n | \"summarization\" // https://huggingface.co/tasks/sentence-similarity\n | \"translation\" // https://huggingface.co/tasks/translation\n | \"feature-extraction\" // https://huggingface.co/tasks/feature-extraction\n | \"zero-shot-classification\" // https://huggingface.co/tasks/zero-shot-classification\n | \"question-answering\"; // https://huggingface.co/tasks/question-answering\n\nexport const TextPipelineUseCase = {\n \"fill-mask\": \"fill-mask\",\n \"token-classification\": \"token-classification\",\n \"text-generation\": \"text-generation\",\n \"text2text-generation\": \"text2text-generation\",\n \"text-classification\": \"text-classification\",\n summarization: \"summarization\",\n translation: \"translation\",\n \"feature-extraction\": \"feature-extraction\",\n \"zero-shot-classification\": \"zero-shot-classification\",\n \"question-answering\": \"question-answering\",\n} as const satisfies Record<TextPipelineUseCase, TextPipelineUseCase>;\n\nexport type VisionPipelineUseCase =\n | \"background-removal\" // https://huggingface.co/tasks/image-segmentation#background-removal\n | \"image-segmentation\" // https://huggingface.co/tasks/image-segmentation\n | \"depth-estimation\" // https://huggingface.co/tasks/depth-estimation\n | \"image-classification\" // https://huggingface.co/tasks/image-classification\n | \"image-to-image\" // https://huggingface.co/tasks/image-to-image\n | \"object-detection\" // https://huggingface.co/tasks/object-detection\n | \"image-feature-extraction\"; // https://huggingface.co/tasks/image-feature-extraction\n\nexport const VisionPipelineUseCase = {\n \"background-removal\": \"background-removal\",\n \"image-segmentation\": \"image-segmentation\",\n \"depth-estimation\": \"depth-estimation\",\n \"image-classification\": \"image-classification\",\n \"image-to-image\": \"image-to-image\",\n \"object-detection\": \"object-detection\",\n \"image-feature-extraction\": \"image-feature-extraction\",\n} as const satisfies Record<VisionPipelineUseCase, VisionPipelineUseCase>;\n\nexport type AudioPipelineUseCase =\n | \"audio-classification\" // https://huggingface.co/tasks/audio-classification\n | \"automatic-speech-recognition\" // https://huggingface.co/tasks/automatic-speech-recognition\n | \"text-to-speech\"; // https://huggingface.co/tasks/text-to-speech\n\nexport 
const AudioPipelineUseCase = {\n \"audio-classification\": \"audio-classification\",\n \"automatic-speech-recognition\": \"automatic-speech-recognition\",\n \"text-to-speech\": \"text-to-speech\",\n} as const satisfies Record<AudioPipelineUseCase, AudioPipelineUseCase>;\n\nexport type MultimodalPipelineUseCase =\n | \"document-question-answering\" // https://huggingface.co/tasks/document-question-answering\n | \"image-to-text\" // https://huggingface.co/tasks/image-to-text\n | \"zero-shot-audio-classification\" // https://huggingface.co/tasks/zero-shot-audio-classification\n | \"zero-shot-image-classification\" // https://huggingface.co/tasks/zero-shot-image-classification\n | \"zero-shot-object-detection\"; // https://huggingface.co/tasks/zero-shot-object-detection\n\nexport const MultimodalPipelineUseCase = {\n \"document-question-answering\": \"document-question-answering\",\n \"image-to-text\": \"image-to-text\",\n \"zero-shot-audio-classification\": \"zero-shot-audio-classification\",\n \"zero-shot-image-classification\": \"zero-shot-image-classification\",\n \"zero-shot-object-detection\": \"zero-shot-object-detection\",\n} as const satisfies Record<MultimodalPipelineUseCase, MultimodalPipelineUseCase>;\n\nexport type PipelineUseCase =\n | TextPipelineUseCase\n | VisionPipelineUseCase\n | AudioPipelineUseCase\n | MultimodalPipelineUseCase;\n\nexport const PipelineUseCase = {\n ...TextPipelineUseCase,\n ...VisionPipelineUseCase,\n ...AudioPipelineUseCase,\n ...MultimodalPipelineUseCase,\n} as const satisfies Record<PipelineUseCase, PipelineUseCase>;\n",
+
"/**\n * @license\n * Copyright 2025 Steven Roussey <sroussey@gmail.com>\n * SPDX-License-Identifier: Apache-2.0\n */\n\nimport { ModelConfigSchema, ModelRecordSchema } from \"@workglow/ai\";\nimport { DataPortSchemaObject, FromSchema } from \"@workglow/util\";\nimport { HF_TRANSFORMERS_ONNX, PipelineUseCase, QuantizationDataType } from \"./HFT_Constants\";\n\nexport const HfTransformersOnnxModelSchema = {\n type: \"object\",\n properties: {\n provider: {\n const: HF_TRANSFORMERS_ONNX,\n description: \"Discriminator: ONNX runtime backend.\",\n },\n provider_config: {\n type: \"object\",\n description: \"ONNX runtime-specific options.\",\n properties: {\n pipeline: {\n type: \"string\",\n enum: Object.values(PipelineUseCase),\n description: \"Pipeline type for the ONNX model.\",\n default: \"text-generation\",\n },\n model_path: {\n type: \"string\",\n description: \"Filesystem path or URI for the ONNX model.\",\n },\n dtype: {\n type: \"string\",\n enum: Object.values(QuantizationDataType),\n description: \"Data type for the ONNX model.\",\n default: \"auto\",\n },\n device: {\n type: \"string\",\n enum: [\"cpu\", \"gpu\", \"webgpu\", \"wasm\", \"metal\"],\n description: \"High-level device selection.\",\n default: \"webgpu\",\n },\n execution_providers: {\n type: \"array\",\n items: { type: \"string\" },\n description: \"Raw ONNX Runtime execution provider identifiers.\",\n \"x-ui-hidden\": true,\n },\n intra_op_num_threads: {\n type: \"integer\",\n minimum: 1,\n },\n inter_op_num_threads: {\n type: \"integer\",\n minimum: 1,\n },\n use_external_data_format: {\n type: \"boolean\",\n description: \"Whether the model uses external data format.\",\n },\n native_dimensions: {\n type: \"integer\",\n description: \"The native dimensions of the model.\",\n },\n pooling: {\n type: \"string\",\n enum: [\"mean\", \"last_token\", \"cls\"],\n description: \"The pooling strategy to use for the model.\",\n default: \"mean\",\n },\n normalize: {\n type: \"boolean\",\n description: \"Whether the model uses normalization.\",\n default: true,\n },\n language_style: {\n type: \"string\",\n description: \"The language style of the model.\",\n },\n mrl: {\n type: \"boolean\",\n description: \"Whether the model uses matryoshka.\",\n default: false,\n },\n },\n required: [\"model_path\", \"pipeline\"],\n additionalProperties: false,\n if: {\n properties: {\n pipeline: {\n const: \"feature-extraction\",\n },\n },\n },\n then: {\n required: [\"native_dimensions\"],\n },\n },\n },\n required: [\"provider\", \"provider_config\"],\n additionalProperties: true,\n} as const satisfies DataPortSchemaObject;\n\nexport const HfTransformersOnnxModelRecordSchema = {\n type: \"object\",\n properties: {\n ...ModelRecordSchema.properties,\n ...HfTransformersOnnxModelSchema.properties,\n },\n required: [...ModelRecordSchema.required, ...HfTransformersOnnxModelSchema.required],\n additionalProperties: false,\n} as const satisfies DataPortSchemaObject;\n\nexport type HfTransformersOnnxModelRecord = FromSchema<typeof HfTransformersOnnxModelRecordSchema>;\n\nexport const HfTransformersOnnxModelConfigSchema = {\n type: \"object\",\n properties: {\n ...ModelConfigSchema.properties,\n ...HfTransformersOnnxModelSchema.properties,\n },\n required: [...ModelConfigSchema.required, ...HfTransformersOnnxModelSchema.required],\n additionalProperties: false,\n} as const satisfies DataPortSchemaObject;\n\nexport type HfTransformersOnnxModelConfig = FromSchema<typeof HfTransformersOnnxModelConfigSchema>;\n"
+],
+
"mappings": ";AAMO,IAAM,uBAAuB;AAC7B,IAAM,iBAAiB;AAavB,IAAM,uBAAuB;AAAA,EAClC,MAAM;AAAA,EACN,MAAM;AAAA,EACN,MAAM;AAAA,EACN,IAAI;AAAA,EACJ,MAAM;AAAA,EACN,OAAO;AAAA,EACP,IAAI;AAAA,EACJ,MAAM;AAAA,EACN,OAAO;AACT;AAcO,IAAM,sBAAsB;AAAA,EACjC,aAAa;AAAA,EACb,wBAAwB;AAAA,EACxB,mBAAmB;AAAA,EACnB,wBAAwB;AAAA,EACxB,uBAAuB;AAAA,EACvB,eAAe;AAAA,EACf,aAAa;AAAA,EACb,sBAAsB;AAAA,EACtB,4BAA4B;AAAA,EAC5B,sBAAsB;AACxB;AAWO,IAAM,wBAAwB;AAAA,EACnC,sBAAsB;AAAA,EACtB,sBAAsB;AAAA,EACtB,oBAAoB;AAAA,EACpB,wBAAwB;AAAA,EACxB,kBAAkB;AAAA,EAClB,oBAAoB;AAAA,EACpB,4BAA4B;AAC9B;AAOO,IAAM,uBAAuB;AAAA,EAClC,wBAAwB;AAAA,EACxB,gCAAgC;AAAA,EAChC,kBAAkB;AACpB;AASO,IAAM,4BAA4B;AAAA,EACvC,+BAA+B;AAAA,EAC/B,iBAAiB;AAAA,EACjB,kCAAkC;AAAA,EAClC,kCAAkC;AAAA,EAClC,8BAA8B;AAChC;AAQO,IAAM,kBAAkB;AAAA,KAC1B;AAAA,KACA;AAAA,KACA;AAAA,KACA;AACL;;;AC3GA;AAIO,IAAM,gCAAgC;AAAA,EAC3C,MAAM;AAAA,EACN,YAAY;AAAA,IACV,UAAU;AAAA,MACR,OAAO;AAAA,MACP,aAAa;AAAA,IACf;AAAA,IACA,iBAAiB;AAAA,MACf,MAAM;AAAA,MACN,aAAa;AAAA,MACb,YAAY;AAAA,QACV,UAAU;AAAA,UACR,MAAM;AAAA,UACN,MAAM,OAAO,OAAO,eAAe;AAAA,UACnC,aAAa;AAAA,UACb,SAAS;AAAA,QACX;AAAA,QACA,YAAY;AAAA,UACV,MAAM;AAAA,UACN,aAAa;AAAA,QACf;AAAA,QACA,OAAO;AAAA,UACL,MAAM;AAAA,UACN,MAAM,OAAO,OAAO,oBAAoB;AAAA,UACxC,aAAa;AAAA,UACb,SAAS;AAAA,QACX;AAAA,QACA,QAAQ;AAAA,UACN,MAAM;AAAA,UACN,MAAM,CAAC,OAAO,OAAO,UAAU,QAAQ,OAAO;AAAA,UAC9C,aAAa;AAAA,UACb,SAAS;AAAA,QACX;AAAA,QACA,qBAAqB;AAAA,UACnB,MAAM;AAAA,UACN,OAAO,EAAE,MAAM,SAAS;AAAA,UACxB,aAAa;AAAA,UACb,eAAe;AAAA,QACjB;AAAA,QACA,sBAAsB;AAAA,UACpB,MAAM;AAAA,UACN,SAAS;AAAA,QACX;AAAA,QACA,sBAAsB;AAAA,UACpB,MAAM;AAAA,UACN,SAAS;AAAA,QACX;AAAA,QACA,0BAA0B;AAAA,UACxB,MAAM;AAAA,UACN,aAAa;AAAA,QACf;AAAA,QACA,mBAAmB;AAAA,UACjB,MAAM;AAAA,UACN,aAAa;AAAA,QACf;AAAA,QACA,SAAS;AAAA,UACP,MAAM;AAAA,UACN,MAAM,CAAC,QAAQ,cAAc,KAAK;AAAA,UAClC,aAAa;AAAA,UACb,SAAS;AAAA,QACX;AAAA,QACA,WAAW;AAAA,UACT,MAAM;AAAA,UACN,aAAa;AAAA,UACb,SAAS;AAAA,QACX;AAAA,QACA,gBAAgB;AAAA,UACd,MAAM;AAAA,UACN,aAAa;AAAA,QACf;AAAA,QACA,KAAK;AAAA,UACH,MAAM;AAAA,UACN,aAAa;AAAA,UACb,SAAS;AAAA,QACX;AAAA,MACF;AAAA,MACA,UAAU,CAAC,cAAc,UAAU;AAAA,MACnC,sBAAsB;AAAA,MACtB,IAAI;AAAA,QACF,YAAY;AAAA,UACV,UAAU;AAAA,YACR,OAAO;AAAA,UACT;AAAA,QACF;AAAA,MACF;AAAA,MACA,MAAM;AAAA,QACJ,UAAU,CAAC,mBAAmB;AAAA,MAChC;AAAA,IACF;AAAA,EACF;AAAA,EACA,UAAU,CAAC,YAAY,iBAAiB;AAAA,EACxC,sBAAsB;AACxB;AAEO,IAAM,sCAAsC;AAAA,EACjD,MAAM;AAAA,EACN,YAAY;AAAA,OACP,kBAAkB;AAAA,OAClB,8BAA8B;AAAA,EACnC;AAAA,EACA,UAAU,CAAC,GAAG,kBAAkB,UAAU,GAAG,8BAA8B,QAAQ;AAAA,EACnF,sBAAsB;AACxB;AAIO,IAAM,sCAAsC;AAAA,EACjD,MAAM;AAAA,EACN,YAAY;AAAA,OACP,kBAAkB;AAAA,OAClB,8BAA8B;AAAA,EACnC;AAAA,EACA,UAAU,CAAC,GAAG,kBAAkB,UAAU,GAAG,8BAA8B,QAAQ;AAAA,EACnF,sBAAsB;AACxB;",
+"debugId": "633B1866FC05294D64756E2164756E21",
+"names": []
+}
package/dist/index.js CHANGED
@@ -4,30 +4,30 @@ import {
AnthropicModelRecordSchema,
AnthropicModelSchema,
AnthropicProvider
-} from "./index-
+} from "./index-edjqamf9.js";
import {
GOOGLE_GEMINI,
GeminiModelConfigSchema,
GeminiModelRecordSchema,
GeminiModelSchema,
GoogleGeminiProvider
-} from "./index-
+} from "./index-qy5ksm4w.js";
import {
-HfTransformersOnnxModelConfigSchema,
-HfTransformersOnnxModelRecordSchema,
-HfTransformersOnnxModelSchema,
HuggingFaceTransformersProvider
-} from "./index-
+} from "./index-795ethaq.js";
import {
AudioPipelineUseCase,
HF_TRANSFORMERS_ONNX,
HTF_CACHE_NAME,
+HfTransformersOnnxModelConfigSchema,
+HfTransformersOnnxModelRecordSchema,
+HfTransformersOnnxModelSchema,
MultimodalPipelineUseCase,
PipelineUseCase,
QuantizationDataType,
TextPipelineUseCase,
VisionPipelineUseCase
-} from "./index-
+} from "./index-zqq3kw0n.js";
import {
LLAMACPP_DEFAULT_MODELS_DIR,
LOCAL_LLAMACPP,
@@ -57,8 +57,8 @@
OpenAiModelRecordSchema,
OpenAiModelSchema,
OpenAiProvider
-} from "./index-
-import"./index-
+} from "./index-p88ezt14.js";
+import"./index-6j5pq722.js";
// src/tf-mediapipe/common/TFMP_Constants.ts
var TENSORFLOW_MEDIAPIPE = "TENSORFLOW_MEDIAPIPE";
var TextPipelineTask = {
@@ -211,4 +211,4 @@ export {
ANTHROPIC
};

-//# debugId=
+//# debugId=C02419B94BE3E93F64756E2164756E21
package/dist/index.js.map CHANGED
@@ -7,6 +7,6 @@
"/**\n * @license\n * Copyright 2025 Steven Roussey <sroussey@gmail.com>\n * SPDX-License-Identifier: Apache-2.0\n */\n\nimport { AiProvider, type AiProviderReactiveRunFn, type AiProviderRunFn } from \"@workglow/ai\";\nimport { TENSORFLOW_MEDIAPIPE } from \"./common/TFMP_Constants\";\nimport type { TFMPModelConfig } from \"./common/TFMP_ModelSchema\";\n\n/**\n * AI provider for TensorFlow MediaPipe models.\n *\n * Supports text, vision, and gesture recognition tasks via @mediapipe packages.\n *\n * Task run functions are injected via the constructor so that the heavy\n * `@mediapipe/*` libraries are only imported where actually needed\n * (inline mode, worker server), not on the main thread in worker mode.\n *\n * @example\n * ```typescript\n * // Worker mode (main thread) -- lightweight, no heavy imports:\n * await new TensorFlowMediaPipeProvider().register({\n * mode: \"worker\",\n * worker: new Worker(new URL(\"./worker_tfmp.ts\", import.meta.url), { type: \"module\" }),\n * });\n *\n * // Inline mode -- caller provides the tasks:\n * import { TFMP_TASKS } from \"@workglow/ai-provider/tf-mediapipe\";\n * await new TensorFlowMediaPipeProvider(TFMP_TASKS).register({ mode: \"inline\" });\n *\n * // Worker side -- caller provides the tasks:\n * import { TFMP_TASKS } from \"@workglow/ai-provider/tf-mediapipe\";\n * new TensorFlowMediaPipeProvider(TFMP_TASKS).registerOnWorkerServer(workerServer);\n * ```\n */\nexport class TensorFlowMediaPipeProvider extends AiProvider<TFMPModelConfig> {\n readonly name = TENSORFLOW_MEDIAPIPE;\n\n readonly taskTypes = [\n \"DownloadModelTask\",\n \"UnloadModelTask\",\n \"TextEmbeddingTask\",\n \"TextLanguageDetectionTask\",\n \"TextClassificationTask\",\n \"ImageSegmentationTask\",\n \"ImageEmbeddingTask\",\n \"ImageClassificationTask\",\n \"ObjectDetectionTask\",\n \"GestureRecognizerTask\",\n \"HandLandmarkerTask\",\n \"FaceDetectorTask\",\n \"FaceLandmarkerTask\",\n \"PoseLandmarkerTask\",\n ] as const;\n\n constructor(\n tasks?: Record<string, AiProviderRunFn<any, any, TFMPModelConfig>>,\n reactiveTasks?: Record<string, AiProviderReactiveRunFn<any, any, TFMPModelConfig>>\n ) {\n super(tasks, undefined, reactiveTasks);\n }\n}\n"
],
"mappings": ";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAMO,IAAM,uBAAuB;AAsB7B,IAAM,mBAAmB;AAAA,EAC9B,iBAAiB;AAAA,EACjB,mBAAmB;AAAA,EACnB,0BAA0B;AAAA,EAC1B,cAAc;AAAA,EACd,oBAAoB;AAAA,EACpB,kBAAkB;AAAA,EAClB,wBAAwB;AAAA,EACxB,0BAA0B;AAAA,EAC1B,wBAAwB;AAAA,EACxB,6BAA6B;AAAA,EAC7B,0BAA0B;AAAA,EAC1B,8BAA8B;AAAA,EAC9B,2BAA2B;AAAA,EAC3B,yBAAyB;AAAA,EACzB,0BAA0B;AAAA,EAC1B,sCAAsC;AAAA,EACtC,0BAA0B;AAAA,EAC1B,0BAA0B;AAC5B;;ACzCA;AAIO,IAAM,kBAAkB;AAAA,EAC7B,MAAM;AAAA,EACN,YAAY;AAAA,IACV,UAAU;AAAA,MACR,OAAO;AAAA,MACP,aAAa;AAAA,IACf;AAAA,IACA,iBAAiB;AAAA,MACf,MAAM;AAAA,MACN,aAAa;AAAA,MACb,YAAY;AAAA,QACV,YAAY;AAAA,UACV,MAAM;AAAA,UACN,aAAa;AAAA,QACf;AAAA,QACA,aAAa;AAAA,UACX,MAAM;AAAA,UACN,MAAM,CAAC,QAAQ,SAAS,UAAU,OAAO;AAAA,UACzC,aAAa;AAAA,QACf;AAAA,QACA,UAAU;AAAA,UACR,MAAM;AAAA,UACN,MAAM,OAAO,OAAO,gBAAgB;AAAA,UACpC,aAAa;AAAA,QACf;AAAA,MACF;AAAA,MACA,UAAU,CAAC,cAAc,eAAe,UAAU;AAAA,MAClD,sBAAsB;AAAA,IACxB;AAAA,EACF;AAAA,EACA,UAAU,CAAC,YAAY,iBAAiB;AAAA,EACxC,sBAAsB;AACxB;AAEO,IAAM,wBAAwB;AAAA,EACnC,MAAM;AAAA,EACN,YAAY;AAAA,OACP,kBAAkB;AAAA,OAClB,gBAAgB;AAAA,EACrB;AAAA,EACA,UAAU,CAAC,GAAG,kBAAkB,UAAU,GAAG,gBAAgB,QAAQ;AAAA,EACrE,sBAAsB;AACxB;AAIO,IAAM,wBAAwB;AAAA,EACnC,MAAM;AAAA,EACN,YAAY;AAAA,OACP,kBAAkB;AAAA,OAClB,gBAAgB;AAAA,EACrB;AAAA,EACA,UAAU,CAAC,GAAG,kBAAkB,UAAU,GAAG,gBAAgB,QAAQ;AAAA,EACrE,sBAAsB;AACxB;;AC1DA;AA8BO,MAAM,oCAAoC,WAA4B;AAAA,EAClE,OAAO;AAAA,EAEP,YAAY;AAAA,IACnB;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF;AAAA,EAEA,WAAW,CACT,OACA,eACA;AAAA,IACA,MAAM,OAAO,WAAW,aAAa;AAAA;AAEzC;",
-"debugId": "
+"debugId": "C02419B94BE3E93F64756E2164756E21",
"names": []
}
@@ -1 +1 @@
-
{"version":3,"file":"HFI_JobRunFns.d.ts","sourceRoot":"","sources":["../../../src/provider-hf-inference/common/HFI_JobRunFns.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAGH,OAAO,KAAK,EACV,eAAe,EACf,kBAAkB,EAClB,sBAAsB,EACtB,uBAAuB,EACvB,uBAAuB,EACvB,wBAAwB,EACxB,qBAAqB,EACrB,sBAAsB,EACtB,oBAAoB,EACpB,qBAAqB,EACtB,MAAM,cAAc,CAAC;
+
{"version":3,"file":"HFI_JobRunFns.d.ts","sourceRoot":"","sources":["../../../src/provider-hf-inference/common/HFI_JobRunFns.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAGH,OAAO,KAAK,EACV,eAAe,EACf,kBAAkB,EAClB,sBAAsB,EACtB,uBAAuB,EACvB,uBAAuB,EACvB,wBAAwB,EACxB,qBAAqB,EACrB,sBAAsB,EACtB,oBAAoB,EACpB,qBAAqB,EACtB,MAAM,cAAc,CAAC;AAGtB,OAAO,KAAK,EAAE,sBAAsB,EAAE,MAAM,mBAAmB,CAAC;AA2ChE,eAAO,MAAM,kBAAkB,EAAE,eAAe,CAC9C,uBAAuB,EACvB,wBAAwB,EACxB,sBAAsB,CA2BvB,CAAC;AAEF,eAAO,MAAM,iBAAiB,EAAE,eAAe,CAC7C,sBAAsB,EACtB,uBAAuB,EACvB,sBAAsB,CAyCvB,CAAC;AAEF,eAAO,MAAM,gBAAgB,EAAE,eAAe,CAC5C,qBAAqB,EACrB,sBAAsB,EACtB,sBAAsB,CAqBvB,CAAC;AAEF,eAAO,MAAM,eAAe,EAAE,eAAe,CAC3C,oBAAoB,EACpB,qBAAqB,EACrB,sBAAsB,CAqBvB,CAAC;AAMF,eAAO,MAAM,yBAAyB,EAAE,kBAAkB,CACxD,uBAAuB,EACvB,wBAAwB,EACxB,sBAAsB,CA0BvB,CAAC;AAEF,eAAO,MAAM,uBAAuB,EAAE,kBAAkB,CACtD,qBAAqB,EACrB,sBAAsB,EACtB,sBAAsB,CAyBvB,CAAC;AAEF,eAAO,MAAM,sBAAsB,EAAE,kBAAkB,CACrD,oBAAoB,EACpB,qBAAqB,EACrB,sBAAsB,CAyBvB,CAAC;AAMF,eAAO,MAAM,SAAS,EAAE,MAAM,CAAC,MAAM,EAAE,eAAe,CAAC,GAAG,EAAE,GAAG,EAAE,sBAAsB,CAAC,CAKvF,CAAC;AAEF,eAAO,MAAM,gBAAgB,EAAE,MAAM,CACnC,MAAM,EACN,kBAAkB,CAAC,GAAG,EAAE,GAAG,EAAE,sBAAsB,CAAC,CAKrD,CAAC"}
@@ -1 +1 @@
-
{"version":3,"file":"HFI_ModelSchema.d.ts","sourceRoot":"","sources":["../../../src/provider-hf-inference/common/HFI_ModelSchema.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAGH,OAAO,EAAwB,UAAU,EAAE,MAAM,gBAAgB,CAAC;AAGlE,eAAO,MAAM,sBAAsB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
{"version":3,"file":"HFI_ModelSchema.d.ts","sourceRoot":"","sources":["../../../src/provider-hf-inference/common/HFI_ModelSchema.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAGH,OAAO,EAAwB,UAAU,EAAE,MAAM,gBAAgB,CAAC;AAGlE,eAAO,MAAM,sBAAsB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CAiCM,CAAC;AAE1C,eAAO,MAAM,4BAA4B;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CAQA,CAAC;AAE1C,MAAM,MAAM,sBAAsB,GAAG,UAAU,CAAC,OAAO,4BAA4B,CAAC,CAAC;AAErF,eAAO,MAAM,4BAA4B;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CAQA,CAAC;AAE1C,MAAM,MAAM,sBAAsB,GAAG,UAAU,CAAC,OAAO,4BAA4B,CAAC,CAAC"}
@@ -7,8 +7,9 @@ import {
} from "../index-3tvpdt0s.js";
import {
__require
-} from "../index-
+} from "../index-6j5pq722.js";
// src/provider-hf-inference/common/HFI_JobRunFns.ts
+import { getLogger } from "@workglow/util";
var _sdk;
async function loadHfInferenceSDK() {
if (!_sdk) {
@@ -39,6 +40,9 @@ function getProvider(model) {
return model?.provider_config?.provider;
}
var HFI_TextGeneration = async (input, model, update_progress, signal) => {
+const logger = getLogger();
+const timerLabel = `hfi:TextGeneration:${model?.provider_config?.model_name}`;
+logger.time(timerLabel, { model: model?.provider_config?.model_name });
update_progress(0, "Starting HF Inference text generation");
const client = await getClient(model);
const modelName = getModelName(model);
@@ -53,9 +57,13 @@ var HFI_TextGeneration = async (input, model, update_progress, signal) => {
provider
}, { signal });
update_progress(100, "Completed HF Inference text generation");
+logger.timeEnd(timerLabel, { model: model?.provider_config?.model_name });
return { text: response.choices[0]?.message?.content ?? "" };
};
var HFI_TextEmbedding = async (input, model, update_progress, signal) => {
+const logger = getLogger();
+const timerLabel = `hfi:TextEmbedding:${model?.provider_config?.model_name}`;
+logger.time(timerLabel, { model: model?.provider_config?.model_name });
update_progress(0, "Starting HF Inference text embedding");
const client = await getClient(model);
const modelName = getModelName(model);
@@ -65,6 +73,7 @@ var HFI_TextEmbedding = async (input, model, update_progress, signal) => {
inputs: text
}, { signal })));
update_progress(100, "Completed HF Inference text embedding");
+logger.timeEnd(timerLabel, { model: model?.provider_config?.model_name, batch: true });
return {
vector: embeddings.map((embedding2) => new Float32Array(embedding2))
};
@@ -74,6 +83,7 @@ var HFI_TextEmbedding = async (input, model, update_progress, signal) => {
inputs: input.text
}, { signal });
update_progress(100, "Completed HF Inference text embedding");
+logger.timeEnd(timerLabel, { model: model?.provider_config?.model_name });
return { vector: new Float32Array(embedding) };
};
var HFI_TextRewriter = async (input, model, update_progress, signal) => {
@@ -181,11 +191,11 @@ var HFI_STREAM_TASKS = {
TextSummaryTask: HFI_TextSummary_Stream
};
// src/provider-hf-inference/HFI_Worker.ts
-import { globalServiceRegistry,
+import { globalServiceRegistry, WORKER_SERVER } from "@workglow/util";
function HFI_WORKER_JOBRUN_REGISTER() {
const workerServer = globalServiceRegistry.get(WORKER_SERVER);
new HfInferenceProvider(HFI_TASKS, HFI_STREAM_TASKS).registerOnWorkerServer(workerServer);
-
+workerServer.sendReady();
console.log("HFI_WORKER_JOBRUN registered");
}
export {
@@ -206,4 +216,4 @@ export {
HFI_STREAM_TASKS
};

-//# debugId=
+//# debugId=B9C2C3DC5FE3286564756E2164756E21
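The provider-hf-inference hunks above add logger.time()/logger.timeEnd() calls around the text-generation and text-embedding run functions, using getLogger() from @workglow/util. A hedged sketch of the same timing pattern factored into a helper — the try/finally wrapper is illustrative and is not part of the generated code shown above:

import { getLogger } from "@workglow/util";

// Times an async run and always closes the timer, even when the call throws.
async function timedRun<T>(
  label: string,
  modelName: string | undefined,
  run: () => Promise<T>
): Promise<T> {
  const logger = getLogger();
  logger.time(label, { model: modelName });
  try {
    return await run();
  } finally {
    logger.timeEnd(label, { model: modelName });
  }
}

// e.g. await timedRun(`hfi:TextGeneration:${modelName}`, modelName, () => client.chatCompletion(request, { signal }));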
@@ -2,10 +2,10 @@
"version": 3,
"sources": ["../src/provider-hf-inference/common/HFI_JobRunFns.ts", "../src/provider-hf-inference/HFI_Worker.ts"],
"sourcesContent": [
-
"/**\n * @license\n * Copyright 2025 Steven Roussey <sroussey@gmail.com>\n * SPDX-License-Identifier: Apache-2.0\n */\n\nimport type { InferenceProviderOrPolicy } from \"@huggingface/inference\";\nimport type {\n AiProviderRunFn,\n AiProviderStreamFn,\n TextEmbeddingTaskInput,\n TextEmbeddingTaskOutput,\n TextGenerationTaskInput,\n TextGenerationTaskOutput,\n TextRewriterTaskInput,\n TextRewriterTaskOutput,\n TextSummaryTaskInput,\n TextSummaryTaskOutput,\n} from \"@workglow/ai\";\nimport type { StreamEvent } from \"@workglow/task-graph\";\nimport type { HfInferenceModelConfig } from \"./HFI_ModelSchema\";\n\nlet _sdk: typeof import(\"@huggingface/inference\") | undefined;\nasync function loadHfInferenceSDK() {\n if (!_sdk) {\n try {\n _sdk = await import(\"@huggingface/inference\");\n } catch {\n throw new Error(\n \"@huggingface/inference is required for Hugging Face Inference tasks. Install it with: bun add @huggingface/inference\"\n );\n }\n }\n return _sdk;\n}\n\nasync function getClient(model: HfInferenceModelConfig | undefined) {\n const sdk = await loadHfInferenceSDK();\n const apiKey =\n model?.provider_config?.api_key ||\n (typeof process !== \"undefined\" ? process.env?.HF_TOKEN : undefined);\n if (!apiKey) {\n throw new Error(\n \"Missing Hugging Face API key: set provider_config.api_key or the HF_TOKEN environment variable.\"\n );\n }\n return new sdk.InferenceClient(apiKey);\n}\n\nfunction getModelName(model: HfInferenceModelConfig | undefined): string {\n const name = model?.provider_config?.model_name;\n if (!name) {\n throw new Error(\"Missing model name in provider_config.model_name.\");\n }\n return name;\n}\n\nfunction getProvider(\n model: HfInferenceModelConfig | undefined\n): InferenceProviderOrPolicy | undefined {\n return model?.provider_config?.provider as InferenceProviderOrPolicy | undefined;\n}\n\nexport const HFI_TextGeneration: AiProviderRunFn<\n TextGenerationTaskInput,\n TextGenerationTaskOutput,\n HfInferenceModelConfig\n> = async (input, model, update_progress, signal) => {\n update_progress(0, \"Starting HF Inference text generation\");\n const client = await getClient(model);\n const modelName = getModelName(model);\n const provider = getProvider(model);\n\n const response = await client.chatCompletion(\n {\n model: modelName,\n messages: [{ role: \"user\", content: input.prompt }],\n max_tokens: input.maxTokens,\n temperature: input.temperature,\n top_p: input.topP,\n frequency_penalty: input.frequencyPenalty,\n provider,\n },\n { signal }\n );\n\n update_progress(100, \"Completed HF Inference text generation\");\n return { text: response.choices[0]?.message?.content ?? 
\"\" };\n};\n\nexport const HFI_TextEmbedding: AiProviderRunFn<\n TextEmbeddingTaskInput,\n TextEmbeddingTaskOutput,\n HfInferenceModelConfig\n> = async (input, model, update_progress, signal) => {\n update_progress(0, \"Starting HF Inference text embedding\");\n const client = await getClient(model);\n const modelName = getModelName(model);\n\n if (Array.isArray(input.text)) {\n const embeddings = await Promise.all(\n input.text.map((text) =>\n client.featureExtraction(\n {\n model: modelName,\n inputs: text,\n },\n { signal }\n )\n )\n );\n\n update_progress(100, \"Completed HF Inference text embedding\");\n return {\n vector: embeddings.map((embedding) => new Float32Array(embedding as unknown as number[])),\n };\n }\n\n const embedding = await client.featureExtraction(\n {\n model: modelName,\n inputs: input.text,\n },\n { signal }\n );\n\n update_progress(100, \"Completed HF Inference text embedding\");\n return { vector: new Float32Array(embedding as unknown as number[]) };\n};\n\nexport const HFI_TextRewriter: AiProviderRunFn<\n TextRewriterTaskInput,\n TextRewriterTaskOutput,\n HfInferenceModelConfig\n> = async (input, model, update_progress, signal) => {\n update_progress(0, \"Starting HF Inference text rewriting\");\n const client = await getClient(model);\n const modelName = getModelName(model);\n const provider = getProvider(model);\n\n const response = await client.chatCompletion(\n {\n model: modelName,\n messages: [\n { role: \"system\", content: input.prompt },\n { role: \"user\", content: input.text },\n ],\n provider,\n },\n { signal }\n );\n\n update_progress(100, \"Completed HF Inference text rewriting\");\n return { text: response.choices[0]?.message?.content ?? \"\" };\n};\n\nexport const HFI_TextSummary: AiProviderRunFn<\n TextSummaryTaskInput,\n TextSummaryTaskOutput,\n HfInferenceModelConfig\n> = async (input, model, update_progress, signal) => {\n update_progress(0, \"Starting HF Inference text summarization\");\n const client = await getClient(model);\n const modelName = getModelName(model);\n const provider = getProvider(model);\n\n const response = await client.chatCompletion(\n {\n model: modelName,\n messages: [\n { role: \"system\", content: \"Summarize the following text concisely.\" },\n { role: \"user\", content: input.text },\n ],\n provider,\n },\n { signal }\n );\n\n update_progress(100, \"Completed HF Inference text summarization\");\n return { text: response.choices[0]?.message?.content ?? \"\" };\n};\n\n// ========================================================================\n// Streaming implementations (append mode)\n// ========================================================================\n\nexport const HFI_TextGeneration_Stream: AiProviderStreamFn<\n TextGenerationTaskInput,\n TextGenerationTaskOutput,\n HfInferenceModelConfig\n> = async function* (input, model, signal): AsyncIterable<StreamEvent<TextGenerationTaskOutput>> {\n const client = await getClient(model);\n const modelName = getModelName(model);\n const provider = getProvider(model);\n\n const stream = client.chatCompletionStream(\n {\n model: modelName,\n messages: [{ role: \"user\", content: input.prompt }],\n max_tokens: input.maxTokens,\n temperature: input.temperature,\n top_p: input.topP,\n frequency_penalty: input.frequencyPenalty,\n provider,\n },\n { signal }\n );\n\n for await (const chunk of stream) {\n const delta = chunk.choices[0]?.delta?.content ?? 
\"\";\n if (delta) {\n yield { type: \"text-delta\", port: \"text\", textDelta: delta };\n }\n }\n yield { type: \"finish\", data: {} as TextGenerationTaskOutput };\n};\n\nexport const HFI_TextRewriter_Stream: AiProviderStreamFn<\n TextRewriterTaskInput,\n TextRewriterTaskOutput,\n HfInferenceModelConfig\n> = async function* (input, model, signal): AsyncIterable<StreamEvent<TextRewriterTaskOutput>> {\n const client = await getClient(model);\n const modelName = getModelName(model);\n const provider = getProvider(model);\n\n const stream = client.chatCompletionStream(\n {\n model: modelName,\n messages: [\n { role: \"system\", content: input.prompt },\n { role: \"user\", content: input.text },\n ],\n provider,\n },\n { signal }\n );\n\n for await (const chunk of stream) {\n const delta = chunk.choices[0]?.delta?.content ?? \"\";\n if (delta) {\n yield { type: \"text-delta\", port: \"text\", textDelta: delta };\n }\n }\n yield { type: \"finish\", data: {} as TextRewriterTaskOutput };\n};\n\nexport const HFI_TextSummary_Stream: AiProviderStreamFn<\n TextSummaryTaskInput,\n TextSummaryTaskOutput,\n HfInferenceModelConfig\n> = async function* (input, model, signal): AsyncIterable<StreamEvent<TextSummaryTaskOutput>> {\n const client = await getClient(model);\n const modelName = getModelName(model);\n const provider = getProvider(model);\n\n const stream = client.chatCompletionStream(\n {\n model: modelName,\n messages: [\n { role: \"system\", content: \"Summarize the following text concisely.\" },\n { role: \"user\", content: input.text },\n ],\n provider,\n },\n { signal }\n );\n\n for await (const chunk of stream) {\n const delta = chunk.choices[0]?.delta?.content ?? \"\";\n if (delta) {\n yield { type: \"text-delta\", port: \"text\", textDelta: delta };\n }\n }\n yield { type: \"finish\", data: {} as TextSummaryTaskOutput };\n};\n\n// ========================================================================\n// Task registries\n// ========================================================================\n\nexport const HFI_TASKS: Record<string, AiProviderRunFn<any, any, HfInferenceModelConfig>> = {\n TextGenerationTask: HFI_TextGeneration,\n TextEmbeddingTask: HFI_TextEmbedding,\n TextRewriterTask: HFI_TextRewriter,\n TextSummaryTask: HFI_TextSummary,\n};\n\nexport const HFI_STREAM_TASKS: Record<\n string,\n AiProviderStreamFn<any, any, HfInferenceModelConfig>\n> = {\n TextGenerationTask: HFI_TextGeneration_Stream,\n TextRewriterTask: HFI_TextRewriter_Stream,\n TextSummaryTask: HFI_TextSummary_Stream,\n};\n",
-
"/**\n * @license\n * Copyright 2025 Steven Roussey <sroussey@gmail.com>\n * SPDX-License-Identifier: Apache-2.0\n */\n\nimport { globalServiceRegistry,
+
"/**\n * @license\n * Copyright 2025 Steven Roussey <sroussey@gmail.com>\n * SPDX-License-Identifier: Apache-2.0\n */\n\nimport type { InferenceProviderOrPolicy } from \"@huggingface/inference\";\nimport type {\n AiProviderRunFn,\n AiProviderStreamFn,\n TextEmbeddingTaskInput,\n TextEmbeddingTaskOutput,\n TextGenerationTaskInput,\n TextGenerationTaskOutput,\n TextRewriterTaskInput,\n TextRewriterTaskOutput,\n TextSummaryTaskInput,\n TextSummaryTaskOutput,\n} from \"@workglow/ai\";\nimport type { StreamEvent } from \"@workglow/task-graph\";\nimport { getLogger } from \"@workglow/util\";\nimport type { HfInferenceModelConfig } from \"./HFI_ModelSchema\";\n\nlet _sdk: typeof import(\"@huggingface/inference\") | undefined;\nasync function loadHfInferenceSDK() {\n if (!_sdk) {\n try {\n _sdk = await import(\"@huggingface/inference\");\n } catch {\n throw new Error(\n \"@huggingface/inference is required for Hugging Face Inference tasks. Install it with: bun add @huggingface/inference\"\n );\n }\n }\n return _sdk;\n}\n\nasync function getClient(model: HfInferenceModelConfig | undefined) {\n const sdk = await loadHfInferenceSDK();\n const apiKey =\n model?.provider_config?.api_key ||\n (typeof process !== \"undefined\" ? process.env?.HF_TOKEN : undefined);\n if (!apiKey) {\n throw new Error(\n \"Missing Hugging Face API key: set provider_config.api_key or the HF_TOKEN environment variable.\"\n );\n }\n return new sdk.InferenceClient(apiKey);\n}\n\nfunction getModelName(model: HfInferenceModelConfig | undefined): string {\n const name = model?.provider_config?.model_name;\n if (!name) {\n throw new Error(\"Missing model name in provider_config.model_name.\");\n }\n return name;\n}\n\nfunction getProvider(\n model: HfInferenceModelConfig | undefined\n): InferenceProviderOrPolicy | undefined {\n return model?.provider_config?.provider as InferenceProviderOrPolicy | undefined;\n}\n\nexport const HFI_TextGeneration: AiProviderRunFn<\n TextGenerationTaskInput,\n TextGenerationTaskOutput,\n HfInferenceModelConfig\n> = async (input, model, update_progress, signal) => {\n const logger = getLogger();\n const timerLabel = `hfi:TextGeneration:${model?.provider_config?.model_name}`;\n logger.time(timerLabel, { model: model?.provider_config?.model_name });\n\n update_progress(0, \"Starting HF Inference text generation\");\n const client = await getClient(model);\n const modelName = getModelName(model);\n const provider = getProvider(model);\n\n const response = await client.chatCompletion(\n {\n model: modelName,\n messages: [{ role: \"user\", content: input.prompt }],\n max_tokens: input.maxTokens,\n temperature: input.temperature,\n top_p: input.topP,\n frequency_penalty: input.frequencyPenalty,\n provider,\n },\n { signal }\n );\n\n update_progress(100, \"Completed HF Inference text generation\");\n logger.timeEnd(timerLabel, { model: model?.provider_config?.model_name });\n return { text: response.choices[0]?.message?.content ?? 
\"\" };\n};\n\nexport const HFI_TextEmbedding: AiProviderRunFn<\n TextEmbeddingTaskInput,\n TextEmbeddingTaskOutput,\n HfInferenceModelConfig\n> = async (input, model, update_progress, signal) => {\n const logger = getLogger();\n const timerLabel = `hfi:TextEmbedding:${model?.provider_config?.model_name}`;\n logger.time(timerLabel, { model: model?.provider_config?.model_name });\n\n update_progress(0, \"Starting HF Inference text embedding\");\n const client = await getClient(model);\n const modelName = getModelName(model);\n\n if (Array.isArray(input.text)) {\n const embeddings = await Promise.all(\n input.text.map((text) =>\n client.featureExtraction(\n {\n model: modelName,\n inputs: text,\n },\n { signal }\n )\n )\n );\n\n update_progress(100, \"Completed HF Inference text embedding\");\n logger.timeEnd(timerLabel, { model: model?.provider_config?.model_name, batch: true });\n return {\n vector: embeddings.map((embedding) => new Float32Array(embedding as unknown as number[])),\n };\n }\n\n const embedding = await client.featureExtraction(\n {\n model: modelName,\n inputs: input.text,\n },\n { signal }\n );\n\n update_progress(100, \"Completed HF Inference text embedding\");\n logger.timeEnd(timerLabel, { model: model?.provider_config?.model_name });\n return { vector: new Float32Array(embedding as unknown as number[]) };\n};\n\nexport const HFI_TextRewriter: AiProviderRunFn<\n TextRewriterTaskInput,\n TextRewriterTaskOutput,\n HfInferenceModelConfig\n> = async (input, model, update_progress, signal) => {\n update_progress(0, \"Starting HF Inference text rewriting\");\n const client = await getClient(model);\n const modelName = getModelName(model);\n const provider = getProvider(model);\n\n const response = await client.chatCompletion(\n {\n model: modelName,\n messages: [\n { role: \"system\", content: input.prompt },\n { role: \"user\", content: input.text },\n ],\n provider,\n },\n { signal }\n );\n\n update_progress(100, \"Completed HF Inference text rewriting\");\n return { text: response.choices[0]?.message?.content ?? \"\" };\n};\n\nexport const HFI_TextSummary: AiProviderRunFn<\n TextSummaryTaskInput,\n TextSummaryTaskOutput,\n HfInferenceModelConfig\n> = async (input, model, update_progress, signal) => {\n update_progress(0, \"Starting HF Inference text summarization\");\n const client = await getClient(model);\n const modelName = getModelName(model);\n const provider = getProvider(model);\n\n const response = await client.chatCompletion(\n {\n model: modelName,\n messages: [\n { role: \"system\", content: \"Summarize the following text concisely.\" },\n { role: \"user\", content: input.text },\n ],\n provider,\n },\n { signal }\n );\n\n update_progress(100, \"Completed HF Inference text summarization\");\n return { text: response.choices[0]?.message?.content ?? 
\"\" };\n};\n\n// ========================================================================\n// Streaming implementations (append mode)\n// ========================================================================\n\nexport const HFI_TextGeneration_Stream: AiProviderStreamFn<\n TextGenerationTaskInput,\n TextGenerationTaskOutput,\n HfInferenceModelConfig\n> = async function* (input, model, signal): AsyncIterable<StreamEvent<TextGenerationTaskOutput>> {\n const client = await getClient(model);\n const modelName = getModelName(model);\n const provider = getProvider(model);\n\n const stream = client.chatCompletionStream(\n {\n model: modelName,\n messages: [{ role: \"user\", content: input.prompt }],\n max_tokens: input.maxTokens,\n temperature: input.temperature,\n top_p: input.topP,\n frequency_penalty: input.frequencyPenalty,\n provider,\n },\n { signal }\n );\n\n for await (const chunk of stream) {\n const delta = chunk.choices[0]?.delta?.content ?? \"\";\n if (delta) {\n yield { type: \"text-delta\", port: \"text\", textDelta: delta };\n }\n }\n yield { type: \"finish\", data: {} as TextGenerationTaskOutput };\n};\n\nexport const HFI_TextRewriter_Stream: AiProviderStreamFn<\n TextRewriterTaskInput,\n TextRewriterTaskOutput,\n HfInferenceModelConfig\n> = async function* (input, model, signal): AsyncIterable<StreamEvent<TextRewriterTaskOutput>> {\n const client = await getClient(model);\n const modelName = getModelName(model);\n const provider = getProvider(model);\n\n const stream = client.chatCompletionStream(\n {\n model: modelName,\n messages: [\n { role: \"system\", content: input.prompt },\n { role: \"user\", content: input.text },\n ],\n provider,\n },\n { signal }\n );\n\n for await (const chunk of stream) {\n const delta = chunk.choices[0]?.delta?.content ?? \"\";\n if (delta) {\n yield { type: \"text-delta\", port: \"text\", textDelta: delta };\n }\n }\n yield { type: \"finish\", data: {} as TextRewriterTaskOutput };\n};\n\nexport const HFI_TextSummary_Stream: AiProviderStreamFn<\n TextSummaryTaskInput,\n TextSummaryTaskOutput,\n HfInferenceModelConfig\n> = async function* (input, model, signal): AsyncIterable<StreamEvent<TextSummaryTaskOutput>> {\n const client = await getClient(model);\n const modelName = getModelName(model);\n const provider = getProvider(model);\n\n const stream = client.chatCompletionStream(\n {\n model: modelName,\n messages: [\n { role: \"system\", content: \"Summarize the following text concisely.\" },\n { role: \"user\", content: input.text },\n ],\n provider,\n },\n { signal }\n );\n\n for await (const chunk of stream) {\n const delta = chunk.choices[0]?.delta?.content ?? \"\";\n if (delta) {\n yield { type: \"text-delta\", port: \"text\", textDelta: delta };\n }\n }\n yield { type: \"finish\", data: {} as TextSummaryTaskOutput };\n};\n\n// ========================================================================\n// Task registries\n// ========================================================================\n\nexport const HFI_TASKS: Record<string, AiProviderRunFn<any, any, HfInferenceModelConfig>> = {\n TextGenerationTask: HFI_TextGeneration,\n TextEmbeddingTask: HFI_TextEmbedding,\n TextRewriterTask: HFI_TextRewriter,\n TextSummaryTask: HFI_TextSummary,\n};\n\nexport const HFI_STREAM_TASKS: Record<\n string,\n AiProviderStreamFn<any, any, HfInferenceModelConfig>\n> = {\n TextGenerationTask: HFI_TextGeneration_Stream,\n TextRewriterTask: HFI_TextRewriter_Stream,\n TextSummaryTask: HFI_TextSummary_Stream,\n};\n",
+
"/**\n * @license\n * Copyright 2025 Steven Roussey <sroussey@gmail.com>\n * SPDX-License-Identifier: Apache-2.0\n */\n\nimport { globalServiceRegistry, WORKER_SERVER } from \"@workglow/util\";\nimport { HFI_STREAM_TASKS, HFI_TASKS } from \"./common/HFI_JobRunFns\";\nimport { HfInferenceProvider } from \"./HfInferenceProvider\";\n\nexport function HFI_WORKER_JOBRUN_REGISTER() {\n const workerServer = globalServiceRegistry.get(WORKER_SERVER);\n new HfInferenceProvider(HFI_TASKS, HFI_STREAM_TASKS).registerOnWorkerServer(workerServer);\n workerServer.sendReady();\n console.log(\"HFI_WORKER_JOBRUN registered\");\n}\n"
],
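For readability, the main addition inside the new HFI_JobRunFns.ts source embedded above is per-call timing via getLogger(). Unescaped, the updated HFI_TextGeneration reads as follows; getClient, getModelName, getProvider and the type imports are the helpers declared in that same source, and the logger.time / logger.timeEnd lines are the additions (the removed 0.0.102 source visible above has no logger calls in HFI_TextEmbedding).

export const HFI_TextGeneration: AiProviderRunFn<
  TextGenerationTaskInput,
  TextGenerationTaskOutput,
  HfInferenceModelConfig
> = async (input, model, update_progress, signal) => {
  // New in 0.0.103: timing around the run function
  const logger = getLogger();
  const timerLabel = `hfi:TextGeneration:${model?.provider_config?.model_name}`;
  logger.time(timerLabel, { model: model?.provider_config?.model_name });

  update_progress(0, "Starting HF Inference text generation");
  const client = await getClient(model);
  const modelName = getModelName(model);
  const provider = getProvider(model);

  const response = await client.chatCompletion(
    {
      model: modelName,
      messages: [{ role: "user", content: input.prompt }],
      max_tokens: input.maxTokens,
      temperature: input.temperature,
      top_p: input.topP,
      frequency_penalty: input.frequencyPenalty,
      provider,
    },
    { signal }
  );

  update_progress(100, "Completed HF Inference text generation");
  logger.timeEnd(timerLabel, { model: model?.provider_config?.model_name });
  return { text: response.choices[0]?.message?.content ?? "" };
};

HFI_TextEmbedding gets the same treatment, including a batch: true flag on the timeEnd call in its array-input branch.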
-
"mappings": ";;;;;;;;;;;
-
"debugId": "
+
"mappings": ";;;;;;;;;;;AAoBA;AAGA,IAAI;AACJ,eAAe,kBAAkB,GAAG;AAAA,EAClC,IAAI,CAAC,MAAM;AAAA,IACT,IAAI;AAAA,MACF,OAAO,MAAa;AAAA,MACpB,MAAM;AAAA,MACN,MAAM,IAAI,MACR,sHACF;AAAA;AAAA,EAEJ;AAAA,EACA,OAAO;AAAA;AAGT,eAAe,SAAS,CAAC,OAA2C;AAAA,EAClE,MAAM,MAAM,MAAM,mBAAmB;AAAA,EACrC,MAAM,SACJ,OAAO,iBAAiB,YACvB,OAAO,YAAY,cAAc,QAAQ,KAAK,WAAW;AAAA,EAC5D,IAAI,CAAC,QAAQ;AAAA,IACX,MAAM,IAAI,MACR,iGACF;AAAA,EACF;AAAA,EACA,OAAO,IAAI,IAAI,gBAAgB,MAAM;AAAA;AAGvC,SAAS,YAAY,CAAC,OAAmD;AAAA,EACvE,MAAM,OAAO,OAAO,iBAAiB;AAAA,EACrC,IAAI,CAAC,MAAM;AAAA,IACT,MAAM,IAAI,MAAM,mDAAmD;AAAA,EACrE;AAAA,EACA,OAAO;AAAA;AAGT,SAAS,WAAW,CAClB,OACuC;AAAA,EACvC,OAAO,OAAO,iBAAiB;AAAA;AAG1B,IAAM,qBAIT,OAAO,OAAO,OAAO,iBAAiB,WAAW;AAAA,EACnD,MAAM,SAAS,UAAU;AAAA,EACzB,MAAM,aAAa,sBAAsB,OAAO,iBAAiB;AAAA,EACjE,OAAO,KAAK,YAAY,EAAE,OAAO,OAAO,iBAAiB,WAAW,CAAC;AAAA,EAErE,gBAAgB,GAAG,uCAAuC;AAAA,EAC1D,MAAM,SAAS,MAAM,UAAU,KAAK;AAAA,EACpC,MAAM,YAAY,aAAa,KAAK;AAAA,EACpC,MAAM,WAAW,YAAY,KAAK;AAAA,EAElC,MAAM,WAAW,MAAM,OAAO,eAC5B;AAAA,IACE,OAAO;AAAA,IACP,UAAU,CAAC,EAAE,MAAM,QAAQ,SAAS,MAAM,OAAO,CAAC;AAAA,IAClD,YAAY,MAAM;AAAA,IAClB,aAAa,MAAM;AAAA,IACnB,OAAO,MAAM;AAAA,IACb,mBAAmB,MAAM;AAAA,IACzB;AAAA,EACF,GACA,EAAE,OAAO,CACX;AAAA,EAEA,gBAAgB,KAAK,wCAAwC;AAAA,EAC7D,OAAO,QAAQ,YAAY,EAAE,OAAO,OAAO,iBAAiB,WAAW,CAAC;AAAA,EACxE,OAAO,EAAE,MAAM,SAAS,QAAQ,IAAI,SAAS,WAAW,GAAG;AAAA;AAGtD,IAAM,oBAIT,OAAO,OAAO,OAAO,iBAAiB,WAAW;AAAA,EACnD,MAAM,SAAS,UAAU;AAAA,EACzB,MAAM,aAAa,qBAAqB,OAAO,iBAAiB;AAAA,EAChE,OAAO,KAAK,YAAY,EAAE,OAAO,OAAO,iBAAiB,WAAW,CAAC;AAAA,EAErE,gBAAgB,GAAG,sCAAsC;AAAA,EACzD,MAAM,SAAS,MAAM,UAAU,KAAK;AAAA,EACpC,MAAM,YAAY,aAAa,KAAK;AAAA,EAEpC,IAAI,MAAM,QAAQ,MAAM,IAAI,GAAG;AAAA,IAC7B,MAAM,aAAa,MAAM,QAAQ,IAC/B,MAAM,KAAK,IAAI,CAAC,SACd,OAAO,kBACL;AAAA,MACE,OAAO;AAAA,MACP,QAAQ;AAAA,IACV,GACA,EAAE,OAAO,CACX,CACF,CACF;AAAA,IAEA,gBAAgB,KAAK,uCAAuC;AAAA,IAC5D,OAAO,QAAQ,YAAY,EAAE,OAAO,OAAO,iBAAiB,YAAY,OAAO,KAAK,CAAC;AAAA,IACrF,OAAO;AAAA,MACL,QAAQ,WAAW,IAAI,CAAC,eAAc,IAAI,aAAa,UAAgC,CAAC;AAAA,IAC1F;AAAA,EACF;AAAA,EAEA,MAAM,YAAY,MAAM,OAAO,kBAC7B;AAAA,IACE,OAAO;AAAA,IACP,QAAQ,MAAM;AAAA,EAChB,GACA,EAAE,OAAO,CACX;AAAA,EAEA,gBAAgB,KAAK,uCAAuC;AAAA,EAC5D,OAAO,QAAQ,YAAY,EAAE,OAAO,OAAO,iBAAiB,WAAW,CAAC;AAAA,EACxE,OAAO,EAAE,QAAQ,IAAI,aAAa,SAAgC,EAAE;AAAA;AAG/D,IAAM,mBAIT,OAAO,OAAO,OAAO,iBAAiB,WAAW;AAAA,EACnD,gBAAgB,GAAG,sCAAsC;AAAA,EACzD,MAAM,SAAS,MAAM,UAAU,KAAK;AAAA,EACpC,MAAM,YAAY,aAAa,KAAK;AAAA,EACpC,MAAM,WAAW,YAAY,KAAK;AAAA,EAElC,MAAM,WAAW,MAAM,OAAO,eAC5B;AAAA,IACE,OAAO;AAAA,IACP,UAAU;AAAA,MACR,EAAE,MAAM,UAAU,SAAS,MAAM,OAAO;AAAA,MACxC,EAAE,MAAM,QAAQ,SAAS,MAAM,KAAK;AAAA,IACtC;AAAA,IACA;AAAA,EACF,GACA,EAAE,OAAO,CACX;AAAA,EAEA,gBAAgB,KAAK,uCAAuC;AAAA,EAC5D,OAAO,EAAE,MAAM,SAAS,QAAQ,IAAI,SAAS,WAAW,GAAG;AAAA;AAGtD,IAAM,kBAIT,OAAO,OAAO,OAAO,iBAAiB,WAAW;AAAA,EACnD,gBAAgB,GAAG,0CAA0C;AAAA,EAC7D,MAAM,SAAS,MAAM,UAAU,KAAK;AAAA,EACpC,MAAM,YAAY,aAAa,KAAK;AAAA,EACpC,MAAM,WAAW,YAAY,KAAK;AAAA,EAElC,MAAM,WAAW,MAAM,OAAO,eAC5B;AAAA,IACE,OAAO;AAAA,IACP,UAAU;AAAA,MACR,EAAE,MAAM,UAAU,SAAS,0CAA0C;AAAA,MACrE,EAAE,MAAM,QAAQ,SAAS,MAAM,KAAK;AAAA,IACtC;AAAA,IACA;AAAA,EACF,GACA,EAAE,OAAO,CACX;AAAA,EAEA,gBAAgB,KAAK,2CAA2C;AAAA,EAChE,OAAO,EAAE,MAAM,SAAS,QAAQ,IAAI,SAAS,WAAW,GAAG;AAAA;AAOtD,IAAM,4BAIT,gBAAgB,CAAC,OAAO,OAAO,QAA8D;AAAA,EAC/F,MAAM,SAAS,MAAM,UAAU,KAAK;AAAA,EACpC,MAAM,YAAY,aAAa,KAAK;AAAA,EACpC,MAAM,WAAW,YAAY,KAAK;AAAA,EAElC,MAAM,SAAS,OAAO,qBACpB;AAAA,IACE,OAAO;AAAA,IACP,UAAU,CAAC,EAAE,MAAM,QAAQ,SAAS,MAAM,OAAO,CAAC;AAAA,IAClD,YAAY,MAAM;AAAA,IAClB,aAAa,MAAM;AAAA,IACnB,OAAO,MAAM;AAAA,IACb,mBAAmB,MAAM;AAAA,IACzB;AAAA,EACF,GACA,EAAE,OAAO,CACX;AAAA,EAEA,iBAAiB,SAAS,QAAQ;AAAA,IAChC,MAAM,QAAQ,MAAM,QA
AQ,IAAI,OAAO,WAAW;AAAA,IAClD,IAAI,OAAO;AAAA,MACT,MAAM,EAAE,MAAM,cAAc,MAAM,QAAQ,WAAW,MAAM;AAAA,IAC7D;AAAA,EACF;AAAA,EACA,MAAM,EAAE,MAAM,UAAU,MAAM,CAAC,EAA8B;AAAA;AAGxD,IAAM,0BAIT,gBAAgB,CAAC,OAAO,OAAO,QAA4D;AAAA,EAC7F,MAAM,SAAS,MAAM,UAAU,KAAK;AAAA,EACpC,MAAM,YAAY,aAAa,KAAK;AAAA,EACpC,MAAM,WAAW,YAAY,KAAK;AAAA,EAElC,MAAM,SAAS,OAAO,qBACpB;AAAA,IACE,OAAO;AAAA,IACP,UAAU;AAAA,MACR,EAAE,MAAM,UAAU,SAAS,MAAM,OAAO;AAAA,MACxC,EAAE,MAAM,QAAQ,SAAS,MAAM,KAAK;AAAA,IACtC;AAAA,IACA;AAAA,EACF,GACA,EAAE,OAAO,CACX;AAAA,EAEA,iBAAiB,SAAS,QAAQ;AAAA,IAChC,MAAM,QAAQ,MAAM,QAAQ,IAAI,OAAO,WAAW;AAAA,IAClD,IAAI,OAAO;AAAA,MACT,MAAM,EAAE,MAAM,cAAc,MAAM,QAAQ,WAAW,MAAM;AAAA,IAC7D;AAAA,EACF;AAAA,EACA,MAAM,EAAE,MAAM,UAAU,MAAM,CAAC,EAA4B;AAAA;AAGtD,IAAM,yBAIT,gBAAgB,CAAC,OAAO,OAAO,QAA2D;AAAA,EAC5F,MAAM,SAAS,MAAM,UAAU,KAAK;AAAA,EACpC,MAAM,YAAY,aAAa,KAAK;AAAA,EACpC,MAAM,WAAW,YAAY,KAAK;AAAA,EAElC,MAAM,SAAS,OAAO,qBACpB;AAAA,IACE,OAAO;AAAA,IACP,UAAU;AAAA,MACR,EAAE,MAAM,UAAU,SAAS,0CAA0C;AAAA,MACrE,EAAE,MAAM,QAAQ,SAAS,MAAM,KAAK;AAAA,IACtC;AAAA,IACA;AAAA,EACF,GACA,EAAE,OAAO,CACX;AAAA,EAEA,iBAAiB,SAAS,QAAQ;AAAA,IAChC,MAAM,QAAQ,MAAM,QAAQ,IAAI,OAAO,WAAW;AAAA,IAClD,IAAI,OAAO;AAAA,MACT,MAAM,EAAE,MAAM,cAAc,MAAM,QAAQ,WAAW,MAAM;AAAA,IAC7D;AAAA,EACF;AAAA,EACA,MAAM,EAAE,MAAM,UAAU,MAAM,CAAC,EAA2B;AAAA;AAOrD,IAAM,YAA+E;AAAA,EAC1F,oBAAoB;AAAA,EACpB,mBAAmB;AAAA,EACnB,kBAAkB;AAAA,EAClB,iBAAiB;AACnB;AAEO,IAAM,mBAGT;AAAA,EACF,oBAAoB;AAAA,EACpB,kBAAkB;AAAA,EAClB,iBAAiB;AACnB;;AC7SA;AAIO,SAAS,0BAA0B,GAAG;AAAA,EAC3C,MAAM,eAAe,sBAAsB,IAAI,aAAa;AAAA,EAC5D,IAAI,oBAAoB,WAAW,gBAAgB,EAAE,uBAAuB,YAAY;AAAA,EACxF,aAAa,UAAU;AAAA,EACvB,QAAQ,IAAI,8BAA8B;AAAA;",
+
"debugId": "B9C2C3DC5FE3286564756E2164756E21",
"names": []
}
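The other new sourcesContent entry above is the HF Inference worker registration module. Unescaped, it is short, and its workerServer.sendReady() call mirrors the one added to the provider-llamacpp worker in the next hunk:

import { globalServiceRegistry, WORKER_SERVER } from "@workglow/util";
import { HFI_STREAM_TASKS, HFI_TASKS } from "./common/HFI_JobRunFns";
import { HfInferenceProvider } from "./HfInferenceProvider";

export function HFI_WORKER_JOBRUN_REGISTER() {
  const workerServer = globalServiceRegistry.get(WORKER_SERVER);
  new HfInferenceProvider(HFI_TASKS, HFI_STREAM_TASKS).registerOnWorkerServer(workerServer);
  workerServer.sendReady();
  console.log("HFI_WORKER_JOBRUN registered");
}

A worker entry module would presumably call HFI_WORKER_JOBRUN_REGISTER() once at startup; the sendReady() call appears intended to tell the host that the task registries (HFI_TASKS, HFI_STREAM_TASKS) are attached before any jobs are dispatched.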
@@ -8,7 +8,7 @@ import {
 } from "../index-cfd8ne0j.js";
 import {
   __require
-} from "../index-
+} from "../index-6j5pq722.js";
 // src/provider-llamacpp/common/LlamaCpp_JobRunFns.ts
 var _sdk;
 async function loadSdk() {
@@ -345,11 +345,11 @@ var LLAMACPP_REACTIVE_TASKS = {
   CountTokensTask: LlamaCpp_CountTokens_Reactive
 };
 // src/provider-llamacpp/LlamaCpp_Worker.ts
-import { globalServiceRegistry,
+import { globalServiceRegistry, WORKER_SERVER } from "@workglow/util";
 function LLAMACPP_WORKER_JOBRUN_REGISTER() {
   const workerServer = globalServiceRegistry.get(WORKER_SERVER);
   new LlamaCppProvider(LLAMACPP_TASKS, LLAMACPP_STREAM_TASKS, LLAMACPP_REACTIVE_TASKS).registerOnWorkerServer(workerServer);
-
+  workerServer.sendReady();
   console.log("LLAMACPP_WORKER_JOBRUN registered");
 }
 export {
@@ -377,4 +377,4 @@ export {
   LLAMACPP_DEFAULT_MODELS_DIR
 };
 
-//# debugId=
+//# debugId=4558BCC11EF9CE3C64756E2164756E21