@workglow/ai 0.1.2 → 0.2.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +8 -7
- package/dist/browser.js +1420 -397
- package/dist/browser.js.map +64 -58
- package/dist/bun.js +1420 -397
- package/dist/bun.js.map +64 -58
- package/dist/common.d.ts +1 -0
- package/dist/common.d.ts.map +1 -1
- package/dist/execution/DirectExecutionStrategy.d.ts +1 -1
- package/dist/execution/DirectExecutionStrategy.d.ts.map +1 -1
- package/dist/execution/QueuedExecutionStrategy.d.ts +2 -2
- package/dist/execution/QueuedExecutionStrategy.d.ts.map +1 -1
- package/dist/job/AiJob.d.ts +2 -1
- package/dist/job/AiJob.d.ts.map +1 -1
- package/dist/model/ModelRegistry.d.ts.map +1 -1
- package/dist/model/ModelRepository.d.ts +25 -15
- package/dist/model/ModelRepository.d.ts.map +1 -1
- package/dist/node.js +1420 -397
- package/dist/node.js.map +64 -58
- package/dist/provider/AiProvider.d.ts +2 -2
- package/dist/provider/AiProvider.d.ts.map +1 -1
- package/dist/provider/AiProviderRegistry.d.ts +3 -2
- package/dist/provider/AiProviderRegistry.d.ts.map +1 -1
- package/dist/provider/QueuedAiProvider.d.ts +2 -1
- package/dist/provider/QueuedAiProvider.d.ts.map +1 -1
- package/dist/task/AgentTask.d.ts +525 -0
- package/dist/task/AgentTask.d.ts.map +1 -0
- package/dist/task/AgentTypes.d.ts +183 -0
- package/dist/task/AgentTypes.d.ts.map +1 -0
- package/dist/task/AgentUtils.d.ts +55 -0
- package/dist/task/AgentUtils.d.ts.map +1 -0
- package/dist/task/BackgroundRemovalTask.d.ts +6 -4
- package/dist/task/BackgroundRemovalTask.d.ts.map +1 -1
- package/dist/task/ChunkRetrievalTask.d.ts +6 -4
- package/dist/task/ChunkRetrievalTask.d.ts.map +1 -1
- package/dist/task/ChunkToVectorTask.d.ts +6 -4
- package/dist/task/ChunkToVectorTask.d.ts.map +1 -1
- package/dist/task/ChunkVectorHybridSearchTask.d.ts +6 -4
- package/dist/task/ChunkVectorHybridSearchTask.d.ts.map +1 -1
- package/dist/task/ChunkVectorSearchTask.d.ts +6 -4
- package/dist/task/ChunkVectorSearchTask.d.ts.map +1 -1
- package/dist/task/ChunkVectorUpsertTask.d.ts +6 -4
- package/dist/task/ChunkVectorUpsertTask.d.ts.map +1 -1
- package/dist/task/ContextBuilderTask.d.ts +6 -4
- package/dist/task/ContextBuilderTask.d.ts.map +1 -1
- package/dist/task/CountTokensTask.d.ts +6 -4
- package/dist/task/CountTokensTask.d.ts.map +1 -1
- package/dist/task/DocumentEnricherTask.d.ts +6 -4
- package/dist/task/DocumentEnricherTask.d.ts.map +1 -1
- package/dist/task/DownloadModelTask.d.ts +7 -5
- package/dist/task/DownloadModelTask.d.ts.map +1 -1
- package/dist/task/FaceDetectorTask.d.ts +6 -4
- package/dist/task/FaceDetectorTask.d.ts.map +1 -1
- package/dist/task/FaceLandmarkerTask.d.ts +6 -4
- package/dist/task/FaceLandmarkerTask.d.ts.map +1 -1
- package/dist/task/GestureRecognizerTask.d.ts +6 -4
- package/dist/task/GestureRecognizerTask.d.ts.map +1 -1
- package/dist/task/HandLandmarkerTask.d.ts +6 -4
- package/dist/task/HandLandmarkerTask.d.ts.map +1 -1
- package/dist/task/HierarchicalChunkerTask.d.ts +6 -4
- package/dist/task/HierarchicalChunkerTask.d.ts.map +1 -1
- package/dist/task/HierarchyJoinTask.d.ts +6 -4
- package/dist/task/HierarchyJoinTask.d.ts.map +1 -1
- package/dist/task/ImageClassificationTask.d.ts +6 -4
- package/dist/task/ImageClassificationTask.d.ts.map +1 -1
- package/dist/task/ImageEmbeddingTask.d.ts +6 -4
- package/dist/task/ImageEmbeddingTask.d.ts.map +1 -1
- package/dist/task/ImageSegmentationTask.d.ts +6 -4
- package/dist/task/ImageSegmentationTask.d.ts.map +1 -1
- package/dist/task/ImageToTextTask.d.ts +6 -4
- package/dist/task/ImageToTextTask.d.ts.map +1 -1
- package/dist/task/MessageConversion.d.ts +52 -0
- package/dist/task/MessageConversion.d.ts.map +1 -0
- package/dist/task/ModelInfoTask.d.ts +7 -5
- package/dist/task/ModelInfoTask.d.ts.map +1 -1
- package/dist/task/ModelSearchTask.d.ts +6 -4
- package/dist/task/ModelSearchTask.d.ts.map +1 -1
- package/dist/task/ObjectDetectionTask.d.ts +6 -4
- package/dist/task/ObjectDetectionTask.d.ts.map +1 -1
- package/dist/task/PoseLandmarkerTask.d.ts +6 -4
- package/dist/task/PoseLandmarkerTask.d.ts.map +1 -1
- package/dist/task/QueryExpanderTask.d.ts +6 -4
- package/dist/task/QueryExpanderTask.d.ts.map +1 -1
- package/dist/task/RerankerTask.d.ts +6 -4
- package/dist/task/RerankerTask.d.ts.map +1 -1
- package/dist/task/StructuralParserTask.d.ts +6 -4
- package/dist/task/StructuralParserTask.d.ts.map +1 -1
- package/dist/task/StructuredGenerationTask.d.ts +6 -4
- package/dist/task/StructuredGenerationTask.d.ts.map +1 -1
- package/dist/task/TextChunkerTask.d.ts +6 -4
- package/dist/task/TextChunkerTask.d.ts.map +1 -1
- package/dist/task/TextClassificationTask.d.ts +6 -4
- package/dist/task/TextClassificationTask.d.ts.map +1 -1
- package/dist/task/TextEmbeddingTask.d.ts +6 -4
- package/dist/task/TextEmbeddingTask.d.ts.map +1 -1
- package/dist/task/TextFillMaskTask.d.ts +6 -4
- package/dist/task/TextFillMaskTask.d.ts.map +1 -1
- package/dist/task/TextGenerationTask.d.ts +6 -4
- package/dist/task/TextGenerationTask.d.ts.map +1 -1
- package/dist/task/TextLanguageDetectionTask.d.ts +6 -4
- package/dist/task/TextLanguageDetectionTask.d.ts.map +1 -1
- package/dist/task/TextNamedEntityRecognitionTask.d.ts +6 -4
- package/dist/task/TextNamedEntityRecognitionTask.d.ts.map +1 -1
- package/dist/task/TextQuestionAnswerTask.d.ts +6 -4
- package/dist/task/TextQuestionAnswerTask.d.ts.map +1 -1
- package/dist/task/TextRewriterTask.d.ts +6 -4
- package/dist/task/TextRewriterTask.d.ts.map +1 -1
- package/dist/task/TextSummaryTask.d.ts +6 -4
- package/dist/task/TextSummaryTask.d.ts.map +1 -1
- package/dist/task/TextTranslationTask.d.ts +6 -4
- package/dist/task/TextTranslationTask.d.ts.map +1 -1
- package/dist/task/ToolCallingTask.d.ts +348 -0
- package/dist/task/ToolCallingTask.d.ts.map +1 -0
- package/dist/task/ToolCallingUtils.d.ts +75 -0
- package/dist/task/ToolCallingUtils.d.ts.map +1 -0
- package/dist/task/TopicSegmenterTask.d.ts +6 -4
- package/dist/task/TopicSegmenterTask.d.ts.map +1 -1
- package/dist/task/UnloadModelTask.d.ts +6 -4
- package/dist/task/UnloadModelTask.d.ts.map +1 -1
- package/dist/task/VectorQuantizeTask.d.ts +6 -4
- package/dist/task/VectorQuantizeTask.d.ts.map +1 -1
- package/dist/task/VectorSimilarityTask.d.ts +6 -4
- package/dist/task/VectorSimilarityTask.d.ts.map +1 -1
- package/dist/task/base/AiTask.d.ts +8 -5
- package/dist/task/base/AiTask.d.ts.map +1 -1
- package/dist/task/base/AiVisionTask.d.ts +3 -7
- package/dist/task/base/AiVisionTask.d.ts.map +1 -1
- package/dist/task/base/StreamingAiTask.d.ts +4 -7
- package/dist/task/base/StreamingAiTask.d.ts.map +1 -1
- package/dist/task/index.d.ts +9 -1
- package/dist/task/index.d.ts.map +1 -1
- package/dist/worker.d.ts +2 -0
- package/dist/worker.d.ts.map +1 -1
- package/dist/worker.js +247 -25
- package/dist/worker.js.map +9 -7
- package/package.json +16 -11
package/dist/node.js
CHANGED
|
@@ -4,7 +4,8 @@ import {
|
|
|
4
4
|
Job,
|
|
5
5
|
JobStatus,
|
|
6
6
|
PermanentJobError,
|
|
7
|
-
RetryableJobError
|
|
7
|
+
RetryableJobError,
|
|
8
|
+
withJobErrorDiagnostics
|
|
8
9
|
} from "@workglow/job-queue";
|
|
9
10
|
import { getLogger } from "@workglow/util/worker";
|
|
10
11
|
|
|
@@ -137,16 +138,13 @@ function setAiProviderRegistry(pr) {
|
|
|
137
138
|
|
|
138
139
|
// src/job/AiJob.ts
|
|
139
140
|
var DEFAULT_AI_TIMEOUT_MS = 120000;
|
|
140
|
-
var
|
|
141
|
+
var LOCAL_INFERENCE_DEFAULT_TIMEOUT_MS = 300000;
|
|
141
142
|
function resolveAiJobTimeoutMs(aiProvider, explicitMs) {
|
|
142
143
|
if (explicitMs !== undefined) {
|
|
143
144
|
return explicitMs;
|
|
144
145
|
}
|
|
145
|
-
if (aiProvider === "LOCAL_LLAMACPP") {
|
|
146
|
-
return
|
|
147
|
-
}
|
|
148
|
-
if (aiProvider === "HF_TRANSFORMERS_ONNX" || aiProvider.startsWith("HF_TRANSFORMERS_ONNX")) {
|
|
149
|
-
return LOCAL_MODEL_DEFAULT_TIMEOUT_MS;
|
|
146
|
+
if (aiProvider === "LOCAL_LLAMACPP" || aiProvider === "HF_TRANSFORMERS_ONNX") {
|
|
147
|
+
return LOCAL_INFERENCE_DEFAULT_TIMEOUT_MS;
|
|
150
148
|
}
|
|
151
149
|
return DEFAULT_AI_TIMEOUT_MS;
|
|
152
150
|
}
|
|
@@ -159,39 +157,39 @@ function classifyProviderError(err, taskType, provider) {
|
|
|
159
157
|
const m = message.match(/\b([45]\d{2})\b/);
|
|
160
158
|
return m ? parseInt(m[1], 10) : undefined;
|
|
161
159
|
})();
|
|
162
|
-
if (err instanceof
|
|
163
|
-
return new AbortSignalJobError(`Provider call aborted for ${taskType} (${provider})
|
|
160
|
+
if (err instanceof Error && err.name === "AbortError") {
|
|
161
|
+
return new AbortSignalJobError(withJobErrorDiagnostics(`Provider call aborted for ${taskType} (${provider})`, err));
|
|
164
162
|
}
|
|
165
|
-
if (err instanceof
|
|
166
|
-
return new AbortSignalJobError(`Provider call timed out for ${taskType} (${provider})
|
|
163
|
+
if (err instanceof Error && err.name === "TimeoutError") {
|
|
164
|
+
return new AbortSignalJobError(withJobErrorDiagnostics(`Provider call timed out for ${taskType} (${provider})`, err));
|
|
167
165
|
}
|
|
168
166
|
if (message.includes("Pipeline download aborted") || message.includes("Operation aborted") || message.includes("operation was aborted") || message.includes("The operation was aborted")) {
|
|
169
|
-
return new AbortSignalJobError(`Provider call aborted for ${taskType} (${provider}): ${message}
|
|
167
|
+
return new AbortSignalJobError(withJobErrorDiagnostics(`Provider call aborted for ${taskType} (${provider}): ${message}`, err));
|
|
170
168
|
}
|
|
171
169
|
if (message.startsWith("HFT_NULL_PROCESSOR:")) {
|
|
172
|
-
return new RetryableJobError(message);
|
|
170
|
+
return new RetryableJobError(withJobErrorDiagnostics(message, err));
|
|
173
171
|
}
|
|
174
172
|
if (status === 429) {
|
|
175
173
|
const retryAfterMatch = message.match(/retry.after[:\s]*(\d+)/i);
|
|
176
174
|
const retryMs = retryAfterMatch ? parseInt(retryAfterMatch[1], 10) * 1000 : 30000;
|
|
177
|
-
return new RetryableJobError(`Rate limited by ${provider} for ${taskType}: ${message}`, new Date(Date.now() + retryMs));
|
|
175
|
+
return new RetryableJobError(withJobErrorDiagnostics(`Rate limited by ${provider} for ${taskType}: ${message}`, err), new Date(Date.now() + retryMs));
|
|
178
176
|
}
|
|
179
177
|
if (status === 401 || status === 403) {
|
|
180
|
-
return new PermanentJobError(`Authentication failed for ${provider} (${taskType}): ${message}
|
|
178
|
+
return new PermanentJobError(withJobErrorDiagnostics(`Authentication failed for ${provider} (${taskType}): ${message}`, err));
|
|
181
179
|
}
|
|
182
180
|
if (status === 400 || status === 404) {
|
|
183
|
-
return new PermanentJobError(`Invalid request to ${provider} for ${taskType}: ${message}
|
|
181
|
+
return new PermanentJobError(withJobErrorDiagnostics(`Invalid request to ${provider} for ${taskType}: ${message}`, err));
|
|
184
182
|
}
|
|
185
183
|
if (status && status >= 500) {
|
|
186
|
-
return new RetryableJobError(`Server error from ${provider} for ${taskType} (HTTP ${status}): ${message}
|
|
184
|
+
return new RetryableJobError(withJobErrorDiagnostics(`Server error from ${provider} for ${taskType} (HTTP ${status}): ${message}`, err));
|
|
187
185
|
}
|
|
188
186
|
if (message.includes("ECONNREFUSED") || message.includes("ECONNRESET") || message.includes("ETIMEDOUT") || message.includes("fetch failed") || message.includes("network") || err instanceof TypeError && message.includes("fetch")) {
|
|
189
|
-
return new RetryableJobError(`Network error calling ${provider} for ${taskType}: ${message}
|
|
187
|
+
return new RetryableJobError(withJobErrorDiagnostics(`Network error calling ${provider} for ${taskType}: ${message}`, err));
|
|
190
188
|
}
|
|
191
189
|
if (message.includes("timed out") || message.includes("timeout")) {
|
|
192
|
-
return new RetryableJobError(`Timeout calling ${provider} for ${taskType}: ${message}
|
|
190
|
+
return new RetryableJobError(withJobErrorDiagnostics(`Timeout calling ${provider} for ${taskType}: ${message}`, err));
|
|
193
191
|
}
|
|
194
|
-
return new PermanentJobError(`Provider ${provider} failed for ${taskType}: ${message}
|
|
192
|
+
return new PermanentJobError(withJobErrorDiagnostics(`Provider ${provider} failed for ${taskType}: ${message}`, err));
|
|
195
193
|
}
|
|
196
194
|
|
|
197
195
|
class AiJob extends Job {
|
|
@@ -297,12 +295,14 @@ class DirectExecutionStrategy {
|
|
|
297
295
|
}
|
|
298
296
|
|
|
299
297
|
// src/execution/QueuedExecutionStrategy.ts
|
|
300
|
-
import { ConcurrencyLimiter, JobQueueClient, JobQueueServer } from "@workglow/job-queue";
|
|
301
|
-
import { InMemoryQueueStorage } from "@workglow/storage";
|
|
302
298
|
import {
|
|
303
|
-
|
|
304
|
-
|
|
305
|
-
|
|
299
|
+
AbortSignalJobError as AbortSignalJobError2,
|
|
300
|
+
ConcurrencyLimiter,
|
|
301
|
+
JobQueueClient,
|
|
302
|
+
JobQueueServer
|
|
303
|
+
} from "@workglow/job-queue";
|
|
304
|
+
import { InMemoryQueueStorage } from "@workglow/storage";
|
|
305
|
+
import { getTaskQueueRegistry, TaskConfigurationError } from "@workglow/task-graph";
|
|
306
306
|
class QueuedExecutionStrategy {
|
|
307
307
|
queueName;
|
|
308
308
|
concurrency;
|
|
@@ -315,7 +315,7 @@ class QueuedExecutionStrategy {
|
|
|
315
315
|
}
|
|
316
316
|
async execute(jobInput, context, runnerId) {
|
|
317
317
|
if (context.signal.aborted) {
|
|
318
|
-
throw context.signal.reason ?? new
|
|
318
|
+
throw context.signal.reason ?? new AbortSignalJobError2("The operation was aborted");
|
|
319
319
|
}
|
|
320
320
|
const registeredQueue = await this.ensureQueue();
|
|
321
321
|
const { client } = registeredQueue;
|
|
@@ -334,7 +334,7 @@ class QueuedExecutionStrategy {
|
|
|
334
334
|
});
|
|
335
335
|
try {
|
|
336
336
|
if (context.signal.aborted) {
|
|
337
|
-
throw context.signal.reason ?? new
|
|
337
|
+
throw context.signal.reason ?? new AbortSignalJobError2("The operation was aborted");
|
|
338
338
|
}
|
|
339
339
|
const output = await handle.waitFor();
|
|
340
340
|
return output;
|
|
@@ -408,18 +408,106 @@ class QueuedExecutionStrategy {
|
|
|
408
408
|
}
|
|
409
409
|
}
|
|
410
410
|
|
|
411
|
+
// src/task/ToolCallingUtils.ts
|
|
412
|
+
import { getLogger as getLogger2 } from "@workglow/util/worker";
|
|
413
|
+
function buildToolDescription(tool) {
|
|
414
|
+
let desc = tool.description;
|
|
415
|
+
if (tool.outputSchema && typeof tool.outputSchema === "object") {
|
|
416
|
+
desc += `
|
|
417
|
+
|
|
418
|
+
Returns: ${JSON.stringify(tool.outputSchema)}`;
|
|
419
|
+
}
|
|
420
|
+
return desc;
|
|
421
|
+
}
|
|
422
|
+
function isAllowedToolName(name, allowedTools) {
|
|
423
|
+
return allowedTools.some((t) => t.name === name);
|
|
424
|
+
}
|
|
425
|
+
function filterValidToolCalls(toolCalls, allowedTools) {
|
|
426
|
+
return toolCalls.filter((tc) => {
|
|
427
|
+
if (tc.name && isAllowedToolName(tc.name, allowedTools)) {
|
|
428
|
+
return true;
|
|
429
|
+
}
|
|
430
|
+
getLogger2().warn(`Filtered out tool call with unknown name "${tc.name ?? "(missing)"}"`, {
|
|
431
|
+
callId: tc.id,
|
|
432
|
+
toolName: tc.name
|
|
433
|
+
});
|
|
434
|
+
return false;
|
|
435
|
+
});
|
|
436
|
+
}
|
|
437
|
+
|
|
411
438
|
// src/model/InMemoryModelRepository.ts
|
|
412
439
|
import { InMemoryTabularStorage } from "@workglow/storage";
|
|
413
440
|
|
|
414
441
|
// src/model/ModelRepository.ts
|
|
415
442
|
import { EventEmitter } from "@workglow/util";
|
|
443
|
+
import { compileSchema } from "@workglow/util/schema";
|
|
444
|
+
|
|
445
|
+
// src/model/ModelSchema.ts
|
|
446
|
+
var ModelConfigSchema = {
|
|
447
|
+
type: "object",
|
|
448
|
+
properties: {
|
|
449
|
+
model_id: { type: "string" },
|
|
450
|
+
tasks: { type: "array", items: { type: "string" }, "x-ui-editor": "multiselect" },
|
|
451
|
+
title: { type: "string" },
|
|
452
|
+
description: { type: "string", "x-ui-editor": "textarea" },
|
|
453
|
+
provider: { type: "string" },
|
|
454
|
+
provider_config: {
|
|
455
|
+
type: "object",
|
|
456
|
+
properties: {
|
|
457
|
+
credential_key: { type: "string", format: "credential", "x-ui-hidden": true }
|
|
458
|
+
},
|
|
459
|
+
additionalProperties: true,
|
|
460
|
+
default: {}
|
|
461
|
+
},
|
|
462
|
+
metadata: { type: "object", default: {}, "x-ui-hidden": true }
|
|
463
|
+
},
|
|
464
|
+
required: ["provider", "provider_config"],
|
|
465
|
+
format: "model",
|
|
466
|
+
additionalProperties: true
|
|
467
|
+
};
|
|
468
|
+
var ModelRecordSchema = {
|
|
469
|
+
type: "object",
|
|
470
|
+
properties: {
|
|
471
|
+
...ModelConfigSchema.properties
|
|
472
|
+
},
|
|
473
|
+
required: [
|
|
474
|
+
"model_id",
|
|
475
|
+
"tasks",
|
|
476
|
+
"provider",
|
|
477
|
+
"title",
|
|
478
|
+
"description",
|
|
479
|
+
"provider_config",
|
|
480
|
+
"metadata"
|
|
481
|
+
],
|
|
482
|
+
format: "model",
|
|
483
|
+
additionalProperties: false
|
|
484
|
+
};
|
|
485
|
+
var ModelPrimaryKeyNames = ["model_id"];
|
|
416
486
|
|
|
487
|
+
// src/model/ModelRepository.ts
|
|
417
488
|
class ModelRepository {
|
|
418
489
|
modelTabularRepository;
|
|
419
490
|
constructor(modelTabularRepository) {
|
|
420
491
|
this.modelTabularRepository = modelTabularRepository;
|
|
421
492
|
}
|
|
422
493
|
events = new EventEmitter;
|
|
494
|
+
compiledValidationSchema;
|
|
495
|
+
getValidationSchema() {
|
|
496
|
+
return ModelRecordSchema;
|
|
497
|
+
}
|
|
498
|
+
validateModelRecord(model) {
|
|
499
|
+
if (!this.compiledValidationSchema) {
|
|
500
|
+
this.compiledValidationSchema = compileSchema(this.getValidationSchema());
|
|
501
|
+
}
|
|
502
|
+
const result = this.compiledValidationSchema.validate(model);
|
|
503
|
+
if (!result.valid) {
|
|
504
|
+
const errorMessages = result.errors.map((e) => {
|
|
505
|
+
const path = e.data?.pointer || "";
|
|
506
|
+
return `${e.message}${path ? ` (${path})` : ""}`;
|
|
507
|
+
});
|
|
508
|
+
throw new Error(`Invalid model record: ${errorMessages.join(", ")}`);
|
|
509
|
+
}
|
|
510
|
+
}
|
|
423
511
|
async setupDatabase() {
|
|
424
512
|
await this.modelTabularRepository.setupDatabase?.();
|
|
425
513
|
}
|
|
@@ -436,10 +524,25 @@ class ModelRepository {
|
|
|
436
524
|
return this.events.waitOn(name);
|
|
437
525
|
}
|
|
438
526
|
async addModel(model) {
|
|
527
|
+
this.validateModelRecord(model);
|
|
528
|
+
const existing = await this.modelTabularRepository.get({ model_id: model.model_id });
|
|
529
|
+
if (existing) {
|
|
530
|
+
throw new Error(`Model with id "${model.model_id}" already exists`);
|
|
531
|
+
}
|
|
439
532
|
await this.modelTabularRepository.put(model);
|
|
440
533
|
this.events.emit("model_added", model);
|
|
441
534
|
return model;
|
|
442
535
|
}
|
|
536
|
+
async updateModel(model) {
|
|
537
|
+
this.validateModelRecord(model);
|
|
538
|
+
const existing = await this.modelTabularRepository.get({ model_id: model.model_id });
|
|
539
|
+
if (!existing) {
|
|
540
|
+
throw new Error(`Model with id "${model.model_id}" not found`);
|
|
541
|
+
}
|
|
542
|
+
await this.modelTabularRepository.put(model);
|
|
543
|
+
this.events.emit("model_updated", model);
|
|
544
|
+
return model;
|
|
545
|
+
}
|
|
443
546
|
async removeModel(model_id) {
|
|
444
547
|
const model = await this.modelTabularRepository.get({ model_id });
|
|
445
548
|
if (!model) {
|
|
@@ -498,48 +601,6 @@ class ModelRepository {
|
|
|
498
601
|
}
|
|
499
602
|
}
|
|
500
603
|
|
|
501
|
-
// src/model/ModelSchema.ts
|
|
502
|
-
var ModelConfigSchema = {
|
|
503
|
-
type: "object",
|
|
504
|
-
properties: {
|
|
505
|
-
model_id: { type: "string" },
|
|
506
|
-
tasks: { type: "array", items: { type: "string" }, "x-ui-editor": "multiselect" },
|
|
507
|
-
title: { type: "string" },
|
|
508
|
-
description: { type: "string", "x-ui-editor": "textarea" },
|
|
509
|
-
provider: { type: "string" },
|
|
510
|
-
provider_config: {
|
|
511
|
-
type: "object",
|
|
512
|
-
properties: {
|
|
513
|
-
credential_key: { type: "string", format: "credential", "x-ui-hidden": true }
|
|
514
|
-
},
|
|
515
|
-
additionalProperties: true,
|
|
516
|
-
default: {}
|
|
517
|
-
},
|
|
518
|
-
metadata: { type: "object", default: {}, "x-ui-hidden": true }
|
|
519
|
-
},
|
|
520
|
-
required: ["provider", "provider_config"],
|
|
521
|
-
format: "model",
|
|
522
|
-
additionalProperties: true
|
|
523
|
-
};
|
|
524
|
-
var ModelRecordSchema = {
|
|
525
|
-
type: "object",
|
|
526
|
-
properties: {
|
|
527
|
-
...ModelConfigSchema.properties
|
|
528
|
-
},
|
|
529
|
-
required: [
|
|
530
|
-
"model_id",
|
|
531
|
-
"tasks",
|
|
532
|
-
"provider",
|
|
533
|
-
"title",
|
|
534
|
-
"description",
|
|
535
|
-
"provider_config",
|
|
536
|
-
"metadata"
|
|
537
|
-
],
|
|
538
|
-
format: "model",
|
|
539
|
-
additionalProperties: false
|
|
540
|
-
};
|
|
541
|
-
var ModelPrimaryKeyNames = ["model_id"];
|
|
542
|
-
|
|
543
604
|
// src/model/InMemoryModelRepository.ts
|
|
544
605
|
class InMemoryModelRepository extends ModelRepository {
|
|
545
606
|
constructor() {
|
|
@@ -555,9 +616,7 @@ import {
|
|
|
555
616
|
registerInputResolver
|
|
556
617
|
} from "@workglow/util";
|
|
557
618
|
var MODEL_REPOSITORY = createServiceToken("model.repository");
|
|
558
|
-
|
|
559
|
-
globalServiceRegistry2.register(MODEL_REPOSITORY, () => new InMemoryModelRepository, true);
|
|
560
|
-
}
|
|
619
|
+
globalServiceRegistry2.registerIfAbsent(MODEL_REPOSITORY, () => new InMemoryModelRepository, true);
|
|
561
620
|
function getGlobalModelRepository() {
|
|
562
621
|
return globalServiceRegistry2.get(MODEL_REPOSITORY);
|
|
563
622
|
}
|
|
@@ -588,10 +647,7 @@ registerInputCompactor("model", async (value, _format, registry) => {
|
|
|
588
647
|
});
|
|
589
648
|
|
|
590
649
|
// src/provider/AiProvider.ts
|
|
591
|
-
import {
|
|
592
|
-
globalServiceRegistry as globalServiceRegistry3,
|
|
593
|
-
WORKER_MANAGER as WORKER_MANAGER2
|
|
594
|
-
} from "@workglow/util/worker";
|
|
650
|
+
import { globalServiceRegistry as globalServiceRegistry3, WORKER_MANAGER as WORKER_MANAGER2 } from "@workglow/util/worker";
|
|
595
651
|
function resolveAiProviderGpuQueueConcurrency(concurrency) {
|
|
596
652
|
if (concurrency === undefined) {
|
|
597
653
|
return 1;
|
|
@@ -711,8 +767,74 @@ class QueuedAiProvider extends AiProvider {
|
|
|
711
767
|
// src/task/index.ts
|
|
712
768
|
import { TaskRegistry } from "@workglow/task-graph";
|
|
713
769
|
|
|
714
|
-
// src/task/
|
|
715
|
-
import { CreateWorkflow, Workflow } from "@workglow/task-graph";
|
|
770
|
+
// src/task/AgentTask.ts
|
|
771
|
+
import { CreateWorkflow as CreateWorkflow2, Task as Task2, Workflow as Workflow2 } from "@workglow/task-graph";
|
|
772
|
+
import { getLogger as getLogger4 } from "@workglow/util";
|
|
773
|
+
|
|
774
|
+
// src/task/AgentTypes.ts
|
|
775
|
+
import { parseDataUri } from "@workglow/util/media";
|
|
776
|
+
function imageBlock(mimeType, data) {
|
|
777
|
+
return { type: "image", mimeType, data };
|
|
778
|
+
}
|
|
779
|
+
function audioBlock(mimeType, data) {
|
|
780
|
+
return { type: "audio", mimeType, data };
|
|
781
|
+
}
|
|
782
|
+
function imageBlockFromDataUri(dataUri) {
|
|
783
|
+
const { mimeType, base64 } = parseDataUri(dataUri);
|
|
784
|
+
return { type: "image", mimeType, data: base64 };
|
|
785
|
+
}
|
|
786
|
+
function audioBlockFromDataUri(dataUri) {
|
|
787
|
+
const { mimeType, base64 } = parseDataUri(dataUri);
|
|
788
|
+
return { type: "audio", mimeType, data: base64 };
|
|
789
|
+
}
|
|
790
|
+
function userMessage(prompt) {
|
|
791
|
+
return { role: "user", content: prompt };
|
|
792
|
+
}
|
|
793
|
+
function assistantMessage(text, toolCalls) {
|
|
794
|
+
const content = [];
|
|
795
|
+
if (text) {
|
|
796
|
+
content.push({ type: "text", text });
|
|
797
|
+
}
|
|
798
|
+
if (toolCalls) {
|
|
799
|
+
for (const tc of toolCalls) {
|
|
800
|
+
content.push({
|
|
801
|
+
type: "tool_use",
|
|
802
|
+
id: tc.id,
|
|
803
|
+
name: tc.name,
|
|
804
|
+
input: tc.input
|
|
805
|
+
});
|
|
806
|
+
}
|
|
807
|
+
}
|
|
808
|
+
return { role: "assistant", content };
|
|
809
|
+
}
|
|
810
|
+
function toolMessage(results) {
|
|
811
|
+
return {
|
|
812
|
+
role: "tool",
|
|
813
|
+
content: results.map((r) => {
|
|
814
|
+
const jsonText = JSON.stringify(r.output);
|
|
815
|
+
const content = r.mediaContent && r.mediaContent.length > 0 ? [{ type: "text", text: jsonText }, ...r.mediaContent] : jsonText;
|
|
816
|
+
return {
|
|
817
|
+
type: "tool_result",
|
|
818
|
+
tool_use_id: r.toolCallId,
|
|
819
|
+
content,
|
|
820
|
+
is_error: r.isError || undefined
|
|
821
|
+
};
|
|
822
|
+
})
|
|
823
|
+
};
|
|
824
|
+
}
|
|
825
|
+
function toolSourceDefinitions(sources) {
|
|
826
|
+
return sources.map((s) => s.definition);
|
|
827
|
+
}
|
|
828
|
+
function findToolSource(sources, name) {
|
|
829
|
+
return sources.find((s) => s.definition.name === name);
|
|
830
|
+
}
|
|
831
|
+
|
|
832
|
+
// src/task/AgentUtils.ts
|
|
833
|
+
import { getTaskConstructors as getTaskConstructors2, TaskAbortedError } from "@workglow/task-graph";
|
|
834
|
+
import { getLogger as getLogger3 } from "@workglow/util";
|
|
835
|
+
|
|
836
|
+
// src/task/ToolCallingTask.ts
|
|
837
|
+
import { CreateWorkflow, getTaskConstructors, Workflow } from "@workglow/task-graph";
|
|
716
838
|
|
|
717
839
|
// src/task/base/AiTaskSchemas.ts
|
|
718
840
|
var TypeLanguage = (annotations = {}) => ({
|
|
@@ -871,11 +993,12 @@ var TypeCategory = {
|
|
|
871
993
|
description: "Classification category with label and score"
|
|
872
994
|
};
|
|
873
995
|
|
|
874
|
-
// src/task/base/
|
|
875
|
-
import {
|
|
996
|
+
// src/task/base/StreamingAiTask.ts
|
|
997
|
+
import { getStreamingPorts, TaskConfigurationError as TaskConfigurationError3 } from "@workglow/task-graph";
|
|
876
998
|
|
|
877
999
|
// src/task/base/AiTask.ts
|
|
878
1000
|
import {
|
|
1001
|
+
Entitlements,
|
|
879
1002
|
Task,
|
|
880
1003
|
TaskConfigSchema,
|
|
881
1004
|
TaskConfigurationError as TaskConfigurationError2,
|
|
@@ -894,6 +1017,27 @@ var aiTaskConfigSchema = {
|
|
|
894
1017
|
|
|
895
1018
|
class AiTask extends Task {
|
|
896
1019
|
static type = "AiTask";
|
|
1020
|
+
static hasDynamicEntitlements = true;
|
|
1021
|
+
static entitlements() {
|
|
1022
|
+
return {
|
|
1023
|
+
entitlements: [{ id: Entitlements.AI_INFERENCE, reason: "Runs AI model inference" }]
|
|
1024
|
+
};
|
|
1025
|
+
}
|
|
1026
|
+
entitlements() {
|
|
1027
|
+
const base = [
|
|
1028
|
+
{ id: Entitlements.AI_INFERENCE, reason: "Runs AI model inference" }
|
|
1029
|
+
];
|
|
1030
|
+
const runModel = this.runInputData?.model;
|
|
1031
|
+
const modelId = typeof runModel === "string" ? runModel : typeof this.defaults.model === "string" ? this.defaults.model : undefined;
|
|
1032
|
+
if (modelId) {
|
|
1033
|
+
base.push({
|
|
1034
|
+
id: Entitlements.AI_MODEL,
|
|
1035
|
+
reason: `Uses model ${modelId}`,
|
|
1036
|
+
resources: [modelId]
|
|
1037
|
+
});
|
|
1038
|
+
}
|
|
1039
|
+
return { entitlements: base };
|
|
1040
|
+
}
|
|
897
1041
|
static configSchema() {
|
|
898
1042
|
return aiTaskConfigSchema;
|
|
899
1043
|
}
|
|
@@ -956,71 +1100,773 @@ class AiTask extends Task {
|
|
|
956
1100
|
return reactiveFn(input, output, model);
|
|
957
1101
|
}
|
|
958
1102
|
}
|
|
959
|
-
return super.executeReactive(input, output, context);
|
|
1103
|
+
return super.executeReactive(input, output, context);
|
|
1104
|
+
}
|
|
1105
|
+
async validateInput(input) {
|
|
1106
|
+
const inputSchema = this.inputSchema();
|
|
1107
|
+
if (typeof inputSchema === "boolean") {
|
|
1108
|
+
if (inputSchema === false) {
|
|
1109
|
+
throw new TaskConfigurationError2(`AiTask: Input schema is 'false' and accepts no inputs`);
|
|
1110
|
+
}
|
|
1111
|
+
return true;
|
|
1112
|
+
}
|
|
1113
|
+
const modelTaskProperties = Object.entries(inputSchema.properties || {}).filter(([key, schema]) => schemaFormat(schema)?.startsWith("model:"));
|
|
1114
|
+
for (const [key] of modelTaskProperties) {
|
|
1115
|
+
const model = input[key];
|
|
1116
|
+
if (typeof model === "object" && model !== null) {
|
|
1117
|
+
const tasks = model.tasks;
|
|
1118
|
+
if (Array.isArray(tasks) && tasks.length > 0 && !tasks.includes(this.type)) {
|
|
1119
|
+
const modelId = model.model_id ?? "(inline config)";
|
|
1120
|
+
throw new TaskConfigurationError2(`AiTask: Model "${modelId}" for '${key}' is not compatible with task '${this.type}'. ` + `Model supports: [${tasks.join(", ")}]`);
|
|
1121
|
+
}
|
|
1122
|
+
} else if (model !== undefined && model !== null) {
|
|
1123
|
+
throw new TaskConfigurationError2(`AiTask: Invalid model for '${key}' - expected ModelConfig object but got ${typeof model}. ` + `Ensure the model ID was registered in the ModelRepository before running the task.`);
|
|
1124
|
+
}
|
|
1125
|
+
}
|
|
1126
|
+
const modelPlainProperties = Object.entries(inputSchema.properties || {}).filter(([key, schema]) => schemaFormat(schema) === "model");
|
|
1127
|
+
for (const [key] of modelPlainProperties) {
|
|
1128
|
+
const model = input[key];
|
|
1129
|
+
if (model !== undefined && model !== null && typeof model !== "object") {
|
|
1130
|
+
throw new TaskConfigurationError2(`AiTask: Invalid model for '${key}' - expected ModelConfig object but got ${typeof model}. ` + `Ensure the model ID was registered in the ModelRepository before running the task.`);
|
|
1131
|
+
}
|
|
1132
|
+
}
|
|
1133
|
+
return super.validateInput(input);
|
|
1134
|
+
}
|
|
1135
|
+
async narrowInput(input, registry) {
|
|
1136
|
+
const inputSchema = this.inputSchema();
|
|
1137
|
+
if (typeof inputSchema === "boolean") {
|
|
1138
|
+
if (inputSchema === false) {
|
|
1139
|
+
throw new TaskConfigurationError2(`AiTask: Input schema is 'false' and accepts no inputs`);
|
|
1140
|
+
}
|
|
1141
|
+
return input;
|
|
1142
|
+
}
|
|
1143
|
+
const modelTaskProperties = Object.entries(inputSchema.properties || {}).filter(([key, schema]) => schemaFormat(schema)?.startsWith("model:"));
|
|
1144
|
+
if (modelTaskProperties.length > 0) {
|
|
1145
|
+
const modelRepo = registry.get(MODEL_REPOSITORY);
|
|
1146
|
+
const taskModels = await modelRepo.findModelsByTask(this.type) ?? [];
|
|
1147
|
+
for (const [key, propSchema] of modelTaskProperties) {
|
|
1148
|
+
const requestedModel = input[key];
|
|
1149
|
+
if (typeof requestedModel === "string") {
|
|
1150
|
+
const found = taskModels?.find((m) => m.model_id === requestedModel);
|
|
1151
|
+
if (!found) {
|
|
1152
|
+
input[key] = undefined;
|
|
1153
|
+
}
|
|
1154
|
+
} else if (typeof requestedModel === "object" && requestedModel !== null) {
|
|
1155
|
+
const model = requestedModel;
|
|
1156
|
+
const tasks = model.tasks;
|
|
1157
|
+
if (Array.isArray(tasks) && tasks.length > 0 && !tasks.includes(this.type)) {
|
|
1158
|
+
input[key] = undefined;
|
|
1159
|
+
}
|
|
1160
|
+
}
|
|
1161
|
+
}
|
|
1162
|
+
}
|
|
1163
|
+
return input;
|
|
1164
|
+
}
|
|
1165
|
+
}
|
|
1166
|
+
|
|
1167
|
+
// src/task/base/StreamingAiTask.ts
/**
 * Base class for AI tasks that stream their output as async-generator events.
 * Resolves a fallback port name from the output schema and stamps it onto
 * delta events that arrive without an explicit port.
 */
class StreamingAiTask extends AiTask {
  static type = "StreamingAiTask";
  /**
   * Streams provider events for this task.
   * @param input - resolved task input; `input.model` must already be a ModelConfig object
   * @param context - execution context forwarded to the provider strategy
   * @throws TaskConfigurationError3 when the model was not resolved to an object
   */
  async* executeStream(input, context) {
    const model = input.model;
    if (!model || typeof model !== "object") {
      throw new TaskConfigurationError3("StreamingAiTask: Model was not resolved to ModelConfig - this indicates a bug in the resolution system");
    }
    const jobInput = await this.getJobInput(input);
    const strategy = getAiProviderRegistry().getStrategy(model);
    const schema = this.outputSchema();
    // Prefer an explicitly declared streaming port; otherwise fall back to the
    // first output property, and finally to "text".
    const streamingPorts = getStreamingPorts(schema);
    let fallbackPort = "text";
    if (streamingPorts.length > 0) {
      fallbackPort = streamingPorts[0].port;
    } else if (typeof schema === "object" && schema.properties) {
      const [firstKey] = Object.keys(schema.properties);
      if (firstKey) {
        fallbackPort = firstKey;
      }
    }
    // Forward provider events; only delta events get the fallback port applied.
    for await (const event of strategy.executeStream(jobInput, context, this.runConfig.runnerId)) {
      const isDelta = event.type === "text-delta" || event.type === "object-delta";
      yield isDelta ? { ...event, port: event.port ?? fallbackPort } : event;
    }
  }
}
|
|
1200
|
+
|
|
1201
|
+
// src/task/ToolCallingTask.ts
|
|
1202
|
+
// src/task/ToolCallingTask.ts
/**
 * Converts a list of task type names into LLM tool definitions by looking up
 * each task constructor in the task registry.
 * @param taskNames - task type names to resolve
 * @param registry - optional service registry; falls back to the default when absent
 * @returns tool definition objects carrying the backing `taskType`
 * @throws Error when a name is not present in the constructors registry
 */
function taskTypesToTools(taskNames, registry) {
  const constructors = getTaskConstructors(registry);
  const tools = [];
  for (const name of taskNames) {
    const ctor = constructors.get(name);
    if (!ctor) {
      throw new Error(`taskTypesToTools: Unknown task type "${name}" — not found in task constructors registry (ServiceRegistry: ${registry ? "custom" : "default"})`);
    }
    // configSchema is optional on task constructors; only call it when present.
    const hasConfigSchema = "configSchema" in ctor && typeof ctor.configSchema === "function";
    const configSchema = hasConfigSchema ? ctor.configSchema() : undefined;
    const tool = {
      name: ctor.type,
      description: ctor.description ?? "",
      inputSchema: ctor.inputSchema(),
      outputSchema: ctor.outputSchema()
    };
    if (configSchema) {
      tool.configSchema = configSchema;
    }
    tool.taskType = name;
    tools.push(tool);
  }
  return tools;
}
|
|
1220
|
+
// JSON Schema for a tool definition offered to the LLM. `configSchema` and
// `config` describe the backing task's configuration and are not sent to the model.
var ToolDefinitionSchema = {
  type: "object",
  properties: {
    name: {
      type: "string",
      title: "Name",
      description: "The tool name"
    },
    description: {
      type: "string",
      title: "Description",
      description: "A description of what the tool does"
    },
    inputSchema: {
      type: "object",
      title: "Input Schema",
      description: "JSON Schema describing the tool's input parameters",
      additionalProperties: true
    },
    outputSchema: {
      type: "object",
      title: "Output Schema",
      description: "JSON Schema describing what the tool returns",
      additionalProperties: true
    },
    configSchema: {
      type: "object",
      title: "Config Schema",
      description: "JSON Schema describing the task's configuration options (not sent to the LLM)",
      additionalProperties: true
    },
    config: {
      type: "object",
      title: "Config",
      description: "Concrete configuration values for the backing task (not sent to the LLM)",
      additionalProperties: true
    }
  },
  required: ["name", "description", "inputSchema"],
  // open schema: extra fields (e.g. `type`, `execute`) are tolerated and
  // stripped by buildToolSources before the definition reaches the provider
  additionalProperties: true
};
|
|
1261
|
+
// JSON Schema for a single tool invocation requested by the model.
var ToolCallSchema = {
  type: "object",
  properties: {
    id: {
      type: "string",
      title: "ID",
      description: "Unique identifier for this tool call"
    },
    name: {
      type: "string",
      title: "Name",
      description: "The name of the tool to invoke"
    },
    input: {
      type: "object",
      title: "Input",
      description: "The input arguments for the tool call",
      additionalProperties: true
    }
  },
  required: ["id", "name", "input"],
  additionalProperties: false
};
|
|
1284
|
+
// Model reference schema scoped to ToolCallingTask-compatible models.
var modelSchema = TypeModel("model:ToolCallingTask");
// Input schema for ToolCallingTask: a prompt (or full message history),
// tool definitions, and standard sampling configuration.
var ToolCallingInputSchema = {
  type: "object",
  properties: {
    model: modelSchema,
    prompt: {
      // Either a plain string or an array of strings / typed content blocks.
      oneOf: [
        { type: "string", title: "Prompt", description: "The prompt to send to the model" },
        {
          type: "array",
          title: "Prompt",
          description: "The prompt as an array of strings or content blocks",
          items: {
            oneOf: [
              { type: "string" },
              {
                type: "object",
                properties: {
                  type: { type: "string", enum: ["text", "image", "audio"] }
                },
                required: ["type"],
                additionalProperties: true
              }
            ]
          }
        }
      ],
      title: "Prompt",
      description: "The prompt to send to the model"
    },
    systemPrompt: {
      type: "string",
      title: "System Prompt",
      description: "Optional system instructions for the model"
    },
    messages: {
      type: "array",
      title: "Messages",
      description: "Full conversation history for multi-turn interactions. When provided, used instead of prompt to construct the messages array sent to the provider.",
      items: {
        type: "object",
        properties: {
          role: { type: "string", enum: ["user", "assistant", "tool"] },
          // content is intentionally unconstrained (string or content blocks)
          content: {}
        },
        required: ["role", "content"],
        additionalProperties: true
      }
    },
    tools: {
      type: "array",
      format: "tasks",
      title: "Tools",
      description: "Tool definitions available for the model to call",
      items: {
        // A bare task type name resolved from the registry, or a full definition.
        oneOf: [
          { type: "string", format: "tasks", description: "Task type name" },
          ToolDefinitionSchema
        ]
      }
    },
    toolChoice: {
      type: "string",
      title: "Tool Choice",
      description: 'Controls tool selection: "auto" (model decides), "none" (no tools), "required" (must call a tool), or a specific tool name',
      "x-ui-group": "Configuration"
    },
    maxTokens: {
      type: "number",
      title: "Max Tokens",
      description: "The maximum number of tokens to generate",
      minimum: 1,
      "x-ui-group": "Configuration"
    },
    temperature: {
      type: "number",
      title: "Temperature",
      description: "The temperature to use for sampling",
      minimum: 0,
      maximum: 2,
      "x-ui-group": "Configuration"
    }
  },
  required: ["model", "prompt", "tools"],
  additionalProperties: false
};
|
|
1370
|
+
// Output schema for ToolCallingTask: streamed text plus any tool calls
// the model requested. "x-stream" marks how each port accumulates deltas.
var ToolCallingOutputSchema = {
  type: "object",
  properties: {
    text: {
      type: "string",
      title: "Text",
      description: "Any text content generated by the model",
      "x-stream": "append"
    },
    toolCalls: {
      type: "array",
      items: ToolCallSchema,
      title: "Tool Calls",
      description: "Tool calls requested by the model",
      "x-stream": "object"
    }
  },
  required: ["text", "toolCalls"],
  additionalProperties: false
};
|
|
1390
|
+
|
|
1391
|
+
/**
 * Single-turn tool-calling task: sends a prompt plus tool definitions to a
 * language model and returns generated text together with requested tool calls.
 * Streaming behavior is inherited from StreamingAiTask.
 */
class ToolCallingTask extends StreamingAiTask {
  static type = "ToolCallingTask";
  static category = "AI Text Model";
  static title = "Tool Calling";
  static description = "Sends a prompt with tool definitions to a language model and returns text along with any tool calls the model requests";
  static inputSchema() {
    return ToolCallingInputSchema;
  }
  static outputSchema() {
    return ToolCallingOutputSchema;
  }
}
// Convenience functional wrapper around the task class.
var toolCalling = (input, config) => {
  return new ToolCallingTask(config).run(input);
};
// Register the task as a Workflow builder method.
Workflow.prototype.toolCalling = CreateWorkflow(ToolCallingTask);
|
|
1407
|
+
|
|
1408
|
+
// src/task/AgentUtils.ts
|
|
1409
|
+
// src/task/AgentUtils.ts
/**
 * Validates a tool's config against the presence of a config schema.
 * If config values were supplied for a task that declares no configSchema,
 * the config is dropped (with a warning) rather than passed through blindly.
 * @param toolName - tool name, used only for the warning message
 * @param config - user-supplied config values (may be undefined)
 * @param taskConfigSchema - the task's configSchema, if it declares one
 * @returns the original config, or an empty object when it was ignored
 */
function resolveToolConfig(toolName, config, taskConfigSchema) {
  // No config, or a schema exists to describe it: pass through unchanged.
  if (!config || taskConfigSchema) {
    return config;
  }
  getLogger3().warn(`AgentTask: Tool "${toolName}" provided config but task has no configSchema — config ignored`);
  return {};
}
|
|
1416
|
+
/**
 * Normalizes a heterogeneous tools array (task type names, function tools,
 * task-backed tool definitions) into uniform "tool source" records of either
 * { type: "registry", definition, taskType, config? } or
 * { type: "function", definition, run }.
 * Misconfigured tools are kept as function sources that throw on invocation,
 * so the agent can still report a useful error instead of failing to start.
 */
function buildToolSources(tools, registry) {
  if (!tools || tools.length === 0)
    return [];
  // Resolve all string names in one registry pass, keyed by task type.
  const stringNames = tools.filter((t) => typeof t === "string");
  const resolvedDefs = new Map(taskTypesToTools(stringNames, registry).map((d) => [d.taskType, d]));
  const constructors = getTaskConstructors2(registry);
  const sources = [];
  for (const tool of tools) {
    if (typeof tool === "string") {
      // Bare task type name: use the pre-resolved definition.
      const def = resolvedDefs.get(tool);
      if (def) {
        const { taskType, ...definition } = def;
        sources.push({
          type: "registry",
          definition,
          taskType
        });
      }
    } else if (tool.type === "function" || !tool.type && tool.execute) {
      // Explicit function tool, or untyped tool with an execute callback.
      if (!tool.execute) {
        getLogger3().warn(`AgentTask: Tool "${tool.name}" has type "function" but no execute function — will throw on invocation`);
      }
      // Strip non-definition fields before handing the definition to the LLM.
      const { execute, configSchema: _cs, config: _c, type: _t, ...definition } = tool;
      sources.push({
        type: "function",
        definition,
        run: execute ?? (async () => {
          throw new Error(`No execute function for tool "${tool.name}"`);
        })
      });
    } else if (tool.type === "task") {
      // Explicit task-backed tool: must exist in the registry.
      const ctor = constructors.get(tool.name);
      if (!ctor) {
        getLogger3().warn(`AgentTask: Tool "${tool.name}" has type "task" but is not in TaskRegistry — will throw on invocation`);
        const { execute: _e, configSchema: _cs, config: _c, type: _t, ...definition } = tool;
        sources.push({
          type: "function",
          definition,
          run: async () => {
            throw new Error(`Task "${tool.name}" not found in TaskRegistry`);
          }
        });
      } else {
        const safeConfig = resolveToolConfig(tool.name, tool.config, ctor.configSchema?.());
        const { execute: _e, configSchema: _cs, config: _c, type: _t, ...definition } = tool;
        sources.push({
          type: "registry",
          definition,
          taskType: tool.name,
          config: safeConfig
        });
      }
    } else {
      // Untyped object tool without execute: prefer a registry task of the
      // same name; otherwise fall back to a throwing function source.
      const ctor = constructors.get(tool.name);
      if (ctor) {
        const safeConfig = resolveToolConfig(tool.name, tool.config, ctor.configSchema?.());
        const { execute: _e, configSchema: _cs, config: _c, type: _t, ...definition } = tool;
        sources.push({
          type: "registry",
          definition,
          taskType: tool.name,
          config: safeConfig
        });
      } else {
        const { execute: _e, configSchema: _cs, config: _c, type: _t, ...definition } = tool;
        sources.push({
          type: "function",
          definition,
          run: async () => {
            throw new Error(`No executor registered for tool "${tool.name}"`);
          }
        });
      }
    }
  }
  return sources;
}
|
|
1493
|
+
/**
 * Executes a single tool call against its resolved source, applying optional
 * lifecycle hooks (beforeToolCall / afterToolCall / onToolError).
 * Never throws for tool-level failures: errors are converted into a tool
 * result with `isError: true` so the agent loop can feed them back to the model.
 * @returns { toolCallId, toolName, output, isError }
 */
async function executeToolCall(toolCall, sources, context, hooks) {
  const source = findToolSource(sources, toolCall.name);
  if (!source) {
    getLogger3().warn(`AgentTask: Unknown tool "${toolCall.name}" — not found in tool sources`);
    return {
      toolCallId: toolCall.id,
      toolName: toolCall.name,
      output: { error: `Unknown tool: ${toolCall.name}` },
      isError: true
    };
  }
  // beforeToolCall may deny the call outright or rewrite its input.
  let effectiveCall = toolCall;
  if (hooks?.beforeToolCall) {
    const decision = await hooks.beforeToolCall(toolCall, source);
    if (decision.action === "deny") {
      return {
        toolCallId: toolCall.id,
        toolName: toolCall.name,
        output: { error: decision.reason ?? "Tool call denied by hook" },
        isError: true
      };
    }
    if (decision.action === "modify") {
      effectiveCall = { ...toolCall, input: decision.input };
    }
  }
  try {
    let output;
    switch (source.type) {
      case "registry": {
        // Registry-backed tools instantiate the task fresh per invocation,
        // owned by the context for lifecycle cleanup.
        const ctor = getTaskConstructors2(context.registry).get(source.taskType);
        if (!ctor) {
          throw new Error(`Task type "${source.taskType}" not found in TaskRegistry`);
        }
        const taskConfig = source.config ?? {};
        const task = context.own(new ctor({}, taskConfig));
        output = await task.run(effectiveCall.input) ?? {};
        break;
      }
      case "function": {
        output = await source.run(effectiveCall.input);
        break;
      }
    }
    let result = {
      toolCallId: toolCall.id,
      toolName: toolCall.name,
      output,
      isError: false
    };
    // afterToolCall may replace the result wholesale.
    if (hooks?.afterToolCall) {
      result = await hooks.afterToolCall(toolCall, result);
    }
    return result;
  } catch (err) {
    const error = err instanceof Error ? err : new Error(String(err));
    // onToolError can recover by substituting a successful result.
    if (hooks?.onToolError) {
      const action = await hooks.onToolError(toolCall, error);
      if (action.action === "result") {
        return {
          toolCallId: toolCall.id,
          toolName: toolCall.name,
          output: action.output,
          isError: false
        };
      }
    }
    getLogger3().warn(`AgentTask: Tool "${toolCall.name}" failed: ${error.message}`);
    return {
      toolCallId: toolCall.id,
      toolName: toolCall.name,
      output: { error: error.message },
      isError: true
    };
  }
}
|
|
1569
|
+
/**
 * Executes a batch of tool calls with bounded concurrency, preserving the
 * input order in the returned results array.
 * Aborts (by throwing the signal's reason) as soon as the context signal fires.
 * @param maxConcurrency - upper bound on parallel tool executions (default 5)
 */
async function executeToolCalls(toolCalls, sources, context, hooks, maxConcurrency = 5) {
  const pending = toolCalls;
  if (pending.length === 0)
    return [];
  const workerCount = Math.max(1, Math.min(maxConcurrency, pending.length));
  const outcomes = new Array(pending.length);
  // Shared cursor: each worker claims the next unclaimed index. Safe because
  // the claim happens synchronously between awaits on the single JS thread.
  let nextIndex = 0;
  const runWorker = async () => {
    for (;;) {
      if (context.signal.aborted) {
        throw context.signal.reason ?? new TaskAbortedError("The operation was aborted");
      }
      const slot = nextIndex;
      nextIndex += 1;
      if (slot >= pending.length) {
        return;
      }
      outcomes[slot] = await executeToolCall(pending[slot], sources, context, hooks);
    }
  };
  await Promise.all(Array.from({ length: workerCount }, () => runWorker()));
  return outcomes;
}
|
|
1591
|
+
/**
 * Returns true when a tool-calls array exists and is non-empty.
 * @param toolCalls - array of tool calls, or undefined when none were made
 */
function hasToolCalls(toolCalls) {
  if (toolCalls === undefined) {
    return false;
  }
  return toolCalls.length > 0;
}
|
|
1594
|
+
/**
 * Counts tool_use content blocks across all assistant messages.
 * Non-assistant messages are ignored entirely.
 * @param messages - conversation history; assistant message content is iterable blocks
 * @returns total number of tool_use blocks
 */
function countAssistantToolUses(messages) {
  let total = 0;
  for (const message of messages) {
    if (message.role === "assistant") {
      for (const block of message.content) {
        if (block.type === "tool_use") {
          total += 1;
        }
      }
    }
  }
  return total;
}
|
|
1606
|
+
|
|
1607
|
+
// src/task/AgentTask.ts
|
|
1608
|
+
// src/task/AgentTask.ts
// Default cap on conversation history length before trimming kicks in.
var MAX_CONTEXT_MESSAGES = 1000;
// Agent uses ToolCallingTask-compatible models for its LLM turns.
var modelSchema2 = TypeModel("model:ToolCallingTask");
// Input schema for AgentTask: starting prompt, available tools, and loop limits.
var AgentInputSchema = {
  type: "object",
  properties: {
    model: modelSchema2,
    prompt: {
      oneOf: [
        { type: "string" },
        {
          type: "array",
          items: {
            type: "object",
            properties: {
              type: { type: "string", enum: ["text", "image", "audio"] }
            },
            required: ["type"],
            additionalProperties: true
          }
        }
      ],
      title: "Prompt",
      description: "The user prompt to start the agent loop. Can be a string or an array of content blocks (text, image, audio)."
    },
    systemPrompt: {
      type: "string",
      title: "System Prompt",
      description: "Optional system instructions for the agent"
    },
    tools: {
      type: "array",
      format: "tasks",
      title: "Tools",
      description: "Tools available to the agent. Each entry is a task type name (string, resolved from TaskRegistry) or a full ToolDefinition object with optional config for configurable tasks.",
      items: {
        oneOf: [
          { type: "string", format: "tasks", description: "Task type name" },
          ToolDefinitionSchema
        ]
      }
    },
    stopTool: {
      type: "string",
      title: "Stop Tool",
      description: "Name of a tool that signals agent completion. When called, the loop ends and the tool input becomes structuredOutput.",
      "x-ui-group": "Configuration"
    },
    maxIterations: {
      type: "number",
      title: "Max Iterations",
      description: "Maximum number of agent loop iterations (default: 10)",
      minimum: 1,
      "x-ui-group": "Configuration"
    },
    maxContextMessages: {
      type: "number",
      title: "Max Context Messages",
      description: "Maximum messages in conversation history. Older messages are trimmed to prevent context overflow.",
      // minimum 3 keeps at least the first user turn plus one exchange
      minimum: 3,
      "x-ui-group": "Configuration"
    },
    maxTokens: {
      type: "number",
      title: "Max Tokens",
      description: "Maximum tokens per LLM call",
      minimum: 1,
      "x-ui-group": "Configuration"
    },
    temperature: {
      type: "number",
      title: "Temperature",
      description: "Sampling temperature for LLM calls",
      minimum: 0,
      maximum: 2,
      "x-ui-group": "Configuration"
    }
  },
  required: ["model", "prompt"],
  additionalProperties: false
};
|
|
1688
|
+
// Output schema for AgentTask: final text, full transcript, and loop metrics.
// structuredOutput is only present when a stop tool terminated the loop.
var AgentOutputSchema = {
  type: "object",
  properties: {
    text: {
      type: "string",
      title: "Text",
      description: "The final text response from the agent",
      "x-stream": "append"
    },
    messages: {
      type: "array",
      title: "Messages",
      description: "Full conversation history including all tool calls and results",
      items: {
        type: "object",
        additionalProperties: true
      }
    },
    iterations: {
      type: "number",
      title: "Iterations",
      description: "Number of LLM calls made during the agent loop"
    },
    toolCallCount: {
      type: "number",
      title: "Tool Call Count",
      description: "Total number of tool calls the assistant requested (tool_use blocks in assistant messages)"
    },
    structuredOutput: {
      type: "object",
      title: "Structured Output",
      description: "Present when the agent terminated via a stop tool",
      additionalProperties: true
    }
  },
  required: ["text", "messages", "iterations", "toolCallCount"],
  additionalProperties: false
};
|
|
1726
|
+
|
|
1727
|
+
/**
 * Multi-turn agentic loop: repeatedly calls an LLM (via ToolCallingTask) with
 * tool definitions, executes any requested tool calls, feeds results back,
 * and stops when the model stops calling tools, a stop tool fires, a hook
 * requests it, the signal aborts, or maxIterations is reached.
 * Not cacheable: each run depends on live tool executions.
 */
class AgentTask extends Task2 {
  static type = "AgentTask";
  static category = "AI Agent";
  static title = "Agent";
  static description = "Multi-turn agentic loop that calls an LLM with tools, executes tool calls, and iterates until done";
  static cacheable = false;
  static inputSchema() {
    return AgentInputSchema;
  }
  static outputSchema() {
    return AgentOutputSchema;
  }
  /**
   * Non-streaming entry point: drains the agent loop and returns the payload
   * of its terminal "finish" event.
   * @throws Error if the loop ends without emitting a finish event
   */
  async execute(input, context) {
    let result;
    for await (const event of this.agentLoop(input, context)) {
      if (event.type === "finish") {
        result = event.data;
      }
    }
    if (!result) {
      throw new Error("AgentTask: loop ended without producing output");
    }
    return result;
  }
  // Streaming entry point: forwards every loop event (deltas and finish).
  async* executeStream(input, context) {
    yield* this.agentLoop(input, context);
  }
  /**
   * Core agent loop. Yields text-delta events as the model streams, and one
   * final { type: "finish", data } event with the aggregated AgentOutput.
   */
  async* agentLoop(input, context) {
    const maxIterations = input.maxIterations ?? 10;
    // hooks/maxConcurrency come from task config, not input
    const hooks = this.config.hooks;
    const maxConcurrency = this.config.maxConcurrency ?? 5;
    const toolSources = this.resolveToolSources(input, context);
    const toolDefs = this.resolveToolDefs(toolSources, input.stopTool);
    // Conversation starts with the user's prompt as message[0].
    const messages = [userMessage(input.prompt)];
    let finalText = "";
    let structuredOutput;
    for (let iteration = 0; iteration < maxIterations; iteration++) {
      if (context.signal.aborted)
        break;
      // onIteration hook can observe progress and stop the loop early.
      if (hooks?.onIteration) {
        const action = await hooks.onIteration(iteration, messages, {
          totalToolCalls: countAssistantToolUses(messages)
        });
        if (action.action === "stop")
          break;
      }
      await context.updateProgress(Math.round(iteration / maxIterations * 100), `Agent iteration ${iteration + 1}`);
      const contextMessages = this.trimMessages(messages, input.maxContextMessages ?? MAX_CONTEXT_MESSAGES);
      const llmTask = context.own(new ToolCallingTask);
      let iterationText = "";
      let toolCalls = [];
      // NOTE(review): both prompt and messages are passed every iteration;
      // ToolCallingTask's schema says messages take precedence when provided.
      for await (const event of llmTask.executeStream({
        model: input.model,
        prompt: input.prompt,
        systemPrompt: input.systemPrompt,
        tools: toolDefs,
        messages: contextMessages,
        maxTokens: input.maxTokens,
        temperature: input.temperature
      }, context)) {
        if (event.type === "text-delta") {
          yield { type: "text-delta", port: "text", textDelta: event.textDelta };
          iterationText += event.textDelta;
        } else if (event.type === "object-delta" && event.port === "toolCalls") {
          // Incremental tool-call updates: replace by id, or append when new.
          const items = event.objectDelta;
          for (const item of items) {
            const idx = toolCalls.findIndex((tc) => tc.id === item.id);
            if (idx >= 0)
              toolCalls[idx] = item;
            else
              toolCalls.push(item);
          }
        } else if (event.type === "finish") {
          // The finish payload is authoritative for this turn's text and calls.
          const data = event.data;
          iterationText = data?.text ?? iterationText;
          if (data?.toolCalls && data.toolCalls.length > 0) {
            toolCalls = data.toolCalls;
          }
        }
      }
      finalText = iterationText;
      messages.push(assistantMessage(iterationText, toolCalls));
      // Stop tool: its input becomes the structured output and ends the loop
      // before the tool is actually executed.
      if (input.stopTool) {
        const stopCall = toolCalls.find((tc) => tc.name === input.stopTool);
        if (stopCall) {
          structuredOutput = stopCall.input;
          break;
        }
      }
      // No tool calls means the model considers itself done.
      if (!hasToolCalls(toolCalls)) {
        break;
      }
      const results = await executeToolCalls(toolCalls, toolSources, context, hooks, maxConcurrency);
      messages.push(toolMessage(results));
    }
    const output = {
      text: finalText,
      messages,
      // iterations = number of assistant turns actually produced
      iterations: messages.filter((m) => m.role === "assistant").length,
      toolCallCount: countAssistantToolUses(messages),
      ...structuredOutput !== undefined ? { structuredOutput } : {}
    };
    yield { type: "finish", data: output };
  }
  // Normalizes input.tools into executable tool sources (see buildToolSources).
  resolveToolSources(input, context) {
    return buildToolSources(input.tools, context.registry);
  }
  /**
   * Extracts LLM-facing tool definitions from the sources and, when a stop
   * tool is named but not already defined, synthesizes a permissive definition
   * for it so the model can call it.
   */
  resolveToolDefs(toolSources, stopTool) {
    const defs = toolSourceDefinitions(toolSources);
    if (stopTool && !defs.some((d) => d.name === stopTool)) {
      defs.push({
        name: stopTool,
        description: "Call this tool when you have completed the task. Pass your final structured result as the input.",
        inputSchema: { type: "object", additionalProperties: true }
      });
    }
    return defs;
  }
  /**
   * Trims the conversation to at most maxContextMessages, always keeping
   * message[0] (the original user prompt) and the newest tail. The start of
   * the tail is walked back so it never begins on a dangling "tool" message.
   */
  trimMessages(messages, maxContextMessages) {
    if (!maxContextMessages || messages.length <= maxContextMessages) {
      return messages;
    }
    getLogger4().debug(`AgentTask: Trimming context from ${messages.length} to ${maxContextMessages} messages`);
    const tail = messages.slice(1);
    let startIdx = tail.length - (maxContextMessages - 1);
    if (startIdx < 0)
      startIdx = 0;
    // A tool message must follow its assistant message; back up until we
    // don't start mid-exchange.
    while (startIdx > 0 && startIdx < tail.length && tail[startIdx].role === "tool") {
      startIdx -= 1;
    }
    return [messages[0], ...tail.slice(startIdx)];
  }
}
|
|
1860
|
+
// Convenience functional wrapper around AgentTask.
var agent = (input, config) => {
  return new AgentTask(config).run(input);
};
// Register the agent task as a Workflow builder method.
Workflow2.prototype.agent = CreateWorkflow2(AgentTask);
|
|
1864
|
+
|
|
1865
|
+
// src/task/BackgroundRemovalTask.ts
|
|
1866
|
+
import { CreateWorkflow as CreateWorkflow3, Workflow as Workflow3 } from "@workglow/task-graph";
|
|
1022
1867
|
|
|
1023
1868
|
// src/task/base/AiVisionTask.ts
|
|
1869
|
+
import { convertImageDataToUseableForm } from "@workglow/util/media";
|
|
1024
1870
|
class AiVisionTask extends AiTask {
|
|
1025
1871
|
static type = "AiVisionTask";
|
|
1026
1872
|
async getJobInput(input) {
|
|
@@ -1041,7 +1887,7 @@ class AiVisionTask extends AiTask {
|
|
|
1041
1887
|
}
|
|
1042
1888
|
|
|
1043
1889
|
// src/task/BackgroundRemovalTask.ts
|
|
1044
|
-
var
|
|
1890
|
+
var modelSchema3 = TypeModel("model:BackgroundRemovalTask");
|
|
1045
1891
|
var processedImageSchema = {
|
|
1046
1892
|
type: "string",
|
|
1047
1893
|
contentEncoding: "base64",
|
|
@@ -1053,7 +1899,7 @@ var BackgroundRemovalInputSchema = {
|
|
|
1053
1899
|
type: "object",
|
|
1054
1900
|
properties: {
|
|
1055
1901
|
image: TypeImageInput,
|
|
1056
|
-
model:
|
|
1902
|
+
model: modelSchema3
|
|
1057
1903
|
},
|
|
1058
1904
|
required: ["image", "model"],
|
|
1059
1905
|
additionalProperties: false
|
|
@@ -1080,24 +1926,24 @@ class BackgroundRemovalTask extends AiVisionTask {
|
|
|
1080
1926
|
}
|
|
1081
1927
|
}
|
|
1082
1928
|
var backgroundRemoval = (input, config) => {
|
|
1083
|
-
return new BackgroundRemovalTask(
|
|
1929
|
+
return new BackgroundRemovalTask(config).run(input);
|
|
1084
1930
|
};
|
|
1085
|
-
|
|
1931
|
+
Workflow3.prototype.backgroundRemoval = CreateWorkflow3(BackgroundRemovalTask);
|
|
1086
1932
|
|
|
1087
1933
|
// src/task/ChunkRetrievalTask.ts
|
|
1088
1934
|
import { TypeKnowledgeBase } from "@workglow/knowledge-base";
|
|
1089
|
-
import { CreateWorkflow as
|
|
1935
|
+
import { CreateWorkflow as CreateWorkflow5, Task as Task3, Workflow as Workflow5 } from "@workglow/task-graph";
|
|
1090
1936
|
import {
|
|
1091
1937
|
isTypedArray,
|
|
1092
1938
|
TypedArraySchema as TypedArraySchema2
|
|
1093
1939
|
} from "@workglow/util/schema";
|
|
1094
1940
|
|
|
1095
1941
|
// src/task/TextEmbeddingTask.ts
|
|
1096
|
-
import { CreateWorkflow as
|
|
1942
|
+
import { CreateWorkflow as CreateWorkflow4, Workflow as Workflow4 } from "@workglow/task-graph";
|
|
1097
1943
|
import {
|
|
1098
1944
|
TypedArraySchema
|
|
1099
1945
|
} from "@workglow/util/schema";
|
|
1100
|
-
var
|
|
1946
|
+
var modelSchema4 = TypeModel("model:TextEmbeddingTask");
|
|
1101
1947
|
var TextEmbeddingInputSchema = {
|
|
1102
1948
|
type: "object",
|
|
1103
1949
|
properties: {
|
|
@@ -1106,7 +1952,7 @@ var TextEmbeddingInputSchema = {
|
|
|
1106
1952
|
title: "Text",
|
|
1107
1953
|
description: "The text to embed"
|
|
1108
1954
|
}),
|
|
1109
|
-
model:
|
|
1955
|
+
model: modelSchema4
|
|
1110
1956
|
},
|
|
1111
1957
|
required: ["text", "model"],
|
|
1112
1958
|
additionalProperties: false
|
|
@@ -1136,9 +1982,9 @@ class TextEmbeddingTask extends AiTask {
|
|
|
1136
1982
|
}
|
|
1137
1983
|
}
|
|
1138
1984
|
var textEmbedding = async (input, config) => {
|
|
1139
|
-
return new TextEmbeddingTask(
|
|
1985
|
+
return new TextEmbeddingTask(config).run(input);
|
|
1140
1986
|
};
|
|
1141
|
-
|
|
1987
|
+
Workflow4.prototype.textEmbedding = CreateWorkflow4(TextEmbeddingTask);
|
|
1142
1988
|
|
|
1143
1989
|
// src/task/ChunkRetrievalTask.ts
|
|
1144
1990
|
var inputSchema = {
|
|
@@ -1263,7 +2109,7 @@ var outputSchema = {
|
|
|
1263
2109
|
additionalProperties: false
|
|
1264
2110
|
};
|
|
1265
2111
|
|
|
1266
|
-
class ChunkRetrievalTask extends
|
|
2112
|
+
class ChunkRetrievalTask extends Task3 {
|
|
1267
2113
|
static type = "ChunkRetrievalTask";
|
|
1268
2114
|
static category = "RAG";
|
|
1269
2115
|
static title = "Chunk Retrieval";
|
|
@@ -1291,8 +2137,8 @@ class ChunkRetrievalTask extends Task2 {
|
|
|
1291
2137
|
if (!model) {
|
|
1292
2138
|
throw new Error("Model is required when query is a string. Please provide a model with format 'model:TextEmbeddingTask'.");
|
|
1293
2139
|
}
|
|
1294
|
-
const embeddingTask = context.own(new TextEmbeddingTask
|
|
1295
|
-
const embeddingResult = await embeddingTask.run();
|
|
2140
|
+
const embeddingTask = context.own(new TextEmbeddingTask);
|
|
2141
|
+
const embeddingResult = await embeddingTask.run({ text: query, model });
|
|
1296
2142
|
queryVectors = Array.isArray(embeddingResult.vector) ? embeddingResult.vector : [embeddingResult.vector];
|
|
1297
2143
|
} else if (isTypedArray(query) || Array.isArray(query) && query.every(isTypedArray)) {
|
|
1298
2144
|
queryVectors = Array.isArray(query) ? query : [query];
|
|
@@ -1328,13 +2174,13 @@ class ChunkRetrievalTask extends Task2 {
|
|
|
1328
2174
|
}
|
|
1329
2175
|
}
|
|
1330
2176
|
var chunkRetrieval = (input, config) => {
|
|
1331
|
-
return new ChunkRetrievalTask(
|
|
2177
|
+
return new ChunkRetrievalTask(config).run(input);
|
|
1332
2178
|
};
|
|
1333
|
-
|
|
2179
|
+
Workflow5.prototype.chunkRetrieval = CreateWorkflow5(ChunkRetrievalTask);
|
|
1334
2180
|
|
|
1335
2181
|
// src/task/ChunkToVectorTask.ts
|
|
1336
2182
|
import { ChunkRecordSchema } from "@workglow/knowledge-base";
|
|
1337
|
-
import { CreateWorkflow as
|
|
2183
|
+
import { CreateWorkflow as CreateWorkflow6, Task as Task4, Workflow as Workflow6 } from "@workglow/task-graph";
|
|
1338
2184
|
import {
|
|
1339
2185
|
TypedArraySchema as TypedArraySchema3
|
|
1340
2186
|
} from "@workglow/util/schema";
|
|
@@ -1409,7 +2255,7 @@ var outputSchema2 = {
|
|
|
1409
2255
|
additionalProperties: false
|
|
1410
2256
|
};
|
|
1411
2257
|
|
|
1412
|
-
class ChunkToVectorTask extends
|
|
2258
|
+
class ChunkToVectorTask extends Task4 {
|
|
1413
2259
|
static type = "ChunkToVectorTask";
|
|
1414
2260
|
static category = "Document";
|
|
1415
2261
|
static title = "Chunk to Vector";
|
|
@@ -1458,13 +2304,13 @@ class ChunkToVectorTask extends Task3 {
|
|
|
1458
2304
|
}
|
|
1459
2305
|
}
|
|
1460
2306
|
var chunkToVector = (input, config) => {
|
|
1461
|
-
return new ChunkToVectorTask(
|
|
2307
|
+
return new ChunkToVectorTask(config).run(input);
|
|
1462
2308
|
};
|
|
1463
|
-
|
|
2309
|
+
Workflow6.prototype.chunkToVector = CreateWorkflow6(ChunkToVectorTask);
|
|
1464
2310
|
|
|
1465
2311
|
// src/task/ChunkVectorHybridSearchTask.ts
|
|
1466
2312
|
import { TypeKnowledgeBase as TypeKnowledgeBase2 } from "@workglow/knowledge-base";
|
|
1467
|
-
import { CreateWorkflow as
|
|
2313
|
+
import { CreateWorkflow as CreateWorkflow7, Task as Task5, Workflow as Workflow7 } from "@workglow/task-graph";
|
|
1468
2314
|
import {
|
|
1469
2315
|
TypedArraySchema as TypedArraySchema4
|
|
1470
2316
|
} from "@workglow/util/schema";
|
|
@@ -1577,7 +2423,7 @@ var outputSchema3 = {
|
|
|
1577
2423
|
additionalProperties: false
|
|
1578
2424
|
};
|
|
1579
2425
|
|
|
1580
|
-
class ChunkVectorHybridSearchTask extends
|
|
2426
|
+
class ChunkVectorHybridSearchTask extends Task5 {
|
|
1581
2427
|
static type = "ChunkVectorHybridSearchTask";
|
|
1582
2428
|
static category = "RAG";
|
|
1583
2429
|
static title = "Hybrid Search";
|
|
@@ -1628,13 +2474,13 @@ class ChunkVectorHybridSearchTask extends Task4 {
|
|
|
1628
2474
|
}
|
|
1629
2475
|
}
|
|
1630
2476
|
var hybridSearch = async (input, config) => {
|
|
1631
|
-
return new ChunkVectorHybridSearchTask(
|
|
2477
|
+
return new ChunkVectorHybridSearchTask(config).run(input);
|
|
1632
2478
|
};
|
|
1633
|
-
|
|
2479
|
+
Workflow7.prototype.hybridSearch = CreateWorkflow7(ChunkVectorHybridSearchTask);
|
|
1634
2480
|
|
|
1635
2481
|
// src/task/ChunkVectorSearchTask.ts
|
|
1636
2482
|
import { TypeKnowledgeBase as TypeKnowledgeBase3 } from "@workglow/knowledge-base";
|
|
1637
|
-
import { CreateWorkflow as
|
|
2483
|
+
import { CreateWorkflow as CreateWorkflow8, Task as Task6, Workflow as Workflow8 } from "@workglow/task-graph";
|
|
1638
2484
|
import {
|
|
1639
2485
|
TypedArraySchema as TypedArraySchema5
|
|
1640
2486
|
} from "@workglow/util/schema";
|
|
@@ -1717,7 +2563,7 @@ var outputSchema4 = {
|
|
|
1717
2563
|
additionalProperties: false
|
|
1718
2564
|
};
|
|
1719
2565
|
|
|
1720
|
-
class ChunkVectorSearchTask extends
|
|
2566
|
+
class ChunkVectorSearchTask extends Task6 {
|
|
1721
2567
|
static type = "ChunkVectorSearchTask";
|
|
1722
2568
|
static category = "Vector Store";
|
|
1723
2569
|
static title = "Vector Store Search";
|
|
@@ -1747,13 +2593,13 @@ class ChunkVectorSearchTask extends Task5 {
|
|
|
1747
2593
|
}
|
|
1748
2594
|
}
|
|
1749
2595
|
var vectorStoreSearch = (input, config) => {
|
|
1750
|
-
return new ChunkVectorSearchTask(
|
|
2596
|
+
return new ChunkVectorSearchTask(config).run(input);
|
|
1751
2597
|
};
|
|
1752
|
-
|
|
2598
|
+
Workflow8.prototype.vectorStoreSearch = CreateWorkflow8(ChunkVectorSearchTask);
|
|
1753
2599
|
|
|
1754
2600
|
// src/task/ChunkVectorUpsertTask.ts
|
|
1755
2601
|
import { TypeKnowledgeBase as TypeKnowledgeBase4 } from "@workglow/knowledge-base";
|
|
1756
|
-
import { CreateWorkflow as
|
|
2602
|
+
import { CreateWorkflow as CreateWorkflow9, Task as Task7, Workflow as Workflow9 } from "@workglow/task-graph";
|
|
1757
2603
|
import {
|
|
1758
2604
|
TypedArraySchema as TypedArraySchema6
|
|
1759
2605
|
} from "@workglow/util/schema";
|
|
@@ -1807,7 +2653,7 @@ var outputSchema5 = {
|
|
|
1807
2653
|
additionalProperties: false
|
|
1808
2654
|
};
|
|
1809
2655
|
|
|
1810
|
-
class ChunkVectorUpsertTask extends
|
|
2656
|
+
class ChunkVectorUpsertTask extends Task7 {
|
|
1811
2657
|
static type = "ChunkVectorUpsertTask";
|
|
1812
2658
|
static category = "Vector Store";
|
|
1813
2659
|
static title = "Add to Vector Store";
|
|
@@ -1865,21 +2711,17 @@ class ChunkVectorUpsertTask extends Task6 {
|
|
|
1865
2711
|
}
|
|
1866
2712
|
}
|
|
1867
2713
|
var chunkVectorUpsert = (input, config) => {
|
|
1868
|
-
return new ChunkVectorUpsertTask(
|
|
2714
|
+
return new ChunkVectorUpsertTask(config).run(input);
|
|
1869
2715
|
};
|
|
1870
|
-
|
|
2716
|
+
Workflow9.prototype.chunkVectorUpsert = CreateWorkflow9(ChunkVectorUpsertTask);
|
|
1871
2717
|
|
|
1872
2718
|
// src/task/ContextBuilderTask.ts
|
|
1873
2719
|
import { estimateTokens } from "@workglow/knowledge-base";
|
|
1874
|
-
import {
|
|
1875
|
-
CreateWorkflow as CreateWorkflow9,
|
|
1876
|
-
Task as Task7,
|
|
1877
|
-
Workflow as Workflow9
|
|
1878
|
-
} from "@workglow/task-graph";
|
|
2720
|
+
import { CreateWorkflow as CreateWorkflow11, Task as Task8, Workflow as Workflow11 } from "@workglow/task-graph";
|
|
1879
2721
|
|
|
1880
2722
|
// src/task/CountTokensTask.ts
|
|
1881
|
-
import { CreateWorkflow as
|
|
1882
|
-
var
|
|
2723
|
+
import { CreateWorkflow as CreateWorkflow10, Workflow as Workflow10 } from "@workglow/task-graph";
|
|
2724
|
+
var modelSchema5 = TypeModel("model");
|
|
1883
2725
|
var CountTokensInputSchema = {
|
|
1884
2726
|
type: "object",
|
|
1885
2727
|
properties: {
|
|
@@ -1888,7 +2730,7 @@ var CountTokensInputSchema = {
|
|
|
1888
2730
|
title: "Text",
|
|
1889
2731
|
description: "The text to count tokens for"
|
|
1890
2732
|
},
|
|
1891
|
-
model:
|
|
2733
|
+
model: modelSchema5
|
|
1892
2734
|
},
|
|
1893
2735
|
required: ["text", "model"],
|
|
1894
2736
|
additionalProperties: false
|
|
@@ -1920,9 +2762,9 @@ class CountTokensTask extends AiTask {
|
|
|
1920
2762
|
}
|
|
1921
2763
|
}
|
|
1922
2764
|
var countTokens = async (input, config) => {
|
|
1923
|
-
return new CountTokensTask(
|
|
2765
|
+
return new CountTokensTask(config).run(input);
|
|
1924
2766
|
};
|
|
1925
|
-
|
|
2767
|
+
Workflow10.prototype.countTokens = CreateWorkflow10(CountTokensTask);
|
|
1926
2768
|
|
|
1927
2769
|
// src/task/ContextBuilderTask.ts
|
|
1928
2770
|
var ContextFormat = {
|
|
@@ -1932,7 +2774,7 @@ var ContextFormat = {
|
|
|
1932
2774
|
MARKDOWN: "markdown",
|
|
1933
2775
|
JSON: "json"
|
|
1934
2776
|
};
|
|
1935
|
-
var
|
|
2777
|
+
var modelSchema6 = TypeModel("model", {
|
|
1936
2778
|
title: "Model",
|
|
1937
2779
|
description: "Model to use for token counting (optional, falls back to estimation)"
|
|
1938
2780
|
});
|
|
@@ -1996,7 +2838,7 @@ var inputSchema6 = {
|
|
|
1996
2838
|
|
|
1997
2839
|
`
|
|
1998
2840
|
},
|
|
1999
|
-
model:
|
|
2841
|
+
model: modelSchema6
|
|
2000
2842
|
},
|
|
2001
2843
|
required: ["chunks"],
|
|
2002
2844
|
additionalProperties: false
|
|
@@ -2029,7 +2871,7 @@ var outputSchema6 = {
|
|
|
2029
2871
|
additionalProperties: false
|
|
2030
2872
|
};
|
|
2031
2873
|
|
|
2032
|
-
class ContextBuilderTask extends
|
|
2874
|
+
class ContextBuilderTask extends Task8 {
|
|
2033
2875
|
static type = "ContextBuilderTask";
|
|
2034
2876
|
static category = "RAG";
|
|
2035
2877
|
static title = "Context Builder";
|
|
@@ -2056,7 +2898,7 @@ class ContextBuilderTask extends Task7 {
|
|
|
2056
2898
|
} = input;
|
|
2057
2899
|
let countFn = async (text) => estimateTokens(text);
|
|
2058
2900
|
if (input.model) {
|
|
2059
|
-
const countTask = context.own(new CountTokensTask({ model: input.model }));
|
|
2901
|
+
const countTask = context.own(new CountTokensTask({ defaults: { model: input.model } }));
|
|
2060
2902
|
countFn = async (text) => {
|
|
2061
2903
|
try {
|
|
2062
2904
|
const result = await countTask.run({ text });
|
|
@@ -2220,20 +3062,17 @@ class ContextBuilderTask extends Task7 {
|
|
|
2220
3062
|
}
|
|
2221
3063
|
}
|
|
2222
3064
|
var contextBuilder = (input, config) => {
|
|
2223
|
-
return new ContextBuilderTask(
|
|
3065
|
+
return new ContextBuilderTask(config).run(input);
|
|
2224
3066
|
};
|
|
2225
|
-
|
|
3067
|
+
Workflow11.prototype.contextBuilder = CreateWorkflow11(ContextBuilderTask);
|
|
2226
3068
|
|
|
2227
3069
|
// src/task/DocumentEnricherTask.ts
|
|
2228
|
-
import {
|
|
2229
|
-
|
|
2230
|
-
hasChildren
|
|
2231
|
-
} from "@workglow/knowledge-base";
|
|
2232
|
-
import { CreateWorkflow as CreateWorkflow12, Task as Task8, Workflow as Workflow12 } from "@workglow/task-graph";
|
|
3070
|
+
import { getChildren, hasChildren } from "@workglow/knowledge-base";
|
|
3071
|
+
import { CreateWorkflow as CreateWorkflow14, Task as Task9, Workflow as Workflow14 } from "@workglow/task-graph";
|
|
2233
3072
|
|
|
2234
3073
|
// src/task/TextNamedEntityRecognitionTask.ts
|
|
2235
|
-
import { CreateWorkflow as
|
|
2236
|
-
var
|
|
3074
|
+
import { CreateWorkflow as CreateWorkflow12, Workflow as Workflow12 } from "@workglow/task-graph";
|
|
3075
|
+
var modelSchema7 = TypeModel("model:TextNamedEntityRecognitionTask");
|
|
2237
3076
|
var TextNamedEntityRecognitionInputSchema = {
|
|
2238
3077
|
type: "object",
|
|
2239
3078
|
properties: {
|
|
@@ -2252,7 +3091,7 @@ var TextNamedEntityRecognitionInputSchema = {
|
|
|
2252
3091
|
"x-ui-group": "Configuration",
|
|
2253
3092
|
"x-ui-group-open": false
|
|
2254
3093
|
},
|
|
2255
|
-
model:
|
|
3094
|
+
model: modelSchema7
|
|
2256
3095
|
},
|
|
2257
3096
|
required: ["text", "model"],
|
|
2258
3097
|
additionalProperties: false
|
|
@@ -2305,53 +3144,13 @@ class TextNamedEntityRecognitionTask extends AiTask {
|
|
|
2305
3144
|
}
|
|
2306
3145
|
}
|
|
2307
3146
|
var textNamedEntityRecognition = (input, config) => {
|
|
2308
|
-
return new TextNamedEntityRecognitionTask(
|
|
3147
|
+
return new TextNamedEntityRecognitionTask(config).run(input);
|
|
2309
3148
|
};
|
|
2310
|
-
|
|
2311
|
-
|
|
2312
|
-
// src/task/TextSummaryTask.ts
|
|
2313
|
-
import { CreateWorkflow as CreateWorkflow11, Workflow as Workflow11 } from "@workglow/task-graph";
|
|
2314
|
-
|
|
2315
|
-
// src/task/base/StreamingAiTask.ts
|
|
2316
|
-
import {
|
|
2317
|
-
getStreamingPorts,
|
|
2318
|
-
TaskConfigurationError as TaskConfigurationError3
|
|
2319
|
-
} from "@workglow/task-graph";
|
|
2320
|
-
class StreamingAiTask extends AiTask {
|
|
2321
|
-
static type = "StreamingAiTask";
|
|
2322
|
-
async* executeStream(input, context) {
|
|
2323
|
-
const model = input.model;
|
|
2324
|
-
if (!model || typeof model !== "object") {
|
|
2325
|
-
throw new TaskConfigurationError3("StreamingAiTask: Model was not resolved to ModelConfig - this indicates a bug in the resolution system");
|
|
2326
|
-
}
|
|
2327
|
-
const jobInput = await this.getJobInput(input);
|
|
2328
|
-
const strategy = getAiProviderRegistry().getStrategy(model);
|
|
2329
|
-
const outSchema = this.outputSchema();
|
|
2330
|
-
const ports = getStreamingPorts(outSchema);
|
|
2331
|
-
let defaultPort = "text";
|
|
2332
|
-
if (ports.length > 0) {
|
|
2333
|
-
defaultPort = ports[0].port;
|
|
2334
|
-
} else {
|
|
2335
|
-
if (typeof outSchema === "object" && outSchema.properties) {
|
|
2336
|
-
const firstProp = Object.keys(outSchema.properties)[0];
|
|
2337
|
-
if (firstProp)
|
|
2338
|
-
defaultPort = firstProp;
|
|
2339
|
-
}
|
|
2340
|
-
}
|
|
2341
|
-
for await (const event of strategy.executeStream(jobInput, context, this.runConfig.runnerId)) {
|
|
2342
|
-
if (event.type === "text-delta") {
|
|
2343
|
-
yield { ...event, port: event.port ?? defaultPort };
|
|
2344
|
-
} else if (event.type === "object-delta") {
|
|
2345
|
-
yield { ...event, port: event.port ?? defaultPort };
|
|
2346
|
-
} else {
|
|
2347
|
-
yield event;
|
|
2348
|
-
}
|
|
2349
|
-
}
|
|
2350
|
-
}
|
|
2351
|
-
}
|
|
3149
|
+
Workflow12.prototype.textNamedEntityRecognition = CreateWorkflow12(TextNamedEntityRecognitionTask);
|
|
2352
3150
|
|
|
2353
3151
|
// src/task/TextSummaryTask.ts
|
|
2354
|
-
|
|
3152
|
+
import { CreateWorkflow as CreateWorkflow13, Workflow as Workflow13 } from "@workglow/task-graph";
|
|
3153
|
+
var modelSchema8 = TypeModel("model:TextSummaryTask");
|
|
2355
3154
|
var TextSummaryInputSchema = {
|
|
2356
3155
|
type: "object",
|
|
2357
3156
|
properties: {
|
|
@@ -2360,7 +3159,7 @@ var TextSummaryInputSchema = {
|
|
|
2360
3159
|
title: "Text",
|
|
2361
3160
|
description: "The text to summarize"
|
|
2362
3161
|
},
|
|
2363
|
-
model:
|
|
3162
|
+
model: modelSchema8
|
|
2364
3163
|
},
|
|
2365
3164
|
required: ["text", "model"],
|
|
2366
3165
|
additionalProperties: false
|
|
@@ -2392,9 +3191,9 @@ class TextSummaryTask extends StreamingAiTask {
|
|
|
2392
3191
|
}
|
|
2393
3192
|
}
|
|
2394
3193
|
var textSummary = async (input, config) => {
|
|
2395
|
-
return new TextSummaryTask(
|
|
3194
|
+
return new TextSummaryTask(config).run(input);
|
|
2396
3195
|
};
|
|
2397
|
-
|
|
3196
|
+
Workflow13.prototype.textSummary = CreateWorkflow13(TextSummaryTask);
|
|
2398
3197
|
|
|
2399
3198
|
// src/task/DocumentEnricherTask.ts
|
|
2400
3199
|
var inputSchema7 = {
|
|
@@ -2466,7 +3265,7 @@ var outputSchema7 = {
|
|
|
2466
3265
|
additionalProperties: false
|
|
2467
3266
|
};
|
|
2468
3267
|
|
|
2469
|
-
class DocumentEnricherTask extends
|
|
3268
|
+
class DocumentEnricherTask extends Task9 {
|
|
2470
3269
|
static type = "DocumentEnricherTask";
|
|
2471
3270
|
static category = "Document";
|
|
2472
3271
|
static title = "Document Enricher";
|
|
@@ -2494,7 +3293,7 @@ class DocumentEnricherTask extends Task8 {
|
|
|
2494
3293
|
let summaryCount = 0;
|
|
2495
3294
|
let entityCount = 0;
|
|
2496
3295
|
const extract = extractEntities && nerModel ? async (text) => {
|
|
2497
|
-
const result = await context.own(new TextNamedEntityRecognitionTask({ text, model: nerModel })
|
|
3296
|
+
const result = await context.own(new TextNamedEntityRecognitionTask).run({ text, model: nerModel });
|
|
2498
3297
|
return result.entities.map((e) => ({
|
|
2499
3298
|
type: e.entity,
|
|
2500
3299
|
text: e.word,
|
|
@@ -2623,17 +3422,17 @@ class DocumentEnricherTask extends Task8 {
|
|
|
2623
3422
|
}
|
|
2624
3423
|
}
|
|
2625
3424
|
var documentEnricher = (input, config) => {
|
|
2626
|
-
return new DocumentEnricherTask(
|
|
3425
|
+
return new DocumentEnricherTask(config).run(input);
|
|
2627
3426
|
};
|
|
2628
|
-
|
|
3427
|
+
Workflow14.prototype.documentEnricher = CreateWorkflow14(DocumentEnricherTask);
|
|
2629
3428
|
|
|
2630
3429
|
// src/task/DownloadModelTask.ts
|
|
2631
|
-
import { CreateWorkflow as
|
|
2632
|
-
var
|
|
3430
|
+
import { CreateWorkflow as CreateWorkflow15, Workflow as Workflow15 } from "@workglow/task-graph";
|
|
3431
|
+
var modelSchema9 = TypeModel("model");
|
|
2633
3432
|
var DownloadModelInputSchema = {
|
|
2634
3433
|
type: "object",
|
|
2635
3434
|
properties: {
|
|
2636
|
-
model:
|
|
3435
|
+
model: modelSchema9
|
|
2637
3436
|
},
|
|
2638
3437
|
required: ["model"],
|
|
2639
3438
|
additionalProperties: false
|
|
@@ -2641,7 +3440,7 @@ var DownloadModelInputSchema = {
|
|
|
2641
3440
|
var DownloadModelOutputSchema = {
|
|
2642
3441
|
type: "object",
|
|
2643
3442
|
properties: {
|
|
2644
|
-
model:
|
|
3443
|
+
model: modelSchema9
|
|
2645
3444
|
},
|
|
2646
3445
|
required: ["model"],
|
|
2647
3446
|
additionalProperties: false
|
|
@@ -2660,8 +3459,8 @@ class DownloadModelTask extends AiTask {
|
|
|
2660
3459
|
}
|
|
2661
3460
|
static cacheable = false;
|
|
2662
3461
|
files = [];
|
|
2663
|
-
constructor(
|
|
2664
|
-
super(
|
|
3462
|
+
constructor(config = {}) {
|
|
3463
|
+
super(config);
|
|
2665
3464
|
this.on("progress", this.processProgress.bind(this));
|
|
2666
3465
|
this.on("start", () => {
|
|
2667
3466
|
this.files = [];
|
|
@@ -2693,13 +3492,13 @@ class DownloadModelTask extends AiTask {
|
|
|
2693
3492
|
}
|
|
2694
3493
|
}
|
|
2695
3494
|
var downloadModel = (input, config) => {
|
|
2696
|
-
return new DownloadModelTask(
|
|
3495
|
+
return new DownloadModelTask(config).run(input);
|
|
2697
3496
|
};
|
|
2698
|
-
|
|
3497
|
+
Workflow15.prototype.downloadModel = CreateWorkflow15(DownloadModelTask);
|
|
2699
3498
|
|
|
2700
3499
|
// src/task/FaceDetectorTask.ts
|
|
2701
|
-
import { CreateWorkflow as
|
|
2702
|
-
var
|
|
3500
|
+
import { CreateWorkflow as CreateWorkflow16, Workflow as Workflow16 } from "@workglow/task-graph";
|
|
3501
|
+
var modelSchema10 = TypeModel("model:FaceDetectorTask");
|
|
2703
3502
|
var TypeBoundingBox2 = {
|
|
2704
3503
|
type: "object",
|
|
2705
3504
|
properties: {
|
|
@@ -2772,7 +3571,7 @@ var FaceDetectorInputSchema = {
|
|
|
2772
3571
|
type: "object",
|
|
2773
3572
|
properties: {
|
|
2774
3573
|
image: TypeImageInput,
|
|
2775
|
-
model:
|
|
3574
|
+
model: modelSchema10,
|
|
2776
3575
|
minDetectionConfidence: {
|
|
2777
3576
|
type: "number",
|
|
2778
3577
|
minimum: 0,
|
|
@@ -2824,13 +3623,13 @@ class FaceDetectorTask extends AiVisionTask {
|
|
|
2824
3623
|
}
|
|
2825
3624
|
}
|
|
2826
3625
|
var faceDetector = (input, config) => {
|
|
2827
|
-
return new FaceDetectorTask(
|
|
3626
|
+
return new FaceDetectorTask(config).run(input);
|
|
2828
3627
|
};
|
|
2829
|
-
|
|
3628
|
+
Workflow16.prototype.faceDetector = CreateWorkflow16(FaceDetectorTask);
|
|
2830
3629
|
|
|
2831
3630
|
// src/task/FaceLandmarkerTask.ts
|
|
2832
|
-
import { CreateWorkflow as
|
|
2833
|
-
var
|
|
3631
|
+
import { CreateWorkflow as CreateWorkflow17, Workflow as Workflow17 } from "@workglow/task-graph";
|
|
3632
|
+
var modelSchema11 = TypeModel("model:FaceLandmarkerTask");
|
|
2834
3633
|
var TypeLandmark = {
|
|
2835
3634
|
type: "object",
|
|
2836
3635
|
properties: {
|
|
@@ -2902,7 +3701,7 @@ var FaceLandmarkerInputSchema = {
|
|
|
2902
3701
|
type: "object",
|
|
2903
3702
|
properties: {
|
|
2904
3703
|
image: TypeImageInput,
|
|
2905
|
-
model:
|
|
3704
|
+
model: modelSchema11,
|
|
2906
3705
|
numFaces: {
|
|
2907
3706
|
type: "number",
|
|
2908
3707
|
minimum: 1,
|
|
@@ -2986,13 +3785,13 @@ class FaceLandmarkerTask extends AiVisionTask {
|
|
|
2986
3785
|
}
|
|
2987
3786
|
}
|
|
2988
3787
|
var faceLandmarker = (input, config) => {
|
|
2989
|
-
return new FaceLandmarkerTask(
|
|
3788
|
+
return new FaceLandmarkerTask(config).run(input);
|
|
2990
3789
|
};
|
|
2991
|
-
|
|
3790
|
+
Workflow17.prototype.faceLandmarker = CreateWorkflow17(FaceLandmarkerTask);
|
|
2992
3791
|
|
|
2993
3792
|
// src/task/GestureRecognizerTask.ts
|
|
2994
|
-
import { CreateWorkflow as
|
|
2995
|
-
var
|
|
3793
|
+
import { CreateWorkflow as CreateWorkflow18, Workflow as Workflow18 } from "@workglow/task-graph";
|
|
3794
|
+
var modelSchema12 = TypeModel("model:GestureRecognizerTask");
|
|
2996
3795
|
var TypeLandmark2 = {
|
|
2997
3796
|
type: "object",
|
|
2998
3797
|
properties: {
|
|
@@ -3084,7 +3883,7 @@ var GestureRecognizerInputSchema = {
|
|
|
3084
3883
|
type: "object",
|
|
3085
3884
|
properties: {
|
|
3086
3885
|
image: TypeImageInput,
|
|
3087
|
-
model:
|
|
3886
|
+
model: modelSchema12,
|
|
3088
3887
|
numHands: {
|
|
3089
3888
|
type: "number",
|
|
3090
3889
|
minimum: 1,
|
|
@@ -3154,13 +3953,13 @@ class GestureRecognizerTask extends AiVisionTask {
|
|
|
3154
3953
|
}
|
|
3155
3954
|
}
|
|
3156
3955
|
var gestureRecognizer = (input, config) => {
|
|
3157
|
-
return new GestureRecognizerTask(
|
|
3956
|
+
return new GestureRecognizerTask(config).run(input);
|
|
3158
3957
|
};
|
|
3159
|
-
|
|
3958
|
+
Workflow18.prototype.gestureRecognizer = CreateWorkflow18(GestureRecognizerTask);
|
|
3160
3959
|
|
|
3161
3960
|
// src/task/HandLandmarkerTask.ts
|
|
3162
|
-
import { CreateWorkflow as
|
|
3163
|
-
var
|
|
3961
|
+
import { CreateWorkflow as CreateWorkflow19, Workflow as Workflow19 } from "@workglow/task-graph";
|
|
3962
|
+
var modelSchema13 = TypeModel("model:HandLandmarkerTask");
|
|
3164
3963
|
var TypeLandmark3 = {
|
|
3165
3964
|
type: "object",
|
|
3166
3965
|
properties: {
|
|
@@ -3229,7 +4028,7 @@ var HandLandmarkerInputSchema = {
|
|
|
3229
4028
|
type: "object",
|
|
3230
4029
|
properties: {
|
|
3231
4030
|
image: TypeImageInput,
|
|
3232
|
-
model:
|
|
4031
|
+
model: modelSchema13,
|
|
3233
4032
|
numHands: {
|
|
3234
4033
|
type: "number",
|
|
3235
4034
|
minimum: 1,
|
|
@@ -3299,9 +4098,9 @@ class HandLandmarkerTask extends AiVisionTask {
|
|
|
3299
4098
|
}
|
|
3300
4099
|
}
|
|
3301
4100
|
var handLandmarker = (input, config) => {
|
|
3302
|
-
return new HandLandmarkerTask(
|
|
4101
|
+
return new HandLandmarkerTask(config).run(input);
|
|
3303
4102
|
};
|
|
3304
|
-
|
|
4103
|
+
Workflow19.prototype.handLandmarker = CreateWorkflow19(HandLandmarkerTask);
|
|
3305
4104
|
|
|
3306
4105
|
// src/task/HierarchicalChunkerTask.ts
|
|
3307
4106
|
import {
|
|
@@ -3310,9 +4109,9 @@ import {
|
|
|
3310
4109
|
getChildren as getChildren2,
|
|
3311
4110
|
hasChildren as hasChildren2
|
|
3312
4111
|
} from "@workglow/knowledge-base";
|
|
3313
|
-
import { CreateWorkflow as
|
|
4112
|
+
import { CreateWorkflow as CreateWorkflow20, Task as Task10, Workflow as Workflow20 } from "@workglow/task-graph";
|
|
3314
4113
|
import { uuid4 } from "@workglow/util";
|
|
3315
|
-
var
|
|
4114
|
+
var modelSchema14 = TypeModel("model", {
|
|
3316
4115
|
title: "Model",
|
|
3317
4116
|
description: "Model to use for token counting"
|
|
3318
4117
|
});
|
|
@@ -3356,7 +4155,7 @@ var inputSchema8 = {
|
|
|
3356
4155
|
description: "Strategy for chunking",
|
|
3357
4156
|
default: "hierarchical"
|
|
3358
4157
|
},
|
|
3359
|
-
model:
|
|
4158
|
+
model: modelSchema14
|
|
3360
4159
|
},
|
|
3361
4160
|
required: ["doc_id", "documentTree"],
|
|
3362
4161
|
additionalProperties: false
|
|
@@ -3391,7 +4190,7 @@ var outputSchema8 = {
|
|
|
3391
4190
|
additionalProperties: false
|
|
3392
4191
|
};
|
|
3393
4192
|
|
|
3394
|
-
class HierarchicalChunkerTask extends
|
|
4193
|
+
class HierarchicalChunkerTask extends Task10 {
|
|
3395
4194
|
static type = "HierarchicalChunkerTask";
|
|
3396
4195
|
static category = "Document";
|
|
3397
4196
|
static title = "Hierarchical Chunker";
|
|
@@ -3426,7 +4225,7 @@ class HierarchicalChunkerTask extends Task9 {
|
|
|
3426
4225
|
};
|
|
3427
4226
|
let countFn = async (text) => estimateTokens2(text);
|
|
3428
4227
|
if (input.model) {
|
|
3429
|
-
const countTask = context.own(new CountTokensTask({ model: input.model }));
|
|
4228
|
+
const countTask = context.own(new CountTokensTask({ defaults: { model: input.model } }));
|
|
3430
4229
|
countFn = async (text) => {
|
|
3431
4230
|
try {
|
|
3432
4231
|
const result = await countTask.run({ text });
|
|
@@ -3533,16 +4332,13 @@ class HierarchicalChunkerTask extends Task9 {
|
|
|
3533
4332
|
}
|
|
3534
4333
|
}
|
|
3535
4334
|
var hierarchicalChunker = (input, config) => {
|
|
3536
|
-
return new HierarchicalChunkerTask(
|
|
4335
|
+
return new HierarchicalChunkerTask(config).run(input);
|
|
3537
4336
|
};
|
|
3538
|
-
|
|
4337
|
+
Workflow20.prototype.hierarchicalChunker = CreateWorkflow20(HierarchicalChunkerTask);
|
|
3539
4338
|
|
|
3540
4339
|
// src/task/HierarchyJoinTask.ts
|
|
3541
|
-
import {
|
|
3542
|
-
|
|
3543
|
-
TypeKnowledgeBase as TypeKnowledgeBase5
|
|
3544
|
-
} from "@workglow/knowledge-base";
|
|
3545
|
-
import { CreateWorkflow as CreateWorkflow19, Task as Task10, Workflow as Workflow19 } from "@workglow/task-graph";
|
|
4340
|
+
import { ChunkRecordArraySchema, TypeKnowledgeBase as TypeKnowledgeBase5 } from "@workglow/knowledge-base";
|
|
4341
|
+
import { CreateWorkflow as CreateWorkflow21, Task as Task11, Workflow as Workflow21 } from "@workglow/task-graph";
|
|
3546
4342
|
var inputSchema9 = {
|
|
3547
4343
|
type: "object",
|
|
3548
4344
|
properties: {
|
|
@@ -3617,7 +4413,7 @@ var outputSchema9 = {
|
|
|
3617
4413
|
additionalProperties: false
|
|
3618
4414
|
};
|
|
3619
4415
|
|
|
3620
|
-
class HierarchyJoinTask extends
|
|
4416
|
+
class HierarchyJoinTask extends Task11 {
|
|
3621
4417
|
static type = "HierarchyJoinTask";
|
|
3622
4418
|
static category = "RAG";
|
|
3623
4419
|
static title = "Hierarchy Join";
|
|
@@ -3709,18 +4505,18 @@ class HierarchyJoinTask extends Task10 {
|
|
|
3709
4505
|
}
|
|
3710
4506
|
}
|
|
3711
4507
|
var hierarchyJoin = (input, config) => {
|
|
3712
|
-
return new HierarchyJoinTask(
|
|
4508
|
+
return new HierarchyJoinTask(config).run(input);
|
|
3713
4509
|
};
|
|
3714
|
-
|
|
4510
|
+
Workflow21.prototype.hierarchyJoin = CreateWorkflow21(HierarchyJoinTask);
|
|
3715
4511
|
|
|
3716
4512
|
// src/task/ImageClassificationTask.ts
|
|
3717
|
-
import { CreateWorkflow as
|
|
3718
|
-
var
|
|
4513
|
+
import { CreateWorkflow as CreateWorkflow22, Workflow as Workflow22 } from "@workglow/task-graph";
|
|
4514
|
+
var modelSchema15 = TypeModel("model:ImageClassificationTask");
|
|
3719
4515
|
var ImageClassificationInputSchema = {
|
|
3720
4516
|
type: "object",
|
|
3721
4517
|
properties: {
|
|
3722
4518
|
image: TypeImageInput,
|
|
3723
|
-
model:
|
|
4519
|
+
model: modelSchema15,
|
|
3724
4520
|
categories: {
|
|
3725
4521
|
type: "array",
|
|
3726
4522
|
items: {
|
|
@@ -3772,21 +4568,21 @@ class ImageClassificationTask extends AiVisionTask {
|
|
|
3772
4568
|
}
|
|
3773
4569
|
}
|
|
3774
4570
|
var imageClassification = (input, config) => {
|
|
3775
|
-
return new ImageClassificationTask(
|
|
4571
|
+
return new ImageClassificationTask(config).run(input);
|
|
3776
4572
|
};
|
|
3777
|
-
|
|
4573
|
+
Workflow22.prototype.imageClassification = CreateWorkflow22(ImageClassificationTask);
|
|
3778
4574
|
|
|
3779
4575
|
// src/task/ImageEmbeddingTask.ts
|
|
3780
|
-
import { CreateWorkflow as
|
|
4576
|
+
import { CreateWorkflow as CreateWorkflow23, Workflow as Workflow23 } from "@workglow/task-graph";
|
|
3781
4577
|
import {
|
|
3782
4578
|
TypedArraySchema as TypedArraySchema7
|
|
3783
4579
|
} from "@workglow/util/schema";
|
|
3784
|
-
var
|
|
4580
|
+
var modelSchema16 = TypeModel("model:ImageEmbeddingTask");
|
|
3785
4581
|
var ImageEmbeddingInputSchema = {
|
|
3786
4582
|
type: "object",
|
|
3787
4583
|
properties: {
|
|
3788
4584
|
image: TypeSingleOrArray(TypeImageInput),
|
|
3789
|
-
model:
|
|
4585
|
+
model: modelSchema16
|
|
3790
4586
|
},
|
|
3791
4587
|
required: ["image", "model"],
|
|
3792
4588
|
additionalProperties: false
|
|
@@ -3816,18 +4612,18 @@ class ImageEmbeddingTask extends AiVisionTask {
|
|
|
3816
4612
|
}
|
|
3817
4613
|
}
|
|
3818
4614
|
var imageEmbedding = (input, config) => {
|
|
3819
|
-
return new ImageEmbeddingTask(
|
|
4615
|
+
return new ImageEmbeddingTask(config).run(input);
|
|
3820
4616
|
};
|
|
3821
|
-
|
|
4617
|
+
Workflow23.prototype.imageEmbedding = CreateWorkflow23(ImageEmbeddingTask);
|
|
3822
4618
|
|
|
3823
4619
|
// src/task/ImageSegmentationTask.ts
|
|
3824
|
-
import { CreateWorkflow as
|
|
3825
|
-
var
|
|
4620
|
+
import { CreateWorkflow as CreateWorkflow24, Workflow as Workflow24 } from "@workglow/task-graph";
|
|
4621
|
+
var modelSchema17 = TypeModel("model:ImageSegmentationTask");
|
|
3826
4622
|
var ImageSegmentationInputSchema = {
|
|
3827
4623
|
type: "object",
|
|
3828
4624
|
properties: {
|
|
3829
4625
|
image: TypeImageInput,
|
|
3830
|
-
model:
|
|
4626
|
+
model: modelSchema17,
|
|
3831
4627
|
threshold: {
|
|
3832
4628
|
type: "number",
|
|
3833
4629
|
title: "Threshold",
|
|
@@ -3904,13 +4700,13 @@ class ImageSegmentationTask extends AiVisionTask {
|
|
|
3904
4700
|
}
|
|
3905
4701
|
}
|
|
3906
4702
|
var imageSegmentation = (input, config) => {
|
|
3907
|
-
return new ImageSegmentationTask(
|
|
4703
|
+
return new ImageSegmentationTask(config).run(input);
|
|
3908
4704
|
};
|
|
3909
|
-
|
|
4705
|
+
Workflow24.prototype.imageSegmentation = CreateWorkflow24(ImageSegmentationTask);
|
|
3910
4706
|
|
|
3911
4707
|
// src/task/ImageToTextTask.ts
|
|
3912
|
-
import { CreateWorkflow as
|
|
3913
|
-
var
|
|
4708
|
+
import { CreateWorkflow as CreateWorkflow25, Workflow as Workflow25 } from "@workglow/task-graph";
|
|
4709
|
+
var modelSchema18 = TypeModel("model:ImageToTextTask");
|
|
3914
4710
|
var generatedTextSchema = {
|
|
3915
4711
|
type: "string",
|
|
3916
4712
|
title: "Text",
|
|
@@ -3920,7 +4716,7 @@ var ImageToTextInputSchema = {
|
|
|
3920
4716
|
type: "object",
|
|
3921
4717
|
properties: {
|
|
3922
4718
|
image: TypeImageInput,
|
|
3923
|
-
model:
|
|
4719
|
+
model: modelSchema18,
|
|
3924
4720
|
maxTokens: {
|
|
3925
4721
|
type: "number",
|
|
3926
4722
|
title: "Max Tokens",
|
|
@@ -3959,17 +4755,17 @@ class ImageToTextTask extends AiVisionTask {
|
|
|
3959
4755
|
}
|
|
3960
4756
|
}
|
|
3961
4757
|
var imageToText = (input, config) => {
|
|
3962
|
-
return new ImageToTextTask(
|
|
4758
|
+
return new ImageToTextTask(config).run(input);
|
|
3963
4759
|
};
|
|
3964
|
-
|
|
4760
|
+
Workflow25.prototype.imageToText = CreateWorkflow25(ImageToTextTask);
|
|
3965
4761
|
|
|
3966
4762
|
// src/task/ModelInfoTask.ts
|
|
3967
|
-
import { CreateWorkflow as
|
|
3968
|
-
var
|
|
4763
|
+
import { CreateWorkflow as CreateWorkflow26, Workflow as Workflow26 } from "@workglow/task-graph";
|
|
4764
|
+
var modelSchema19 = TypeModel("model");
|
|
3969
4765
|
var ModelInfoInputSchema = {
|
|
3970
4766
|
type: "object",
|
|
3971
4767
|
properties: {
|
|
3972
|
-
model:
|
|
4768
|
+
model: modelSchema19,
|
|
3973
4769
|
detail: {
|
|
3974
4770
|
type: "string",
|
|
3975
4771
|
enum: ["cached_status", "files", "files_with_metadata"],
|
|
@@ -3982,7 +4778,7 @@ var ModelInfoInputSchema = {
|
|
|
3982
4778
|
var ModelInfoOutputSchema = {
|
|
3983
4779
|
type: "object",
|
|
3984
4780
|
properties: {
|
|
3985
|
-
model:
|
|
4781
|
+
model: modelSchema19,
|
|
3986
4782
|
is_local: { type: "boolean" },
|
|
3987
4783
|
is_remote: { type: "boolean" },
|
|
3988
4784
|
supports_browser: { type: "boolean" },
|
|
@@ -4030,12 +4826,12 @@ class ModelInfoTask extends AiTask {
|
|
|
4030
4826
|
}
|
|
4031
4827
|
}
|
|
4032
4828
|
var modelInfo = (input, config) => {
|
|
4033
|
-
return new ModelInfoTask(
|
|
4829
|
+
return new ModelInfoTask(config).run(input);
|
|
4034
4830
|
};
|
|
4035
|
-
|
|
4831
|
+
Workflow26.prototype.modelInfo = CreateWorkflow26(ModelInfoTask);
|
|
4036
4832
|
|
|
4037
4833
|
// src/task/ModelSearchTask.ts
|
|
4038
|
-
import { CreateWorkflow as
|
|
4834
|
+
import { CreateWorkflow as CreateWorkflow27, Task as Task12, Workflow as Workflow27 } from "@workglow/task-graph";
|
|
4039
4835
|
var ModelSearchInputSchema = {
|
|
4040
4836
|
type: "object",
|
|
4041
4837
|
properties: {
|
|
@@ -4100,7 +4896,7 @@ var ModelSearchOutputSchema = {
|
|
|
4100
4896
|
additionalProperties: false
|
|
4101
4897
|
};
|
|
4102
4898
|
|
|
4103
|
-
class ModelSearchTask extends
|
|
4899
|
+
class ModelSearchTask extends Task12 {
|
|
4104
4900
|
static type = "ModelSearchTask";
|
|
4105
4901
|
static category = "AI Model";
|
|
4106
4902
|
static title = "Model Search";
|
|
@@ -4124,13 +4920,13 @@ class ModelSearchTask extends Task11 {
|
|
|
4124
4920
|
}
|
|
4125
4921
|
}
|
|
4126
4922
|
var modelSearch = (input, config) => {
|
|
4127
|
-
return new ModelSearchTask(
|
|
4923
|
+
return new ModelSearchTask(config).run(input);
|
|
4128
4924
|
};
|
|
4129
|
-
|
|
4925
|
+
Workflow27.prototype.modelSearch = CreateWorkflow27(ModelSearchTask);
|
|
4130
4926
|
|
|
4131
4927
|
// src/task/ObjectDetectionTask.ts
|
|
4132
|
-
import { CreateWorkflow as
|
|
4133
|
-
var
|
|
4928
|
+
import { CreateWorkflow as CreateWorkflow28, Workflow as Workflow28 } from "@workglow/task-graph";
|
|
4929
|
+
var modelSchema20 = TypeModel("model:ObjectDetectionTask");
|
|
4134
4930
|
var detectionSchema = {
|
|
4135
4931
|
type: "object",
|
|
4136
4932
|
properties: {
|
|
@@ -4155,7 +4951,7 @@ var ObjectDetectionInputSchema = {
|
|
|
4155
4951
|
type: "object",
|
|
4156
4952
|
properties: {
|
|
4157
4953
|
image: TypeImageInput,
|
|
4158
|
-
model:
|
|
4954
|
+
model: modelSchema20,
|
|
4159
4955
|
labels: {
|
|
4160
4956
|
type: "array",
|
|
4161
4957
|
items: {
|
|
@@ -4207,13 +5003,13 @@ class ObjectDetectionTask extends AiVisionTask {
|
|
|
4207
5003
|
}
|
|
4208
5004
|
}
|
|
4209
5005
|
var objectDetection = (input, config) => {
|
|
4210
|
-
return new ObjectDetectionTask(
|
|
5006
|
+
return new ObjectDetectionTask(config).run(input);
|
|
4211
5007
|
};
|
|
4212
|
-
|
|
5008
|
+
Workflow28.prototype.objectDetection = CreateWorkflow28(ObjectDetectionTask);
|
|
4213
5009
|
|
|
4214
5010
|
// src/task/PoseLandmarkerTask.ts
|
|
4215
|
-
import { CreateWorkflow as
|
|
4216
|
-
var
|
|
5011
|
+
import { CreateWorkflow as CreateWorkflow29, Workflow as Workflow29 } from "@workglow/task-graph";
|
|
5012
|
+
var modelSchema21 = TypeModel("model:PoseLandmarkerTask");
|
|
4217
5013
|
var TypePoseLandmark = {
|
|
4218
5014
|
type: "object",
|
|
4219
5015
|
properties: {
|
|
@@ -4292,7 +5088,7 @@ var PoseLandmarkerInputSchema = {
|
|
|
4292
5088
|
type: "object",
|
|
4293
5089
|
properties: {
|
|
4294
5090
|
image: TypeImageInput,
|
|
4295
|
-
model:
|
|
5091
|
+
model: modelSchema21,
|
|
4296
5092
|
numPoses: {
|
|
4297
5093
|
type: "number",
|
|
4298
5094
|
minimum: 1,
|
|
@@ -4369,12 +5165,12 @@ class PoseLandmarkerTask extends AiVisionTask {
|
|
|
4369
5165
|
}
|
|
4370
5166
|
}
|
|
4371
5167
|
var poseLandmarker = (input, config) => {
|
|
4372
|
-
return new PoseLandmarkerTask(
|
|
5168
|
+
return new PoseLandmarkerTask(config).run(input);
|
|
4373
5169
|
};
|
|
4374
|
-
|
|
5170
|
+
Workflow29.prototype.poseLandmarker = CreateWorkflow29(PoseLandmarkerTask);
|
|
4375
5171
|
|
|
4376
5172
|
// src/task/QueryExpanderTask.ts
|
|
4377
|
-
import { CreateWorkflow as
|
|
5173
|
+
import { CreateWorkflow as CreateWorkflow30, Task as Task13, Workflow as Workflow30 } from "@workglow/task-graph";
|
|
4378
5174
|
var QueryExpansionMethod = {
|
|
4379
5175
|
MULTI_QUERY: "multi-query",
|
|
4380
5176
|
HYDE: "hyde",
|
|
@@ -4442,7 +5238,7 @@ var outputSchema10 = {
|
|
|
4442
5238
|
additionalProperties: false
|
|
4443
5239
|
};
|
|
4444
5240
|
|
|
4445
|
-
class QueryExpanderTask extends
|
|
5241
|
+
class QueryExpanderTask extends Task13 {
|
|
4446
5242
|
static type = "QueryExpanderTask";
|
|
4447
5243
|
static category = "RAG";
|
|
4448
5244
|
static title = "Query Expander";
|
|
@@ -4582,16 +5378,16 @@ class QueryExpanderTask extends Task12 {
|
|
|
4582
5378
|
}
|
|
4583
5379
|
}
|
|
4584
5380
|
var queryExpander = (input, config) => {
|
|
4585
|
-
return new QueryExpanderTask(
|
|
5381
|
+
return new QueryExpanderTask(config).run(input);
|
|
4586
5382
|
};
|
|
4587
|
-
|
|
5383
|
+
Workflow30.prototype.queryExpander = CreateWorkflow30(QueryExpanderTask);
|
|
4588
5384
|
|
|
4589
5385
|
// src/task/RerankerTask.ts
|
|
4590
|
-
import { CreateWorkflow as
|
|
5386
|
+
import { CreateWorkflow as CreateWorkflow32, Task as Task14, Workflow as Workflow32 } from "@workglow/task-graph";
|
|
4591
5387
|
|
|
4592
5388
|
// src/task/TextClassificationTask.ts
|
|
4593
|
-
import { CreateWorkflow as
|
|
4594
|
-
var
|
|
5389
|
+
import { CreateWorkflow as CreateWorkflow31, Workflow as Workflow31 } from "@workglow/task-graph";
|
|
5390
|
+
var modelSchema22 = TypeModel("model:TextClassificationTask");
|
|
4595
5391
|
var TextClassificationInputSchema = {
|
|
4596
5392
|
type: "object",
|
|
4597
5393
|
properties: {
|
|
@@ -4618,7 +5414,7 @@ var TextClassificationInputSchema = {
|
|
|
4618
5414
|
description: "The maximum number of categories to return",
|
|
4619
5415
|
"x-ui-group": "Configuration"
|
|
4620
5416
|
},
|
|
4621
|
-
model:
|
|
5417
|
+
model: modelSchema22
|
|
4622
5418
|
},
|
|
4623
5419
|
required: ["text", "model"],
|
|
4624
5420
|
additionalProperties: false
|
|
@@ -4666,9 +5462,9 @@ class TextClassificationTask extends AiTask {
|
|
|
4666
5462
|
}
|
|
4667
5463
|
}
|
|
4668
5464
|
var textClassification = (input, config) => {
|
|
4669
|
-
return new TextClassificationTask(
|
|
5465
|
+
return new TextClassificationTask(config).run(input);
|
|
4670
5466
|
};
|
|
4671
|
-
|
|
5467
|
+
Workflow31.prototype.textClassification = CreateWorkflow31(TextClassificationTask);
|
|
4672
5468
|
|
|
4673
5469
|
// src/task/RerankerTask.ts
|
|
4674
5470
|
var inputSchema11 = {
|
|
@@ -4763,7 +5559,7 @@ var outputSchema11 = {
|
|
|
4763
5559
|
additionalProperties: false
|
|
4764
5560
|
};
|
|
4765
5561
|
|
|
4766
|
-
class RerankerTask extends
|
|
5562
|
+
class RerankerTask extends Task14 {
|
|
4767
5563
|
static type = "RerankerTask";
|
|
4768
5564
|
static category = "RAG";
|
|
4769
5565
|
static title = "Reranker";
|
|
@@ -4814,8 +5610,8 @@ class RerankerTask extends Task13 {
|
|
|
4814
5610
|
}
|
|
4815
5611
|
const items = await Promise.all(chunks.map(async (chunk, index) => {
|
|
4816
5612
|
const pairText = `${query} [SEP] ${chunk}`;
|
|
4817
|
-
const task = context.own(new TextClassificationTask({
|
|
4818
|
-
const result = await task.run();
|
|
5613
|
+
const task = context.own(new TextClassificationTask({ defaults: { model } }));
|
|
5614
|
+
const result = await task.run({ text: pairText, maxCategories: 2 });
|
|
4819
5615
|
const crossScore = this.extractCrossEncoderScore(result.categories);
|
|
4820
5616
|
return {
|
|
4821
5617
|
chunk,
|
|
@@ -4890,13 +5686,13 @@ class RerankerTask extends Task13 {
|
|
|
4890
5686
|
}
|
|
4891
5687
|
}
|
|
4892
5688
|
var reranker = (input, config) => {
|
|
4893
|
-
return new RerankerTask(
|
|
5689
|
+
return new RerankerTask(config).run(input);
|
|
4894
5690
|
};
|
|
4895
|
-
|
|
5691
|
+
Workflow32.prototype.reranker = CreateWorkflow32(RerankerTask);
|
|
4896
5692
|
|
|
4897
5693
|
// src/task/StructuralParserTask.ts
|
|
4898
5694
|
import { StructuralParser } from "@workglow/knowledge-base";
|
|
4899
|
-
import { CreateWorkflow as
|
|
5695
|
+
import { CreateWorkflow as CreateWorkflow33, Task as Task15, Workflow as Workflow33 } from "@workglow/task-graph";
|
|
4900
5696
|
import { uuid4 as uuid42 } from "@workglow/util";
|
|
4901
5697
|
var inputSchema12 = {
|
|
4902
5698
|
type: "object",
|
|
@@ -4954,7 +5750,7 @@ var outputSchema12 = {
|
|
|
4954
5750
|
additionalProperties: false
|
|
4955
5751
|
};
|
|
4956
5752
|
|
|
4957
|
-
class StructuralParserTask extends
|
|
5753
|
+
class StructuralParserTask extends Task15 {
|
|
4958
5754
|
static type = "StructuralParserTask";
|
|
4959
5755
|
static category = "Document";
|
|
4960
5756
|
static title = "Structural Parser";
|
|
@@ -4995,17 +5791,17 @@ class StructuralParserTask extends Task14 {
|
|
|
4995
5791
|
}
|
|
4996
5792
|
}
|
|
4997
5793
|
var structuralParser = (input, config) => {
|
|
4998
|
-
return new StructuralParserTask(
|
|
5794
|
+
return new StructuralParserTask(config).run(input);
|
|
4999
5795
|
};
|
|
5000
|
-
|
|
5796
|
+
Workflow33.prototype.structuralParser = CreateWorkflow33(StructuralParserTask);
|
|
5001
5797
|
|
|
5002
5798
|
// src/task/StructuredGenerationTask.ts
|
|
5003
|
-
import { CreateWorkflow as
|
|
5004
|
-
var
|
|
5799
|
+
import { CreateWorkflow as CreateWorkflow34, Workflow as Workflow34 } from "@workglow/task-graph";
|
|
5800
|
+
var modelSchema23 = TypeModel("model:StructuredGenerationTask");
|
|
5005
5801
|
var StructuredGenerationInputSchema = {
|
|
5006
5802
|
type: "object",
|
|
5007
5803
|
properties: {
|
|
5008
|
-
model:
|
|
5804
|
+
model: modelSchema23,
|
|
5009
5805
|
prompt: {
|
|
5010
5806
|
type: "string",
|
|
5011
5807
|
title: "Prompt",
|
|
@@ -5066,12 +5862,12 @@ class StructuredGenerationTask extends StreamingAiTask {
|
|
|
5066
5862
|
}
|
|
5067
5863
|
}
|
|
5068
5864
|
var structuredGeneration = (input, config) => {
|
|
5069
|
-
return new StructuredGenerationTask(
|
|
5865
|
+
return new StructuredGenerationTask(config).run(input);
|
|
5070
5866
|
};
|
|
5071
|
-
|
|
5867
|
+
Workflow34.prototype.structuredGeneration = CreateWorkflow34(StructuredGenerationTask);
|
|
5072
5868
|
|
|
5073
5869
|
// src/task/TextChunkerTask.ts
|
|
5074
|
-
import { CreateWorkflow as
|
|
5870
|
+
import { CreateWorkflow as CreateWorkflow35, Task as Task16, Workflow as Workflow35 } from "@workglow/task-graph";
|
|
5075
5871
|
var ChunkingStrategy = {
|
|
5076
5872
|
FIXED: "fixed",
|
|
5077
5873
|
SENTENCE: "sentence",
|
|
@@ -5140,7 +5936,7 @@ var outputSchema13 = {
|
|
|
5140
5936
|
additionalProperties: false
|
|
5141
5937
|
};
|
|
5142
5938
|
|
|
5143
|
-
class TextChunkerTask extends
|
|
5939
|
+
class TextChunkerTask extends Task16 {
|
|
5144
5940
|
static type = "TextChunkerTask";
|
|
5145
5941
|
static category = "Document";
|
|
5146
5942
|
static title = "Text Chunker";
|
|
@@ -5318,13 +6114,13 @@ class TextChunkerTask extends Task15 {
|
|
|
5318
6114
|
}
|
|
5319
6115
|
}
|
|
5320
6116
|
var textChunker = (input, config) => {
|
|
5321
|
-
return new TextChunkerTask(
|
|
6117
|
+
return new TextChunkerTask(config).run(input);
|
|
5322
6118
|
};
|
|
5323
|
-
|
|
6119
|
+
Workflow35.prototype.textChunker = CreateWorkflow35(TextChunkerTask);
|
|
5324
6120
|
|
|
5325
6121
|
// src/task/TextFillMaskTask.ts
|
|
5326
|
-
import { CreateWorkflow as
|
|
5327
|
-
var
|
|
6122
|
+
import { CreateWorkflow as CreateWorkflow36, Workflow as Workflow36 } from "@workglow/task-graph";
|
|
6123
|
+
var modelSchema24 = TypeModel("model:TextFillMaskTask");
|
|
5328
6124
|
var TextFillMaskInputSchema = {
|
|
5329
6125
|
type: "object",
|
|
5330
6126
|
properties: {
|
|
@@ -5333,7 +6129,7 @@ var TextFillMaskInputSchema = {
|
|
|
5333
6129
|
title: "Text",
|
|
5334
6130
|
description: "The text with a mask token to fill"
|
|
5335
6131
|
},
|
|
5336
|
-
model:
|
|
6132
|
+
model: modelSchema24
|
|
5337
6133
|
},
|
|
5338
6134
|
required: ["text", "model"],
|
|
5339
6135
|
additionalProperties: false
|
|
@@ -5386,23 +6182,23 @@ class TextFillMaskTask extends AiTask {
|
|
|
5386
6182
|
}
|
|
5387
6183
|
}
|
|
5388
6184
|
var textFillMask = (input, config) => {
|
|
5389
|
-
return new TextFillMaskTask(
|
|
6185
|
+
return new TextFillMaskTask(config).run(input);
|
|
5390
6186
|
};
|
|
5391
|
-
|
|
6187
|
+
Workflow36.prototype.textFillMask = CreateWorkflow36(TextFillMaskTask);
|
|
5392
6188
|
|
|
5393
6189
|
// src/task/TextGenerationTask.ts
|
|
5394
|
-
import { CreateWorkflow as
|
|
6190
|
+
import { CreateWorkflow as CreateWorkflow37, Workflow as Workflow37 } from "@workglow/task-graph";
|
|
5395
6191
|
var generatedTextSchema2 = {
|
|
5396
6192
|
type: "string",
|
|
5397
6193
|
title: "Text",
|
|
5398
6194
|
description: "The generated text",
|
|
5399
6195
|
"x-stream": "append"
|
|
5400
6196
|
};
|
|
5401
|
-
var
|
|
6197
|
+
var modelSchema25 = TypeModel("model:TextGenerationTask");
|
|
5402
6198
|
var TextGenerationInputSchema = {
|
|
5403
6199
|
type: "object",
|
|
5404
6200
|
properties: {
|
|
5405
|
-
model:
|
|
6201
|
+
model: modelSchema25,
|
|
5406
6202
|
prompt: {
|
|
5407
6203
|
type: "string",
|
|
5408
6204
|
title: "Prompt",
|
|
@@ -5474,13 +6270,13 @@ class TextGenerationTask extends StreamingAiTask {
|
|
|
5474
6270
|
}
|
|
5475
6271
|
}
|
|
5476
6272
|
var textGeneration = (input, config) => {
|
|
5477
|
-
return new TextGenerationTask(
|
|
6273
|
+
return new TextGenerationTask(config).run(input);
|
|
5478
6274
|
};
|
|
5479
|
-
|
|
6275
|
+
Workflow37.prototype.textGeneration = CreateWorkflow37(TextGenerationTask);
|
|
5480
6276
|
|
|
5481
6277
|
// src/task/TextLanguageDetectionTask.ts
|
|
5482
|
-
import { CreateWorkflow as
|
|
5483
|
-
var
|
|
6278
|
+
import { CreateWorkflow as CreateWorkflow38, Workflow as Workflow38 } from "@workglow/task-graph";
|
|
6279
|
+
var modelSchema26 = TypeModel("model:TextLanguageDetectionTask");
|
|
5484
6280
|
var TextLanguageDetectionInputSchema = {
|
|
5485
6281
|
type: "object",
|
|
5486
6282
|
properties: {
|
|
@@ -5497,7 +6293,7 @@ var TextLanguageDetectionInputSchema = {
|
|
|
5497
6293
|
title: "Max Languages",
|
|
5498
6294
|
description: "The maximum number of languages to return"
|
|
5499
6295
|
},
|
|
5500
|
-
model:
|
|
6296
|
+
model: modelSchema26
|
|
5501
6297
|
},
|
|
5502
6298
|
required: ["text", "model"],
|
|
5503
6299
|
additionalProperties: false
|
|
@@ -5545,12 +6341,12 @@ class TextLanguageDetectionTask extends AiTask {
|
|
|
5545
6341
|
}
|
|
5546
6342
|
}
|
|
5547
6343
|
var textLanguageDetection = (input, config) => {
|
|
5548
|
-
return new TextLanguageDetectionTask(
|
|
6344
|
+
return new TextLanguageDetectionTask(config).run(input);
|
|
5549
6345
|
};
|
|
5550
|
-
|
|
6346
|
+
Workflow38.prototype.textLanguageDetection = CreateWorkflow38(TextLanguageDetectionTask);
|
|
5551
6347
|
|
|
5552
6348
|
// src/task/TextQuestionAnswerTask.ts
|
|
5553
|
-
import { CreateWorkflow as
|
|
6349
|
+
import { CreateWorkflow as CreateWorkflow39, Workflow as Workflow39 } from "@workglow/task-graph";
|
|
5554
6350
|
var contextSchema = {
|
|
5555
6351
|
type: "string",
|
|
5556
6352
|
title: "Context",
|
|
@@ -5567,13 +6363,13 @@ var textSchema = {
|
|
|
5567
6363
|
description: "The generated text",
|
|
5568
6364
|
"x-stream": "append"
|
|
5569
6365
|
};
|
|
5570
|
-
var
|
|
6366
|
+
var modelSchema27 = TypeModel("model:TextQuestionAnswerTask");
|
|
5571
6367
|
var TextQuestionAnswerInputSchema = {
|
|
5572
6368
|
type: "object",
|
|
5573
6369
|
properties: {
|
|
5574
6370
|
context: contextSchema,
|
|
5575
6371
|
question: questionSchema,
|
|
5576
|
-
model:
|
|
6372
|
+
model: modelSchema27
|
|
5577
6373
|
},
|
|
5578
6374
|
required: ["context", "question", "model"],
|
|
5579
6375
|
additionalProperties: false
|
|
@@ -5600,13 +6396,13 @@ class TextQuestionAnswerTask extends StreamingAiTask {
|
|
|
5600
6396
|
}
|
|
5601
6397
|
}
|
|
5602
6398
|
var textQuestionAnswer = (input, config) => {
|
|
5603
|
-
return new TextQuestionAnswerTask(
|
|
6399
|
+
return new TextQuestionAnswerTask(config).run(input);
|
|
5604
6400
|
};
|
|
5605
|
-
|
|
6401
|
+
Workflow39.prototype.textQuestionAnswer = CreateWorkflow39(TextQuestionAnswerTask);
|
|
5606
6402
|
|
|
5607
6403
|
// src/task/TextRewriterTask.ts
|
|
5608
|
-
import { CreateWorkflow as
|
|
5609
|
-
var
|
|
6404
|
+
import { CreateWorkflow as CreateWorkflow40, Workflow as Workflow40 } from "@workglow/task-graph";
|
|
6405
|
+
var modelSchema28 = TypeModel("model:TextRewriterTask");
|
|
5610
6406
|
var TextRewriterInputSchema = {
|
|
5611
6407
|
type: "object",
|
|
5612
6408
|
properties: {
|
|
@@ -5620,7 +6416,7 @@ var TextRewriterInputSchema = {
|
|
|
5620
6416
|
title: "Prompt",
|
|
5621
6417
|
description: "The prompt to direct the rewriting"
|
|
5622
6418
|
},
|
|
5623
|
-
model:
|
|
6419
|
+
model: modelSchema28
|
|
5624
6420
|
},
|
|
5625
6421
|
required: ["text", "prompt", "model"],
|
|
5626
6422
|
additionalProperties: false
|
|
@@ -5652,13 +6448,13 @@ class TextRewriterTask extends StreamingAiTask {
|
|
|
5652
6448
|
}
|
|
5653
6449
|
}
|
|
5654
6450
|
var textRewriter = (input, config) => {
|
|
5655
|
-
return new TextRewriterTask(
|
|
6451
|
+
return new TextRewriterTask(config).run(input);
|
|
5656
6452
|
};
|
|
5657
|
-
|
|
6453
|
+
Workflow40.prototype.textRewriter = CreateWorkflow40(TextRewriterTask);
|
|
5658
6454
|
|
|
5659
6455
|
// src/task/TextTranslationTask.ts
|
|
5660
|
-
import { CreateWorkflow as
|
|
5661
|
-
var
|
|
6456
|
+
import { CreateWorkflow as CreateWorkflow41, Workflow as Workflow41 } from "@workglow/task-graph";
|
|
6457
|
+
var modelSchema29 = TypeModel("model:TextTranslationTask");
|
|
5662
6458
|
var translationTextSchema = {
|
|
5663
6459
|
type: "string",
|
|
5664
6460
|
title: "Text",
|
|
@@ -5685,7 +6481,7 @@ var TextTranslationInputSchema = {
|
|
|
5685
6481
|
minLength: 2,
|
|
5686
6482
|
maxLength: 2
|
|
5687
6483
|
}),
|
|
5688
|
-
model:
|
|
6484
|
+
model: modelSchema29
|
|
5689
6485
|
},
|
|
5690
6486
|
required: ["text", "source_lang", "target_lang", "model"],
|
|
5691
6487
|
additionalProperties: false
|
|
@@ -5718,12 +6514,12 @@ class TextTranslationTask extends StreamingAiTask {
|
|
|
5718
6514
|
}
|
|
5719
6515
|
}
|
|
5720
6516
|
var textTranslation = (input, config) => {
|
|
5721
|
-
return new TextTranslationTask(
|
|
6517
|
+
return new TextTranslationTask(config).run(input);
|
|
5722
6518
|
};
|
|
5723
|
-
|
|
6519
|
+
Workflow41.prototype.textTranslation = CreateWorkflow41(TextTranslationTask);
|
|
5724
6520
|
|
|
5725
6521
|
// src/task/TopicSegmenterTask.ts
|
|
5726
|
-
import { CreateWorkflow as
|
|
6522
|
+
import { CreateWorkflow as CreateWorkflow42, Task as Task17, Workflow as Workflow42 } from "@workglow/task-graph";
|
|
5727
6523
|
var SegmentationMethod = {
|
|
5728
6524
|
HEURISTIC: "heuristic",
|
|
5729
6525
|
EMBEDDING_SIMILARITY: "embedding-similarity",
|
|
@@ -5798,7 +6594,7 @@ var outputSchema14 = {
|
|
|
5798
6594
|
additionalProperties: false
|
|
5799
6595
|
};
|
|
5800
6596
|
|
|
5801
|
-
class TopicSegmenterTask extends
|
|
6597
|
+
class TopicSegmenterTask extends Task17 {
|
|
5802
6598
|
static type = "TopicSegmenterTask";
|
|
5803
6599
|
static category = "Document";
|
|
5804
6600
|
static title = "Topic Segmenter";
|
|
@@ -6001,17 +6797,17 @@ class TopicSegmenterTask extends Task16 {
|
|
|
6001
6797
|
}
|
|
6002
6798
|
}
|
|
6003
6799
|
var topicSegmenter = (input, config) => {
|
|
6004
|
-
return new TopicSegmenterTask(
|
|
6800
|
+
return new TopicSegmenterTask(config).run(input);
|
|
6005
6801
|
};
|
|
6006
|
-
|
|
6802
|
+
Workflow42.prototype.topicSegmenter = CreateWorkflow42(TopicSegmenterTask);
|
|
6007
6803
|
|
|
6008
6804
|
// src/task/UnloadModelTask.ts
|
|
6009
|
-
import { CreateWorkflow as
|
|
6010
|
-
var
|
|
6805
|
+
import { CreateWorkflow as CreateWorkflow43, Workflow as Workflow43 } from "@workglow/task-graph";
|
|
6806
|
+
var modelSchema30 = TypeModel("model");
|
|
6011
6807
|
var UnloadModelInputSchema = {
|
|
6012
6808
|
type: "object",
|
|
6013
6809
|
properties: {
|
|
6014
|
-
model:
|
|
6810
|
+
model: modelSchema30
|
|
6015
6811
|
},
|
|
6016
6812
|
required: ["model"],
|
|
6017
6813
|
additionalProperties: false
|
|
@@ -6019,7 +6815,7 @@ var UnloadModelInputSchema = {
|
|
|
6019
6815
|
var UnloadModelOutputSchema = {
|
|
6020
6816
|
type: "object",
|
|
6021
6817
|
properties: {
|
|
6022
|
-
model:
|
|
6818
|
+
model: modelSchema30
|
|
6023
6819
|
},
|
|
6024
6820
|
required: ["model"],
|
|
6025
6821
|
additionalProperties: false
|
|
@@ -6039,12 +6835,12 @@ class UnloadModelTask extends AiTask {
|
|
|
6039
6835
|
static cacheable = false;
|
|
6040
6836
|
}
|
|
6041
6837
|
var unloadModel = (input, config) => {
|
|
6042
|
-
return new UnloadModelTask(
|
|
6838
|
+
return new UnloadModelTask(config).run(input);
|
|
6043
6839
|
};
|
|
6044
|
-
|
|
6840
|
+
Workflow43.prototype.unloadModel = CreateWorkflow43(UnloadModelTask);
|
|
6045
6841
|
|
|
6046
6842
|
// src/task/VectorQuantizeTask.ts
|
|
6047
|
-
import { CreateWorkflow as
|
|
6843
|
+
import { CreateWorkflow as CreateWorkflow44, Task as Task18, Workflow as Workflow44 } from "@workglow/task-graph";
|
|
6048
6844
|
import {
|
|
6049
6845
|
normalizeNumberArray,
|
|
6050
6846
|
TensorType,
|
|
@@ -6124,7 +6920,7 @@ var outputSchema15 = {
|
|
|
6124
6920
|
additionalProperties: false
|
|
6125
6921
|
};
|
|
6126
6922
|
|
|
6127
|
-
class VectorQuantizeTask extends
|
|
6923
|
+
class VectorQuantizeTask extends Task18 {
|
|
6128
6924
|
static type = "VectorQuantizeTask";
|
|
6129
6925
|
static category = "Vector";
|
|
6130
6926
|
static title = "Quantize";
|
|
@@ -6222,12 +7018,12 @@ class VectorQuantizeTask extends Task17 {
|
|
|
6222
7018
|
}
|
|
6223
7019
|
}
|
|
6224
7020
|
var vectorQuantize = (input, config) => {
|
|
6225
|
-
return new VectorQuantizeTask(
|
|
7021
|
+
return new VectorQuantizeTask(config).run(input);
|
|
6226
7022
|
};
|
|
6227
|
-
|
|
7023
|
+
Workflow44.prototype.vectorQuantize = CreateWorkflow44(VectorQuantizeTask);
|
|
6228
7024
|
|
|
6229
7025
|
// src/task/VectorSimilarityTask.ts
|
|
6230
|
-
import { CreateWorkflow as
|
|
7026
|
+
import { CreateWorkflow as CreateWorkflow45, GraphAsTask, Workflow as Workflow45 } from "@workglow/task-graph";
|
|
6231
7027
|
import {
|
|
6232
7028
|
cosineSimilarity,
|
|
6233
7029
|
hammingSimilarity,
|
|
@@ -6331,13 +7127,210 @@ class VectorSimilarityTask extends GraphAsTask {
|
|
|
6331
7127
|
}
|
|
6332
7128
|
}
|
|
6333
7129
|
var similarity = (input, config) => {
|
|
6334
|
-
return new VectorSimilarityTask(
|
|
6335
|
-
};
|
|
6336
|
-
|
|
7130
|
+
return new VectorSimilarityTask(config).run(input);
|
|
7131
|
+
};
|
|
7132
|
+
Workflow45.prototype.similarity = CreateWorkflow45(VectorSimilarityTask);
|
|
7133
|
+
// src/task/MessageConversion.ts
|
|
7134
|
+
function getInputMessages(input) {
|
|
7135
|
+
const messages = input.messages;
|
|
7136
|
+
if (!messages || messages.length === 0)
|
|
7137
|
+
return;
|
|
7138
|
+
return messages;
|
|
7139
|
+
}
|
|
7140
|
+
function toOpenAIMessages(input) {
|
|
7141
|
+
const messages = [];
|
|
7142
|
+
if (input.systemPrompt) {
|
|
7143
|
+
messages.push({ role: "system", content: input.systemPrompt });
|
|
7144
|
+
}
|
|
7145
|
+
const inputMessages = getInputMessages(input);
|
|
7146
|
+
if (!inputMessages) {
|
|
7147
|
+
if (!Array.isArray(input.prompt)) {
|
|
7148
|
+
messages.push({ role: "user", content: input.prompt });
|
|
7149
|
+
} else if (input.prompt.every((item) => typeof item === "string")) {
|
|
7150
|
+
messages.push({ role: "user", content: input.prompt.join(`
|
|
7151
|
+
`) });
|
|
7152
|
+
} else {
|
|
7153
|
+
const parts = [];
|
|
7154
|
+
for (const item of input.prompt) {
|
|
7155
|
+
if (typeof item === "string") {
|
|
7156
|
+
parts.push({ type: "text", text: item });
|
|
7157
|
+
} else {
|
|
7158
|
+
const b = item;
|
|
7159
|
+
if (b.type === "text") {
|
|
7160
|
+
parts.push({ type: "text", text: b.text });
|
|
7161
|
+
} else if (b.type === "image") {
|
|
7162
|
+
parts.push({
|
|
7163
|
+
type: "image_url",
|
|
7164
|
+
image_url: { url: `data:${b.mimeType};base64,${b.data}` }
|
|
7165
|
+
});
|
|
7166
|
+
} else if (b.type === "audio") {
|
|
7167
|
+
const format = b.mimeType.replace(/^audio\//, "");
|
|
7168
|
+
parts.push({
|
|
7169
|
+
type: "input_audio",
|
|
7170
|
+
input_audio: { data: b.data, format }
|
|
7171
|
+
});
|
|
7172
|
+
}
|
|
7173
|
+
}
|
|
7174
|
+
}
|
|
7175
|
+
messages.push({ role: "user", content: parts });
|
|
7176
|
+
}
|
|
7177
|
+
return messages;
|
|
7178
|
+
}
|
|
7179
|
+
for (const msg of inputMessages) {
|
|
7180
|
+
if (msg.role === "user") {
|
|
7181
|
+
if (typeof msg.content === "string") {
|
|
7182
|
+
messages.push({ role: "user", content: msg.content });
|
|
7183
|
+
} else if (Array.isArray(msg.content) && msg.content.length > 0 && typeof msg.content[0]?.type === "string") {
|
|
7184
|
+
const parts = [];
|
|
7185
|
+
for (const block of msg.content) {
|
|
7186
|
+
const b = block;
|
|
7187
|
+
if (b.type === "text") {
|
|
7188
|
+
parts.push({ type: "text", text: b.text });
|
|
7189
|
+
} else if (b.type === "image") {
|
|
7190
|
+
parts.push({
|
|
7191
|
+
type: "image_url",
|
|
7192
|
+
image_url: { url: `data:${b.mimeType};base64,${b.data}` }
|
|
7193
|
+
});
|
|
7194
|
+
} else if (b.type === "audio") {
|
|
7195
|
+
const format = b.mimeType.replace(/^audio\//, "");
|
|
7196
|
+
parts.push({
|
|
7197
|
+
type: "input_audio",
|
|
7198
|
+
input_audio: { data: b.data, format }
|
|
7199
|
+
});
|
|
7200
|
+
}
|
|
7201
|
+
}
|
|
7202
|
+
messages.push({ role: "user", content: parts });
|
|
7203
|
+
} else {
|
|
7204
|
+
try {
|
|
7205
|
+
messages.push({ role: "user", content: JSON.stringify(msg.content) });
|
|
7206
|
+
} catch {
|
|
7207
|
+
messages.push({ role: "user", content: String(msg.content) });
|
|
7208
|
+
}
|
|
7209
|
+
}
|
|
7210
|
+
} else if (msg.role === "assistant") {
|
|
7211
|
+
if (typeof msg.content === "string") {
|
|
7212
|
+
messages.push({ role: "assistant", content: msg.content.length > 0 ? msg.content : null });
|
|
7213
|
+
} else if (Array.isArray(msg.content)) {
|
|
7214
|
+
const textParts = msg.content.filter((b) => b.type === "text").map((b) => b.text).join("");
|
|
7215
|
+
const toolCalls = msg.content.filter((b) => b.type === "tool_use").map((b) => ({
|
|
7216
|
+
id: b.id,
|
|
7217
|
+
type: "function",
|
|
7218
|
+
function: {
|
|
7219
|
+
name: b.name,
|
|
7220
|
+
arguments: JSON.stringify(b.input)
|
|
7221
|
+
}
|
|
7222
|
+
}));
|
|
7223
|
+
const entry = {
|
|
7224
|
+
role: "assistant",
|
|
7225
|
+
content: textParts.length > 0 ? textParts : null
|
|
7226
|
+
};
|
|
7227
|
+
if (toolCalls.length > 0) {
|
|
7228
|
+
entry.tool_calls = toolCalls;
|
|
7229
|
+
}
|
|
7230
|
+
messages.push(entry);
|
|
7231
|
+
}
|
|
7232
|
+
} else if (msg.role === "tool" && Array.isArray(msg.content)) {
|
|
7233
|
+
for (const block of msg.content) {
|
|
7234
|
+
const b = block;
|
|
7235
|
+
let content;
|
|
7236
|
+
if (typeof b.content === "string") {
|
|
7237
|
+
content = b.content;
|
|
7238
|
+
} else if (Array.isArray(b.content)) {
|
|
7239
|
+
const parts = [];
|
|
7240
|
+
for (const inner of b.content) {
|
|
7241
|
+
if (inner.type === "text") {
|
|
7242
|
+
parts.push({ type: "text", text: inner.text });
|
|
7243
|
+
} else if (inner.type === "image") {
|
|
7244
|
+
parts.push({
|
|
7245
|
+
type: "image_url",
|
|
7246
|
+
image_url: { url: `data:${inner.mimeType};base64,${inner.data}` }
|
|
7247
|
+
});
|
|
7248
|
+
}
|
|
7249
|
+
}
|
|
7250
|
+
content = parts;
|
|
7251
|
+
} else {
|
|
7252
|
+
content = "";
|
|
7253
|
+
}
|
|
7254
|
+
messages.push({
|
|
7255
|
+
role: "tool",
|
|
7256
|
+
content,
|
|
7257
|
+
tool_call_id: b.tool_use_id
|
|
7258
|
+
});
|
|
7259
|
+
}
|
|
7260
|
+
}
|
|
7261
|
+
}
|
|
7262
|
+
return messages;
|
|
7263
|
+
}
|
|
7264
|
+
function toTextFlatMessages(input) {
|
|
7265
|
+
const messages = [];
|
|
7266
|
+
if (input.systemPrompt) {
|
|
7267
|
+
messages.push({ role: "system", content: input.systemPrompt });
|
|
7268
|
+
}
|
|
7269
|
+
const inputMessages = getInputMessages(input);
|
|
7270
|
+
if (!inputMessages) {
|
|
7271
|
+
let promptContent;
|
|
7272
|
+
if (!Array.isArray(input.prompt)) {
|
|
7273
|
+
promptContent = input.prompt;
|
|
7274
|
+
} else {
|
|
7275
|
+
promptContent = input.prompt.map((item) => {
|
|
7276
|
+
if (typeof item === "string")
|
|
7277
|
+
return item;
|
|
7278
|
+
const b = item;
|
|
7279
|
+
return b.type === "text" ? b.text : "";
|
|
7280
|
+
}).filter((s) => s !== "").join(`
|
|
7281
|
+
`);
|
|
7282
|
+
}
|
|
7283
|
+
messages.push({ role: "user", content: promptContent });
|
|
7284
|
+
return messages;
|
|
7285
|
+
}
|
|
7286
|
+
for (const msg of inputMessages) {
|
|
7287
|
+
if (msg.role === "user") {
|
|
7288
|
+
let content = "";
|
|
7289
|
+
if (typeof msg.content === "string") {
|
|
7290
|
+
content = msg.content;
|
|
7291
|
+
} else if (Array.isArray(msg.content) && msg.content.length > 0 && typeof msg.content[0]?.type === "string") {
|
|
7292
|
+
content = msg.content.filter((b) => b.type === "text").map((b) => b.text).join("");
|
|
7293
|
+
} else if (msg.content != null) {
|
|
7294
|
+
try {
|
|
7295
|
+
content = JSON.stringify(msg.content);
|
|
7296
|
+
} catch {
|
|
7297
|
+
content = String(msg.content);
|
|
7298
|
+
}
|
|
7299
|
+
}
|
|
7300
|
+
messages.push({ role: "user", content });
|
|
7301
|
+
} else if (msg.role === "assistant") {
|
|
7302
|
+
if (typeof msg.content === "string") {
|
|
7303
|
+
if (msg.content) {
|
|
7304
|
+
messages.push({ role: "assistant", content: msg.content });
|
|
7305
|
+
}
|
|
7306
|
+
} else if (Array.isArray(msg.content)) {
|
|
7307
|
+
const text = msg.content.filter((b) => b.type === "text").map((b) => b.text).join("");
|
|
7308
|
+
if (text) {
|
|
7309
|
+
messages.push({ role: "assistant", content: text });
|
|
7310
|
+
}
|
|
7311
|
+
}
|
|
7312
|
+
} else if (msg.role === "tool" && Array.isArray(msg.content)) {
|
|
7313
|
+
for (const block of msg.content) {
|
|
7314
|
+
const b = block;
|
|
7315
|
+
let content;
|
|
7316
|
+
if (typeof b.content === "string") {
|
|
7317
|
+
content = b.content;
|
|
7318
|
+
} else if (Array.isArray(b.content)) {
|
|
7319
|
+
content = b.content.filter((inner) => inner.type === "text").map((inner) => inner.text).join("");
|
|
7320
|
+
} else {
|
|
7321
|
+
content = "";
|
|
7322
|
+
}
|
|
7323
|
+
messages.push({ role: "tool", content });
|
|
7324
|
+
}
|
|
7325
|
+
}
|
|
7326
|
+
}
|
|
7327
|
+
return messages;
|
|
7328
|
+
}
|
|
6337
7329
|
|
|
6338
7330
|
// src/task/index.ts
|
|
6339
7331
|
var registerAiTasks = () => {
|
|
6340
7332
|
const tasks = [
|
|
7333
|
+
AgentTask,
|
|
6341
7334
|
BackgroundRemovalTask,
|
|
6342
7335
|
ChunkToVectorTask,
|
|
6343
7336
|
CountTokensTask,
|
|
@@ -6377,6 +7370,7 @@ var registerAiTasks = () => {
|
|
|
6377
7370
|
TextRewriterTask,
|
|
6378
7371
|
TextSummaryTask,
|
|
6379
7372
|
TextTranslationTask,
|
|
7373
|
+
ToolCallingTask,
|
|
6380
7374
|
TopicSegmenterTask,
|
|
6381
7375
|
UnloadModelTask,
|
|
6382
7376
|
VectorQuantizeTask,
|
|
@@ -6388,8 +7382,14 @@ var registerAiTasks = () => {
|
|
|
6388
7382
|
export {
|
|
6389
7383
|
vectorStoreSearch,
|
|
6390
7384
|
vectorQuantize,
|
|
7385
|
+
userMessage,
|
|
6391
7386
|
unloadModel,
|
|
6392
7387
|
topicSegmenter,
|
|
7388
|
+
toolSourceDefinitions,
|
|
7389
|
+
toolMessage,
|
|
7390
|
+
toolCalling,
|
|
7391
|
+
toTextFlatMessages,
|
|
7392
|
+
toOpenAIMessages,
|
|
6393
7393
|
textTranslation,
|
|
6394
7394
|
textSummary,
|
|
6395
7395
|
textRewriter,
|
|
@@ -6401,6 +7401,7 @@ export {
|
|
|
6401
7401
|
textEmbedding,
|
|
6402
7402
|
textClassification,
|
|
6403
7403
|
textChunker,
|
|
7404
|
+
taskTypesToTools,
|
|
6404
7405
|
structuredGeneration,
|
|
6405
7406
|
structuralParser,
|
|
6406
7407
|
similarity,
|
|
@@ -6414,27 +7415,42 @@ export {
|
|
|
6414
7415
|
objectDetection,
|
|
6415
7416
|
modelSearch,
|
|
6416
7417
|
modelInfo,
|
|
7418
|
+
isAllowedToolName,
|
|
6417
7419
|
imageToText,
|
|
6418
7420
|
imageSegmentation,
|
|
6419
7421
|
imageEmbedding,
|
|
6420
7422
|
imageClassification,
|
|
7423
|
+
imageBlockFromDataUri,
|
|
7424
|
+
imageBlock,
|
|
6421
7425
|
hybridSearch,
|
|
6422
7426
|
hierarchyJoin,
|
|
6423
7427
|
hierarchicalChunker,
|
|
7428
|
+
hasToolCalls,
|
|
6424
7429
|
handLandmarker,
|
|
6425
7430
|
getGlobalModelRepository,
|
|
6426
7431
|
getAiProviderRegistry,
|
|
6427
7432
|
gestureRecognizer,
|
|
7433
|
+
findToolSource,
|
|
7434
|
+
filterValidToolCalls,
|
|
6428
7435
|
faceLandmarker,
|
|
6429
7436
|
faceDetector,
|
|
7437
|
+
executeToolCalls,
|
|
7438
|
+
executeToolCall,
|
|
6430
7439
|
downloadModel,
|
|
6431
7440
|
documentEnricher,
|
|
6432
7441
|
countTokens,
|
|
7442
|
+
countAssistantToolUses,
|
|
6433
7443
|
contextBuilder,
|
|
6434
7444
|
chunkVectorUpsert,
|
|
6435
7445
|
chunkToVector,
|
|
6436
7446
|
chunkRetrieval,
|
|
7447
|
+
buildToolSources,
|
|
7448
|
+
buildToolDescription,
|
|
6437
7449
|
backgroundRemoval,
|
|
7450
|
+
audioBlockFromDataUri,
|
|
7451
|
+
audioBlock,
|
|
7452
|
+
assistantMessage,
|
|
7453
|
+
agent,
|
|
6438
7454
|
VectorSimilarityTask,
|
|
6439
7455
|
VectorQuantizeTask,
|
|
6440
7456
|
UnloadModelTask,
|
|
@@ -6448,6 +7464,10 @@ export {
|
|
|
6448
7464
|
TypeBoundingBox,
|
|
6449
7465
|
TypeAudioInput,
|
|
6450
7466
|
TopicSegmenterTask,
|
|
7467
|
+
ToolDefinitionSchema,
|
|
7468
|
+
ToolCallingTask,
|
|
7469
|
+
ToolCallingOutputSchema,
|
|
7470
|
+
ToolCallingInputSchema,
|
|
6451
7471
|
TextTranslationTask,
|
|
6452
7472
|
TextTranslationOutputSchema,
|
|
6453
7473
|
TextTranslationInputSchema,
|
|
@@ -6551,7 +7571,10 @@ export {
|
|
|
6551
7571
|
AiTask,
|
|
6552
7572
|
AiProviderRegistry,
|
|
6553
7573
|
AiProvider,
|
|
6554
|
-
AiJob
|
|
7574
|
+
AiJob,
|
|
7575
|
+
AgentTask,
|
|
7576
|
+
AgentOutputSchema,
|
|
7577
|
+
AgentInputSchema
|
|
6555
7578
|
};
|
|
6556
7579
|
|
|
6557
|
-
//# debugId=
|
|
7580
|
+
//# debugId=B28911D966FA5D9064756E2164756E21
|