@promptbook/wizard 0.100.0-4 → 0.100.0-41
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -0
- package/esm/index.es.js +424 -87
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/components.index.d.ts +14 -0
- package/esm/typings/src/_packages/core.index.d.ts +26 -0
- package/esm/typings/src/_packages/types.index.d.ts +34 -0
- package/esm/typings/src/book-2.0/agent-source/parseAgentSource.d.ts +30 -0
- package/esm/typings/src/book-2.0/agent-source/parseAgentSource.test.d.ts +1 -0
- package/esm/typings/src/book-2.0/agent-source/string_book.d.ts +26 -0
- package/esm/typings/src/book-2.0/commitments/ACTION/ACTION.d.ts +38 -0
- package/esm/typings/src/book-2.0/commitments/FORMAT/FORMAT.d.ts +39 -0
- package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/FrontendRAGService.d.ts +48 -0
- package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/KNOWLEDGE.d.ts +51 -0
- package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/RAGService.d.ts +54 -0
- package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/processors/BaseKnowledgeProcessor.d.ts +45 -0
- package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/processors/PdfProcessor.d.ts +31 -0
- package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/processors/ProcessorFactory.d.ts +23 -0
- package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/processors/TextProcessor.d.ts +18 -0
- package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/types.d.ts +56 -0
- package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/utils/ragHelper.d.ts +34 -0
- package/esm/typings/src/book-2.0/commitments/META_IMAGE/META_IMAGE.d.ts +44 -0
- package/esm/typings/src/book-2.0/commitments/META_LINK/META_LINK.d.ts +56 -0
- package/esm/typings/src/book-2.0/commitments/MODEL/MODEL.d.ts +39 -0
- package/esm/typings/src/book-2.0/commitments/NOTE/NOTE.d.ts +49 -0
- package/esm/typings/src/book-2.0/commitments/PERSONA/PERSONA.d.ts +46 -0
- package/esm/typings/src/book-2.0/commitments/RULE/RULE.d.ts +44 -0
- package/esm/typings/src/book-2.0/commitments/SAMPLE/SAMPLE.d.ts +44 -0
- package/esm/typings/src/book-2.0/commitments/STYLE/STYLE.d.ts +38 -0
- package/esm/typings/src/book-2.0/commitments/_base/BaseCommitmentDefinition.d.ts +52 -0
- package/esm/typings/src/book-2.0/commitments/_base/BookCommitment.d.ts +5 -0
- package/esm/typings/src/book-2.0/commitments/_base/CommitmentDefinition.d.ts +48 -0
- package/esm/typings/src/book-2.0/commitments/_base/NotYetImplementedCommitmentDefinition.d.ts +22 -0
- package/esm/typings/src/book-2.0/commitments/_base/createEmptyAgentModelRequirements.d.ts +19 -0
- package/esm/typings/src/book-2.0/commitments/_misc/AgentModelRequirements.d.ts +37 -0
- package/esm/typings/src/book-2.0/commitments/_misc/AgentSourceParseResult.d.ts +18 -0
- package/esm/typings/src/book-2.0/commitments/_misc/ParsedCommitment.d.ts +22 -0
- package/esm/typings/src/book-2.0/commitments/_misc/createAgentModelRequirements.d.ts +61 -0
- package/esm/typings/src/book-2.0/commitments/_misc/createAgentModelRequirementsWithCommitments.d.ts +35 -0
- package/esm/typings/src/book-2.0/commitments/_misc/createCommitmentRegex.d.ts +20 -0
- package/esm/typings/src/book-2.0/commitments/_misc/parseAgentSourceWithCommitments.d.ts +24 -0
- package/esm/typings/src/book-2.0/commitments/_misc/removeCommentsFromSystemMessage.d.ts +11 -0
- package/esm/typings/src/book-2.0/commitments/index.d.ts +56 -0
- package/esm/typings/src/book-2.0/utils/profileImageUtils.d.ts +39 -0
- package/esm/typings/src/book-components/AvatarProfile/AvatarChip/AvatarChip.d.ts +35 -0
- package/esm/typings/src/book-components/AvatarProfile/AvatarChip/AvatarChipFromSource.d.ts +21 -0
- package/esm/typings/src/book-components/AvatarProfile/AvatarChip/index.d.ts +2 -0
- package/esm/typings/src/book-components/BookEditor/BookEditor.d.ts +35 -0
- package/esm/typings/src/book-components/BookEditor/config.d.ts +10 -0
- package/esm/typings/src/book-components/BookEditor/injectCssModuleIntoShadowRoot.d.ts +11 -0
- package/esm/typings/src/book-components/_common/react-utils/classNames.d.ts +7 -0
- package/esm/typings/src/book-components/_common/react-utils/collectCssTextsForClass.d.ts +7 -0
- package/esm/typings/src/book-components/_common/react-utils/escapeHtml.d.ts +6 -0
- package/esm/typings/src/book-components/_common/react-utils/escapeRegex.d.ts +6 -0
- package/esm/typings/src/config.d.ts +6 -0
- package/esm/typings/src/execution/AvailableModel.d.ts +4 -0
- package/esm/typings/src/execution/ExecutionTask.d.ts +27 -0
- package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +6 -1
- package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +0 -5
- package/esm/typings/src/llm-providers/anthropic-claude/anthropic-claude-models.d.ts +1 -1
- package/esm/typings/src/llm-providers/deepseek/deepseek-models.d.ts +1 -1
- package/esm/typings/src/llm-providers/google/google-models.d.ts +1 -1
- package/esm/typings/src/llm-providers/ollama/ollama-models.d.ts +1 -1
- package/esm/typings/src/llm-providers/openai/openai-models.d.ts +1 -1
- package/esm/typings/src/pipeline/book-notation.d.ts +2 -1
- package/esm/typings/src/types/ModelRequirements.d.ts +0 -2
- package/esm/typings/src/types/typeAliases.d.ts +6 -0
- package/esm/typings/src/version.d.ts +1 -1
- package/package.json +2 -2
- package/umd/index.umd.js +424 -87
- package/umd/index.umd.js.map +1 -1
package/esm/index.es.js
CHANGED
@@ -38,7 +38,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.100.0-4';
+const PROMPTBOOK_ENGINE_VERSION = '0.100.0-41';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -312,6 +312,12 @@ let DEFAULT_IS_VERBOSE = false;
  * @public exported from `@promptbook/core`
  */
 const DEFAULT_IS_AUTO_INSTALLED = false;
+/**
+ * Default simulated duration for a task in milliseconds (used for progress reporting)
+ *
+ * @public exported from `@promptbook/core`
+ */
+const DEFAULT_TASK_SIMULATED_DURATION_MS = 5 * 60 * 1000; // 5 minutes
 /**
  * Default rate limits (requests per minute)
  *
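The new `DEFAULT_TASK_SIMULATED_DURATION_MS` constant feeds the simulated-progress logic added further down in this diff (see the `tldr` getter in `createTask`). A minimal standalone sketch of the time-based fallback it enables, assuming only what the diff shows:

```ts
// Sketch of the time-based progress fallback introduced in this release.
// Standalone reimplementation for illustration; not the package's public API.
const DEFAULT_TASK_SIMULATED_DURATION_MS = 5 * 60 * 1000; // 5 minutes

function simulatedPercent(createdAt: Date, now: Date = new Date()): number {
    const elapsedMs = now.getTime() - createdAt.getTime();
    // Progress ramps linearly and saturates at 100% after the simulated duration
    return Math.min(elapsedMs / DEFAULT_TASK_SIMULATED_DURATION_MS, 1);
}
```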
@@ -1702,7 +1708,7 @@ function pricing(value) {
 /**
  * List of available Anthropic Claude models with pricing
  *
- * Note:
+ * Note: Synced with official API docs at 2025-08-20
  *
  * @see https://docs.anthropic.com/en/docs/models-overview
  * @public exported from `@promptbook/anthropic-claude`
@@ -1712,12 +1718,52 @@ const ANTHROPIC_CLAUDE_MODELS = exportJson({
     value: [
         {
             modelVariant: 'CHAT',
-            modelTitle: 'Claude
-            modelName: 'claude-
-            modelDescription: '
+            modelTitle: 'Claude Opus 4.1',
+            modelName: 'claude-opus-4-1-20250805',
+            modelDescription: 'Most powerful and capable Claude model with 200K token context window. Features superior reasoning capabilities, exceptional coding abilities, and advanced multimodal understanding. Sets new standards in complex reasoning and analytical tasks with enhanced safety measures. Ideal for the most demanding enterprise applications requiring maximum intelligence.',
             pricing: {
-                prompt: pricing(`$
-                output: pricing(`$
+                prompt: pricing(`$15.00 / 1M tokens`),
+                output: pricing(`$75.00 / 1M tokens`),
+            },
+        },
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'Claude Opus 4',
+            modelName: 'claude-opus-4-20250514',
+            modelDescription: 'Previous flagship Claude model with 200K token context window. Features very high intelligence and capability with exceptional performance across reasoning, coding, and creative tasks. Maintains strong safety guardrails while delivering sophisticated outputs for complex professional applications.',
+            pricing: {
+                prompt: pricing(`$15.00 / 1M tokens`),
+                output: pricing(`$75.00 / 1M tokens`),
+            },
+        },
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'Claude Sonnet 4',
+            modelName: 'claude-sonnet-4-20250514',
+            modelDescription: 'High-performance Claude model with exceptional reasoning capabilities and 200K token context window (1M context beta available). Features balanced intelligence and efficiency with enhanced multimodal understanding. Offers optimal performance for most enterprise applications requiring sophisticated AI capabilities.',
+            pricing: {
+                prompt: pricing(`$3.00 / 1M tokens`),
+                output: pricing(`$15.00 / 1M tokens`),
+            },
+        },
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'Claude Sonnet 3.7',
+            modelName: 'claude-3-7-sonnet-20250219',
+            modelDescription: 'High-performance Claude model with early extended thinking capabilities and 200K token context window. Features enhanced reasoning chains, improved factual accuracy, and toggleable extended thinking for complex problem-solving. Ideal for applications requiring deep analytical capabilities.',
+            pricing: {
+                prompt: pricing(`$3.00 / 1M tokens`),
+                output: pricing(`$15.00 / 1M tokens`),
+            },
+        },
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'Claude Haiku 3.5',
+            modelName: 'claude-3-5-haiku-20241022',
+            modelDescription: 'Fastest Claude model with 200K token context window optimized for intelligence at blazing speeds. Features enhanced reasoning and contextual understanding while maintaining sub-second response times. Perfect for real-time applications, customer-facing deployments, and high-throughput services.',
+            pricing: {
+                prompt: pricing(`$0.80 / 1M tokens`),
+                output: pricing(`$4.00 / 1M tokens`),
             },
         },
         {
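All pricing entries use the `$X / 1M tokens` notation consumed by the `pricing()` helper. For orientation, a small self-contained sketch of the implied cost arithmetic (the parser below is a hypothetical stand-in, not the package's `pricing()` implementation):

```ts
// Hypothetical stand-in for the `$X / 1M tokens` notation used above.
function pricePerToken(notation: string): number {
    const match = /^\$(\d+(?:\.\d+)?) \/ 1M tokens$/.exec(notation);
    if (match === null) {
        throw new Error(`Unexpected price notation: ${notation}`);
    }
    return Number(match[1]) / 1_000_000;
}

// Claude Sonnet 4 rates from the entry above: $3.00 prompt / $15.00 output per 1M tokens
const promptRate = pricePerToken('$3.00 / 1M tokens');
const outputRate = pricePerToken('$15.00 / 1M tokens');

// Example: 12,000 prompt tokens + 800 output tokens ≈ $0.048
const estimatedUsd = 12_000 * promptRate + 800 * outputRate;
console.log(estimatedUsd.toFixed(3)); // "0.048"
```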
@@ -2423,8 +2469,7 @@ class AnthropicClaudeExecutionTools {
         const rawPromptContent = templateParameters(content, { ...parameters, modelName });
         const rawRequest = {
             model: modelRequirements.modelName || this.getDefaultChatModel().modelName,
-            max_tokens: modelRequirements.maxTokens ||
-            // <- TODO: [🌾] Make some global max cap for maxTokens
+            max_tokens: modelRequirements.maxTokens || 8192,
             temperature: modelRequirements.temperature,
             system: modelRequirements.systemMessage,
             messages: [
@@ -2483,59 +2528,6 @@ class AnthropicClaudeExecutionTools {
             },
         });
     }
-    /**
-     * Calls Anthropic Claude API to use a completion model.
-     */
-    async callCompletionModel(prompt) {
-        if (this.options.isVerbose) {
-            console.info('🖋 Anthropic Claude callCompletionModel call');
-        }
-        const { content, parameters, modelRequirements } = prompt;
-        if (modelRequirements.modelVariant !== 'COMPLETION') {
-            throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
-        }
-        const client = await this.getClient();
-        const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
-        const rawPromptContent = templateParameters(content, { ...parameters, modelName });
-        const rawRequest = {
-            model: modelName,
-            max_tokens_to_sample: modelRequirements.maxTokens || 2000,
-            temperature: modelRequirements.temperature,
-            prompt: rawPromptContent,
-        };
-        const start = $getCurrentDate();
-        const rawResponse = await this.limiter
-            .schedule(() => client.completions.create(rawRequest))
-            .catch((error) => {
-            if (this.options.isVerbose) {
-                console.info(colors.bgRed('error'), error);
-            }
-            throw error;
-        });
-        if (this.options.isVerbose) {
-            console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
-        }
-        if (!rawResponse.completion) {
-            throw new PipelineExecutionError('No completion from Anthropic Claude');
-        }
-        const resultContent = rawResponse.completion;
-        const complete = $getCurrentDate();
-        const usage = computeAnthropicClaudeUsage(rawPromptContent, resultContent, rawResponse);
-        return exportJson({
-            name: 'promptResult',
-            message: `Result of \`AnthropicClaudeExecutionTools.callCompletionModel\``,
-            order: [],
-            value: {
-                content: resultContent,
-                modelName: rawResponse.model || modelName,
-                timing: { start, complete },
-                usage,
-                rawPromptContent,
-                rawRequest,
-                rawResponse,
-            },
-        });
-    }
     // <- Note: [🤖] callXxxModel
     /**
      * Get the model that should be used as default
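With `callCompletionModel` removed from `AnthropicClaudeExecutionTools`, COMPLETION-variant prompts are no longer served by this provider, and callers need to route through the chat variant instead. A hedged migration sketch (the prompt shape is taken from the deleted code; the exact `callChatModel` signature is assumed from the "[🤖] callXxxModel" family, not confirmed by this diff):

```ts
// Migration sketch: route former COMPLETION-style text through the chat variant.
type ChatTools = {
    callChatModel(prompt: {
        content: string;
        parameters: Record<string, string>;
        modelRequirements: { modelVariant: 'CHAT' };
    }): Promise<{ content: string }>;
};

async function completeViaChat(tools: ChatTools, text: string): Promise<string> {
    const result = await tools.callChatModel({
        content: text,
        parameters: {},
        modelRequirements: { modelVariant: 'CHAT' },
    });
    return result.content;
}
```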
@@ -2557,7 +2549,7 @@ class AnthropicClaudeExecutionTools {
      * Default model for chat variant.
      */
     getDefaultChatModel() {
-        return this.getDefaultModel('claude-
+        return this.getDefaultModel('claude-sonnet-4-20250514');
     }
 }
 /**
@@ -2700,7 +2692,7 @@ const _AzureOpenAiMetadataRegistration = $llmToolsMetadataRegister.register({
 /**
  * List of available OpenAI models with pricing
  *
- * Note:
+ * Note: Synced with official API docs at 2025-08-20
  *
  * @see https://platform.openai.com/docs/models/
  * @see https://openai.com/api/pricing/
@@ -2709,6 +2701,138 @@ const _AzureOpenAiMetadataRegistration = $llmToolsMetadataRegister.register({
 const OPENAI_MODELS = exportJson({
     name: 'OPENAI_MODELS',
     value: [
+        /**/
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'gpt-5',
+            modelName: 'gpt-5',
+            modelDescription: "OpenAI's most advanced language model with unprecedented reasoning capabilities and 200K context window. Features revolutionary improvements in complex problem-solving, scientific reasoning, and creative tasks. Demonstrates human-level performance across diverse domains with enhanced safety measures and alignment. Represents the next generation of AI with superior understanding, nuanced responses, and advanced multimodal capabilities.",
+            pricing: {
+                prompt: pricing(`$1.25 / 1M tokens`),
+                output: pricing(`$10.00 / 1M tokens`),
+            },
+        },
+        /**/
+        /**/
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'gpt-5-mini',
+            modelName: 'gpt-5-mini',
+            modelDescription: "A faster, cost-efficient version of GPT-5 for well-defined tasks with 200K context window. Maintains core GPT-5 capabilities while offering 5x faster inference and significantly lower costs. Features enhanced instruction following and reduced latency for production applications requiring quick responses with high quality.",
+            pricing: {
+                prompt: pricing(`$0.25 / 1M tokens`),
+                output: pricing(`$2.00 / 1M tokens`),
+            },
+        },
+        /**/
+        /**/
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'gpt-5-nano',
+            modelName: 'gpt-5-nano',
+            modelDescription: "The fastest, most cost-efficient version of GPT-5 with 200K context window. Optimized for summarization, classification, and simple reasoning tasks. Features 10x faster inference than base GPT-5 while maintaining good quality for straightforward applications. Ideal for high-volume, cost-sensitive deployments.",
+            pricing: {
+                prompt: pricing(`$0.05 / 1M tokens`),
+                output: pricing(`$0.40 / 1M tokens`),
+            },
+        },
+        /**/
+        /**/
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'gpt-4.1',
+            modelName: 'gpt-4.1',
+            modelDescription: "Smartest non-reasoning model with 128K context window. Enhanced version of GPT-4 with improved instruction following, better factual accuracy, and reduced hallucinations. Features advanced function calling capabilities and superior performance on coding tasks. Ideal for applications requiring high intelligence without reasoning overhead.",
+            pricing: {
+                prompt: pricing(`$3.00 / 1M tokens`),
+                output: pricing(`$12.00 / 1M tokens`),
+            },
+        },
+        /**/
+        /**/
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'gpt-4.1-mini',
+            modelName: 'gpt-4.1-mini',
+            modelDescription: "Smaller, faster version of GPT-4.1 with 128K context window. Balances intelligence and efficiency with 3x faster inference than base GPT-4.1. Maintains strong capabilities across text generation, reasoning, and coding while offering better cost-performance ratio for most applications.",
+            pricing: {
+                prompt: pricing(`$0.80 / 1M tokens`),
+                output: pricing(`$3.20 / 1M tokens`),
+            },
+        },
+        /**/
+        /**/
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'gpt-4.1-nano',
+            modelName: 'gpt-4.1-nano',
+            modelDescription: "Fastest, most cost-efficient version of GPT-4.1 with 128K context window. Optimized for high-throughput applications requiring good quality at minimal cost. Features 5x faster inference than GPT-4.1 while maintaining adequate performance for most general-purpose tasks.",
+            pricing: {
+                prompt: pricing(`$0.20 / 1M tokens`),
+                output: pricing(`$0.80 / 1M tokens`),
+            },
+        },
+        /**/
+        /**/
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'o3',
+            modelName: 'o3',
+            modelDescription: "Advanced reasoning model with 128K context window specializing in complex logical, mathematical, and analytical tasks. Successor to o1 with enhanced step-by-step problem-solving capabilities and superior performance on STEM-focused problems. Ideal for professional applications requiring deep analytical thinking and precise reasoning.",
+            pricing: {
+                prompt: pricing(`$15.00 / 1M tokens`),
+                output: pricing(`$60.00 / 1M tokens`),
+            },
+        },
+        /**/
+        /**/
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'o3-pro',
+            modelName: 'o3-pro',
+            modelDescription: "Enhanced version of o3 with more compute allocated for better responses on the most challenging problems. Features extended reasoning time and improved accuracy on complex analytical tasks. Designed for applications where maximum reasoning quality is more important than response speed.",
+            pricing: {
+                prompt: pricing(`$30.00 / 1M tokens`),
+                output: pricing(`$120.00 / 1M tokens`),
+            },
+        },
+        /**/
+        /**/
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'o4-mini',
+            modelName: 'o4-mini',
+            modelDescription: "Fast, cost-efficient reasoning model with 128K context window. Successor to o1-mini with improved analytical capabilities while maintaining speed advantages. Features enhanced mathematical reasoning and logical problem-solving at significantly lower cost than full reasoning models.",
+            pricing: {
+                prompt: pricing(`$4.00 / 1M tokens`),
+                output: pricing(`$16.00 / 1M tokens`),
+            },
+        },
+        /**/
+        /**/
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'o3-deep-research',
+            modelName: 'o3-deep-research',
+            modelDescription: "Most powerful deep research model with 128K context window. Specialized for comprehensive research tasks, literature analysis, and complex information synthesis. Features advanced citation capabilities and enhanced factual accuracy for academic and professional research applications.",
+            pricing: {
+                prompt: pricing(`$25.00 / 1M tokens`),
+                output: pricing(`$100.00 / 1M tokens`),
+            },
+        },
+        /**/
+        /**/
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'o4-mini-deep-research',
+            modelName: 'o4-mini-deep-research',
+            modelDescription: "Faster, more affordable deep research model with 128K context window. Balances research capabilities with cost efficiency, offering good performance on literature review, fact-checking, and information synthesis tasks at a more accessible price point.",
+            pricing: {
+                prompt: pricing(`$12.00 / 1M tokens`),
+                output: pricing(`$48.00 / 1M tokens`),
+            },
+        },
+        /**/
         /*/
         {
             modelTitle: 'dall-e-3',
@@ -3229,7 +3353,6 @@ class AzureOpenAiExecutionTools {
         const modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
         const modelSettings = {
             maxTokens: modelRequirements.maxTokens,
-            // <- TODO: [🌾] Make some global max cap for maxTokens
             temperature: modelRequirements.temperature,
             user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString(),
             // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
@@ -3335,8 +3458,7 @@ class AzureOpenAiExecutionTools {
         try {
             const modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
             const modelSettings = {
-                maxTokens: modelRequirements.maxTokens
-                // <- TODO: [🌾] Make some global max cap for maxTokens
+                maxTokens: modelRequirements.maxTokens,
                 temperature: modelRequirements.temperature,
                 user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString(),
                 // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
@@ -3720,7 +3842,7 @@ function createExecutionToolsFromVercelProvider(options) {
 /**
  * List of available Deepseek models with descriptions
  *
- * Note:
+ * Note: Synced with official API docs at 2025-08-20
  *
  * @see https://www.deepseek.com/models
  * @public exported from `@promptbook/deepseek`
@@ -3730,12 +3852,32 @@ const DEEPSEEK_MODELS = exportJson({
     value: [
         {
             modelVariant: 'CHAT',
-            modelTitle: '
-            modelName: 'deepseek-chat
-            modelDescription: 'Latest flagship general-purpose model with
+            modelTitle: 'DeepSeek V3',
+            modelName: 'deepseek-chat',
+            modelDescription: 'Latest flagship general-purpose model with 128K context window. Features exceptional reasoning capabilities, advanced code generation, and strong performance across diverse domains. Offers competitive performance with leading models while maintaining cost efficiency. Ideal for complex reasoning, coding, and knowledge-intensive tasks.',
             pricing: {
-                prompt: pricing(`$
-                output: pricing(`$
+                prompt: pricing(`$0.14 / 1M tokens`),
+                output: pricing(`$0.28 / 1M tokens`),
+            },
+        },
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'DeepSeek R1',
+            modelName: 'deepseek-reasoner',
+            modelDescription: 'Advanced reasoning model with 128K context window specializing in complex problem-solving and analytical thinking. Features explicit reasoning chains, enhanced mathematical capabilities, and superior performance on STEM tasks. Designed for applications requiring deep analytical reasoning and step-by-step problem solving.',
+            pricing: {
+                prompt: pricing(`$0.55 / 1M tokens`),
+                output: pricing(`$2.19 / 1M tokens`),
+            },
+        },
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'DeepSeek Coder V2',
+            modelName: 'deepseek-coder',
+            modelDescription: 'Specialized coding model with 128K context window optimized for software development tasks. Features exceptional code generation, debugging, and refactoring capabilities across 40+ programming languages. Particularly strong in understanding complex codebases and implementing solutions based on natural language specifications.',
+            pricing: {
+                prompt: pricing(`$0.14 / 1M tokens`),
+                output: pricing(`$0.28 / 1M tokens`),
             },
         },
         {
@@ -3969,7 +4111,7 @@ const _GoogleMetadataRegistration = $llmToolsMetadataRegister.register({
 /**
  * List of available Google models with descriptions
  *
- * Note:
+ * Note: Synced with official API docs at 2025-08-20
  *
  * @see https://ai.google.dev/models/gemini
  * @public exported from `@promptbook/google`
@@ -3980,11 +4122,51 @@ const GOOGLE_MODELS = exportJson({
         {
             modelVariant: 'CHAT',
             modelTitle: 'Gemini 2.5 Pro',
-            modelName: 'gemini-2.5-pro
-            modelDescription: '
+            modelName: 'gemini-2.5-pro',
+            modelDescription: 'State-of-the-art thinking model with 1M token context window capable of reasoning over complex problems in code, math, and STEM. Features enhanced thinking capabilities, advanced multimodal understanding, and superior performance on analytical tasks. Ideal for complex enterprise applications requiring maximum intelligence and reasoning.',
             pricing: {
-                prompt: pricing(`$
-                output: pricing(`$
+                prompt: pricing(`$7.00 / 1M tokens`),
+                output: pricing(`$21.00 / 1M tokens`),
+            },
+        },
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'Gemini 2.5 Flash',
+            modelName: 'gemini-2.5-flash',
+            modelDescription: 'Best model in terms of price-performance with 1M token context window offering well-rounded capabilities. Features adaptive thinking, cost efficiency, and enhanced reasoning for large-scale processing. Ideal for low-latency, high-volume tasks that require thinking and agentic use cases.',
+            pricing: {
+                prompt: pricing(`$0.35 / 1M tokens`),
+                output: pricing(`$1.05 / 1M tokens`),
+            },
+        },
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'Gemini 2.5 Flash Lite',
+            modelName: 'gemini-2.5-flash-lite',
+            modelDescription: 'Cost-efficient Gemini 2.5 Flash model optimized for high throughput with 1M token context window. Features thinking capabilities while maintaining the most cost-efficient pricing. Perfect for real-time, low-latency use cases requiring good quality at scale.',
+            pricing: {
+                prompt: pricing(`$0.20 / 1M tokens`),
+                output: pricing(`$0.60 / 1M tokens`),
+            },
+        },
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'Gemini 2.0 Flash',
+            modelName: 'gemini-2.0-flash',
+            modelDescription: 'Next-generation model with 1M token context window delivering improved capabilities, superior speed, and realtime streaming. Features enhanced function calling, code execution, and search capabilities. Ideal for applications requiring cutting-edge AI capabilities with fast response times.',
+            pricing: {
+                prompt: pricing(`$0.25 / 1M tokens`),
+                output: pricing(`$0.75 / 1M tokens`),
+            },
+        },
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'Gemini 2.0 Flash Lite',
+            modelName: 'gemini-2.0-flash-lite',
+            modelDescription: 'Cost-efficient Gemini 2.0 Flash model optimized for low latency with 1M token context window. Balances performance and cost with enhanced efficiency for high-volume applications. Perfect for applications requiring good quality responses at minimal cost.',
+            pricing: {
+                prompt: pricing(`$0.15 / 1M tokens`),
+                output: pricing(`$0.45 / 1M tokens`),
             },
         },
         {
@@ -4425,7 +4607,6 @@ class OpenAiCompatibleExecutionTools {
         const modelSettings = {
             model: modelName,
             max_tokens: modelRequirements.maxTokens,
-            // <- TODO: [🌾] Make some global max cap for maxTokens
             temperature: modelRequirements.temperature,
             // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
             // <- Note: [🧆]
@@ -4521,8 +4702,7 @@ class OpenAiCompatibleExecutionTools {
         const modelName = modelRequirements.modelName || this.getDefaultCompletionModel().modelName;
         const modelSettings = {
             model: modelName,
-            max_tokens: modelRequirements.maxTokens
-            // <- TODO: [🌾] Make some global max cap for maxTokens
+            max_tokens: modelRequirements.maxTokens,
             temperature: modelRequirements.temperature,
             // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
             // <- Note: [🧆]
@@ -4671,7 +4851,7 @@ class OpenAiCompatibleExecutionTools {
 /**
  * List of available models in Ollama library
  *
- * Note:
+ * Note: Synced with official API docs at 2025-08-20
  *
  * @see https://ollama.com/library
  * @public exported from `@promptbook/ollama`
@@ -4679,6 +4859,24 @@ class OpenAiCompatibleExecutionTools {
 const OLLAMA_MODELS = exportJson({
     name: 'OLLAMA_MODELS',
     value: [
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'llama3.3',
+            modelName: 'llama3.3',
+            modelDescription: 'Meta Llama 3.3 (70B parameters) with 128K context window. Latest generation foundation model with significantly enhanced reasoning, instruction following, and multilingual capabilities. Features improved performance on complex tasks and better factual accuracy compared to Llama 3.1.',
+        },
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'llama3.2',
+            modelName: 'llama3.2',
+            modelDescription: 'Meta Llama 3.2 (1B-90B parameters) with 128K context window. Enhanced model with improved reasoning capabilities, better instruction following, and multimodal support in larger variants. Features significant performance improvements over Llama 3.1 across diverse tasks.',
+        },
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'llama3.1',
+            modelName: 'llama3.1',
+            modelDescription: 'Meta Llama 3.1 (8B-405B parameters) with 128K context window. Advanced foundation model with enhanced reasoning, improved multilingual capabilities, and better performance on complex tasks. Features significant improvements in code generation and mathematical reasoning.',
+        },
         {
             modelVariant: 'CHAT',
             modelTitle: 'llama3',
@@ -5173,7 +5371,7 @@ class OpenAiExecutionTools extends OpenAiCompatibleExecutionTools {
      * Default model for chat variant.
      */
     getDefaultChatModel() {
-        return this.getDefaultModel('gpt-
+        return this.getDefaultModel('gpt-5');
     }
     /**
      * Default model for completion variant.
@@ -5241,8 +5439,6 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
         const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
         const modelSettings = {
             model: modelName,
-            max_tokens: modelRequirements.maxTokens,
-            // <- TODO: [🌾] Make some global max cap for maxTokens
 
             temperature: modelRequirements.temperature,
 
@@ -6975,7 +7171,7 @@ function assertsTaskSuccessful(executionResult) {
  * @private internal helper function
  */
 function createTask(options) {
-    const { taskType, taskProcessCallback } = options;
+    const { taskType, taskProcessCallback, tldrProvider } = options;
     let { title } = options;
     // TODO: [🐙] DRY
     const taskId = `${taskType.toLowerCase().substring(0, 4)}-${$randomToken(8 /* <- TODO: To global config + Use Base58 to avoid similar char conflicts */)}`;
@@ -7048,6 +7244,78 @@ function createTask(options) {
             return status;
             // <- Note: [1] --||--
         },
+        get tldr() {
+            var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k;
+            // Use custom tldr provider if available
+            if (tldrProvider) {
+                return tldrProvider(createdAt, status, currentValue, errors, warnings);
+            }
+            // Fallback to default implementation
+            const cv = currentValue;
+            // If explicit percent is provided, use it
+            let percentRaw = (_f = (_d = (_b = (_a = cv === null || cv === void 0 ? void 0 : cv.tldr) === null || _a === void 0 ? void 0 : _a.percent) !== null && _b !== void 0 ? _b : (_c = cv === null || cv === void 0 ? void 0 : cv.usage) === null || _c === void 0 ? void 0 : _c.percent) !== null && _d !== void 0 ? _d : (_e = cv === null || cv === void 0 ? void 0 : cv.progress) === null || _e === void 0 ? void 0 : _e.percent) !== null && _f !== void 0 ? _f : cv === null || cv === void 0 ? void 0 : cv.percent;
+            // Simulate progress if not provided
+            if (typeof percentRaw !== 'number') {
+                // Simulate progress: evenly split across subtasks, based on elapsed time
+                const now = new Date();
+                const elapsedMs = now.getTime() - createdAt.getTime();
+                const totalMs = DEFAULT_TASK_SIMULATED_DURATION_MS;
+                // If subtasks are defined, split progress evenly
+                const subtaskCount = Array.isArray(cv === null || cv === void 0 ? void 0 : cv.subtasks) ? cv.subtasks.length : 1;
+                const completedSubtasks = Array.isArray(cv === null || cv === void 0 ? void 0 : cv.subtasks)
+                    ? cv.subtasks.filter((s) => s.done || s.completed).length
+                    : 0;
+                // Progress from completed subtasks
+                const subtaskProgress = subtaskCount > 0 ? completedSubtasks / subtaskCount : 0;
+                // Progress from elapsed time for current subtask
+                const timeProgress = Math.min(elapsedMs / totalMs, 1);
+                // Combine: completed subtasks + time progress for current subtask
+                percentRaw = Math.min(subtaskProgress + (1 / subtaskCount) * timeProgress, 1);
+                if (status === 'FINISHED')
+                    percentRaw = 1;
+                if (status === 'ERROR')
+                    percentRaw = 0;
+            }
+            // Clamp to [0,1]
+            let percent = Number(percentRaw) || 0;
+            if (percent < 0)
+                percent = 0;
+            if (percent > 1)
+                percent = 1;
+            // Build a short message: prefer explicit tldr.message, then common summary/message fields, then errors/warnings, then status
+            const messageFromResult = (_k = (_j = (_h = (_g = cv === null || cv === void 0 ? void 0 : cv.tldr) === null || _g === void 0 ? void 0 : _g.message) !== null && _h !== void 0 ? _h : cv === null || cv === void 0 ? void 0 : cv.message) !== null && _j !== void 0 ? _j : cv === null || cv === void 0 ? void 0 : cv.summary) !== null && _k !== void 0 ? _k : cv === null || cv === void 0 ? void 0 : cv.statusMessage;
+            let message = messageFromResult;
+            if (!message) {
+                // If subtasks, show current subtask
+                if (Array.isArray(cv === null || cv === void 0 ? void 0 : cv.subtasks) && cv.subtasks.length > 0) {
+                    const current = cv.subtasks.find((s) => !s.done && !s.completed);
+                    if (current && current.title) {
+                        message = `Working on ${current.title}`;
+                    }
+                }
+                if (!message) {
+                    if (errors.length) {
+                        message = errors[errors.length - 1].message || 'Error';
+                    }
+                    else if (warnings.length) {
+                        message = warnings[warnings.length - 1].message || 'Warning';
+                    }
+                    else if (status === 'FINISHED') {
+                        message = 'Finished';
+                    }
+                    else if (status === 'ERROR') {
+                        message = 'Error';
+                    }
+                    else {
+                        message = 'Running';
+                    }
+                }
+            }
+            return {
+                percent: percent,
+                message,
+            };
+        },
         get createdAt() {
             return createdAt;
             // <- Note: [1] --||--
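Consumers can poll the new `tldr` getter to drive progress UI; it always yields a clamped `percent` in [0, 1] plus a short `message`. A minimal rendering sketch under that contract (the polling context is illustrative, not part of the package):

```ts
// Sketch: rendering the { percent, message } shape returned by the new `tldr` getter.
function renderTldr(tldr: { percent: number; message: string }): string {
    const width = 20;
    const filled = Math.round(tldr.percent * width);
    const bar = '#'.repeat(filled) + '-'.repeat(width - filled);
    return `[${bar}] ${Math.round(tldr.percent * 100)}% ${tldr.message}`;
}

// e.g. renderTldr({ percent: 0.4, message: 'Working on Summarize' })
// -> "[########------------] 40% Working on Summarize"
```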
@@ -8920,7 +9188,7 @@ function validatePromptResult(options) {
  */
 async function executeAttempts(options) {
     const { jokerParameterNames, priority, maxAttempts, // <- Note: [💂]
-    preparedContent, parameters, task, preparedPipeline, tools, $executionReport, pipelineIdentification, maxExecutionAttempts, } = options;
+    preparedContent, parameters, task, preparedPipeline, tools, $executionReport, pipelineIdentification, maxExecutionAttempts, onProgress, } = options;
     const $ongoingTaskResult = {
         $result: null,
         $resultString: null,
@@ -9164,6 +9432,10 @@ async function executeAttempts(options) {
                 result: $ongoingTaskResult.$resultString,
                 error: error,
             });
+            // Report failed attempt
+            onProgress({
+                errors: [error],
+            });
         }
         finally {
             if (!isJokerAttempt &&
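`executeAttempts` now accepts an `onProgress` callback and invokes it with the error of each failed attempt. A sketch of a compatible handler (only the `{ errors }` field shown in this hunk is assumed; the real update object likely carries more):

```ts
// Sketch: an onProgress handler compatible with the failed-attempt reporting above.
const onProgress = (update: { errors?: ReadonlyArray<Error> }): void => {
    for (const error of update.errors ?? []) {
        console.warn(`Attempt failed: ${error.message}`);
    }
};
```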
@@ -10045,6 +10317,71 @@ function createPipelineExecutor(options) {
                 updateOngoingResult(newOngoingResult);
             });
         },
+        tldrProvider(createdAt, status, currentValue, errors) {
+            var _a;
+            // Better progress estimation based on pipeline structure
+            const cv = currentValue;
+            // Handle finished/error states
+            if (status === 'FINISHED') {
+                return {
+                    percent: 1,
+                    message: 'Finished',
+                };
+            }
+            if (status === 'ERROR') {
+                const errorMessage = errors.length > 0 ? errors[errors.length - 1].message : 'Error';
+                return {
+                    percent: 0,
+                    message: errorMessage,
+                };
+            }
+            // Calculate progress based on pipeline tasks
+            const totalTasks = pipeline.tasks.length;
+            let completedTasks = 0;
+            let currentTaskName = '';
+            // Check execution report for completed tasks
+            if ((_a = cv === null || cv === void 0 ? void 0 : cv.executionReport) === null || _a === void 0 ? void 0 : _a.promptExecutions) {
+                const executedTaskTitles = new Set(cv.executionReport.promptExecutions.map((execution) => execution.prompt.title));
+                // Count completed tasks by matching titles
+                const completedTasksByTitle = pipeline.tasks.filter(task => executedTaskTitles.has(task.title));
+                completedTasks = completedTasksByTitle.length;
+                // Find current task being executed (first task not yet completed)
+                const remainingTasks = pipeline.tasks.filter(task => !executedTaskTitles.has(task.title));
+                if (remainingTasks.length > 0) {
+                    currentTaskName = remainingTasks[0].name;
+                }
+            }
+            // Calculate progress percentage
+            let percent = totalTasks > 0 ? completedTasks / totalTasks : 0;
+            // Add time-based progress for current task (assuming 5 minutes total)
+            if (completedTasks < totalTasks) {
+                const elapsedMs = new Date().getTime() - createdAt.getTime();
+                const totalMs = 5 * 60 * 1000; // 5 minutes
+                const timeProgress = Math.min(elapsedMs / totalMs, 1);
+                // Add partial progress for current task
+                percent += (1 / totalTasks) * timeProgress;
+            }
+            // Clamp to [0,1]
+            percent = Math.min(Math.max(percent, 0), 1);
+            // Generate message
+            let message = '';
+            if (currentTaskName) {
+                // Find the task to get its title
+                const currentTask = pipeline.tasks.find(task => task.name === currentTaskName);
+                const taskTitle = (currentTask === null || currentTask === void 0 ? void 0 : currentTask.title) || currentTaskName;
+                message = `Working on task ${taskTitle}`;
+            }
+            else if (completedTasks === 0) {
+                message = 'Starting pipeline execution';
+            }
+            else {
+                message = `Processing pipeline (${completedTasks}/${totalTasks} tasks completed)`;
+            }
+            return {
+                percent,
+                message,
+            };
+        },
     });
     // <- TODO: Make types such as there is no need to do `as` for `createTask`
     return pipelineExecutor;