@promptbook/wizard 0.100.0-4 → 0.100.0-40

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (62)
  1. package/README.md +1 -0
  2. package/esm/index.es.js +424 -87
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/components.index.d.ts +4 -0
  5. package/esm/typings/src/_packages/core.index.d.ts +26 -0
  6. package/esm/typings/src/_packages/types.index.d.ts +30 -0
  7. package/esm/typings/src/book-2.0/agent-source/parseAgentSource.d.ts +30 -0
  8. package/esm/typings/src/book-2.0/agent-source/parseAgentSource.test.d.ts +1 -0
  9. package/esm/typings/src/book-2.0/agent-source/string_book.d.ts +26 -0
  10. package/esm/typings/src/book-2.0/commitments/ACTION/ACTION.d.ts +38 -0
  11. package/esm/typings/src/book-2.0/commitments/FORMAT/FORMAT.d.ts +39 -0
  12. package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/FrontendRAGService.d.ts +48 -0
  13. package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/KNOWLEDGE.d.ts +51 -0
  14. package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/RAGService.d.ts +54 -0
  15. package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/processors/BaseKnowledgeProcessor.d.ts +45 -0
  16. package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/processors/PdfProcessor.d.ts +31 -0
  17. package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/processors/ProcessorFactory.d.ts +23 -0
  18. package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/processors/TextProcessor.d.ts +18 -0
  19. package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/types.d.ts +56 -0
  20. package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/utils/ragHelper.d.ts +34 -0
  21. package/esm/typings/src/book-2.0/commitments/META_IMAGE/META_IMAGE.d.ts +44 -0
  22. package/esm/typings/src/book-2.0/commitments/META_LINK/META_LINK.d.ts +56 -0
  23. package/esm/typings/src/book-2.0/commitments/MODEL/MODEL.d.ts +39 -0
  24. package/esm/typings/src/book-2.0/commitments/NOTE/NOTE.d.ts +49 -0
  25. package/esm/typings/src/book-2.0/commitments/PERSONA/PERSONA.d.ts +46 -0
  26. package/esm/typings/src/book-2.0/commitments/RULE/RULE.d.ts +44 -0
  27. package/esm/typings/src/book-2.0/commitments/SAMPLE/SAMPLE.d.ts +44 -0
  28. package/esm/typings/src/book-2.0/commitments/STYLE/STYLE.d.ts +38 -0
  29. package/esm/typings/src/book-2.0/commitments/_base/BaseCommitmentDefinition.d.ts +52 -0
  30. package/esm/typings/src/book-2.0/commitments/_base/BookCommitment.d.ts +5 -0
  31. package/esm/typings/src/book-2.0/commitments/_base/CommitmentDefinition.d.ts +48 -0
  32. package/esm/typings/src/book-2.0/commitments/_base/NotYetImplementedCommitmentDefinition.d.ts +22 -0
  33. package/esm/typings/src/book-2.0/commitments/_base/createEmptyAgentModelRequirements.d.ts +19 -0
  34. package/esm/typings/src/book-2.0/commitments/_misc/AgentModelRequirements.d.ts +37 -0
  35. package/esm/typings/src/book-2.0/commitments/_misc/AgentSourceParseResult.d.ts +18 -0
  36. package/esm/typings/src/book-2.0/commitments/_misc/ParsedCommitment.d.ts +22 -0
  37. package/esm/typings/src/book-2.0/commitments/_misc/createAgentModelRequirements.d.ts +61 -0
  38. package/esm/typings/src/book-2.0/commitments/_misc/createAgentModelRequirementsWithCommitments.d.ts +35 -0
  39. package/esm/typings/src/book-2.0/commitments/_misc/createCommitmentRegex.d.ts +20 -0
  40. package/esm/typings/src/book-2.0/commitments/_misc/parseAgentSourceWithCommitments.d.ts +24 -0
  41. package/esm/typings/src/book-2.0/commitments/_misc/removeCommentsFromSystemMessage.d.ts +11 -0
  42. package/esm/typings/src/book-2.0/commitments/index.d.ts +56 -0
  43. package/esm/typings/src/book-2.0/utils/profileImageUtils.d.ts +39 -0
  44. package/esm/typings/src/book-components/BookEditor/BookEditor.d.ts +30 -0
  45. package/esm/typings/src/book-components/_common/react-utils/classNames.d.ts +7 -0
  46. package/esm/typings/src/config.d.ts +6 -0
  47. package/esm/typings/src/execution/AvailableModel.d.ts +4 -0
  48. package/esm/typings/src/execution/ExecutionTask.d.ts +27 -0
  49. package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +6 -1
  50. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +0 -5
  51. package/esm/typings/src/llm-providers/anthropic-claude/anthropic-claude-models.d.ts +1 -1
  52. package/esm/typings/src/llm-providers/deepseek/deepseek-models.d.ts +1 -1
  53. package/esm/typings/src/llm-providers/google/google-models.d.ts +1 -1
  54. package/esm/typings/src/llm-providers/ollama/ollama-models.d.ts +1 -1
  55. package/esm/typings/src/llm-providers/openai/openai-models.d.ts +1 -1
  56. package/esm/typings/src/pipeline/book-notation.d.ts +2 -1
  57. package/esm/typings/src/types/ModelRequirements.d.ts +0 -2
  58. package/esm/typings/src/types/typeAliases.d.ts +6 -0
  59. package/esm/typings/src/version.d.ts +1 -1
  60. package/package.json +2 -2
  61. package/umd/index.umd.js +424 -87
  62. package/umd/index.umd.js.map +1 -1
package/umd/index.umd.js CHANGED
@@ -49,7 +49,7 @@
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
- const PROMPTBOOK_ENGINE_VERSION = '0.100.0-4';
+ const PROMPTBOOK_ENGINE_VERSION = '0.100.0-40';
  /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -323,6 +323,12 @@
  * @public exported from `@promptbook/core`
  */
  const DEFAULT_IS_AUTO_INSTALLED = false;
+ /**
+ * Default simulated duration for a task in milliseconds (used for progress reporting)
+ *
+ * @public exported from `@promptbook/core`
+ */
+ const DEFAULT_TASK_SIMULATED_DURATION_MS = 5 * 60 * 1000; // 5 minutes
  /**
  * Default rate limits (requests per minute)
  *
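Note: this new constant drives the simulated progress used by the `tldr` getter added further down in this diff. When a task reports no explicit percent, elapsed wall-clock time is divided by this duration and clamped to 1. A minimal standalone sketch of that time-based fallback (illustrative only; `createdAt` stands in for the task's creation time as used in the getter below):

    // Illustrative sketch of the time-based fallback, not part of the diff
    const elapsedMs = Date.now() - createdAt.getTime();
    const timeProgress = Math.min(elapsedMs / DEFAULT_TASK_SIMULATED_DURATION_MS, 1); // reaches 1 after 5 minutes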
@@ -1713,7 +1719,7 @@
  /**
  * List of available Anthropic Claude models with pricing
  *
- * Note: Done at 2025-05-06
+ * Note: Synced with official API docs at 2025-08-20
  *
  * @see https://docs.anthropic.com/en/docs/models-overview
  * @public exported from `@promptbook/anthropic-claude`
@@ -1723,12 +1729,52 @@
  value: [
  {
  modelVariant: 'CHAT',
- modelTitle: 'Claude 3.5 Sonnet',
- modelName: 'claude-3-5-sonnet-20240620',
- modelDescription: 'Latest Claude model with 200K token context window. Features state-of-the-art reasoning capabilities, sophisticated code generation, and enhanced multilingual understanding. Offers superior accuracy with 30% fewer hallucinations than Claude 3 Sonnet. Provides exceptional performance for complex enterprise applications while maintaining fast response times.',
+ modelTitle: 'Claude Opus 4.1',
+ modelName: 'claude-opus-4-1-20250805',
+ modelDescription: 'Most powerful and capable Claude model with 200K token context window. Features superior reasoning capabilities, exceptional coding abilities, and advanced multimodal understanding. Sets new standards in complex reasoning and analytical tasks with enhanced safety measures. Ideal for the most demanding enterprise applications requiring maximum intelligence.',
  pricing: {
- prompt: pricing(`$2.50 / 1M tokens`),
- output: pricing(`$12.50 / 1M tokens`),
+ prompt: pricing(`$15.00 / 1M tokens`),
+ output: pricing(`$75.00 / 1M tokens`),
+ },
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'Claude Opus 4',
+ modelName: 'claude-opus-4-20250514',
+ modelDescription: 'Previous flagship Claude model with 200K token context window. Features very high intelligence and capability with exceptional performance across reasoning, coding, and creative tasks. Maintains strong safety guardrails while delivering sophisticated outputs for complex professional applications.',
+ pricing: {
+ prompt: pricing(`$15.00 / 1M tokens`),
+ output: pricing(`$75.00 / 1M tokens`),
+ },
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'Claude Sonnet 4',
+ modelName: 'claude-sonnet-4-20250514',
+ modelDescription: 'High-performance Claude model with exceptional reasoning capabilities and 200K token context window (1M context beta available). Features balanced intelligence and efficiency with enhanced multimodal understanding. Offers optimal performance for most enterprise applications requiring sophisticated AI capabilities.',
+ pricing: {
+ prompt: pricing(`$3.00 / 1M tokens`),
+ output: pricing(`$15.00 / 1M tokens`),
+ },
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'Claude Sonnet 3.7',
+ modelName: 'claude-3-7-sonnet-20250219',
+ modelDescription: 'High-performance Claude model with early extended thinking capabilities and 200K token context window. Features enhanced reasoning chains, improved factual accuracy, and toggleable extended thinking for complex problem-solving. Ideal for applications requiring deep analytical capabilities.',
+ pricing: {
+ prompt: pricing(`$3.00 / 1M tokens`),
+ output: pricing(`$15.00 / 1M tokens`),
+ },
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'Claude Haiku 3.5',
+ modelName: 'claude-3-5-haiku-20241022',
+ modelDescription: 'Fastest Claude model with 200K token context window optimized for intelligence at blazing speeds. Features enhanced reasoning and contextual understanding while maintaining sub-second response times. Perfect for real-time applications, customer-facing deployments, and high-throughput services.',
+ pricing: {
+ prompt: pricing(`$0.80 / 1M tokens`),
+ output: pricing(`$4.00 / 1M tokens`),
  },
  },
  {
@@ -2434,8 +2480,7 @@
  const rawPromptContent = templateParameters(content, { ...parameters, modelName });
  const rawRequest = {
  model: modelRequirements.modelName || this.getDefaultChatModel().modelName,
- max_tokens: modelRequirements.maxTokens || 4096,
- // <- TODO: [🌾] Make some global max cap for maxTokens
+ max_tokens: modelRequirements.maxTokens || 8192,
  temperature: modelRequirements.temperature,
  system: modelRequirements.systemMessage,
  messages: [
@@ -2494,59 +2539,6 @@
  },
  });
  }
- /**
- * Calls Anthropic Claude API to use a completion model.
- */
- async callCompletionModel(prompt) {
- if (this.options.isVerbose) {
- console.info('🖋 Anthropic Claude callCompletionModel call');
- }
- const { content, parameters, modelRequirements } = prompt;
- if (modelRequirements.modelVariant !== 'COMPLETION') {
- throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
- }
- const client = await this.getClient();
- const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
- const rawPromptContent = templateParameters(content, { ...parameters, modelName });
- const rawRequest = {
- model: modelName,
- max_tokens_to_sample: modelRequirements.maxTokens || 2000,
- temperature: modelRequirements.temperature,
- prompt: rawPromptContent,
- };
- const start = $getCurrentDate();
- const rawResponse = await this.limiter
- .schedule(() => client.completions.create(rawRequest))
- .catch((error) => {
- if (this.options.isVerbose) {
- console.info(colors__default["default"].bgRed('error'), error);
- }
- throw error;
- });
- if (this.options.isVerbose) {
- console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
- }
- if (!rawResponse.completion) {
- throw new PipelineExecutionError('No completion from Anthropic Claude');
- }
- const resultContent = rawResponse.completion;
- const complete = $getCurrentDate();
- const usage = computeAnthropicClaudeUsage(rawPromptContent, resultContent, rawResponse);
- return exportJson({
- name: 'promptResult',
- message: `Result of \`AnthropicClaudeExecutionTools.callCompletionModel\``,
- order: [],
- value: {
- content: resultContent,
- modelName: rawResponse.model || modelName,
- timing: { start, complete },
- usage,
- rawPromptContent,
- rawRequest,
- rawResponse,
- },
- });
- }
  // <- Note: [🤖] callXxxModel
  /**
  * Get the model that should be used as default
@@ -2568,7 +2560,7 @@
  * Default model for chat variant.
  */
  getDefaultChatModel() {
- return this.getDefaultModel('claude-3-5-sonnet');
+ return this.getDefaultModel('claude-sonnet-4-20250514');
  }
  }
  /**
@@ -2711,7 +2703,7 @@
  /**
  * List of available OpenAI models with pricing
  *
- * Note: Done at 2025-05-06
+ * Note: Synced with official API docs at 2025-08-20
  *
  * @see https://platform.openai.com/docs/models/
  * @see https://openai.com/api/pricing/
@@ -2720,6 +2712,138 @@
  const OPENAI_MODELS = exportJson({
  name: 'OPENAI_MODELS',
  value: [
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-5',
+ modelName: 'gpt-5',
+ modelDescription: "OpenAI's most advanced language model with unprecedented reasoning capabilities and 200K context window. Features revolutionary improvements in complex problem-solving, scientific reasoning, and creative tasks. Demonstrates human-level performance across diverse domains with enhanced safety measures and alignment. Represents the next generation of AI with superior understanding, nuanced responses, and advanced multimodal capabilities.",
+ pricing: {
+ prompt: pricing(`$1.25 / 1M tokens`),
+ output: pricing(`$10.00 / 1M tokens`),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-5-mini',
+ modelName: 'gpt-5-mini',
+ modelDescription: "A faster, cost-efficient version of GPT-5 for well-defined tasks with 200K context window. Maintains core GPT-5 capabilities while offering 5x faster inference and significantly lower costs. Features enhanced instruction following and reduced latency for production applications requiring quick responses with high quality.",
+ pricing: {
+ prompt: pricing(`$0.25 / 1M tokens`),
+ output: pricing(`$2.00 / 1M tokens`),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-5-nano',
+ modelName: 'gpt-5-nano',
+ modelDescription: "The fastest, most cost-efficient version of GPT-5 with 200K context window. Optimized for summarization, classification, and simple reasoning tasks. Features 10x faster inference than base GPT-5 while maintaining good quality for straightforward applications. Ideal for high-volume, cost-sensitive deployments.",
+ pricing: {
+ prompt: pricing(`$0.05 / 1M tokens`),
+ output: pricing(`$0.40 / 1M tokens`),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-4.1',
+ modelName: 'gpt-4.1',
+ modelDescription: "Smartest non-reasoning model with 128K context window. Enhanced version of GPT-4 with improved instruction following, better factual accuracy, and reduced hallucinations. Features advanced function calling capabilities and superior performance on coding tasks. Ideal for applications requiring high intelligence without reasoning overhead.",
+ pricing: {
+ prompt: pricing(`$3.00 / 1M tokens`),
+ output: pricing(`$12.00 / 1M tokens`),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-4.1-mini',
+ modelName: 'gpt-4.1-mini',
+ modelDescription: "Smaller, faster version of GPT-4.1 with 128K context window. Balances intelligence and efficiency with 3x faster inference than base GPT-4.1. Maintains strong capabilities across text generation, reasoning, and coding while offering better cost-performance ratio for most applications.",
+ pricing: {
+ prompt: pricing(`$0.80 / 1M tokens`),
+ output: pricing(`$3.20 / 1M tokens`),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-4.1-nano',
+ modelName: 'gpt-4.1-nano',
+ modelDescription: "Fastest, most cost-efficient version of GPT-4.1 with 128K context window. Optimized for high-throughput applications requiring good quality at minimal cost. Features 5x faster inference than GPT-4.1 while maintaining adequate performance for most general-purpose tasks.",
+ pricing: {
+ prompt: pricing(`$0.20 / 1M tokens`),
+ output: pricing(`$0.80 / 1M tokens`),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'o3',
+ modelName: 'o3',
+ modelDescription: "Advanced reasoning model with 128K context window specializing in complex logical, mathematical, and analytical tasks. Successor to o1 with enhanced step-by-step problem-solving capabilities and superior performance on STEM-focused problems. Ideal for professional applications requiring deep analytical thinking and precise reasoning.",
+ pricing: {
+ prompt: pricing(`$15.00 / 1M tokens`),
+ output: pricing(`$60.00 / 1M tokens`),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'o3-pro',
+ modelName: 'o3-pro',
+ modelDescription: "Enhanced version of o3 with more compute allocated for better responses on the most challenging problems. Features extended reasoning time and improved accuracy on complex analytical tasks. Designed for applications where maximum reasoning quality is more important than response speed.",
+ pricing: {
+ prompt: pricing(`$30.00 / 1M tokens`),
+ output: pricing(`$120.00 / 1M tokens`),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'o4-mini',
+ modelName: 'o4-mini',
+ modelDescription: "Fast, cost-efficient reasoning model with 128K context window. Successor to o1-mini with improved analytical capabilities while maintaining speed advantages. Features enhanced mathematical reasoning and logical problem-solving at significantly lower cost than full reasoning models.",
+ pricing: {
+ prompt: pricing(`$4.00 / 1M tokens`),
+ output: pricing(`$16.00 / 1M tokens`),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'o3-deep-research',
+ modelName: 'o3-deep-research',
+ modelDescription: "Most powerful deep research model with 128K context window. Specialized for comprehensive research tasks, literature analysis, and complex information synthesis. Features advanced citation capabilities and enhanced factual accuracy for academic and professional research applications.",
+ pricing: {
+ prompt: pricing(`$25.00 / 1M tokens`),
+ output: pricing(`$100.00 / 1M tokens`),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'o4-mini-deep-research',
+ modelName: 'o4-mini-deep-research',
+ modelDescription: "Faster, more affordable deep research model with 128K context window. Balances research capabilities with cost efficiency, offering good performance on literature review, fact-checking, and information synthesis tasks at a more accessible price point.",
+ pricing: {
+ prompt: pricing(`$12.00 / 1M tokens`),
+ output: pricing(`$48.00 / 1M tokens`),
+ },
+ },
+ /**/
  /*/
  {
  modelTitle: 'dall-e-3',
@@ -3240,7 +3364,6 @@
  const modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
  const modelSettings = {
  maxTokens: modelRequirements.maxTokens,
- // <- TODO: [🌾] Make some global max cap for maxTokens
  temperature: modelRequirements.temperature,
  user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString(),
  // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
@@ -3346,8 +3469,7 @@
  try {
  const modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
  const modelSettings = {
- maxTokens: modelRequirements.maxTokens || 2000,
- // <- TODO: [🌾] Make some global max cap for maxTokens
+ maxTokens: modelRequirements.maxTokens,
  temperature: modelRequirements.temperature,
  user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString(),
  // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
@@ -3731,7 +3853,7 @@
  /**
  * List of available Deepseek models with descriptions
  *
- * Note: Done at 2025-05-06
+ * Note: Synced with official API docs at 2025-08-20
  *
  * @see https://www.deepseek.com/models
  * @public exported from `@promptbook/deepseek`
@@ -3741,12 +3863,32 @@
  value: [
  {
  modelVariant: 'CHAT',
- modelTitle: 'Deepseek Chat Pro',
- modelName: 'deepseek-chat-pro',
- modelDescription: 'Latest flagship general-purpose model with 256K context window. Enhanced from base Chat model with 40% improvement on complex reasoning tasks and specialized domain knowledge. Features advanced prompt optimization and improved contextual memory. Ideal for enterprise applications requiring highest quality responses.',
+ modelTitle: 'DeepSeek V3',
+ modelName: 'deepseek-chat',
+ modelDescription: 'Latest flagship general-purpose model with 128K context window. Features exceptional reasoning capabilities, advanced code generation, and strong performance across diverse domains. Offers competitive performance with leading models while maintaining cost efficiency. Ideal for complex reasoning, coding, and knowledge-intensive tasks.',
  pricing: {
- prompt: pricing(`$1.20 / 1M tokens`),
- output: pricing(`$2.40 / 1M tokens`),
+ prompt: pricing(`$0.14 / 1M tokens`),
+ output: pricing(`$0.28 / 1M tokens`),
+ },
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'DeepSeek R1',
+ modelName: 'deepseek-reasoner',
+ modelDescription: 'Advanced reasoning model with 128K context window specializing in complex problem-solving and analytical thinking. Features explicit reasoning chains, enhanced mathematical capabilities, and superior performance on STEM tasks. Designed for applications requiring deep analytical reasoning and step-by-step problem solving.',
+ pricing: {
+ prompt: pricing(`$0.55 / 1M tokens`),
+ output: pricing(`$2.19 / 1M tokens`),
+ },
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'DeepSeek Coder V2',
+ modelName: 'deepseek-coder',
+ modelDescription: 'Specialized coding model with 128K context window optimized for software development tasks. Features exceptional code generation, debugging, and refactoring capabilities across 40+ programming languages. Particularly strong in understanding complex codebases and implementing solutions based on natural language specifications.',
+ pricing: {
+ prompt: pricing(`$0.14 / 1M tokens`),
+ output: pricing(`$0.28 / 1M tokens`),
  },
  },
  {
@@ -3980,7 +4122,7 @@
  /**
  * List of available Google models with descriptions
  *
- * Note: Done at 2025-05-06
+ * Note: Synced with official API docs at 2025-08-20
  *
  * @see https://ai.google.dev/models/gemini
  * @public exported from `@promptbook/google`
@@ -3991,11 +4133,51 @@
  {
  modelVariant: 'CHAT',
  modelTitle: 'Gemini 2.5 Pro',
- modelName: 'gemini-2.5-pro-preview-03-25',
- modelDescription: 'Latest advanced multimodal model with 1M token context window. Features exceptional reasoning across complex tasks, sophisticated function calling, and advanced image analysis (16MP resolution). Demonstrates superior performance in math, coding, and knowledge-intensive tasks with 30% improvement over Gemini 1.5 Pro. Ideal for enterprise applications requiring deep contextual understanding.',
+ modelName: 'gemini-2.5-pro',
+ modelDescription: 'State-of-the-art thinking model with 1M token context window capable of reasoning over complex problems in code, math, and STEM. Features enhanced thinking capabilities, advanced multimodal understanding, and superior performance on analytical tasks. Ideal for complex enterprise applications requiring maximum intelligence and reasoning.',
  pricing: {
- prompt: pricing(`$8.00 / 1M tokens`),
- output: pricing(`$24.00 / 1M tokens`),
+ prompt: pricing(`$7.00 / 1M tokens`),
+ output: pricing(`$21.00 / 1M tokens`),
+ },
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'Gemini 2.5 Flash',
+ modelName: 'gemini-2.5-flash',
+ modelDescription: 'Best model in terms of price-performance with 1M token context window offering well-rounded capabilities. Features adaptive thinking, cost efficiency, and enhanced reasoning for large-scale processing. Ideal for low-latency, high-volume tasks that require thinking and agentic use cases.',
+ pricing: {
+ prompt: pricing(`$0.35 / 1M tokens`),
+ output: pricing(`$1.05 / 1M tokens`),
+ },
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'Gemini 2.5 Flash Lite',
+ modelName: 'gemini-2.5-flash-lite',
+ modelDescription: 'Cost-efficient Gemini 2.5 Flash model optimized for high throughput with 1M token context window. Features thinking capabilities while maintaining the most cost-efficient pricing. Perfect for real-time, low-latency use cases requiring good quality at scale.',
+ pricing: {
+ prompt: pricing(`$0.20 / 1M tokens`),
+ output: pricing(`$0.60 / 1M tokens`),
+ },
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'Gemini 2.0 Flash',
+ modelName: 'gemini-2.0-flash',
+ modelDescription: 'Next-generation model with 1M token context window delivering improved capabilities, superior speed, and realtime streaming. Features enhanced function calling, code execution, and search capabilities. Ideal for applications requiring cutting-edge AI capabilities with fast response times.',
+ pricing: {
+ prompt: pricing(`$0.25 / 1M tokens`),
+ output: pricing(`$0.75 / 1M tokens`),
+ },
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'Gemini 2.0 Flash Lite',
+ modelName: 'gemini-2.0-flash-lite',
+ modelDescription: 'Cost-efficient Gemini 2.0 Flash model optimized for low latency with 1M token context window. Balances performance and cost with enhanced efficiency for high-volume applications. Perfect for applications requiring good quality responses at minimal cost.',
+ pricing: {
+ prompt: pricing(`$0.15 / 1M tokens`),
+ output: pricing(`$0.45 / 1M tokens`),
  },
  },
  {
@@ -4436,7 +4618,6 @@
  const modelSettings = {
  model: modelName,
  max_tokens: modelRequirements.maxTokens,
- // <- TODO: [🌾] Make some global max cap for maxTokens
  temperature: modelRequirements.temperature,
  // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
  // <- Note: [🧆]
@@ -4532,8 +4713,7 @@
  const modelName = modelRequirements.modelName || this.getDefaultCompletionModel().modelName;
  const modelSettings = {
  model: modelName,
- max_tokens: modelRequirements.maxTokens || 2000,
- // <- TODO: [🌾] Make some global max cap for maxTokens
+ max_tokens: modelRequirements.maxTokens,
  temperature: modelRequirements.temperature,
  // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
  // <- Note: [🧆]
@@ -4682,7 +4862,7 @@
  /**
  * List of available models in Ollama library
  *
- * Note: Done at 2025-05-19
+ * Note: Synced with official API docs at 2025-08-20
  *
  * @see https://ollama.com/library
  * @public exported from `@promptbook/ollama`
@@ -4690,6 +4870,24 @@
  const OLLAMA_MODELS = exportJson({
  name: 'OLLAMA_MODELS',
  value: [
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'llama3.3',
+ modelName: 'llama3.3',
+ modelDescription: 'Meta Llama 3.3 (70B parameters) with 128K context window. Latest generation foundation model with significantly enhanced reasoning, instruction following, and multilingual capabilities. Features improved performance on complex tasks and better factual accuracy compared to Llama 3.1.',
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'llama3.2',
+ modelName: 'llama3.2',
+ modelDescription: 'Meta Llama 3.2 (1B-90B parameters) with 128K context window. Enhanced model with improved reasoning capabilities, better instruction following, and multimodal support in larger variants. Features significant performance improvements over Llama 3.1 across diverse tasks.',
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'llama3.1',
+ modelName: 'llama3.1',
+ modelDescription: 'Meta Llama 3.1 (8B-405B parameters) with 128K context window. Advanced foundation model with enhanced reasoning, improved multilingual capabilities, and better performance on complex tasks. Features significant improvements in code generation and mathematical reasoning.',
+ },
  {
  modelVariant: 'CHAT',
  modelTitle: 'llama3',
@@ -5184,7 +5382,7 @@
  * Default model for chat variant.
  */
  getDefaultChatModel() {
- return this.getDefaultModel('gpt-4-turbo');
+ return this.getDefaultModel('gpt-5');
  }
  /**
  * Default model for completion variant.
@@ -5252,8 +5450,6 @@
  const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
  const modelSettings = {
  model: modelName,
- max_tokens: modelRequirements.maxTokens,
- // <- TODO: [🌾] Make some global max cap for maxTokens

  temperature: modelRequirements.temperature,

@@ -6986,7 +7182,7 @@
  * @private internal helper function
  */
  function createTask(options) {
- const { taskType, taskProcessCallback } = options;
+ const { taskType, taskProcessCallback, tldrProvider } = options;
  let { title } = options;
  // TODO: [🐙] DRY
  const taskId = `${taskType.toLowerCase().substring(0, 4)}-${$randomToken(8 /* <- TODO: To global config + Use Base58 to avoid similar char conflicts */)}`;
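With this change, `createTask` also accepts an optional `tldrProvider` in its options, letting the caller override the default progress estimation. A hedged sketch of such an override (the full options shape of this private helper is not shown in the diff; the provider's signature matches the call site in the `tldr` getter below):

    // Hypothetical caller-supplied provider, illustrative only
    const task = createTask({
        taskType: 'EXECUTION',
        title: 'Example task',
        taskProcessCallback,
        tldrProvider(createdAt, status, currentValue, errors, warnings) {
            return { percent: 0.5, message: 'Halfway there' };
        },
    });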
@@ -7059,6 +7255,78 @@
  return status;
  // <- Note: [1] --||--
  },
+ get tldr() {
+ var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k;
+ // Use custom tldr provider if available
+ if (tldrProvider) {
+ return tldrProvider(createdAt, status, currentValue, errors, warnings);
+ }
+ // Fallback to default implementation
+ const cv = currentValue;
+ // If explicit percent is provided, use it
+ let percentRaw = (_f = (_d = (_b = (_a = cv === null || cv === void 0 ? void 0 : cv.tldr) === null || _a === void 0 ? void 0 : _a.percent) !== null && _b !== void 0 ? _b : (_c = cv === null || cv === void 0 ? void 0 : cv.usage) === null || _c === void 0 ? void 0 : _c.percent) !== null && _d !== void 0 ? _d : (_e = cv === null || cv === void 0 ? void 0 : cv.progress) === null || _e === void 0 ? void 0 : _e.percent) !== null && _f !== void 0 ? _f : cv === null || cv === void 0 ? void 0 : cv.percent;
+ // Simulate progress if not provided
+ if (typeof percentRaw !== 'number') {
+ // Simulate progress: evenly split across subtasks, based on elapsed time
+ const now = new Date();
+ const elapsedMs = now.getTime() - createdAt.getTime();
+ const totalMs = DEFAULT_TASK_SIMULATED_DURATION_MS;
+ // If subtasks are defined, split progress evenly
+ const subtaskCount = Array.isArray(cv === null || cv === void 0 ? void 0 : cv.subtasks) ? cv.subtasks.length : 1;
+ const completedSubtasks = Array.isArray(cv === null || cv === void 0 ? void 0 : cv.subtasks)
+ ? cv.subtasks.filter((s) => s.done || s.completed).length
+ : 0;
+ // Progress from completed subtasks
+ const subtaskProgress = subtaskCount > 0 ? completedSubtasks / subtaskCount : 0;
+ // Progress from elapsed time for current subtask
+ const timeProgress = Math.min(elapsedMs / totalMs, 1);
+ // Combine: completed subtasks + time progress for current subtask
+ percentRaw = Math.min(subtaskProgress + (1 / subtaskCount) * timeProgress, 1);
+ if (status === 'FINISHED')
+ percentRaw = 1;
+ if (status === 'ERROR')
+ percentRaw = 0;
+ }
+ // Clamp to [0,1]
+ let percent = Number(percentRaw) || 0;
+ if (percent < 0)
+ percent = 0;
+ if (percent > 1)
+ percent = 1;
+ // Build a short message: prefer explicit tldr.message, then common summary/message fields, then errors/warnings, then status
+ const messageFromResult = (_k = (_j = (_h = (_g = cv === null || cv === void 0 ? void 0 : cv.tldr) === null || _g === void 0 ? void 0 : _g.message) !== null && _h !== void 0 ? _h : cv === null || cv === void 0 ? void 0 : cv.message) !== null && _j !== void 0 ? _j : cv === null || cv === void 0 ? void 0 : cv.summary) !== null && _k !== void 0 ? _k : cv === null || cv === void 0 ? void 0 : cv.statusMessage;
+ let message = messageFromResult;
+ if (!message) {
+ // If subtasks, show current subtask
+ if (Array.isArray(cv === null || cv === void 0 ? void 0 : cv.subtasks) && cv.subtasks.length > 0) {
+ const current = cv.subtasks.find((s) => !s.done && !s.completed);
+ if (current && current.title) {
+ message = `Working on ${current.title}`;
+ }
+ }
+ if (!message) {
+ if (errors.length) {
+ message = errors[errors.length - 1].message || 'Error';
+ }
+ else if (warnings.length) {
+ message = warnings[warnings.length - 1].message || 'Warning';
+ }
+ else if (status === 'FINISHED') {
+ message = 'Finished';
+ }
+ else if (status === 'ERROR') {
+ message = 'Error';
+ }
+ else {
+ message = 'Running';
+ }
+ }
+ }
+ return {
+ percent: percent,
+ message,
+ };
+ },
  get createdAt() {
  return createdAt;
  // <- Note: [1] --||--
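The getter returns a plain `{ percent, message }` pair, so a consumer can poll it to drive a progress bar. A small usage sketch, assuming `task` is the object returned by `createTask` (the polling interval is arbitrary):

    // Illustrative polling loop, not part of the diff
    const timer = setInterval(() => {
        const { percent, message } = task.tldr;
        console.log(`${Math.round(percent * 100)}% - ${message}`);
        if (task.status === 'FINISHED' || task.status === 'ERROR') {
            clearInterval(timer);
        }
    }, 1000);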
@@ -8931,7 +9199,7 @@
  */
  async function executeAttempts(options) {
  const { jokerParameterNames, priority, maxAttempts, // <- Note: [💂]
- preparedContent, parameters, task, preparedPipeline, tools, $executionReport, pipelineIdentification, maxExecutionAttempts, } = options;
+ preparedContent, parameters, task, preparedPipeline, tools, $executionReport, pipelineIdentification, maxExecutionAttempts, onProgress, } = options;
  const $ongoingTaskResult = {
  $result: null,
  $resultString: null,
@@ -9175,6 +9443,10 @@
  result: $ongoingTaskResult.$resultString,
  error: error,
  });
+ // Report failed attempt
+ onProgress({
+ errors: [error],
+ });
  }
  finally {
  if (!isJokerAttempt &&
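Because `executeAttempts` now receives an `onProgress` callback and invokes it with the attempt error, callers can surface failed attempts while retries continue. A hedged sketch of a handler (only the `errors` field is demonstrated by this hunk; the rest of the partial-result shape is not shown here):

    // Hypothetical onProgress handler, illustrative only
    const onProgress = (partialResult) => {
        if (partialResult.errors && partialResult.errors.length > 0) {
            console.warn('Attempt failed:', partialResult.errors[partialResult.errors.length - 1].message);
        }
    };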
@@ -10056,6 +10328,71 @@
  updateOngoingResult(newOngoingResult);
  });
  },
+ tldrProvider(createdAt, status, currentValue, errors) {
+ var _a;
+ // Better progress estimation based on pipeline structure
+ const cv = currentValue;
+ // Handle finished/error states
+ if (status === 'FINISHED') {
+ return {
+ percent: 1,
+ message: 'Finished',
+ };
+ }
+ if (status === 'ERROR') {
+ const errorMessage = errors.length > 0 ? errors[errors.length - 1].message : 'Error';
+ return {
+ percent: 0,
+ message: errorMessage,
+ };
+ }
+ // Calculate progress based on pipeline tasks
+ const totalTasks = pipeline.tasks.length;
+ let completedTasks = 0;
+ let currentTaskName = '';
+ // Check execution report for completed tasks
+ if ((_a = cv === null || cv === void 0 ? void 0 : cv.executionReport) === null || _a === void 0 ? void 0 : _a.promptExecutions) {
+ const executedTaskTitles = new Set(cv.executionReport.promptExecutions.map((execution) => execution.prompt.title));
+ // Count completed tasks by matching titles
+ const completedTasksByTitle = pipeline.tasks.filter(task => executedTaskTitles.has(task.title));
+ completedTasks = completedTasksByTitle.length;
+ // Find current task being executed (first task not yet completed)
+ const remainingTasks = pipeline.tasks.filter(task => !executedTaskTitles.has(task.title));
+ if (remainingTasks.length > 0) {
+ currentTaskName = remainingTasks[0].name;
+ }
+ }
+ // Calculate progress percentage
+ let percent = totalTasks > 0 ? completedTasks / totalTasks : 0;
+ // Add time-based progress for current task (assuming 5 minutes total)
+ if (completedTasks < totalTasks) {
+ const elapsedMs = new Date().getTime() - createdAt.getTime();
+ const totalMs = 5 * 60 * 1000; // 5 minutes
+ const timeProgress = Math.min(elapsedMs / totalMs, 1);
+ // Add partial progress for current task
+ percent += (1 / totalTasks) * timeProgress;
+ }
+ // Clamp to [0,1]
+ percent = Math.min(Math.max(percent, 0), 1);
+ // Generate message
+ let message = '';
+ if (currentTaskName) {
+ // Find the task to get its title
+ const currentTask = pipeline.tasks.find(task => task.name === currentTaskName);
+ const taskTitle = (currentTask === null || currentTask === void 0 ? void 0 : currentTask.title) || currentTaskName;
+ message = `Working on task ${taskTitle}`;
+ }
+ else if (completedTasks === 0) {
+ message = 'Starting pipeline execution';
+ }
+ else {
+ message = `Processing pipeline (${completedTasks}/${totalTasks} tasks completed)`;
+ }
+ return {
+ percent,
+ message,
+ };
+ },
  });
  // <- TODO: Make types such as there is no need to do `as` for `createTask`
  return pipelineExecutor;
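For intuition about the executor's `tldrProvider` arithmetic above: with 4 pipeline tasks, 1 of them already present in the execution report, and 2 minutes elapsed on the current task, progress would be 1/4 + (1/4) * min(2/5, 1) = 0.25 + 0.10 = 0.35, reported as a "Working on task ..." message for the first remaining task. (The numbers are illustrative; the 5-minute per-task assumption is the hard-coded `totalMs` in the snippet above.)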