@promptbook/wizard 0.100.0-8 → 0.100.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (129)
  1. package/README.md +7 -14
  2. package/esm/index.es.js +651 -150
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/color.index.d.ts +50 -0
  5. package/esm/typings/src/_packages/components.index.d.ts +36 -0
  6. package/esm/typings/src/_packages/core.index.d.ts +30 -0
  7. package/esm/typings/src/_packages/types.index.d.ts +38 -0
  8. package/esm/typings/src/book-2.0/agent-source/parseAgentSource.d.ts +30 -0
  9. package/esm/typings/src/book-2.0/agent-source/parseAgentSource.test.d.ts +1 -0
  10. package/esm/typings/src/book-2.0/agent-source/string_book.d.ts +26 -0
  11. package/esm/typings/src/book-2.0/commitments/ACTION/ACTION.d.ts +38 -0
  12. package/esm/typings/src/book-2.0/commitments/FORMAT/FORMAT.d.ts +39 -0
  13. package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/KNOWLEDGE.d.ts +45 -0
  14. package/esm/typings/src/book-2.0/commitments/META_IMAGE/META_IMAGE.d.ts +44 -0
  15. package/esm/typings/src/book-2.0/commitments/META_LINK/META_LINK.d.ts +56 -0
  16. package/esm/typings/src/book-2.0/commitments/MODEL/MODEL.d.ts +39 -0
  17. package/esm/typings/src/book-2.0/commitments/NOTE/NOTE.d.ts +49 -0
  18. package/esm/typings/src/book-2.0/commitments/PERSONA/PERSONA.d.ts +46 -0
  19. package/esm/typings/src/book-2.0/commitments/RULE/RULE.d.ts +44 -0
  20. package/esm/typings/src/book-2.0/commitments/SAMPLE/SAMPLE.d.ts +44 -0
  21. package/esm/typings/src/book-2.0/commitments/STYLE/STYLE.d.ts +38 -0
  22. package/esm/typings/src/book-2.0/commitments/_base/BaseCommitmentDefinition.d.ts +52 -0
  23. package/esm/typings/src/book-2.0/commitments/_base/BookCommitment.d.ts +5 -0
  24. package/esm/typings/src/book-2.0/commitments/_base/CommitmentDefinition.d.ts +48 -0
  25. package/esm/typings/src/book-2.0/commitments/_base/NotYetImplementedCommitmentDefinition.d.ts +22 -0
  26. package/esm/typings/src/book-2.0/commitments/_base/createEmptyAgentModelRequirements.d.ts +19 -0
  27. package/esm/typings/src/book-2.0/commitments/_misc/AgentModelRequirements.d.ts +37 -0
  28. package/esm/typings/src/book-2.0/commitments/_misc/AgentSourceParseResult.d.ts +18 -0
  29. package/esm/typings/src/book-2.0/commitments/_misc/ParsedCommitment.d.ts +22 -0
  30. package/esm/typings/src/book-2.0/commitments/_misc/createAgentModelRequirements.d.ts +62 -0
  31. package/esm/typings/src/book-2.0/commitments/_misc/createAgentModelRequirementsWithCommitments.d.ts +36 -0
  32. package/esm/typings/src/book-2.0/commitments/_misc/createCommitmentRegex.d.ts +20 -0
  33. package/esm/typings/src/book-2.0/commitments/_misc/parseAgentSourceWithCommitments.d.ts +24 -0
  34. package/esm/typings/src/book-2.0/commitments/_misc/removeCommentsFromSystemMessage.d.ts +11 -0
  35. package/esm/typings/src/book-2.0/commitments/index.d.ts +56 -0
  36. package/esm/typings/src/book-2.0/utils/profileImageUtils.d.ts +39 -0
  37. package/esm/typings/src/book-components/AvatarProfile/AvatarChip/AvatarChip.d.ts +35 -0
  38. package/esm/typings/src/book-components/AvatarProfile/AvatarChip/AvatarChipFromSource.d.ts +21 -0
  39. package/esm/typings/src/book-components/AvatarProfile/AvatarChip/index.d.ts +2 -0
  40. package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/AvatarProfile.d.ts +26 -0
  41. package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/AvatarProfileFromSource.d.ts +19 -0
  42. package/esm/typings/src/book-components/BookEditor/BookEditor.d.ts +35 -0
  43. package/esm/typings/src/book-components/BookEditor/BookEditorInner.d.ts +15 -0
  44. package/esm/typings/src/book-components/BookEditor/config.d.ts +10 -0
  45. package/esm/typings/src/book-components/BookEditor/injectCssModuleIntoShadowRoot.d.ts +11 -0
  46. package/esm/typings/src/book-components/Chat/Chat/Chat.d.ts +20 -0
  47. package/esm/typings/src/book-components/Chat/Chat/ChatProps.d.ts +110 -0
  48. package/esm/typings/src/book-components/Chat/LlmChat/LlmChat.d.ts +14 -0
  49. package/esm/typings/src/book-components/Chat/LlmChat/LlmChat.test.d.ts +1 -0
  50. package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts +24 -0
  51. package/esm/typings/src/book-components/Chat/types/ChatMessage.d.ts +16 -0
  52. package/esm/typings/src/book-components/Chat/types/ChatParticipant.d.ts +32 -0
  53. package/esm/typings/src/book-components/Chat/utils/ChatPersistence.d.ts +25 -0
  54. package/esm/typings/src/book-components/Chat/utils/ExportFormat.d.ts +4 -0
  55. package/esm/typings/src/book-components/Chat/utils/addUtmParamsToUrl.d.ts +7 -0
  56. package/esm/typings/src/book-components/Chat/utils/createShortLinkForChat.d.ts +7 -0
  57. package/esm/typings/src/book-components/Chat/utils/downloadFile.d.ts +6 -0
  58. package/esm/typings/src/book-components/Chat/utils/exportChatHistory.d.ts +9 -0
  59. package/esm/typings/src/book-components/Chat/utils/generatePdfContent.d.ts +8 -0
  60. package/esm/typings/src/book-components/Chat/utils/generateQrDataUrl.d.ts +7 -0
  61. package/esm/typings/src/book-components/Chat/utils/getPromptbookBranding.d.ts +6 -0
  62. package/esm/typings/src/book-components/Chat/utils/messagesToHtml.d.ts +8 -0
  63. package/esm/typings/src/book-components/Chat/utils/messagesToJson.d.ts +7 -0
  64. package/esm/typings/src/book-components/Chat/utils/messagesToMarkdown.d.ts +8 -0
  65. package/esm/typings/src/book-components/Chat/utils/messagesToText.d.ts +8 -0
  66. package/esm/typings/src/book-components/_common/react-utils/classNames.d.ts +7 -0
  67. package/esm/typings/src/book-components/_common/react-utils/collectCssTextsForClass.d.ts +7 -0
  68. package/esm/typings/src/book-components/_common/react-utils/escapeHtml.d.ts +6 -0
  69. package/esm/typings/src/book-components/_common/react-utils/escapeRegex.d.ts +6 -0
  70. package/esm/typings/src/config.d.ts +19 -0
  71. package/esm/typings/src/execution/AvailableModel.d.ts +4 -0
  72. package/esm/typings/src/execution/ExecutionTask.d.ts +27 -1
  73. package/esm/typings/src/execution/LlmExecutionTools.d.ts +8 -0
  74. package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +6 -1
  75. package/esm/typings/src/llm-providers/_common/filterModels.d.ts +0 -3
  76. package/esm/typings/src/llm-providers/_common/profiles/llmProviderProfiles.d.ts +81 -0
  77. package/esm/typings/src/llm-providers/_common/profiles/test/llmProviderProfiles.test.d.ts +1 -0
  78. package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts +5 -0
  79. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +5 -5
  80. package/esm/typings/src/llm-providers/anthropic-claude/anthropic-claude-models.d.ts +1 -1
  81. package/esm/typings/src/llm-providers/deepseek/deepseek-models.d.ts +1 -1
  82. package/esm/typings/src/llm-providers/google/google-models.d.ts +1 -1
  83. package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +5 -0
  84. package/esm/typings/src/llm-providers/ollama/ollama-models.d.ts +1 -1
  85. package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts +8 -0
  86. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +5 -0
  87. package/esm/typings/src/llm-providers/openai/openai-models.d.ts +1 -1
  88. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +5 -0
  89. package/esm/typings/src/pipeline/book-notation.d.ts +2 -1
  90. package/esm/typings/src/playground/permanent/error-handling-playground.d.ts +5 -0
  91. package/esm/typings/src/types/ModelRequirements.d.ts +0 -2
  92. package/esm/typings/src/types/typeAliases.d.ts +6 -0
  93. package/esm/typings/src/utils/color/$randomColor.d.ts +11 -0
  94. package/esm/typings/src/utils/color/Color.d.ts +180 -0
  95. package/esm/typings/src/utils/color/css-colors.d.ts +159 -0
  96. package/esm/typings/src/utils/color/internal-utils/checkChannelValue.d.ts +14 -0
  97. package/esm/typings/src/utils/color/internal-utils/hslToRgb.d.ts +17 -0
  98. package/esm/typings/src/utils/color/internal-utils/rgbToHsl.d.ts +17 -0
  99. package/esm/typings/src/utils/color/operators/ColorTransformer.d.ts +5 -0
  100. package/esm/typings/src/utils/color/operators/darken.d.ts +9 -0
  101. package/esm/typings/src/utils/color/operators/furthest.d.ts +16 -0
  102. package/esm/typings/src/utils/color/operators/grayscale.d.ts +9 -0
  103. package/esm/typings/src/utils/color/operators/lighten.d.ts +12 -0
  104. package/esm/typings/src/utils/color/operators/mixWithColor.d.ts +11 -0
  105. package/esm/typings/src/utils/color/operators/nearest.d.ts +10 -0
  106. package/esm/typings/src/utils/color/operators/negative.d.ts +7 -0
  107. package/esm/typings/src/utils/color/operators/negativeLightness.d.ts +7 -0
  108. package/esm/typings/src/utils/color/operators/withAlpha.d.ts +9 -0
  109. package/esm/typings/src/utils/color/utils/areColorsEqual.d.ts +14 -0
  110. package/esm/typings/src/utils/color/utils/colorDistance.d.ts +21 -0
  111. package/esm/typings/src/utils/color/utils/colorHue.d.ts +11 -0
  112. package/esm/typings/src/utils/color/utils/colorHueDistance.d.ts +11 -0
  113. package/esm/typings/src/utils/color/utils/colorHueDistance.test.d.ts +1 -0
  114. package/esm/typings/src/utils/color/utils/colorLuminance.d.ts +9 -0
  115. package/esm/typings/src/utils/color/utils/colorSatulightion.d.ts +7 -0
  116. package/esm/typings/src/utils/color/utils/colorSaturation.d.ts +9 -0
  117. package/esm/typings/src/utils/color/utils/colorToDataUrl.d.ts +10 -0
  118. package/esm/typings/src/utils/color/utils/mixColors.d.ts +11 -0
  119. package/esm/typings/src/utils/organization/preserve.d.ts +21 -0
  120. package/esm/typings/src/utils/take/classes/TakeChain.d.ts +11 -0
  121. package/esm/typings/src/utils/take/interfaces/ITakeChain.d.ts +12 -0
  122. package/esm/typings/src/utils/take/interfaces/Takeable.d.ts +7 -0
  123. package/esm/typings/src/utils/take/take.d.ts +12 -0
  124. package/esm/typings/src/utils/take/take.test.d.ts +1 -0
  125. package/esm/typings/src/version.d.ts +1 -1
  126. package/package.json +2 -3
  127. package/umd/index.umd.js +655 -154
  128. package/umd/index.umd.js.map +1 -1
  129. package/esm/typings/src/scripting/javascript/utils/preserve.d.ts +0 -14
package/esm/index.es.js CHANGED
@@ -12,8 +12,9 @@ import { forTime } from 'waitasecond';
  import { SHA256 } from 'crypto-js';
  import hexEncoder from 'crypto-js/enc-hex';
  import { basename, join, dirname, relative } from 'path';
- import { format } from 'prettier';
  import parserHtml from 'prettier/parser-html';
+ import parserMarkdown from 'prettier/parser-markdown';
+ import { format } from 'prettier/standalone';
  import { Subject } from 'rxjs';
  import sha256 from 'crypto-js/sha256';
  import { lookup, extension } from 'mime-types';
@@ -38,7 +39,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
   * @generated
   * @see https://github.com/webgptorg/promptbook
   */
- const PROMPTBOOK_ENGINE_VERSION = '0.100.0-8';
+ const PROMPTBOOK_ENGINE_VERSION = '0.100.0';
  /**
   * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
   * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -312,6 +313,12 @@ let DEFAULT_IS_VERBOSE = false;
   * @public exported from `@promptbook/core`
   */
  const DEFAULT_IS_AUTO_INSTALLED = false;
+ /**
+  * Default simulated duration for a task in milliseconds (used for progress reporting)
+  *
+  * @public exported from `@promptbook/core`
+  */
+ const DEFAULT_TASK_SIMULATED_DURATION_MS = 5 * 60 * 1000; // 5 minutes
  /**
   * Default rate limits (requests per minute)
   *
@@ -320,6 +327,13 @@ const DEFAULT_IS_AUTO_INSTALLED = false;
   * @public exported from `@promptbook/core`
   */
  const DEFAULT_MAX_REQUESTS_PER_MINUTE = 60;
+ /**
+  * API request timeout in milliseconds
+  * Can be overridden via API_REQUEST_TIMEOUT environment variable
+  *
+  * @public exported from `@promptbook/core`
+  */
+ const API_REQUEST_TIMEOUT = parseInt(process.env.API_REQUEST_TIMEOUT || '90000');
  /**
   * Indicates whether pipeline logic validation is enabled. When true, the pipeline logic is checked for consistency.
   *
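Reviewer note on the new `API_REQUEST_TIMEOUT` constant above: it is a top-level `const`, so the environment variable is read once at module load and must be set before the process starts. A minimal sketch of the override (values here are illustrative, not part of the package):

    // Shell: API_REQUEST_TIMEOUT=120000 node app.js
    const timeout = parseInt(process.env.API_REQUEST_TIMEOUT || '90000');
    // -> 120000 ms when the variable is set, 90000 ms (90 s) otherwise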
@@ -1321,6 +1335,76 @@ function deserializeError(error) {
      return deserializedError;
  }
 
+ /**
+  * Predefined profiles for LLM providers to maintain consistency across the application
+  * These profiles represent each provider as a virtual persona in chat interfaces
+  *
+  * @private !!!!
+  */
+ const LLM_PROVIDER_PROFILES = {
+     OPENAI: {
+         name: 'OPENAI',
+         fullname: 'OpenAI GPT',
+         color: '#10a37f', // OpenAI's signature green
+         // Note: avatarSrc could be added when we have provider logos available
+     },
+     ANTHROPIC: {
+         name: 'ANTHROPIC',
+         fullname: 'Anthropic Claude',
+         color: '#d97706', // Anthropic's orange/amber color
+     },
+     AZURE_OPENAI: {
+         name: 'AZURE_OPENAI',
+         fullname: 'Azure OpenAI',
+         color: '#0078d4', // Microsoft Azure blue
+     },
+     GOOGLE: {
+         name: 'GOOGLE',
+         fullname: 'Google Gemini',
+         color: '#4285f4', // Google blue
+     },
+     DEEPSEEK: {
+         name: 'DEEPSEEK',
+         fullname: 'DeepSeek',
+         color: '#7c3aed', // Purple color for DeepSeek
+     },
+     OLLAMA: {
+         name: 'OLLAMA',
+         fullname: 'Ollama',
+         color: '#059669', // Emerald green for local models
+     },
+     REMOTE: {
+         name: 'REMOTE',
+         fullname: 'Remote Server',
+         color: '#6b7280', // Gray for remote/proxy connections
+     },
+     MOCKED_ECHO: {
+         name: 'MOCKED_ECHO',
+         fullname: 'Echo (Test)',
+         color: '#8b5cf6', // Purple for test/mock tools
+     },
+     MOCKED_FAKE: {
+         name: 'MOCKED_FAKE',
+         fullname: 'Fake LLM (Test)',
+         color: '#ec4899', // Pink for fake/test tools
+     },
+     VERCEL: {
+         name: 'VERCEL',
+         fullname: 'Vercel AI',
+         color: '#000000', // Vercel's black
+     },
+     MULTIPLE: {
+         name: 'MULTIPLE',
+         fullname: 'Multiple Providers',
+         color: '#6366f1', // Indigo for combined/multiple providers
+     },
+ };
+ /**
+  * TODO: Refactor this - each profile must be alongside the provider definition
+  * TODO: Unite `AvatarProfileProps` and `ChatParticipant`
+  * Note: [💞] Ignore a discrepancy between file name and entity name
+  */
+
  /**
   * Tests if given string is valid URL.
   *
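Reviewer note: these profiles surface through the `profile` getters added to each execution-tools class in the hunks below. A sketch of how a chat surface might consume one (the `participant` shape is hypothetical, loosely mirroring the `ChatParticipant` typing listed in the files above):

    // Sketch only; `llmTools` is any execution-tools instance exposing the new getter
    const { name, fullname, color } = llmTools.profile ?? LLM_PROVIDER_PROFILES.MULTIPLE;
    const participant = { name, fullname, color }; // e.g. rendered as the avatar chip in chat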
@@ -1434,6 +1518,9 @@ class RemoteLlmExecutionTools {
      get description() {
          return `Models from Promptbook remote server ${this.options.remoteServerUrl}`;
      }
+     get profile() {
+         return LLM_PROVIDER_PROFILES.REMOTE;
+     }
      /**
       * Check the configuration of all execution tools
       */
@@ -1702,7 +1789,7 @@ function pricing(value) {
  /**
   * List of available Anthropic Claude models with pricing
   *
-  * Note: Done at 2025-05-06
+  * Note: Synced with official API docs at 2025-08-20
   *
   * @see https://docs.anthropic.com/en/docs/models-overview
   * @public exported from `@promptbook/anthropic-claude`
@@ -1712,12 +1799,52 @@ const ANTHROPIC_CLAUDE_MODELS = exportJson({
      value: [
          {
              modelVariant: 'CHAT',
-             modelTitle: 'Claude 3.5 Sonnet',
-             modelName: 'claude-3-5-sonnet-20240620',
-             modelDescription: 'Latest Claude model with 200K token context window. Features state-of-the-art reasoning capabilities, sophisticated code generation, and enhanced multilingual understanding. Offers superior accuracy with 30% fewer hallucinations than Claude 3 Sonnet. Provides exceptional performance for complex enterprise applications while maintaining fast response times.',
+             modelTitle: 'Claude Opus 4.1',
+             modelName: 'claude-opus-4-1-20250805',
+             modelDescription: 'Most powerful and capable Claude model with 200K token context window. Features superior reasoning capabilities, exceptional coding abilities, and advanced multimodal understanding. Sets new standards in complex reasoning and analytical tasks with enhanced safety measures. Ideal for the most demanding enterprise applications requiring maximum intelligence.',
              pricing: {
-                 prompt: pricing(`$2.50 / 1M tokens`),
-                 output: pricing(`$12.50 / 1M tokens`),
+                 prompt: pricing(`$15.00 / 1M tokens`),
+                 output: pricing(`$75.00 / 1M tokens`),
+             },
+         },
+         {
+             modelVariant: 'CHAT',
+             modelTitle: 'Claude Opus 4',
+             modelName: 'claude-opus-4-20250514',
+             modelDescription: 'Previous flagship Claude model with 200K token context window. Features very high intelligence and capability with exceptional performance across reasoning, coding, and creative tasks. Maintains strong safety guardrails while delivering sophisticated outputs for complex professional applications.',
+             pricing: {
+                 prompt: pricing(`$15.00 / 1M tokens`),
+                 output: pricing(`$75.00 / 1M tokens`),
+             },
+         },
+         {
+             modelVariant: 'CHAT',
+             modelTitle: 'Claude Sonnet 4',
+             modelName: 'claude-sonnet-4-20250514',
+             modelDescription: 'High-performance Claude model with exceptional reasoning capabilities and 200K token context window (1M context beta available). Features balanced intelligence and efficiency with enhanced multimodal understanding. Offers optimal performance for most enterprise applications requiring sophisticated AI capabilities.',
+             pricing: {
+                 prompt: pricing(`$3.00 / 1M tokens`),
+                 output: pricing(`$15.00 / 1M tokens`),
+             },
+         },
+         {
+             modelVariant: 'CHAT',
+             modelTitle: 'Claude Sonnet 3.7',
+             modelName: 'claude-3-7-sonnet-20250219',
+             modelDescription: 'High-performance Claude model with early extended thinking capabilities and 200K token context window. Features enhanced reasoning chains, improved factual accuracy, and toggleable extended thinking for complex problem-solving. Ideal for applications requiring deep analytical capabilities.',
+             pricing: {
+                 prompt: pricing(`$3.00 / 1M tokens`),
+                 output: pricing(`$15.00 / 1M tokens`),
+             },
+         },
+         {
+             modelVariant: 'CHAT',
+             modelTitle: 'Claude Haiku 3.5',
+             modelName: 'claude-3-5-haiku-20241022',
+             modelDescription: 'Fastest Claude model with 200K token context window optimized for intelligence at blazing speeds. Features enhanced reasoning and contextual understanding while maintaining sub-second response times. Perfect for real-time applications, customer-facing deployments, and high-throughput services.',
+             pricing: {
+                 prompt: pricing(`$0.80 / 1M tokens`),
+                 output: pricing(`$4.00 / 1M tokens`),
              },
          },
          {
@@ -2383,6 +2510,9 @@ class AnthropicClaudeExecutionTools {
      get description() {
          return 'Use all models provided by Anthropic Claude';
      }
+     get profile() {
+         return LLM_PROVIDER_PROFILES.ANTHROPIC;
+     }
      async getClient() {
          if (this.client === null) {
              // Note: Passing only Anthropic Claude relevant options to Anthropic constructor
@@ -2423,8 +2553,7 @@ class AnthropicClaudeExecutionTools {
      const rawPromptContent = templateParameters(content, { ...parameters, modelName });
      const rawRequest = {
          model: modelRequirements.modelName || this.getDefaultChatModel().modelName,
-         max_tokens: modelRequirements.maxTokens || 4096,
-         // <- TODO: [🌾] Make some global max cap for maxTokens
+         max_tokens: modelRequirements.maxTokens || 8192,
          temperature: modelRequirements.temperature,
          system: modelRequirements.systemMessage,
          messages: [
@@ -2483,59 +2612,6 @@ class AnthropicClaudeExecutionTools {
              },
          });
      }
-     /**
-      * Calls Anthropic Claude API to use a completion model.
-      */
-     async callCompletionModel(prompt) {
-         if (this.options.isVerbose) {
-             console.info('🖋 Anthropic Claude callCompletionModel call');
-         }
-         const { content, parameters, modelRequirements } = prompt;
-         if (modelRequirements.modelVariant !== 'COMPLETION') {
-             throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
-         }
-         const client = await this.getClient();
-         const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
-         const rawPromptContent = templateParameters(content, { ...parameters, modelName });
-         const rawRequest = {
-             model: modelName,
-             max_tokens_to_sample: modelRequirements.maxTokens || 2000,
-             temperature: modelRequirements.temperature,
-             prompt: rawPromptContent,
-         };
-         const start = $getCurrentDate();
-         const rawResponse = await this.limiter
-             .schedule(() => client.completions.create(rawRequest))
-             .catch((error) => {
-                 if (this.options.isVerbose) {
-                     console.info(colors.bgRed('error'), error);
-                 }
-                 throw error;
-             });
-         if (this.options.isVerbose) {
-             console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
-         }
-         if (!rawResponse.completion) {
-             throw new PipelineExecutionError('No completion from Anthropic Claude');
-         }
-         const resultContent = rawResponse.completion;
-         const complete = $getCurrentDate();
-         const usage = computeAnthropicClaudeUsage(rawPromptContent, resultContent, rawResponse);
-         return exportJson({
-             name: 'promptResult',
-             message: `Result of \`AnthropicClaudeExecutionTools.callCompletionModel\``,
-             order: [],
-             value: {
-                 content: resultContent,
-                 modelName: rawResponse.model || modelName,
-                 timing: { start, complete },
-                 usage,
-                 rawPromptContent,
-                 rawRequest,
-                 rawResponse,
-             },
-         });
-     }
      // <- Note: [🤖] callXxxModel
      /**
       * Get the model that should be used as default
@@ -2557,7 +2633,7 @@ class AnthropicClaudeExecutionTools {
       * Default model for chat variant.
       */
      getDefaultChatModel() {
-         return this.getDefaultModel('claude-3-5-sonnet');
+         return this.getDefaultModel('claude-sonnet-4-20250514');
      }
  }
  /**
@@ -2700,7 +2776,7 @@ const _AzureOpenAiMetadataRegistration = $llmToolsMetadataRegister.register({
  /**
   * List of available OpenAI models with pricing
   *
-  * Note: Done at 2025-05-06
+  * Note: Synced with official API docs at 2025-08-20
   *
   * @see https://platform.openai.com/docs/models/
   * @see https://openai.com/api/pricing/
@@ -2709,6 +2785,138 @@ const _AzureOpenAiMetadataRegistration = $llmToolsMetadataRegister.register({
  const OPENAI_MODELS = exportJson({
      name: 'OPENAI_MODELS',
      value: [
+         /**/
+         {
+             modelVariant: 'CHAT',
+             modelTitle: 'gpt-5',
+             modelName: 'gpt-5',
+             modelDescription: "OpenAI's most advanced language model with unprecedented reasoning capabilities and 200K context window. Features revolutionary improvements in complex problem-solving, scientific reasoning, and creative tasks. Demonstrates human-level performance across diverse domains with enhanced safety measures and alignment. Represents the next generation of AI with superior understanding, nuanced responses, and advanced multimodal capabilities.",
+             pricing: {
+                 prompt: pricing(`$1.25 / 1M tokens`),
+                 output: pricing(`$10.00 / 1M tokens`),
+             },
+         },
+         /**/
+         /**/
+         {
+             modelVariant: 'CHAT',
+             modelTitle: 'gpt-5-mini',
+             modelName: 'gpt-5-mini',
+             modelDescription: "A faster, cost-efficient version of GPT-5 for well-defined tasks with 200K context window. Maintains core GPT-5 capabilities while offering 5x faster inference and significantly lower costs. Features enhanced instruction following and reduced latency for production applications requiring quick responses with high quality.",
+             pricing: {
+                 prompt: pricing(`$0.25 / 1M tokens`),
+                 output: pricing(`$2.00 / 1M tokens`),
+             },
+         },
+         /**/
+         /**/
+         {
+             modelVariant: 'CHAT',
+             modelTitle: 'gpt-5-nano',
+             modelName: 'gpt-5-nano',
+             modelDescription: "The fastest, most cost-efficient version of GPT-5 with 200K context window. Optimized for summarization, classification, and simple reasoning tasks. Features 10x faster inference than base GPT-5 while maintaining good quality for straightforward applications. Ideal for high-volume, cost-sensitive deployments.",
+             pricing: {
+                 prompt: pricing(`$0.05 / 1M tokens`),
+                 output: pricing(`$0.40 / 1M tokens`),
+             },
+         },
+         /**/
+         /**/
+         {
+             modelVariant: 'CHAT',
+             modelTitle: 'gpt-4.1',
+             modelName: 'gpt-4.1',
+             modelDescription: "Smartest non-reasoning model with 128K context window. Enhanced version of GPT-4 with improved instruction following, better factual accuracy, and reduced hallucinations. Features advanced function calling capabilities and superior performance on coding tasks. Ideal for applications requiring high intelligence without reasoning overhead.",
+             pricing: {
+                 prompt: pricing(`$3.00 / 1M tokens`),
+                 output: pricing(`$12.00 / 1M tokens`),
+             },
+         },
+         /**/
+         /**/
+         {
+             modelVariant: 'CHAT',
+             modelTitle: 'gpt-4.1-mini',
+             modelName: 'gpt-4.1-mini',
+             modelDescription: "Smaller, faster version of GPT-4.1 with 128K context window. Balances intelligence and efficiency with 3x faster inference than base GPT-4.1. Maintains strong capabilities across text generation, reasoning, and coding while offering better cost-performance ratio for most applications.",
+             pricing: {
+                 prompt: pricing(`$0.80 / 1M tokens`),
+                 output: pricing(`$3.20 / 1M tokens`),
+             },
+         },
+         /**/
+         /**/
+         {
+             modelVariant: 'CHAT',
+             modelTitle: 'gpt-4.1-nano',
+             modelName: 'gpt-4.1-nano',
+             modelDescription: "Fastest, most cost-efficient version of GPT-4.1 with 128K context window. Optimized for high-throughput applications requiring good quality at minimal cost. Features 5x faster inference than GPT-4.1 while maintaining adequate performance for most general-purpose tasks.",
+             pricing: {
+                 prompt: pricing(`$0.20 / 1M tokens`),
+                 output: pricing(`$0.80 / 1M tokens`),
+             },
+         },
+         /**/
+         /**/
+         {
+             modelVariant: 'CHAT',
+             modelTitle: 'o3',
+             modelName: 'o3',
+             modelDescription: "Advanced reasoning model with 128K context window specializing in complex logical, mathematical, and analytical tasks. Successor to o1 with enhanced step-by-step problem-solving capabilities and superior performance on STEM-focused problems. Ideal for professional applications requiring deep analytical thinking and precise reasoning.",
+             pricing: {
+                 prompt: pricing(`$15.00 / 1M tokens`),
+                 output: pricing(`$60.00 / 1M tokens`),
+             },
+         },
+         /**/
+         /**/
+         {
+             modelVariant: 'CHAT',
+             modelTitle: 'o3-pro',
+             modelName: 'o3-pro',
+             modelDescription: "Enhanced version of o3 with more compute allocated for better responses on the most challenging problems. Features extended reasoning time and improved accuracy on complex analytical tasks. Designed for applications where maximum reasoning quality is more important than response speed.",
+             pricing: {
+                 prompt: pricing(`$30.00 / 1M tokens`),
+                 output: pricing(`$120.00 / 1M tokens`),
+             },
+         },
+         /**/
+         /**/
+         {
+             modelVariant: 'CHAT',
+             modelTitle: 'o4-mini',
+             modelName: 'o4-mini',
+             modelDescription: "Fast, cost-efficient reasoning model with 128K context window. Successor to o1-mini with improved analytical capabilities while maintaining speed advantages. Features enhanced mathematical reasoning and logical problem-solving at significantly lower cost than full reasoning models.",
+             pricing: {
+                 prompt: pricing(`$4.00 / 1M tokens`),
+                 output: pricing(`$16.00 / 1M tokens`),
+             },
+         },
+         /**/
+         /**/
+         {
+             modelVariant: 'CHAT',
+             modelTitle: 'o3-deep-research',
+             modelName: 'o3-deep-research',
+             modelDescription: "Most powerful deep research model with 128K context window. Specialized for comprehensive research tasks, literature analysis, and complex information synthesis. Features advanced citation capabilities and enhanced factual accuracy for academic and professional research applications.",
+             pricing: {
+                 prompt: pricing(`$25.00 / 1M tokens`),
+                 output: pricing(`$100.00 / 1M tokens`),
+             },
+         },
+         /**/
+         /**/
+         {
+             modelVariant: 'CHAT',
+             modelTitle: 'o4-mini-deep-research',
+             modelName: 'o4-mini-deep-research',
+             modelDescription: "Faster, more affordable deep research model with 128K context window. Balances research capabilities with cost efficiency, offering good performance on literature review, fact-checking, and information synthesis tasks at a more accessible price point.",
+             pricing: {
+                 prompt: pricing(`$12.00 / 1M tokens`),
+                 output: pricing(`$48.00 / 1M tokens`),
+             },
+         },
+         /**/
          /*/
          {
              modelTitle: 'dall-e-3',
@@ -3229,7 +3437,6 @@ class AzureOpenAiExecutionTools {
      const modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
      const modelSettings = {
          maxTokens: modelRequirements.maxTokens,
-         // <- TODO: [🌾] Make some global max cap for maxTokens
          temperature: modelRequirements.temperature,
          user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString(),
          // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
@@ -3335,8 +3542,7 @@ class AzureOpenAiExecutionTools {
      try {
          const modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
          const modelSettings = {
-             maxTokens: modelRequirements.maxTokens || 2000,
-             // <- TODO: [🌾] Make some global max cap for maxTokens
+             maxTokens: modelRequirements.maxTokens,
              temperature: modelRequirements.temperature,
              user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString(),
              // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
@@ -3720,7 +3926,7 @@ function createExecutionToolsFromVercelProvider(options) {
  /**
   * List of available Deepseek models with descriptions
   *
-  * Note: Done at 2025-05-06
+  * Note: Synced with official API docs at 2025-08-20
   *
   * @see https://www.deepseek.com/models
   * @public exported from `@promptbook/deepseek`
@@ -3730,12 +3936,32 @@ const DEEPSEEK_MODELS = exportJson({
      value: [
          {
              modelVariant: 'CHAT',
-             modelTitle: 'Deepseek Chat Pro',
-             modelName: 'deepseek-chat-pro',
-             modelDescription: 'Latest flagship general-purpose model with 256K context window. Enhanced from base Chat model with 40% improvement on complex reasoning tasks and specialized domain knowledge. Features advanced prompt optimization and improved contextual memory. Ideal for enterprise applications requiring highest quality responses.',
+             modelTitle: 'DeepSeek V3',
+             modelName: 'deepseek-chat',
+             modelDescription: 'Latest flagship general-purpose model with 128K context window. Features exceptional reasoning capabilities, advanced code generation, and strong performance across diverse domains. Offers competitive performance with leading models while maintaining cost efficiency. Ideal for complex reasoning, coding, and knowledge-intensive tasks.',
              pricing: {
-                 prompt: pricing(`$1.20 / 1M tokens`),
-                 output: pricing(`$2.40 / 1M tokens`),
+                 prompt: pricing(`$0.14 / 1M tokens`),
+                 output: pricing(`$0.28 / 1M tokens`),
+             },
+         },
+         {
+             modelVariant: 'CHAT',
+             modelTitle: 'DeepSeek R1',
+             modelName: 'deepseek-reasoner',
+             modelDescription: 'Advanced reasoning model with 128K context window specializing in complex problem-solving and analytical thinking. Features explicit reasoning chains, enhanced mathematical capabilities, and superior performance on STEM tasks. Designed for applications requiring deep analytical reasoning and step-by-step problem solving.',
+             pricing: {
+                 prompt: pricing(`$0.55 / 1M tokens`),
+                 output: pricing(`$2.19 / 1M tokens`),
+             },
+         },
+         {
+             modelVariant: 'CHAT',
+             modelTitle: 'DeepSeek Coder V2',
+             modelName: 'deepseek-coder',
+             modelDescription: 'Specialized coding model with 128K context window optimized for software development tasks. Features exceptional code generation, debugging, and refactoring capabilities across 40+ programming languages. Particularly strong in understanding complex codebases and implementing solutions based on natural language specifications.',
+             pricing: {
+                 prompt: pricing(`$0.14 / 1M tokens`),
+                 output: pricing(`$0.28 / 1M tokens`),
              },
          },
          {
@@ -3969,7 +4195,7 @@ const _GoogleMetadataRegistration = $llmToolsMetadataRegister.register({
  /**
   * List of available Google models with descriptions
   *
-  * Note: Done at 2025-05-06
+  * Note: Synced with official API docs at 2025-08-20
   *
   * @see https://ai.google.dev/models/gemini
   * @public exported from `@promptbook/google`
@@ -3980,11 +4206,51 @@ const GOOGLE_MODELS = exportJson({
      {
          modelVariant: 'CHAT',
          modelTitle: 'Gemini 2.5 Pro',
-         modelName: 'gemini-2.5-pro-preview-03-25',
-         modelDescription: 'Latest advanced multimodal model with 1M token context window. Features exceptional reasoning across complex tasks, sophisticated function calling, and advanced image analysis (16MP resolution). Demonstrates superior performance in math, coding, and knowledge-intensive tasks with 30% improvement over Gemini 1.5 Pro. Ideal for enterprise applications requiring deep contextual understanding.',
+         modelName: 'gemini-2.5-pro',
+         modelDescription: 'State-of-the-art thinking model with 1M token context window capable of reasoning over complex problems in code, math, and STEM. Features enhanced thinking capabilities, advanced multimodal understanding, and superior performance on analytical tasks. Ideal for complex enterprise applications requiring maximum intelligence and reasoning.',
          pricing: {
-             prompt: pricing(`$8.00 / 1M tokens`),
-             output: pricing(`$24.00 / 1M tokens`),
+             prompt: pricing(`$7.00 / 1M tokens`),
+             output: pricing(`$21.00 / 1M tokens`),
+         },
+     },
+     {
+         modelVariant: 'CHAT',
+         modelTitle: 'Gemini 2.5 Flash',
+         modelName: 'gemini-2.5-flash',
+         modelDescription: 'Best model in terms of price-performance with 1M token context window offering well-rounded capabilities. Features adaptive thinking, cost efficiency, and enhanced reasoning for large-scale processing. Ideal for low-latency, high-volume tasks that require thinking and agentic use cases.',
+         pricing: {
+             prompt: pricing(`$0.35 / 1M tokens`),
+             output: pricing(`$1.05 / 1M tokens`),
+         },
+     },
+     {
+         modelVariant: 'CHAT',
+         modelTitle: 'Gemini 2.5 Flash Lite',
+         modelName: 'gemini-2.5-flash-lite',
+         modelDescription: 'Cost-efficient Gemini 2.5 Flash model optimized for high throughput with 1M token context window. Features thinking capabilities while maintaining the most cost-efficient pricing. Perfect for real-time, low-latency use cases requiring good quality at scale.',
+         pricing: {
+             prompt: pricing(`$0.20 / 1M tokens`),
+             output: pricing(`$0.60 / 1M tokens`),
+         },
+     },
+     {
+         modelVariant: 'CHAT',
+         modelTitle: 'Gemini 2.0 Flash',
+         modelName: 'gemini-2.0-flash',
+         modelDescription: 'Next-generation model with 1M token context window delivering improved capabilities, superior speed, and realtime streaming. Features enhanced function calling, code execution, and search capabilities. Ideal for applications requiring cutting-edge AI capabilities with fast response times.',
+         pricing: {
+             prompt: pricing(`$0.25 / 1M tokens`),
+             output: pricing(`$0.75 / 1M tokens`),
+         },
+     },
+     {
+         modelVariant: 'CHAT',
+         modelTitle: 'Gemini 2.0 Flash Lite',
+         modelName: 'gemini-2.0-flash-lite',
+         modelDescription: 'Cost-efficient Gemini 2.0 Flash model optimized for low latency with 1M token context window. Balances performance and cost with enhanced efficiency for high-volume applications. Perfect for applications requiring good quality responses at minimal cost.',
+         pricing: {
+             prompt: pricing(`$0.15 / 1M tokens`),
+             output: pricing(`$0.45 / 1M tokens`),
          },
      },
      {
@@ -4372,7 +4638,18 @@ class OpenAiCompatibleExecutionTools {
      const openAiOptions = { ...this.options };
      delete openAiOptions.isVerbose;
      delete openAiOptions.userId;
-     this.client = new OpenAI(openAiOptions);
+     // Enhanced configuration for better ECONNRESET handling
+     const enhancedOptions = {
+         ...openAiOptions,
+         timeout: API_REQUEST_TIMEOUT,
+         maxRetries: CONNECTION_RETRIES_LIMIT,
+         defaultHeaders: {
+             Connection: 'keep-alive',
+             'Keep-Alive': 'timeout=30, max=100',
+             ...openAiOptions.defaultHeaders,
+         },
+     };
+     this.client = new OpenAI(enhancedOptions);
  }
  return this.client;
  }
@@ -4425,7 +4702,6 @@ class OpenAiCompatibleExecutionTools {
      const modelSettings = {
          model: modelName,
          max_tokens: modelRequirements.maxTokens,
-         // <- TODO: [🌾] Make some global max cap for maxTokens
          temperature: modelRequirements.temperature,
          // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
          // <- Note: [🧆]
@@ -4461,7 +4737,7 @@ class OpenAiCompatibleExecutionTools {
          console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
      }
      const rawResponse = await this.limiter
-         .schedule(() => client.chat.completions.create(rawRequest))
+         .schedule(() => this.makeRequestWithRetry(() => client.chat.completions.create(rawRequest)))
          .catch((error) => {
              assertsError(error);
              if (this.options.isVerbose) {
@@ -4521,8 +4797,7 @@ class OpenAiCompatibleExecutionTools {
      const modelName = modelRequirements.modelName || this.getDefaultCompletionModel().modelName;
      const modelSettings = {
          model: modelName,
-         max_tokens: modelRequirements.maxTokens || 2000,
-         // <- TODO: [🌾] Make some global max cap for maxTokens
+         max_tokens: modelRequirements.maxTokens,
          temperature: modelRequirements.temperature,
          // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
          // <- Note: [🧆]
@@ -4538,7 +4813,7 @@ class OpenAiCompatibleExecutionTools {
          console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
      }
      const rawResponse = await this.limiter
-         .schedule(() => client.completions.create(rawRequest))
+         .schedule(() => this.makeRequestWithRetry(() => client.completions.create(rawRequest)))
          .catch((error) => {
              assertsError(error);
              if (this.options.isVerbose) {
@@ -4602,7 +4877,7 @@ class OpenAiCompatibleExecutionTools {
          console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
      }
      const rawResponse = await this.limiter
-         .schedule(() => client.embeddings.create(rawRequest))
+         .schedule(() => this.makeRequestWithRetry(() => client.embeddings.create(rawRequest)))
          .catch((error) => {
              assertsError(error);
              if (this.options.isVerbose) {
@@ -4660,6 +4935,76 @@ class OpenAiCompatibleExecutionTools {
          }
          return model;
      }
+     // <- Note: [🤖] getDefaultXxxModel
+     /**
+      * Makes a request with retry logic for network errors like ECONNRESET
+      */
+     async makeRequestWithRetry(requestFn) {
+         let lastError;
+         for (let attempt = 1; attempt <= CONNECTION_RETRIES_LIMIT; attempt++) {
+             try {
+                 return await requestFn();
+             }
+             catch (error) {
+                 assertsError(error);
+                 lastError = error;
+                 // Check if this is a retryable network error
+                 const isRetryableError = this.isRetryableNetworkError(error);
+                 if (!isRetryableError || attempt === CONNECTION_RETRIES_LIMIT) {
+                     if (this.options.isVerbose) {
+                         console.info(colors.bgRed('Final error after retries'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}:`, error);
+                     }
+                     throw error;
+                 }
+                 // Calculate exponential backoff delay
+                 const baseDelay = 1000; // 1 second
+                 const backoffDelay = baseDelay * Math.pow(2, attempt - 1);
+                 const jitterDelay = Math.random() * 500; // Add some randomness
+                 const totalDelay = backoffDelay + jitterDelay;
+                 if (this.options.isVerbose) {
+                     console.info(colors.bgYellow('Retrying request'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}, waiting ${Math.round(totalDelay)}ms:`, error.message);
+                 }
+                 // Wait before retrying
+                 await new Promise((resolve) => setTimeout(resolve, totalDelay));
+             }
+         }
+         throw lastError;
+     }
+     /**
+      * Determines if an error is retryable (network-related errors)
+      */
+     isRetryableNetworkError(error) {
+         const errorMessage = error.message.toLowerCase();
+         const errorCode = error.code;
+         // Network connection errors that should be retried
+         const retryableErrors = [
+             'econnreset',
+             'enotfound',
+             'econnrefused',
+             'etimedout',
+             'socket hang up',
+             'network error',
+             'fetch failed',
+             'connection reset',
+             'connection refused',
+             'timeout',
+         ];
+         // Check error message
+         if (retryableErrors.some((retryableError) => errorMessage.includes(retryableError))) {
+             return true;
+         }
+         // Check error code
+         if (errorCode && retryableErrors.includes(errorCode.toLowerCase())) {
+             return true;
+         }
+         // Check for specific HTTP status codes that are retryable
+         const errorWithStatus = error;
+         const httpStatus = errorWithStatus.status || errorWithStatus.statusCode;
+         if (httpStatus && [429, 500, 502, 503, 504].includes(httpStatus)) {
+             return true;
+         }
+         return false;
+     }
  }
  /**
   * TODO: [🛄] Some way how to re-wrap the errors from `OpenAiCompatibleExecutionTools`
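Reviewer note: the retry helper above uses exponential backoff with jitter: with a `baseDelay` of 1000 ms, waits grow 1 s, 2 s, 4 s, ... plus up to 500 ms of randomness, and only retryable network errors (or HTTP 429/500/502/503/504) are re-attempted. A standalone sketch of the delay schedule, assuming a retry limit of 3:

    // Sketch: the delays the loop above would wait before each re-attempt
    for (let attempt = 1; attempt <= 3; attempt++) {
        const backoffDelay = 1000 * Math.pow(2, attempt - 1); // 1000, 2000, 4000 ms
        const jitterDelay = Math.random() * 500; // de-synchronizes concurrent clients
        console.log(`attempt ${attempt}: wait ~${Math.round(backoffDelay + jitterDelay)} ms`);
    }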
@@ -4671,7 +5016,7 @@ class OpenAiCompatibleExecutionTools {
  /**
   * List of available models in Ollama library
   *
-  * Note: Done at 2025-05-19
+  * Note: Synced with official API docs at 2025-08-20
   *
   * @see https://ollama.com/library
   * @public exported from `@promptbook/ollama`
@@ -4679,6 +5024,24 @@ class OpenAiCompatibleExecutionTools {
  const OLLAMA_MODELS = exportJson({
      name: 'OLLAMA_MODELS',
      value: [
+         {
+             modelVariant: 'CHAT',
+             modelTitle: 'llama3.3',
+             modelName: 'llama3.3',
+             modelDescription: 'Meta Llama 3.3 (70B parameters) with 128K context window. Latest generation foundation model with significantly enhanced reasoning, instruction following, and multilingual capabilities. Features improved performance on complex tasks and better factual accuracy compared to Llama 3.1.',
+         },
+         {
+             modelVariant: 'CHAT',
+             modelTitle: 'llama3.2',
+             modelName: 'llama3.2',
+             modelDescription: 'Meta Llama 3.2 (1B-90B parameters) with 128K context window. Enhanced model with improved reasoning capabilities, better instruction following, and multimodal support in larger variants. Features significant performance improvements over Llama 3.1 across diverse tasks.',
+         },
+         {
+             modelVariant: 'CHAT',
+             modelTitle: 'llama3.1',
+             modelName: 'llama3.1',
+             modelDescription: 'Meta Llama 3.1 (8B-405B parameters) with 128K context window. Advanced foundation model with enhanced reasoning, improved multilingual capabilities, and better performance on complex tasks. Features significant improvements in code generation and mathematical reasoning.',
+         },
          {
              modelVariant: 'CHAT',
              modelTitle: 'llama3',
@@ -5149,6 +5512,9 @@ class OpenAiExecutionTools extends OpenAiCompatibleExecutionTools {
      get description() {
          return 'Use all models provided by OpenAI';
      }
+     get profile() {
+         return LLM_PROVIDER_PROFILES.OPENAI;
+     }
      /*
      Note: Commenting this out to avoid circular dependency
      /**
@@ -5173,7 +5539,7 @@ class OpenAiExecutionTools extends OpenAiCompatibleExecutionTools {
       * Default model for chat variant.
       */
      getDefaultChatModel() {
-         return this.getDefaultModel('gpt-4-turbo');
+         return this.getDefaultModel('gpt-5');
      }
      /**
       * Default model for completion variant.
@@ -5241,8 +5607,6 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
      const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
      const modelSettings = {
          model: modelName,
-         max_tokens: modelRequirements.maxTokens,
-         // <- TODO: [🌾] Make some global max cap for maxTokens
 
          temperature: modelRequirements.temperature,
 
@@ -6086,7 +6450,7 @@ function prettifyMarkdown(content) {
      try {
          return format(content, {
              parser: 'markdown',
-             plugins: [parserHtml],
+             plugins: [parserMarkdown, parserHtml],
              // TODO: DRY - make some import or auto-copy of .prettierrc
              endOfLine: 'lf',
              tabWidth: 4,
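Reviewer note: this pairs with the import change at the top of the bundle: `format` now comes from `prettier/standalone`, which does no parser resolution of its own, so every parser used must be supplied via `plugins` explicitly, hence `parserMarkdown` joining `parserHtml` here. A minimal standalone call for reference (a sketch, assuming Prettier 2.x where standalone `format` is synchronous):

    import { format } from 'prettier/standalone';
    import parserMarkdown from 'prettier/parser-markdown';

    // Without the plugin, standalone format cannot resolve the 'markdown' parser
    const pretty = format('#  Hello  *world*', { parser: 'markdown', plugins: [parserMarkdown] });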
@@ -6985,6 +7349,7 @@ function createTask(options) {
      const errors = [];
      const warnings = [];
      let currentValue = {};
+     let customTldr = null;
      const partialResultSubject = new Subject();
      // <- Note: Not using `BehaviorSubject` because on error we can't access the last value
      const finalResultPromise = /* not await */ taskProcessCallback((newOngoingResult) => {
@@ -6995,6 +7360,9 @@ function createTask(options) {
          Object.assign(currentValue, newOngoingResult);
          // <- TODO: assign deep
          partialResultSubject.next(newOngoingResult);
+     }, (tldrInfo) => {
+         customTldr = tldrInfo;
+         updatedAt = new Date();
      });
      finalResultPromise
          .catch((error) => {
@@ -7048,6 +7416,78 @@ function createTask(options) {
          return status;
          // <- Note: [1] --||--
      },
+     get tldr() {
+         var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k;
+         // Use custom tldr if available
+         if (customTldr) {
+             return customTldr;
+         }
+         // Fallback to default implementation
+         const cv = currentValue;
+         // If explicit percent is provided, use it
+         let percentRaw = (_f = (_d = (_b = (_a = cv === null || cv === void 0 ? void 0 : cv.tldr) === null || _a === void 0 ? void 0 : _a.percent) !== null && _b !== void 0 ? _b : (_c = cv === null || cv === void 0 ? void 0 : cv.usage) === null || _c === void 0 ? void 0 : _c.percent) !== null && _d !== void 0 ? _d : (_e = cv === null || cv === void 0 ? void 0 : cv.progress) === null || _e === void 0 ? void 0 : _e.percent) !== null && _f !== void 0 ? _f : cv === null || cv === void 0 ? void 0 : cv.percent;
+         // Simulate progress if not provided
+         if (typeof percentRaw !== 'number') {
+             // Simulate progress: evenly split across subtasks, based on elapsed time
+             const now = new Date();
+             const elapsedMs = now.getTime() - createdAt.getTime();
+             const totalMs = DEFAULT_TASK_SIMULATED_DURATION_MS;
+             // If subtasks are defined, split progress evenly
+             const subtaskCount = Array.isArray(cv === null || cv === void 0 ? void 0 : cv.subtasks) ? cv.subtasks.length : 1;
+             const completedSubtasks = Array.isArray(cv === null || cv === void 0 ? void 0 : cv.subtasks)
+                 ? cv.subtasks.filter((s) => s.done || s.completed).length
+                 : 0;
+             // Progress from completed subtasks
+             const subtaskProgress = subtaskCount > 0 ? completedSubtasks / subtaskCount : 0;
+             // Progress from elapsed time for current subtask
+             const timeProgress = Math.min(elapsedMs / totalMs, 1);
+             // Combine: completed subtasks + time progress for current subtask
+             percentRaw = Math.min(subtaskProgress + (1 / subtaskCount) * timeProgress, 1);
+             if (status === 'FINISHED')
+                 percentRaw = 1;
+             if (status === 'ERROR')
+                 percentRaw = 0;
+         }
+         // Clamp to [0,1]
+         let percent = Number(percentRaw) || 0;
+         if (percent < 0)
+             percent = 0;
+         if (percent > 1)
+             percent = 1;
+         // Build a short message: prefer explicit tldr.message, then common summary/message fields, then errors/warnings, then status
+         const messageFromResult = (_k = (_j = (_h = (_g = cv === null || cv === void 0 ? void 0 : cv.tldr) === null || _g === void 0 ? void 0 : _g.message) !== null && _h !== void 0 ? _h : cv === null || cv === void 0 ? void 0 : cv.message) !== null && _j !== void 0 ? _j : cv === null || cv === void 0 ? void 0 : cv.summary) !== null && _k !== void 0 ? _k : cv === null || cv === void 0 ? void 0 : cv.statusMessage;
+         let message = messageFromResult;
+         if (!message) {
+             // If subtasks, show current subtask
+             if (Array.isArray(cv === null || cv === void 0 ? void 0 : cv.subtasks) && cv.subtasks.length > 0) {
+                 const current = cv.subtasks.find((s) => !s.done && !s.completed);
+                 if (current && current.title) {
+                     message = `Working on ${current.title}`;
+                 }
+             }
+             if (!message) {
+                 if (errors.length) {
+                     message = errors[errors.length - 1].message || 'Error';
+                 }
+                 else if (warnings.length) {
+                     message = warnings[warnings.length - 1].message || 'Warning';
+                 }
+                 else if (status === 'FINISHED') {
+                     message = 'Finished';
+                 }
+                 else if (status === 'ERROR') {
+                     message = 'Error';
+                 }
+                 else {
+                     message = 'Running';
+                 }
+             }
+         }
+         return {
+             percent: percent,
+             message,
+         };
+     },
      get createdAt() {
          return createdAt;
          // <- Note: [1] --||--
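Reviewer note: when no explicit percent is reported, the getter above simulates progress from subtask completion plus elapsed time against the 5-minute `DEFAULT_TASK_SIMULATED_DURATION_MS`. A worked example of that fallback (illustrative numbers only):

    // 4 subtasks, 1 finished, 2.5 of the simulated 5 minutes elapsed
    const subtaskProgress = 1 / 4;                                          // 0.25
    const timeProgress = Math.min((2.5 * 60 * 1000) / (5 * 60 * 1000), 1);  // 0.5
    const percent = Math.min(subtaskProgress + (1 / 4) * timeProgress, 1);  // 0.25 + 0.125 = 0.375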
@@ -7253,7 +7693,7 @@ class MultipleLlmExecutionTools {
      }
      return spaceTrim((block) => `
          ${headLine}
-
+
          ${ /* <- Note: Indenting the description: */block(description)}
      `);
  })
@@ -7264,6 +7704,9 @@ class MultipleLlmExecutionTools {
              ${block(innerModelsTitlesAndDescriptions)}
          `);
      }
+     get profile() {
+         return LLM_PROVIDER_PROFILES.MULTIPLE;
+     }
      /**
       * Check the configuration of all execution tools
       */
@@ -7308,25 +7751,22 @@ class MultipleLlmExecutionTools {
      const errors = [];
      llm: for (const llmExecutionTools of this.llmExecutionTools) {
          try {
-             variant: switch (prompt.modelRequirements.modelVariant) {
+             switch (prompt.modelRequirements.modelVariant) {
                  case 'CHAT':
                      if (llmExecutionTools.callChatModel === undefined) {
                          continue llm;
                      }
                      return await llmExecutionTools.callChatModel(prompt);
-                     break variant;
                  case 'COMPLETION':
                      if (llmExecutionTools.callCompletionModel === undefined) {
                          continue llm;
                      }
                      return await llmExecutionTools.callCompletionModel(prompt);
-                     break variant;
                  case 'EMBEDDING':
                      if (llmExecutionTools.callEmbeddingModel === undefined) {
                          continue llm;
                      }
                      return await llmExecutionTools.callEmbeddingModel(prompt);
-                     break variant;
                  // <- case [🤖]:
                  default:
                      throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}"`);
@@ -7479,7 +7919,7 @@ async function preparePersona(personaDescription, tools, options) {
      const result = await preparePersonaExecutor({
          availableModels /* <- Note: Passing as JSON */,
          personaDescription,
-     }).asPromise();
+     }).asPromise({ isCrashedOnError: true });
      const { outputParameters } = result;
      const { modelsRequirements: modelsRequirementsJson } = outputParameters;
      let modelsRequirementsUnchecked = jsonParse(modelsRequirementsJson);
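Reviewer note: the same `asPromise({ isCrashedOnError: true })` switch recurs in `preparePipeline` and `MarkdownScraper` below. Judging by the name, the flag makes the returned promise reject on execution errors rather than resolve with a partial result, so call sites would guard it like this (a sketch only; the exact semantics are internal to Promptbook):

    try {
        const result = await preparePersonaExecutor({ availableModels, personaDescription })
            .asPromise({ isCrashedOnError: true });
        // ... use result.outputParameters
    } catch (error) {
        // with isCrashedOnError, failures surface here instead of in a partial result
    }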
@@ -8092,7 +8532,7 @@ async function preparePipeline(pipeline, tools, options) {
      });
      const result = await prepareTitleExecutor({
          book: sources.map(({ content }) => content).join('\n\n'),
-     }).asPromise();
+     }).asPromise({ isCrashedOnError: true });
      const { outputParameters } = result;
      const { title: titleRaw } = outputParameters;
      if (isVerbose) {
@@ -8920,7 +9360,7 @@ function validatePromptResult(options) {
   */
  async function executeAttempts(options) {
      const { jokerParameterNames, priority, maxAttempts, // <- Note: [💂]
-     preparedContent, parameters, task, preparedPipeline, tools, $executionReport, pipelineIdentification, maxExecutionAttempts, } = options;
+     preparedContent, parameters, task, preparedPipeline, tools, $executionReport, pipelineIdentification, maxExecutionAttempts, onProgress, } = options;
      const $ongoingTaskResult = {
          $result: null,
          $resultString: null,
@@ -9164,6 +9604,10 @@ async function executeAttempts(options) {
          result: $ongoingTaskResult.$resultString,
          error: error,
      });
+     // Report failed attempt
+     onProgress({
+         errors: [error],
+     });
  }
  finally {
      if (!isJokerAttempt &&
@@ -10037,15 +10481,74 @@ function createPipelineExecutor(options) {
          });
      });
  };
- const pipelineExecutor = (inputParameters) => createTask({
-     taskType: 'EXECUTION',
-     title: pipeline.title,
-     taskProcessCallback(updateOngoingResult) {
-         return pipelineExecutorWithCallback(inputParameters, async (newOngoingResult) => {
-             updateOngoingResult(newOngoingResult);
-         });
-     },
- });
+ const pipelineExecutor = (inputParameters) => {
+     const startTime = new Date().getTime();
+     return createTask({
+         taskType: 'EXECUTION',
+         title: pipeline.title,
+         taskProcessCallback(updateOngoingResult, updateTldr) {
+             return pipelineExecutorWithCallback(inputParameters, async (newOngoingResult) => {
+                 var _a, _b;
+                 updateOngoingResult(newOngoingResult);
+                 // Calculate and update tldr based on pipeline progress
+                 const cv = newOngoingResult;
+                 // Calculate progress based on parameters resolved vs total parameters
+                 const totalParameters = pipeline.parameters.filter(p => !p.isInput).length;
+                 let resolvedParameters = 0;
+                 let currentTaskTitle = '';
+                 // Get the resolved parameters from output parameters
+                 if (cv === null || cv === void 0 ? void 0 : cv.outputParameters) {
+                     // Count how many output parameters have non-empty values
+                     resolvedParameters = Object.values(cv.outputParameters).filter(value => value !== undefined && value !== null && String(value).trim() !== '').length;
+                 }
+                 // Try to determine current task from execution report
+                 if (((_a = cv === null || cv === void 0 ? void 0 : cv.executionReport) === null || _a === void 0 ? void 0 : _a.promptExecutions) && cv.executionReport.promptExecutions.length > 0) {
+                     const lastExecution = cv.executionReport.promptExecutions[cv.executionReport.promptExecutions.length - 1];
+                     if ((_b = lastExecution === null || lastExecution === void 0 ? void 0 : lastExecution.prompt) === null || _b === void 0 ? void 0 : _b.title) {
+                         currentTaskTitle = lastExecution.prompt.title;
+                     }
+                 }
+                 // Calculate base progress percentage
+                 let percent = totalParameters > 0 ? resolvedParameters / totalParameters : 0;
+                 // Add time-based progress for current task if we haven't completed all parameters
+                 if (resolvedParameters < totalParameters) {
+                     const elapsedMs = new Date().getTime() - startTime;
+                     const estimatedTotalMs = totalParameters * 30 * 1000; // Estimate 30 seconds per parameter
+                     const timeProgress = Math.min(elapsedMs / estimatedTotalMs, 0.9); // Cap at 90% for time-based progress
+                     // If we have time progress but no parameter progress, show time progress
+                     if (percent === 0 && timeProgress > 0) {
+                         percent = Math.min(timeProgress, 0.1); // Show some progress but not more than 10%
+                     }
+                     else if (percent < 1) {
+                         // Add partial progress for current task
+                         const taskProgress = totalParameters > 0 ? (1 / totalParameters) * 0.5 : 0; // 50% of task progress
+                         percent = Math.min(percent + taskProgress, 0.95); // Cap at 95% until fully complete
+                     }
+                 }
+                 // Clamp to [0,1]
+                 percent = Math.min(Math.max(percent, 0), 1);
+                 // Generate message
+                 let message = '';
+                 if (currentTaskTitle) {
+                     message = `Executing: ${currentTaskTitle}`;
+                 }
+                 else if (resolvedParameters === 0) {
+                     message = 'Starting pipeline execution';
+                 }
+                 else if (resolvedParameters < totalParameters) {
+                     message = `Processing pipeline (${resolvedParameters}/${totalParameters} parameters resolved)`;
+                 }
+                 else {
+                     message = 'Completing pipeline execution';
+                 }
+                 updateTldr({
+                     percent: percent,
+                     message,
+                 });
+             });
+         },
+     });
+ };
  // <- TODO: Make types such as there is no need to do `as` for `createTask`
  return pipelineExecutor;
  }
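
Note on this hunk: the new tldr reporting blends two signals: the fraction of non-input parameters already resolved, and a time-based estimate (hard-coded at 30 seconds per parameter) that is capped so the bar never looks finished prematurely. Re-deriving one snapshot with the code's own constants, using hypothetical values:

    // Worked example of the heuristic (4 non-input parameters, 1 resolved):
    const totalParameters = 4;
    const resolvedParameters = 1;
    let percent = resolvedParameters / totalParameters; // 0.25
    if (resolvedParameters < totalParameters && percent < 1) {
        const taskProgress = (1 / totalParameters) * 0.5; // half of one task's share = 0.125
        percent = Math.min(percent + taskProgress, 0.95); // 0.375, under the 0.95 cap
    }
    console.info(percent); // 0.375

When no parameters have resolved yet, the elapsed-time signal is used instead, capped at 10%. The 30-second-per-parameter figure is a guess baked into the code, not a measurement, so the percentage is a rough progress hint rather than an ETA.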
@@ -10130,7 +10633,9 @@ class MarkdownScraper {
          },
      });
      const knowledgeContent = await source.asText();
-     const result = await prepareKnowledgeFromMarkdownExecutor({ knowledgeContent }).asPromise();
+     const result = await prepareKnowledgeFromMarkdownExecutor({ knowledgeContent }).asPromise({
+         isCrashedOnError: true,
+     });
      const { outputParameters } = result;
      const { knowledgePieces: knowledgePiecesRaw } = outputParameters;
      const knowledgeTextPieces = (knowledgePiecesRaw || '').split('\n---\n');
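
Note on this hunk: the `knowledgePieces` output parameter arrives as a single string with pieces separated by a `---` line, which the scraper splits apart. A tiny illustration of that contract (the sample content is made up):

    // One string in, array of pieces out; '\n---\n' is the separator.
    const knowledgePiecesRaw = 'First piece of knowledge.\n---\nSecond piece.';
    const knowledgeTextPieces = (knowledgePiecesRaw || '').split('\n---\n');
    console.info(knowledgeTextPieces); // ['First piece of knowledge.', 'Second piece.']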
@@ -10154,12 +10659,16 @@ class MarkdownScraper {
          ];
          */
      try {
-         const titleResult = await prepareTitleExecutor({ knowledgePieceContent }).asPromise();
+         const titleResult = await prepareTitleExecutor({ knowledgePieceContent }).asPromise({
+             isCrashedOnError: true,
+         });
          const { title: titleRaw = 'Untitled' } = titleResult.outputParameters;
          title = spaceTrim(titleRaw) /* <- TODO: Maybe do in pipeline */;
          name = titleToName(title);
          // --- Keywords
-         const keywordsResult = await prepareKeywordsExecutor({ knowledgePieceContent }).asPromise();
+         const keywordsResult = await prepareKeywordsExecutor({ knowledgePieceContent }).asPromise({
+             isCrashedOnError: true,
+         });
          const { keywords: keywordsRaw = '' } = keywordsResult.outputParameters;
          keywords = (keywordsRaw || '')
              .split(',')
@@ -12826,31 +13335,23 @@ function extractBlock(markdown) {
      return content;
  }

+ /**
+  * @private internal for `preserve`
+  */
+ const _preserved = [];
  /**
   * Does nothing, but preserves the function in the bundle
   * Compiler is tricked into thinking the function is used
   *
   * @param value any function to preserve
   * @returns nothing
-  * @private internal function of `JavascriptExecutionTools` and `JavascriptEvalExecutionTools`
-  */
- function preserve(func) {
-     // Note: NOT calling the function
-     (async () => {
-         // TODO: [💩] Change to `await forEver` or `forTime(Infinity)`
-         await forTime(100000000);
-         // [1]
-         try {
-             await func();
-         }
-         finally {
-             // do nothing
-         }
-     })();
+  * @private within the repository
+  */
+ function $preserve(...value) {
+     _preserved.push(...value);
  }
  /**
-  * TODO: Probably remove in favour of `keepImported`
-  * TODO: [1] This maybe does memory leak
+  * Note: [💞] Ignore a discrepancy between file name and entity name
   */

  // Note: [💎]
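
Note on this hunk: `$preserve` replaces the old timer-based `preserve`, which kept functions alive by scheduling a call roughly 28 hours out (100000000 ms via `forTime`) and, per its own TODOs, risked leaking memory. The new version simply pushes references into a module-level array: the bundler sees the identifiers used and therefore cannot tree-shake functions that are only referenced from inside `eval`-ed scripts. A standalone sketch of the trick:

    // The bundler cannot prove `helper` unused: it is pushed into _preserved.
    const _preserved = [];
    function $preserve(...value) {
        _preserved.push(...value);
    }

    function helper(str) {
        return str.toUpperCase();
    }
    $preserve(helper);

    // Only the eval'd script references `helper`, invisibly to static analysis:
    console.info(eval('helper("works")')); // 'WORKS'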
@@ -12878,25 +13379,25 @@ class JavascriptEvalExecutionTools {
      // Note: [💎]
      // Note: Using direct eval, following variables are in same scope as eval call so they are accessible from inside the evaluated script:
      const spaceTrim$1 = (_) => spaceTrim(_);
-     preserve(spaceTrim$1);
+     $preserve(spaceTrim$1);
      const removeQuotes$1 = removeQuotes;
-     preserve(removeQuotes$1);
+     $preserve(removeQuotes$1);
      const unwrapResult$1 = unwrapResult;
-     preserve(unwrapResult$1);
+     $preserve(unwrapResult$1);
      const trimEndOfCodeBlock$1 = trimEndOfCodeBlock;
-     preserve(trimEndOfCodeBlock$1);
+     $preserve(trimEndOfCodeBlock$1);
      const trimCodeBlock$1 = trimCodeBlock;
-     preserve(trimCodeBlock$1);
+     $preserve(trimCodeBlock$1);
      // TODO: DRY [🍯]
      const trim = (str) => str.trim();
-     preserve(trim);
+     $preserve(trim);
      // TODO: DRY [🍯]
      const reverse = (str) => str.split('').reverse().join('');
-     preserve(reverse);
+     $preserve(reverse);
      const removeEmojis$1 = removeEmojis;
-     preserve(removeEmojis$1);
+     $preserve(removeEmojis$1);
      const prettifyMarkdown$1 = prettifyMarkdown;
-     preserve(prettifyMarkdown$1);
+     $preserve(prettifyMarkdown$1);
      //-------[n12:]---
      const capitalize$1 = capitalize;
      const decapitalize$1 = decapitalize;
@@ -12912,18 +13413,18 @@ class JavascriptEvalExecutionTools {
      // TODO: DRY [🍯]
      Array.from(parseKeywordsFromString(input)).join(', '); /* <- TODO: [🧠] What is the best format comma list, bullet list,...? */
      const normalizeTo_SCREAMING_CASE$1 = normalizeTo_SCREAMING_CASE;
-     preserve(capitalize$1);
-     preserve(decapitalize$1);
-     preserve(nameToUriPart$1);
-     preserve(nameToUriParts$1);
-     preserve(removeDiacritics$1);
-     preserve(normalizeWhitespaces$1);
-     preserve(normalizeToKebabCase$1);
-     preserve(normalizeTo_camelCase$1);
-     preserve(normalizeTo_snake_case$1);
-     preserve(normalizeTo_PascalCase$1);
-     preserve(parseKeywords);
-     preserve(normalizeTo_SCREAMING_CASE$1);
+     $preserve(capitalize$1);
+     $preserve(decapitalize$1);
+     $preserve(nameToUriPart$1);
+     $preserve(nameToUriParts$1);
+     $preserve(removeDiacritics$1);
+     $preserve(normalizeWhitespaces$1);
+     $preserve(normalizeToKebabCase$1);
+     $preserve(normalizeTo_camelCase$1);
+     $preserve(normalizeTo_snake_case$1);
+     $preserve(normalizeTo_PascalCase$1);
+     $preserve(parseKeywords);
+     $preserve(normalizeTo_SCREAMING_CASE$1);
      //-------[/n12]---
      if (!script.includes('return')) {
          script = `return ${script}`;
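
Note on this hunk: right after the `$preserve` calls, the tools prepend `return` to any script that lacks one, so a bare expression like `removeEmojis(input)` becomes a valid function body before evaluation. Note that `script.includes('return')` is a naive substring check: a script containing the word "return" anywhere, even inside a string literal, skips the injection. A minimal standalone illustration (using `new Function` for brevity; the class itself uses direct `eval` so the preserved helpers stay in scope):

    // Turn a bare expression into a returnable body, then run it.
    let script = `input.trim().toUpperCase()`;
    if (!script.includes('return')) {
        script = `return ${script}`;
    }
    const run = new Function('input', script);
    console.info(run('  hello ')); // 'HELLO'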
@@ -16972,7 +17473,7 @@ class Wizard {
      // ▶ Create executor - the function that will execute the Pipeline
      const pipelineExecutor = createPipelineExecutor({ pipeline, tools });
      // 🚀▶ Execute the Pipeline
-     const result = await pipelineExecutor(inputParameters).asPromise();
+     const result = await pipelineExecutor(inputParameters).asPromise({ isCrashedOnError: true });
      const { outputParameters } = result;
      const outputParametersLength = Object.keys(outputParameters).length;
      let resultString;