@promptbook/wizard 0.100.0-8 → 0.100.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (129)
  1. package/README.md +7 -14
  2. package/esm/index.es.js +651 -150
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/color.index.d.ts +50 -0
  5. package/esm/typings/src/_packages/components.index.d.ts +36 -0
  6. package/esm/typings/src/_packages/core.index.d.ts +30 -0
  7. package/esm/typings/src/_packages/types.index.d.ts +38 -0
  8. package/esm/typings/src/book-2.0/agent-source/parseAgentSource.d.ts +30 -0
  9. package/esm/typings/src/book-2.0/agent-source/parseAgentSource.test.d.ts +1 -0
  10. package/esm/typings/src/book-2.0/agent-source/string_book.d.ts +26 -0
  11. package/esm/typings/src/book-2.0/commitments/ACTION/ACTION.d.ts +38 -0
  12. package/esm/typings/src/book-2.0/commitments/FORMAT/FORMAT.d.ts +39 -0
  13. package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/KNOWLEDGE.d.ts +45 -0
  14. package/esm/typings/src/book-2.0/commitments/META_IMAGE/META_IMAGE.d.ts +44 -0
  15. package/esm/typings/src/book-2.0/commitments/META_LINK/META_LINK.d.ts +56 -0
  16. package/esm/typings/src/book-2.0/commitments/MODEL/MODEL.d.ts +39 -0
  17. package/esm/typings/src/book-2.0/commitments/NOTE/NOTE.d.ts +49 -0
  18. package/esm/typings/src/book-2.0/commitments/PERSONA/PERSONA.d.ts +46 -0
  19. package/esm/typings/src/book-2.0/commitments/RULE/RULE.d.ts +44 -0
  20. package/esm/typings/src/book-2.0/commitments/SAMPLE/SAMPLE.d.ts +44 -0
  21. package/esm/typings/src/book-2.0/commitments/STYLE/STYLE.d.ts +38 -0
  22. package/esm/typings/src/book-2.0/commitments/_base/BaseCommitmentDefinition.d.ts +52 -0
  23. package/esm/typings/src/book-2.0/commitments/_base/BookCommitment.d.ts +5 -0
  24. package/esm/typings/src/book-2.0/commitments/_base/CommitmentDefinition.d.ts +48 -0
  25. package/esm/typings/src/book-2.0/commitments/_base/NotYetImplementedCommitmentDefinition.d.ts +22 -0
  26. package/esm/typings/src/book-2.0/commitments/_base/createEmptyAgentModelRequirements.d.ts +19 -0
  27. package/esm/typings/src/book-2.0/commitments/_misc/AgentModelRequirements.d.ts +37 -0
  28. package/esm/typings/src/book-2.0/commitments/_misc/AgentSourceParseResult.d.ts +18 -0
  29. package/esm/typings/src/book-2.0/commitments/_misc/ParsedCommitment.d.ts +22 -0
  30. package/esm/typings/src/book-2.0/commitments/_misc/createAgentModelRequirements.d.ts +62 -0
  31. package/esm/typings/src/book-2.0/commitments/_misc/createAgentModelRequirementsWithCommitments.d.ts +36 -0
  32. package/esm/typings/src/book-2.0/commitments/_misc/createCommitmentRegex.d.ts +20 -0
  33. package/esm/typings/src/book-2.0/commitments/_misc/parseAgentSourceWithCommitments.d.ts +24 -0
  34. package/esm/typings/src/book-2.0/commitments/_misc/removeCommentsFromSystemMessage.d.ts +11 -0
  35. package/esm/typings/src/book-2.0/commitments/index.d.ts +56 -0
  36. package/esm/typings/src/book-2.0/utils/profileImageUtils.d.ts +39 -0
  37. package/esm/typings/src/book-components/AvatarProfile/AvatarChip/AvatarChip.d.ts +35 -0
  38. package/esm/typings/src/book-components/AvatarProfile/AvatarChip/AvatarChipFromSource.d.ts +21 -0
  39. package/esm/typings/src/book-components/AvatarProfile/AvatarChip/index.d.ts +2 -0
  40. package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/AvatarProfile.d.ts +26 -0
  41. package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/AvatarProfileFromSource.d.ts +19 -0
  42. package/esm/typings/src/book-components/BookEditor/BookEditor.d.ts +35 -0
  43. package/esm/typings/src/book-components/BookEditor/BookEditorInner.d.ts +15 -0
  44. package/esm/typings/src/book-components/BookEditor/config.d.ts +10 -0
  45. package/esm/typings/src/book-components/BookEditor/injectCssModuleIntoShadowRoot.d.ts +11 -0
  46. package/esm/typings/src/book-components/Chat/Chat/Chat.d.ts +20 -0
  47. package/esm/typings/src/book-components/Chat/Chat/ChatProps.d.ts +110 -0
  48. package/esm/typings/src/book-components/Chat/LlmChat/LlmChat.d.ts +14 -0
  49. package/esm/typings/src/book-components/Chat/LlmChat/LlmChat.test.d.ts +1 -0
  50. package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts +24 -0
  51. package/esm/typings/src/book-components/Chat/types/ChatMessage.d.ts +16 -0
  52. package/esm/typings/src/book-components/Chat/types/ChatParticipant.d.ts +32 -0
  53. package/esm/typings/src/book-components/Chat/utils/ChatPersistence.d.ts +25 -0
  54. package/esm/typings/src/book-components/Chat/utils/ExportFormat.d.ts +4 -0
  55. package/esm/typings/src/book-components/Chat/utils/addUtmParamsToUrl.d.ts +7 -0
  56. package/esm/typings/src/book-components/Chat/utils/createShortLinkForChat.d.ts +7 -0
  57. package/esm/typings/src/book-components/Chat/utils/downloadFile.d.ts +6 -0
  58. package/esm/typings/src/book-components/Chat/utils/exportChatHistory.d.ts +9 -0
  59. package/esm/typings/src/book-components/Chat/utils/generatePdfContent.d.ts +8 -0
  60. package/esm/typings/src/book-components/Chat/utils/generateQrDataUrl.d.ts +7 -0
  61. package/esm/typings/src/book-components/Chat/utils/getPromptbookBranding.d.ts +6 -0
  62. package/esm/typings/src/book-components/Chat/utils/messagesToHtml.d.ts +8 -0
  63. package/esm/typings/src/book-components/Chat/utils/messagesToJson.d.ts +7 -0
  64. package/esm/typings/src/book-components/Chat/utils/messagesToMarkdown.d.ts +8 -0
  65. package/esm/typings/src/book-components/Chat/utils/messagesToText.d.ts +8 -0
  66. package/esm/typings/src/book-components/_common/react-utils/classNames.d.ts +7 -0
  67. package/esm/typings/src/book-components/_common/react-utils/collectCssTextsForClass.d.ts +7 -0
  68. package/esm/typings/src/book-components/_common/react-utils/escapeHtml.d.ts +6 -0
  69. package/esm/typings/src/book-components/_common/react-utils/escapeRegex.d.ts +6 -0
  70. package/esm/typings/src/config.d.ts +19 -0
  71. package/esm/typings/src/execution/AvailableModel.d.ts +4 -0
  72. package/esm/typings/src/execution/ExecutionTask.d.ts +27 -1
  73. package/esm/typings/src/execution/LlmExecutionTools.d.ts +8 -0
  74. package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +6 -1
  75. package/esm/typings/src/llm-providers/_common/filterModels.d.ts +0 -3
  76. package/esm/typings/src/llm-providers/_common/profiles/llmProviderProfiles.d.ts +81 -0
  77. package/esm/typings/src/llm-providers/_common/profiles/test/llmProviderProfiles.test.d.ts +1 -0
  78. package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts +5 -0
  79. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +5 -5
  80. package/esm/typings/src/llm-providers/anthropic-claude/anthropic-claude-models.d.ts +1 -1
  81. package/esm/typings/src/llm-providers/deepseek/deepseek-models.d.ts +1 -1
  82. package/esm/typings/src/llm-providers/google/google-models.d.ts +1 -1
  83. package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +5 -0
  84. package/esm/typings/src/llm-providers/ollama/ollama-models.d.ts +1 -1
  85. package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts +8 -0
  86. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +5 -0
  87. package/esm/typings/src/llm-providers/openai/openai-models.d.ts +1 -1
  88. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +5 -0
  89. package/esm/typings/src/pipeline/book-notation.d.ts +2 -1
  90. package/esm/typings/src/playground/permanent/error-handling-playground.d.ts +5 -0
  91. package/esm/typings/src/types/ModelRequirements.d.ts +0 -2
  92. package/esm/typings/src/types/typeAliases.d.ts +6 -0
  93. package/esm/typings/src/utils/color/$randomColor.d.ts +11 -0
  94. package/esm/typings/src/utils/color/Color.d.ts +180 -0
  95. package/esm/typings/src/utils/color/css-colors.d.ts +159 -0
  96. package/esm/typings/src/utils/color/internal-utils/checkChannelValue.d.ts +14 -0
  97. package/esm/typings/src/utils/color/internal-utils/hslToRgb.d.ts +17 -0
  98. package/esm/typings/src/utils/color/internal-utils/rgbToHsl.d.ts +17 -0
  99. package/esm/typings/src/utils/color/operators/ColorTransformer.d.ts +5 -0
  100. package/esm/typings/src/utils/color/operators/darken.d.ts +9 -0
  101. package/esm/typings/src/utils/color/operators/furthest.d.ts +16 -0
  102. package/esm/typings/src/utils/color/operators/grayscale.d.ts +9 -0
  103. package/esm/typings/src/utils/color/operators/lighten.d.ts +12 -0
  104. package/esm/typings/src/utils/color/operators/mixWithColor.d.ts +11 -0
  105. package/esm/typings/src/utils/color/operators/nearest.d.ts +10 -0
  106. package/esm/typings/src/utils/color/operators/negative.d.ts +7 -0
  107. package/esm/typings/src/utils/color/operators/negativeLightness.d.ts +7 -0
  108. package/esm/typings/src/utils/color/operators/withAlpha.d.ts +9 -0
  109. package/esm/typings/src/utils/color/utils/areColorsEqual.d.ts +14 -0
  110. package/esm/typings/src/utils/color/utils/colorDistance.d.ts +21 -0
  111. package/esm/typings/src/utils/color/utils/colorHue.d.ts +11 -0
  112. package/esm/typings/src/utils/color/utils/colorHueDistance.d.ts +11 -0
  113. package/esm/typings/src/utils/color/utils/colorHueDistance.test.d.ts +1 -0
  114. package/esm/typings/src/utils/color/utils/colorLuminance.d.ts +9 -0
  115. package/esm/typings/src/utils/color/utils/colorSatulightion.d.ts +7 -0
  116. package/esm/typings/src/utils/color/utils/colorSaturation.d.ts +9 -0
  117. package/esm/typings/src/utils/color/utils/colorToDataUrl.d.ts +10 -0
  118. package/esm/typings/src/utils/color/utils/mixColors.d.ts +11 -0
  119. package/esm/typings/src/utils/organization/preserve.d.ts +21 -0
  120. package/esm/typings/src/utils/take/classes/TakeChain.d.ts +11 -0
  121. package/esm/typings/src/utils/take/interfaces/ITakeChain.d.ts +12 -0
  122. package/esm/typings/src/utils/take/interfaces/Takeable.d.ts +7 -0
  123. package/esm/typings/src/utils/take/take.d.ts +12 -0
  124. package/esm/typings/src/utils/take/take.test.d.ts +1 -0
  125. package/esm/typings/src/version.d.ts +1 -1
  126. package/package.json +2 -3
  127. package/umd/index.umd.js +655 -154
  128. package/umd/index.umd.js.map +1 -1
  129. package/esm/typings/src/scripting/javascript/utils/preserve.d.ts +0 -14
package/umd/index.umd.js CHANGED
@@ -1,8 +1,8 @@
  (function (global, factory) {
- typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('spacetrim'), require('crypto'), require('socket.io-client'), require('@anthropic-ai/sdk'), require('bottleneck'), require('colors'), require('@azure/openai'), require('openai'), require('fs/promises'), require('child_process'), require('waitasecond'), require('crypto-js'), require('crypto-js/enc-hex'), require('path'), require('prettier'), require('prettier/parser-html'), require('rxjs'), require('crypto-js/sha256'), require('mime-types'), require('papaparse'), require('@mozilla/readability'), require('jsdom'), require('showdown'), require('dotenv'), require('jszip')) :
- typeof define === 'function' && define.amd ? define(['exports', 'spacetrim', 'crypto', 'socket.io-client', '@anthropic-ai/sdk', 'bottleneck', 'colors', '@azure/openai', 'openai', 'fs/promises', 'child_process', 'waitasecond', 'crypto-js', 'crypto-js/enc-hex', 'path', 'prettier', 'prettier/parser-html', 'rxjs', 'crypto-js/sha256', 'mime-types', 'papaparse', '@mozilla/readability', 'jsdom', 'showdown', 'dotenv', 'jszip'], factory) :
- (global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global["promptbook-wizard"] = {}, global.spaceTrim, global.crypto, global.socket_ioClient, global.Anthropic, global.Bottleneck, global.colors, global.openai, global.OpenAI, global.promises, global.child_process, global.waitasecond, global.cryptoJs, global.hexEncoder, global.path, global.prettier, global.parserHtml, global.rxjs, global.sha256, global.mimeTypes, global.papaparse, global.readability, global.jsdom, global.showdown, global.dotenv, global.JSZip));
- })(this, (function (exports, spaceTrim, crypto, socket_ioClient, Anthropic, Bottleneck, colors, openai, OpenAI, promises, child_process, waitasecond, cryptoJs, hexEncoder, path, prettier, parserHtml, rxjs, sha256, mimeTypes, papaparse, readability, jsdom, showdown, dotenv, JSZip) { 'use strict';
+ typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('spacetrim'), require('crypto'), require('socket.io-client'), require('@anthropic-ai/sdk'), require('bottleneck'), require('colors'), require('@azure/openai'), require('openai'), require('fs/promises'), require('child_process'), require('waitasecond'), require('crypto-js'), require('crypto-js/enc-hex'), require('path'), require('prettier/parser-html'), require('prettier/parser-markdown'), require('prettier/standalone'), require('rxjs'), require('crypto-js/sha256'), require('mime-types'), require('papaparse'), require('@mozilla/readability'), require('jsdom'), require('showdown'), require('dotenv'), require('jszip')) :
+ typeof define === 'function' && define.amd ? define(['exports', 'spacetrim', 'crypto', 'socket.io-client', '@anthropic-ai/sdk', 'bottleneck', 'colors', '@azure/openai', 'openai', 'fs/promises', 'child_process', 'waitasecond', 'crypto-js', 'crypto-js/enc-hex', 'path', 'prettier/parser-html', 'prettier/parser-markdown', 'prettier/standalone', 'rxjs', 'crypto-js/sha256', 'mime-types', 'papaparse', '@mozilla/readability', 'jsdom', 'showdown', 'dotenv', 'jszip'], factory) :
+ (global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global["promptbook-wizard"] = {}, global.spaceTrim, global.crypto, global.socket_ioClient, global.Anthropic, global.Bottleneck, global.colors, global.openai, global.OpenAI, global.promises, global.child_process, global.waitasecond, global.cryptoJs, global.hexEncoder, global.path, global.parserHtml, global.parserMarkdown, global.standalone, global.rxjs, global.sha256, global.mimeTypes, global.papaparse, global.readability, global.jsdom, global.showdown, global.dotenv, global.JSZip));
+ })(this, (function (exports, spaceTrim, crypto, socket_ioClient, Anthropic, Bottleneck, colors, openai, OpenAI, promises, child_process, waitasecond, cryptoJs, hexEncoder, path, parserHtml, parserMarkdown, standalone, rxjs, sha256, mimeTypes, papaparse, readability, jsdom, showdown, dotenv, JSZip) { 'use strict';

  function _interopDefaultLegacy (e) { return e && typeof e === 'object' && 'default' in e ? e : { 'default': e }; }

@@ -31,6 +31,7 @@
  var OpenAI__default = /*#__PURE__*/_interopDefaultLegacy(OpenAI);
  var hexEncoder__default = /*#__PURE__*/_interopDefaultLegacy(hexEncoder);
  var parserHtml__default = /*#__PURE__*/_interopDefaultLegacy(parserHtml);
+ var parserMarkdown__default = /*#__PURE__*/_interopDefaultLegacy(parserMarkdown);
  var sha256__default = /*#__PURE__*/_interopDefaultLegacy(sha256);
  var dotenv__namespace = /*#__PURE__*/_interopNamespace(dotenv);
  var JSZip__default = /*#__PURE__*/_interopDefaultLegacy(JSZip);
@@ -49,7 +50,7 @@
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
- const PROMPTBOOK_ENGINE_VERSION = '0.100.0-8';
+ const PROMPTBOOK_ENGINE_VERSION = '0.100.0';
  /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -323,6 +324,12 @@
  * @public exported from `@promptbook/core`
  */
  const DEFAULT_IS_AUTO_INSTALLED = false;
+ /**
+ * Default simulated duration for a task in milliseconds (used for progress reporting)
+ *
+ * @public exported from `@promptbook/core`
+ */
+ const DEFAULT_TASK_SIMULATED_DURATION_MS = 5 * 60 * 1000; // 5 minutes
  /**
  * Default rate limits (requests per minute)
  *
@@ -331,6 +338,13 @@
  * @public exported from `@promptbook/core`
  */
  const DEFAULT_MAX_REQUESTS_PER_MINUTE = 60;
+ /**
+ * API request timeout in milliseconds
+ * Can be overridden via API_REQUEST_TIMEOUT environment variable
+ *
+ * @public exported from `@promptbook/core`
+ */
+ const API_REQUEST_TIMEOUT = parseInt(process.env.API_REQUEST_TIMEOUT || '90000');
  /**
  * Indicates whether pipeline logic validation is enabled. When true, the pipeline logic is checked for consistency.
  *
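The new `API_REQUEST_TIMEOUT` constant is read once at module load from `process.env`. A minimal standalone sketch of the same pattern, with an explicit radix and a fallback for non-numeric values (the helper name and the extra guard are illustrative additions, not part of the published bundle):

    // Sketch: environment-overridable timeout, assuming a Node.js runtime.
    const DEFAULT_API_REQUEST_TIMEOUT_MS = 90_000; // matches the published '90000' default

    function readApiRequestTimeout(): number {
        const raw = process.env.API_REQUEST_TIMEOUT;
        const parsed = raw === undefined ? Number.NaN : Number.parseInt(raw, 10);
        // Fall back to the default when the variable is unset or not a number
        return Number.isFinite(parsed) ? parsed : DEFAULT_API_REQUEST_TIMEOUT_MS;
    }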
@@ -1332,6 +1346,76 @@
  return deserializedError;
  }

+ /**
+ * Predefined profiles for LLM providers to maintain consistency across the application
+ * These profiles represent each provider as a virtual persona in chat interfaces
+ *
+ * @private !!!!
+ */
+ const LLM_PROVIDER_PROFILES = {
+ OPENAI: {
+ name: 'OPENAI',
+ fullname: 'OpenAI GPT',
+ color: '#10a37f', // OpenAI's signature green
+ // Note: avatarSrc could be added when we have provider logos available
+ },
+ ANTHROPIC: {
+ name: 'ANTHROPIC',
+ fullname: 'Anthropic Claude',
+ color: '#d97706', // Anthropic's orange/amber color
+ },
+ AZURE_OPENAI: {
+ name: 'AZURE_OPENAI',
+ fullname: 'Azure OpenAI',
+ color: '#0078d4', // Microsoft Azure blue
+ },
+ GOOGLE: {
+ name: 'GOOGLE',
+ fullname: 'Google Gemini',
+ color: '#4285f4', // Google blue
+ },
+ DEEPSEEK: {
+ name: 'DEEPSEEK',
+ fullname: 'DeepSeek',
+ color: '#7c3aed', // Purple color for DeepSeek
+ },
+ OLLAMA: {
+ name: 'OLLAMA',
+ fullname: 'Ollama',
+ color: '#059669', // Emerald green for local models
+ },
+ REMOTE: {
+ name: 'REMOTE',
+ fullname: 'Remote Server',
+ color: '#6b7280', // Gray for remote/proxy connections
+ },
+ MOCKED_ECHO: {
+ name: 'MOCKED_ECHO',
+ fullname: 'Echo (Test)',
+ color: '#8b5cf6', // Purple for test/mock tools
+ },
+ MOCKED_FAKE: {
+ name: 'MOCKED_FAKE',
+ fullname: 'Fake LLM (Test)',
+ color: '#ec4899', // Pink for fake/test tools
+ },
+ VERCEL: {
+ name: 'VERCEL',
+ fullname: 'Vercel AI',
+ color: '#000000', // Vercel's black
+ },
+ MULTIPLE: {
+ name: 'MULTIPLE',
+ fullname: 'Multiple Providers',
+ color: '#6366f1', // Indigo for combined/multiple providers
+ },
+ };
+ /**
+ * TODO: Refactor this - each profile must be alongside the provider definition
+ * TODO: Unite `AvatarProfileProps` and `ChatParticipant`
+ * Note: [💞] Ignore a discrepancy between file name and entity name
+ */
+
  /**
  * Tests if given string is valid URL.
  *
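Each entry in `LLM_PROVIDER_PROFILES` carries the same three fields, plus an optional avatar mentioned in the comments. A rough sketch of the shape these objects satisfy (the type name `LlmProviderProfile` is hypothetical; the published typings ship in `esm/typings/src/llm-providers/_common/profiles/llmProviderProfiles.d.ts`):

    // Sketch of the profile shape used above; name and field docs are inferred from the bundle.
    type LlmProviderProfile = {
        name: string; // stable identifier, e.g. 'OPENAI'
        fullname: string; // human-readable label for chat UIs, e.g. 'OpenAI GPT'
        color: string; // brand color as a hex string, e.g. '#10a37f'
        avatarSrc?: string; // noted in the source as a possible future addition
    };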
@@ -1445,6 +1529,9 @@
  get description() {
  return `Models from Promptbook remote server ${this.options.remoteServerUrl}`;
  }
+ get profile() {
+ return LLM_PROVIDER_PROFILES.REMOTE;
+ }
  /**
  * Check the configuration of all execution tools
  */
@@ -1713,7 +1800,7 @@
  /**
  * List of available Anthropic Claude models with pricing
  *
- * Note: Done at 2025-05-06
+ * Note: Synced with official API docs at 2025-08-20
  *
  * @see https://docs.anthropic.com/en/docs/models-overview
  * @public exported from `@promptbook/anthropic-claude`
@@ -1723,12 +1810,52 @@
  value: [
  {
  modelVariant: 'CHAT',
- modelTitle: 'Claude 3.5 Sonnet',
- modelName: 'claude-3-5-sonnet-20240620',
- modelDescription: 'Latest Claude model with 200K token context window. Features state-of-the-art reasoning capabilities, sophisticated code generation, and enhanced multilingual understanding. Offers superior accuracy with 30% fewer hallucinations than Claude 3 Sonnet. Provides exceptional performance for complex enterprise applications while maintaining fast response times.',
+ modelTitle: 'Claude Opus 4.1',
+ modelName: 'claude-opus-4-1-20250805',
+ modelDescription: 'Most powerful and capable Claude model with 200K token context window. Features superior reasoning capabilities, exceptional coding abilities, and advanced multimodal understanding. Sets new standards in complex reasoning and analytical tasks with enhanced safety measures. Ideal for the most demanding enterprise applications requiring maximum intelligence.',
  pricing: {
- prompt: pricing(`$2.50 / 1M tokens`),
- output: pricing(`$12.50 / 1M tokens`),
+ prompt: pricing(`$15.00 / 1M tokens`),
+ output: pricing(`$75.00 / 1M tokens`),
+ },
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'Claude Opus 4',
+ modelName: 'claude-opus-4-20250514',
+ modelDescription: 'Previous flagship Claude model with 200K token context window. Features very high intelligence and capability with exceptional performance across reasoning, coding, and creative tasks. Maintains strong safety guardrails while delivering sophisticated outputs for complex professional applications.',
+ pricing: {
+ prompt: pricing(`$15.00 / 1M tokens`),
+ output: pricing(`$75.00 / 1M tokens`),
+ },
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'Claude Sonnet 4',
+ modelName: 'claude-sonnet-4-20250514',
+ modelDescription: 'High-performance Claude model with exceptional reasoning capabilities and 200K token context window (1M context beta available). Features balanced intelligence and efficiency with enhanced multimodal understanding. Offers optimal performance for most enterprise applications requiring sophisticated AI capabilities.',
+ pricing: {
+ prompt: pricing(`$3.00 / 1M tokens`),
+ output: pricing(`$15.00 / 1M tokens`),
+ },
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'Claude Sonnet 3.7',
+ modelName: 'claude-3-7-sonnet-20250219',
+ modelDescription: 'High-performance Claude model with early extended thinking capabilities and 200K token context window. Features enhanced reasoning chains, improved factual accuracy, and toggleable extended thinking for complex problem-solving. Ideal for applications requiring deep analytical capabilities.',
+ pricing: {
+ prompt: pricing(`$3.00 / 1M tokens`),
+ output: pricing(`$15.00 / 1M tokens`),
+ },
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'Claude Haiku 3.5',
+ modelName: 'claude-3-5-haiku-20241022',
+ modelDescription: 'Fastest Claude model with 200K token context window optimized for intelligence at blazing speeds. Features enhanced reasoning and contextual understanding while maintaining sub-second response times. Perfect for real-time applications, customer-facing deployments, and high-throughput services.',
+ pricing: {
+ prompt: pricing(`$0.80 / 1M tokens`),
+ output: pricing(`$4.00 / 1M tokens`),
  },
  },
  {
@@ -2394,6 +2521,9 @@
  get description() {
  return 'Use all models provided by Anthropic Claude';
  }
+ get profile() {
+ return LLM_PROVIDER_PROFILES.ANTHROPIC;
+ }
  async getClient() {
  if (this.client === null) {
  // Note: Passing only Anthropic Claude relevant options to Anthropic constructor
@@ -2434,8 +2564,7 @@
  const rawPromptContent = templateParameters(content, { ...parameters, modelName });
  const rawRequest = {
  model: modelRequirements.modelName || this.getDefaultChatModel().modelName,
- max_tokens: modelRequirements.maxTokens || 4096,
- // <- TODO: [🌾] Make some global max cap for maxTokens
+ max_tokens: modelRequirements.maxTokens || 8192,
  temperature: modelRequirements.temperature,
  system: modelRequirements.systemMessage,
  messages: [
@@ -2494,59 +2623,6 @@
  },
  });
  }
- /**
- * Calls Anthropic Claude API to use a completion model.
- */
- async callCompletionModel(prompt) {
- if (this.options.isVerbose) {
- console.info('🖋 Anthropic Claude callCompletionModel call');
- }
- const { content, parameters, modelRequirements } = prompt;
- if (modelRequirements.modelVariant !== 'COMPLETION') {
- throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
- }
- const client = await this.getClient();
- const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
- const rawPromptContent = templateParameters(content, { ...parameters, modelName });
- const rawRequest = {
- model: modelName,
- max_tokens_to_sample: modelRequirements.maxTokens || 2000,
- temperature: modelRequirements.temperature,
- prompt: rawPromptContent,
- };
- const start = $getCurrentDate();
- const rawResponse = await this.limiter
- .schedule(() => client.completions.create(rawRequest))
- .catch((error) => {
- if (this.options.isVerbose) {
- console.info(colors__default["default"].bgRed('error'), error);
- }
- throw error;
- });
- if (this.options.isVerbose) {
- console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
- }
- if (!rawResponse.completion) {
- throw new PipelineExecutionError('No completion from Anthropic Claude');
- }
- const resultContent = rawResponse.completion;
- const complete = $getCurrentDate();
- const usage = computeAnthropicClaudeUsage(rawPromptContent, resultContent, rawResponse);
- return exportJson({
- name: 'promptResult',
- message: `Result of \`AnthropicClaudeExecutionTools.callCompletionModel\``,
- order: [],
- value: {
- content: resultContent,
- modelName: rawResponse.model || modelName,
- timing: { start, complete },
- usage,
- rawPromptContent,
- rawRequest,
- rawResponse,
- },
- });
- }
  // <- Note: [🤖] callXxxModel
  /**
  * Get the model that should be used as default
@@ -2568,7 +2644,7 @@
  * Default model for chat variant.
  */
  getDefaultChatModel() {
- return this.getDefaultModel('claude-3-5-sonnet');
+ return this.getDefaultModel('claude-sonnet-4-20250514');
  }
  }
  /**
@@ -2711,7 +2787,7 @@
  /**
  * List of available OpenAI models with pricing
  *
- * Note: Done at 2025-05-06
+ * Note: Synced with official API docs at 2025-08-20
  *
  * @see https://platform.openai.com/docs/models/
  * @see https://openai.com/api/pricing/
@@ -2720,6 +2796,138 @@
  const OPENAI_MODELS = exportJson({
  name: 'OPENAI_MODELS',
  value: [
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-5',
+ modelName: 'gpt-5',
+ modelDescription: "OpenAI's most advanced language model with unprecedented reasoning capabilities and 200K context window. Features revolutionary improvements in complex problem-solving, scientific reasoning, and creative tasks. Demonstrates human-level performance across diverse domains with enhanced safety measures and alignment. Represents the next generation of AI with superior understanding, nuanced responses, and advanced multimodal capabilities.",
+ pricing: {
+ prompt: pricing(`$1.25 / 1M tokens`),
+ output: pricing(`$10.00 / 1M tokens`),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-5-mini',
+ modelName: 'gpt-5-mini',
+ modelDescription: "A faster, cost-efficient version of GPT-5 for well-defined tasks with 200K context window. Maintains core GPT-5 capabilities while offering 5x faster inference and significantly lower costs. Features enhanced instruction following and reduced latency for production applications requiring quick responses with high quality.",
+ pricing: {
+ prompt: pricing(`$0.25 / 1M tokens`),
+ output: pricing(`$2.00 / 1M tokens`),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-5-nano',
+ modelName: 'gpt-5-nano',
+ modelDescription: "The fastest, most cost-efficient version of GPT-5 with 200K context window. Optimized for summarization, classification, and simple reasoning tasks. Features 10x faster inference than base GPT-5 while maintaining good quality for straightforward applications. Ideal for high-volume, cost-sensitive deployments.",
+ pricing: {
+ prompt: pricing(`$0.05 / 1M tokens`),
+ output: pricing(`$0.40 / 1M tokens`),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-4.1',
+ modelName: 'gpt-4.1',
+ modelDescription: "Smartest non-reasoning model with 128K context window. Enhanced version of GPT-4 with improved instruction following, better factual accuracy, and reduced hallucinations. Features advanced function calling capabilities and superior performance on coding tasks. Ideal for applications requiring high intelligence without reasoning overhead.",
+ pricing: {
+ prompt: pricing(`$3.00 / 1M tokens`),
+ output: pricing(`$12.00 / 1M tokens`),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-4.1-mini',
+ modelName: 'gpt-4.1-mini',
+ modelDescription: "Smaller, faster version of GPT-4.1 with 128K context window. Balances intelligence and efficiency with 3x faster inference than base GPT-4.1. Maintains strong capabilities across text generation, reasoning, and coding while offering better cost-performance ratio for most applications.",
+ pricing: {
+ prompt: pricing(`$0.80 / 1M tokens`),
+ output: pricing(`$3.20 / 1M tokens`),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-4.1-nano',
+ modelName: 'gpt-4.1-nano',
+ modelDescription: "Fastest, most cost-efficient version of GPT-4.1 with 128K context window. Optimized for high-throughput applications requiring good quality at minimal cost. Features 5x faster inference than GPT-4.1 while maintaining adequate performance for most general-purpose tasks.",
+ pricing: {
+ prompt: pricing(`$0.20 / 1M tokens`),
+ output: pricing(`$0.80 / 1M tokens`),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'o3',
+ modelName: 'o3',
+ modelDescription: "Advanced reasoning model with 128K context window specializing in complex logical, mathematical, and analytical tasks. Successor to o1 with enhanced step-by-step problem-solving capabilities and superior performance on STEM-focused problems. Ideal for professional applications requiring deep analytical thinking and precise reasoning.",
+ pricing: {
+ prompt: pricing(`$15.00 / 1M tokens`),
+ output: pricing(`$60.00 / 1M tokens`),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'o3-pro',
+ modelName: 'o3-pro',
+ modelDescription: "Enhanced version of o3 with more compute allocated for better responses on the most challenging problems. Features extended reasoning time and improved accuracy on complex analytical tasks. Designed for applications where maximum reasoning quality is more important than response speed.",
+ pricing: {
+ prompt: pricing(`$30.00 / 1M tokens`),
+ output: pricing(`$120.00 / 1M tokens`),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'o4-mini',
+ modelName: 'o4-mini',
+ modelDescription: "Fast, cost-efficient reasoning model with 128K context window. Successor to o1-mini with improved analytical capabilities while maintaining speed advantages. Features enhanced mathematical reasoning and logical problem-solving at significantly lower cost than full reasoning models.",
+ pricing: {
+ prompt: pricing(`$4.00 / 1M tokens`),
+ output: pricing(`$16.00 / 1M tokens`),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'o3-deep-research',
+ modelName: 'o3-deep-research',
+ modelDescription: "Most powerful deep research model with 128K context window. Specialized for comprehensive research tasks, literature analysis, and complex information synthesis. Features advanced citation capabilities and enhanced factual accuracy for academic and professional research applications.",
+ pricing: {
+ prompt: pricing(`$25.00 / 1M tokens`),
+ output: pricing(`$100.00 / 1M tokens`),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'o4-mini-deep-research',
+ modelName: 'o4-mini-deep-research',
+ modelDescription: "Faster, more affordable deep research model with 128K context window. Balances research capabilities with cost efficiency, offering good performance on literature review, fact-checking, and information synthesis tasks at a more accessible price point.",
+ pricing: {
+ prompt: pricing(`$12.00 / 1M tokens`),
+ output: pricing(`$48.00 / 1M tokens`),
+ },
+ },
+ /**/
  /*/
  {
  modelTitle: 'dall-e-3',
@@ -3240,7 +3448,6 @@
  const modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
  const modelSettings = {
  maxTokens: modelRequirements.maxTokens,
- // <- TODO: [🌾] Make some global max cap for maxTokens
  temperature: modelRequirements.temperature,
  user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString(),
  // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
@@ -3346,8 +3553,7 @@
  try {
  const modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
  const modelSettings = {
- maxTokens: modelRequirements.maxTokens || 2000,
- // <- TODO: [🌾] Make some global max cap for maxTokens
+ maxTokens: modelRequirements.maxTokens,
  temperature: modelRequirements.temperature,
  user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString(),
  // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
@@ -3731,7 +3937,7 @@
  /**
  * List of available Deepseek models with descriptions
  *
- * Note: Done at 2025-05-06
+ * Note: Synced with official API docs at 2025-08-20
  *
  * @see https://www.deepseek.com/models
  * @public exported from `@promptbook/deepseek`
@@ -3741,12 +3947,32 @@
  value: [
  {
  modelVariant: 'CHAT',
- modelTitle: 'Deepseek Chat Pro',
- modelName: 'deepseek-chat-pro',
- modelDescription: 'Latest flagship general-purpose model with 256K context window. Enhanced from base Chat model with 40% improvement on complex reasoning tasks and specialized domain knowledge. Features advanced prompt optimization and improved contextual memory. Ideal for enterprise applications requiring highest quality responses.',
+ modelTitle: 'DeepSeek V3',
+ modelName: 'deepseek-chat',
+ modelDescription: 'Latest flagship general-purpose model with 128K context window. Features exceptional reasoning capabilities, advanced code generation, and strong performance across diverse domains. Offers competitive performance with leading models while maintaining cost efficiency. Ideal for complex reasoning, coding, and knowledge-intensive tasks.',
  pricing: {
- prompt: pricing(`$1.20 / 1M tokens`),
- output: pricing(`$2.40 / 1M tokens`),
+ prompt: pricing(`$0.14 / 1M tokens`),
+ output: pricing(`$0.28 / 1M tokens`),
+ },
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'DeepSeek R1',
+ modelName: 'deepseek-reasoner',
+ modelDescription: 'Advanced reasoning model with 128K context window specializing in complex problem-solving and analytical thinking. Features explicit reasoning chains, enhanced mathematical capabilities, and superior performance on STEM tasks. Designed for applications requiring deep analytical reasoning and step-by-step problem solving.',
+ pricing: {
+ prompt: pricing(`$0.55 / 1M tokens`),
+ output: pricing(`$2.19 / 1M tokens`),
+ },
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'DeepSeek Coder V2',
+ modelName: 'deepseek-coder',
+ modelDescription: 'Specialized coding model with 128K context window optimized for software development tasks. Features exceptional code generation, debugging, and refactoring capabilities across 40+ programming languages. Particularly strong in understanding complex codebases and implementing solutions based on natural language specifications.',
+ pricing: {
+ prompt: pricing(`$0.14 / 1M tokens`),
+ output: pricing(`$0.28 / 1M tokens`),
  },
  },
  {
@@ -3980,7 +4206,7 @@
  /**
  * List of available Google models with descriptions
  *
- * Note: Done at 2025-05-06
+ * Note: Synced with official API docs at 2025-08-20
  *
  * @see https://ai.google.dev/models/gemini
  * @public exported from `@promptbook/google`
@@ -3991,11 +4217,51 @@
  {
  modelVariant: 'CHAT',
  modelTitle: 'Gemini 2.5 Pro',
- modelName: 'gemini-2.5-pro-preview-03-25',
- modelDescription: 'Latest advanced multimodal model with 1M token context window. Features exceptional reasoning across complex tasks, sophisticated function calling, and advanced image analysis (16MP resolution). Demonstrates superior performance in math, coding, and knowledge-intensive tasks with 30% improvement over Gemini 1.5 Pro. Ideal for enterprise applications requiring deep contextual understanding.',
+ modelName: 'gemini-2.5-pro',
+ modelDescription: 'State-of-the-art thinking model with 1M token context window capable of reasoning over complex problems in code, math, and STEM. Features enhanced thinking capabilities, advanced multimodal understanding, and superior performance on analytical tasks. Ideal for complex enterprise applications requiring maximum intelligence and reasoning.',
  pricing: {
- prompt: pricing(`$8.00 / 1M tokens`),
- output: pricing(`$24.00 / 1M tokens`),
+ prompt: pricing(`$7.00 / 1M tokens`),
+ output: pricing(`$21.00 / 1M tokens`),
+ },
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'Gemini 2.5 Flash',
+ modelName: 'gemini-2.5-flash',
+ modelDescription: 'Best model in terms of price-performance with 1M token context window offering well-rounded capabilities. Features adaptive thinking, cost efficiency, and enhanced reasoning for large-scale processing. Ideal for low-latency, high-volume tasks that require thinking and agentic use cases.',
+ pricing: {
+ prompt: pricing(`$0.35 / 1M tokens`),
+ output: pricing(`$1.05 / 1M tokens`),
+ },
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'Gemini 2.5 Flash Lite',
+ modelName: 'gemini-2.5-flash-lite',
+ modelDescription: 'Cost-efficient Gemini 2.5 Flash model optimized for high throughput with 1M token context window. Features thinking capabilities while maintaining the most cost-efficient pricing. Perfect for real-time, low-latency use cases requiring good quality at scale.',
+ pricing: {
+ prompt: pricing(`$0.20 / 1M tokens`),
+ output: pricing(`$0.60 / 1M tokens`),
+ },
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'Gemini 2.0 Flash',
+ modelName: 'gemini-2.0-flash',
+ modelDescription: 'Next-generation model with 1M token context window delivering improved capabilities, superior speed, and realtime streaming. Features enhanced function calling, code execution, and search capabilities. Ideal for applications requiring cutting-edge AI capabilities with fast response times.',
+ pricing: {
+ prompt: pricing(`$0.25 / 1M tokens`),
+ output: pricing(`$0.75 / 1M tokens`),
+ },
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'Gemini 2.0 Flash Lite',
+ modelName: 'gemini-2.0-flash-lite',
+ modelDescription: 'Cost-efficient Gemini 2.0 Flash model optimized for low latency with 1M token context window. Balances performance and cost with enhanced efficiency for high-volume applications. Perfect for applications requiring good quality responses at minimal cost.',
+ pricing: {
+ prompt: pricing(`$0.15 / 1M tokens`),
+ output: pricing(`$0.45 / 1M tokens`),
  },
  },
  {
@@ -4383,7 +4649,18 @@
  const openAiOptions = { ...this.options };
  delete openAiOptions.isVerbose;
  delete openAiOptions.userId;
- this.client = new OpenAI__default["default"](openAiOptions);
+ // Enhanced configuration for better ECONNRESET handling
+ const enhancedOptions = {
+ ...openAiOptions,
+ timeout: API_REQUEST_TIMEOUT,
+ maxRetries: CONNECTION_RETRIES_LIMIT,
+ defaultHeaders: {
+ Connection: 'keep-alive',
+ 'Keep-Alive': 'timeout=30, max=100',
+ ...openAiOptions.defaultHeaders,
+ },
+ };
+ this.client = new OpenAI__default["default"](enhancedOptions);
  }
  return this.client;
  }
@@ -4436,7 +4713,6 @@
  const modelSettings = {
  model: modelName,
  max_tokens: modelRequirements.maxTokens,
- // <- TODO: [🌾] Make some global max cap for maxTokens
  temperature: modelRequirements.temperature,
  // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
  // <- Note: [🧆]
@@ -4472,7 +4748,7 @@
  console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
  }
  const rawResponse = await this.limiter
- .schedule(() => client.chat.completions.create(rawRequest))
+ .schedule(() => this.makeRequestWithRetry(() => client.chat.completions.create(rawRequest)))
  .catch((error) => {
  assertsError(error);
  if (this.options.isVerbose) {
@@ -4532,8 +4808,7 @@
  const modelName = modelRequirements.modelName || this.getDefaultCompletionModel().modelName;
  const modelSettings = {
  model: modelName,
- max_tokens: modelRequirements.maxTokens || 2000,
- // <- TODO: [🌾] Make some global max cap for maxTokens
+ max_tokens: modelRequirements.maxTokens,
  temperature: modelRequirements.temperature,
  // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
  // <- Note: [🧆]
@@ -4549,7 +4824,7 @@
  console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
  }
  const rawResponse = await this.limiter
- .schedule(() => client.completions.create(rawRequest))
+ .schedule(() => this.makeRequestWithRetry(() => client.completions.create(rawRequest)))
  .catch((error) => {
  assertsError(error);
  if (this.options.isVerbose) {
@@ -4613,7 +4888,7 @@
  console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
  }
  const rawResponse = await this.limiter
- .schedule(() => client.embeddings.create(rawRequest))
+ .schedule(() => this.makeRequestWithRetry(() => client.embeddings.create(rawRequest)))
  .catch((error) => {
  assertsError(error);
  if (this.options.isVerbose) {
@@ -4671,6 +4946,76 @@
  }
  return model;
  }
+ // <- Note: [🤖] getDefaultXxxModel
+ /**
+ * Makes a request with retry logic for network errors like ECONNRESET
+ */
+ async makeRequestWithRetry(requestFn) {
+ let lastError;
+ for (let attempt = 1; attempt <= CONNECTION_RETRIES_LIMIT; attempt++) {
+ try {
+ return await requestFn();
+ }
+ catch (error) {
+ assertsError(error);
+ lastError = error;
+ // Check if this is a retryable network error
+ const isRetryableError = this.isRetryableNetworkError(error);
+ if (!isRetryableError || attempt === CONNECTION_RETRIES_LIMIT) {
+ if (this.options.isVerbose) {
+ console.info(colors__default["default"].bgRed('Final error after retries'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}:`, error);
+ }
+ throw error;
+ }
+ // Calculate exponential backoff delay
+ const baseDelay = 1000; // 1 second
+ const backoffDelay = baseDelay * Math.pow(2, attempt - 1);
+ const jitterDelay = Math.random() * 500; // Add some randomness
+ const totalDelay = backoffDelay + jitterDelay;
+ if (this.options.isVerbose) {
+ console.info(colors__default["default"].bgYellow('Retrying request'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}, waiting ${Math.round(totalDelay)}ms:`, error.message);
+ }
+ // Wait before retrying
+ await new Promise((resolve) => setTimeout(resolve, totalDelay));
+ }
+ }
+ throw lastError;
+ }
+ /**
+ * Determines if an error is retryable (network-related errors)
+ */
+ isRetryableNetworkError(error) {
+ const errorMessage = error.message.toLowerCase();
+ const errorCode = error.code;
+ // Network connection errors that should be retried
+ const retryableErrors = [
+ 'econnreset',
+ 'enotfound',
+ 'econnrefused',
+ 'etimedout',
+ 'socket hang up',
+ 'network error',
+ 'fetch failed',
+ 'connection reset',
+ 'connection refused',
+ 'timeout',
+ ];
+ // Check error message
+ if (retryableErrors.some((retryableError) => errorMessage.includes(retryableError))) {
+ return true;
+ }
+ // Check error code
+ if (errorCode && retryableErrors.includes(errorCode.toLowerCase())) {
+ return true;
+ }
+ // Check for specific HTTP status codes that are retryable
+ const errorWithStatus = error;
+ const httpStatus = errorWithStatus.status || errorWithStatus.statusCode;
+ if (httpStatus && [429, 500, 502, 503, 504].includes(httpStatus)) {
+ return true;
+ }
+ return false;
+ }
  }
  /**
  * TODO: [🛄] Some way how to re-wrap the errors from `OpenAiCompatibleExecutionTools`
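For reference, the delay schedule that `makeRequestWithRetry` produces is `1000 * 2^(attempt - 1)` milliseconds plus up to 500 ms of random jitter. A standalone sketch of that calculation (the value of `CONNECTION_RETRIES_LIMIT` is assumed to be 5 here purely for illustration; the bundle defines its own constant):

    // Sketch of the exponential-backoff-with-jitter schedule used above.
    const CONNECTION_RETRIES_LIMIT = 5; // assumed value, for illustration only

    function backoffDelayMs(attempt: number): number {
        const baseDelay = 1000; // 1 second, as in the bundle
        const backoff = baseDelay * Math.pow(2, attempt - 1); // 1s, 2s, 4s, ...
        const jitter = Math.random() * 500; // spreads out simultaneous retries
        return backoff + jitter;
    }

    // Attempts 1..4 wait roughly 1s, 2s, 4s, 8s (plus jitter);
    // the final attempt rethrows instead of waiting.
    for (let attempt = 1; attempt < CONNECTION_RETRIES_LIMIT; attempt++) {
        console.log(`attempt ${attempt}: ~${Math.round(backoffDelayMs(attempt))} ms`);
    }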
@@ -4682,7 +5027,7 @@
  /**
  * List of available models in Ollama library
  *
- * Note: Done at 2025-05-19
+ * Note: Synced with official API docs at 2025-08-20
  *
  * @see https://ollama.com/library
  * @public exported from `@promptbook/ollama`
@@ -4690,6 +5035,24 @@
  const OLLAMA_MODELS = exportJson({
  name: 'OLLAMA_MODELS',
  value: [
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'llama3.3',
+ modelName: 'llama3.3',
+ modelDescription: 'Meta Llama 3.3 (70B parameters) with 128K context window. Latest generation foundation model with significantly enhanced reasoning, instruction following, and multilingual capabilities. Features improved performance on complex tasks and better factual accuracy compared to Llama 3.1.',
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'llama3.2',
+ modelName: 'llama3.2',
+ modelDescription: 'Meta Llama 3.2 (1B-90B parameters) with 128K context window. Enhanced model with improved reasoning capabilities, better instruction following, and multimodal support in larger variants. Features significant performance improvements over Llama 3.1 across diverse tasks.',
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'llama3.1',
+ modelName: 'llama3.1',
+ modelDescription: 'Meta Llama 3.1 (8B-405B parameters) with 128K context window. Advanced foundation model with enhanced reasoning, improved multilingual capabilities, and better performance on complex tasks. Features significant improvements in code generation and mathematical reasoning.',
+ },
  {
  modelVariant: 'CHAT',
  modelTitle: 'llama3',
@@ -5160,6 +5523,9 @@
  get description() {
  return 'Use all models provided by OpenAI';
  }
+ get profile() {
+ return LLM_PROVIDER_PROFILES.OPENAI;
+ }
  /*
  Note: Commenting this out to avoid circular dependency
  /**
@@ -5184,7 +5550,7 @@
  * Default model for chat variant.
  */
  getDefaultChatModel() {
- return this.getDefaultModel('gpt-4-turbo');
+ return this.getDefaultModel('gpt-5');
  }
  /**
  * Default model for completion variant.
@@ -5252,8 +5618,6 @@
  const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
  const modelSettings = {
  model: modelName,
- max_tokens: modelRequirements.maxTokens,
- // <- TODO: [🌾] Make some global max cap for maxTokens

  temperature: modelRequirements.temperature,

@@ -6095,9 +6459,9 @@
  */
  function prettifyMarkdown(content) {
  try {
- return prettier.format(content, {
+ return standalone.format(content, {
  parser: 'markdown',
- plugins: [parserHtml__default["default"]],
+ plugins: [parserMarkdown__default["default"], parserHtml__default["default"]],
  // TODO: DRY - make some import or auto-copy of .prettierrc
  endOfLine: 'lf',
  tabWidth: 4,
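The switch from the full `prettier` package to `prettier/standalone` means no parser is bundled implicitly, so the markdown parser now has to be passed as a plugin alongside the HTML one. A minimal sketch of the same call in isolation, assuming the Prettier 2.x API that the `standalone.format(content, options)` call above implies:

    // Sketch: formatting markdown with prettier/standalone (Prettier 2.x style).
    import { format } from 'prettier/standalone';
    import * as parserMarkdown from 'prettier/parser-markdown';
    import * as parserHtml from 'prettier/parser-html';

    const prettified = format('#  Hello\n\n*   world', {
        parser: 'markdown',
        plugins: [parserMarkdown, parserHtml], // standalone bundles no parsers by default
        endOfLine: 'lf',
        tabWidth: 4,
    });
    // Exact output depends on the Prettier version; roughly '# Hello\n\n-   world\n'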
@@ -6996,6 +7360,7 @@
  const errors = [];
  const warnings = [];
  let currentValue = {};
+ let customTldr = null;
  const partialResultSubject = new rxjs.Subject();
  // <- Note: Not using `BehaviorSubject` because on error we can't access the last value
  const finalResultPromise = /* not await */ taskProcessCallback((newOngoingResult) => {
@@ -7006,6 +7371,9 @@
  Object.assign(currentValue, newOngoingResult);
  // <- TODO: assign deep
  partialResultSubject.next(newOngoingResult);
+ }, (tldrInfo) => {
+ customTldr = tldrInfo;
+ updatedAt = new Date();
  });
  finalResultPromise
  .catch((error) => {
@@ -7059,6 +7427,78 @@
  return status;
  // <- Note: [1] --||--
  },
+ get tldr() {
+ var _a, _b, _c, _d, _e, _f, _g, _h, _j, _k;
+ // Use custom tldr if available
+ if (customTldr) {
+ return customTldr;
+ }
+ // Fallback to default implementation
+ const cv = currentValue;
+ // If explicit percent is provided, use it
+ let percentRaw = (_f = (_d = (_b = (_a = cv === null || cv === void 0 ? void 0 : cv.tldr) === null || _a === void 0 ? void 0 : _a.percent) !== null && _b !== void 0 ? _b : (_c = cv === null || cv === void 0 ? void 0 : cv.usage) === null || _c === void 0 ? void 0 : _c.percent) !== null && _d !== void 0 ? _d : (_e = cv === null || cv === void 0 ? void 0 : cv.progress) === null || _e === void 0 ? void 0 : _e.percent) !== null && _f !== void 0 ? _f : cv === null || cv === void 0 ? void 0 : cv.percent;
+ // Simulate progress if not provided
+ if (typeof percentRaw !== 'number') {
+ // Simulate progress: evenly split across subtasks, based on elapsed time
+ const now = new Date();
+ const elapsedMs = now.getTime() - createdAt.getTime();
+ const totalMs = DEFAULT_TASK_SIMULATED_DURATION_MS;
+ // If subtasks are defined, split progress evenly
+ const subtaskCount = Array.isArray(cv === null || cv === void 0 ? void 0 : cv.subtasks) ? cv.subtasks.length : 1;
+ const completedSubtasks = Array.isArray(cv === null || cv === void 0 ? void 0 : cv.subtasks)
+ ? cv.subtasks.filter((s) => s.done || s.completed).length
+ : 0;
+ // Progress from completed subtasks
+ const subtaskProgress = subtaskCount > 0 ? completedSubtasks / subtaskCount : 0;
+ // Progress from elapsed time for current subtask
+ const timeProgress = Math.min(elapsedMs / totalMs, 1);
+ // Combine: completed subtasks + time progress for current subtask
+ percentRaw = Math.min(subtaskProgress + (1 / subtaskCount) * timeProgress, 1);
+ if (status === 'FINISHED')
+ percentRaw = 1;
+ if (status === 'ERROR')
+ percentRaw = 0;
+ }
+ // Clamp to [0,1]
+ let percent = Number(percentRaw) || 0;
+ if (percent < 0)
+ percent = 0;
+ if (percent > 1)
+ percent = 1;
+ // Build a short message: prefer explicit tldr.message, then common summary/message fields, then errors/warnings, then status
+ const messageFromResult = (_k = (_j = (_h = (_g = cv === null || cv === void 0 ? void 0 : cv.tldr) === null || _g === void 0 ? void 0 : _g.message) !== null && _h !== void 0 ? _h : cv === null || cv === void 0 ? void 0 : cv.message) !== null && _j !== void 0 ? _j : cv === null || cv === void 0 ? void 0 : cv.summary) !== null && _k !== void 0 ? _k : cv === null || cv === void 0 ? void 0 : cv.statusMessage;
+ let message = messageFromResult;
+ if (!message) {
+ // If subtasks, show current subtask
+ if (Array.isArray(cv === null || cv === void 0 ? void 0 : cv.subtasks) && cv.subtasks.length > 0) {
+ const current = cv.subtasks.find((s) => !s.done && !s.completed);
+ if (current && current.title) {
+ message = `Working on ${current.title}`;
+ }
+ }
+ if (!message) {
+ if (errors.length) {
+ message = errors[errors.length - 1].message || 'Error';
+ }
+ else if (warnings.length) {
+ message = warnings[warnings.length - 1].message || 'Warning';
+ }
+ else if (status === 'FINISHED') {
+ message = 'Finished';
+ }
+ else if (status === 'ERROR') {
+ message = 'Error';
+ }
+ else {
+ message = 'Running';
+ }
+ }
+ }
+ return {
+ percent: percent,
+ message,
+ };
+ },
  get createdAt() {
  return createdAt;
  // <- Note: [1] --||--
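When no explicit percent is available, the new `tldr` getter simulates one: each completed subtask contributes `1/n`, and the in-flight subtask advances linearly with elapsed time against the five-minute `DEFAULT_TASK_SIMULATED_DURATION_MS` budget. A standalone sketch of that formula:

    // Sketch of the simulated-progress formula in the tldr getter above.
    const DEFAULT_TASK_SIMULATED_DURATION_MS = 5 * 60 * 1000; // 5 minutes

    function simulatedPercent(completedSubtasks: number, subtaskCount: number, elapsedMs: number): number {
        const n = Math.max(subtaskCount, 1); // the bundle uses 1 when no subtasks are defined
        const subtaskProgress = completedSubtasks / n; // share from finished subtasks
        const timeProgress = Math.min(elapsedMs / DEFAULT_TASK_SIMULATED_DURATION_MS, 1); // current subtask
        return Math.min(subtaskProgress + (1 / n) * timeProgress, 1); // clamped to [0, 1]
    }

    // Example: 1 of 4 subtasks done, 2.5 minutes into the current one:
    // 0.25 + 0.25 * 0.5 = 0.375
    console.log(simulatedPercent(1, 4, 150_000)); // 0.375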
@@ -7264,7 +7704,7 @@
  }
  return spaceTrim__default["default"]((block) => `
  ${headLine}
-
+
  ${ /* <- Note: Indenting the description: */block(description)}
  `);
  })
@@ -7275,6 +7715,9 @@
  ${block(innerModelsTitlesAndDescriptions)}
  `);
  }
+ get profile() {
+ return LLM_PROVIDER_PROFILES.MULTIPLE;
+ }
  /**
  * Check the configuration of all execution tools
  */
@@ -7319,25 +7762,22 @@
         const errors = [];
         llm: for (const llmExecutionTools of this.llmExecutionTools) {
             try {
-                variant: switch (prompt.modelRequirements.modelVariant) {
+                switch (prompt.modelRequirements.modelVariant) {
                     case 'CHAT':
                         if (llmExecutionTools.callChatModel === undefined) {
                             continue llm;
                         }
                         return await llmExecutionTools.callChatModel(prompt);
-                        break variant;
                     case 'COMPLETION':
                         if (llmExecutionTools.callCompletionModel === undefined) {
                             continue llm;
                         }
                         return await llmExecutionTools.callCompletionModel(prompt);
-                        break variant;
                     case 'EMBEDDING':
                         if (llmExecutionTools.callEmbeddingModel === undefined) {
                             continue llm;
                         }
                         return await llmExecutionTools.callEmbeddingModel(prompt);
-                        break variant;
                     // <- case [🤖]:
                     default:
                         throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}"`);
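
The refactor drops the `variant:` label and the `break variant;` statements, which were unreachable: every `case` ends in either `return` or `continue llm;`. The surviving provider-fallback pattern, condensed (the error aggregation at the end is a sketch; the real bundle uses its own error types):

    // Try each configured tool in order; skip tools that cannot handle the variant.
    async function callFirstCapableTool(llmExecutionToolsList, prompt) {
        const errors = [];
        llm: for (const tools of llmExecutionToolsList) {
            try {
                switch (prompt.modelRequirements.modelVariant) {
                    case 'CHAT':
                        if (tools.callChatModel === undefined) {
                            continue llm; // <- this tool cannot chat, try the next one
                        }
                        return await tools.callChatModel(prompt);
                    case 'COMPLETION':
                        if (tools.callCompletionModel === undefined) {
                            continue llm;
                        }
                        return await tools.callCompletionModel(prompt);
                    case 'EMBEDDING':
                        if (tools.callEmbeddingModel === undefined) {
                            continue llm;
                        }
                        return await tools.callEmbeddingModel(prompt);
                    default:
                        throw new Error(`Unknown model variant "${prompt.modelRequirements.modelVariant}"`);
                }
            } catch (error) {
                errors.push(error); // remember the failure and fall through to the next tool
            }
        }
        throw new AggregateError(errors, 'No configured LLM tool could handle the prompt');
    }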
@@ -7490,7 +7930,7 @@
         const result = await preparePersonaExecutor({
             availableModels /* <- Note: Passing as JSON */,
             personaDescription,
-        }).asPromise();
+        }).asPromise({ isCrashedOnError: true });
         const { outputParameters } = result;
         const { modelsRequirements: modelsRequirementsJson } = outputParameters;
         let modelsRequirementsUnchecked = jsonParse(modelsRequirementsJson);
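
This is the first of several identical changes in this release: internal executor calls now pass `{ isCrashedOnError: true }` to `.asPromise()`, so a failing sub-pipeline rejects the awaited promise instead of resolving with an error-carrying result. The contrast, roughly (the option name comes from this diff; `executor` is a generic stand-in, and the default behavior without the option is an assumption):

    // Before: errors travel inside the resolved result and must be checked manually
    // const result = await executor(inputs).asPromise();

    // After: errors reject the promise and propagate to the caller, so the
    // destructuring below can safely assume outputParameters is present
    const result = await executor(inputs).asPromise({ isCrashedOnError: true });
    const { outputParameters } = result;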
@@ -8103,7 +8543,7 @@
         });
         const result = await prepareTitleExecutor({
             book: sources.map(({ content }) => content).join('\n\n'),
-        }).asPromise();
+        }).asPromise({ isCrashedOnError: true });
         const { outputParameters } = result;
         const { title: titleRaw } = outputParameters;
         if (isVerbose) {
@@ -8931,7 +9371,7 @@
      */
     async function executeAttempts(options) {
         const { jokerParameterNames, priority, maxAttempts, // <- Note: [💂]
-        preparedContent, parameters, task, preparedPipeline, tools, $executionReport, pipelineIdentification, maxExecutionAttempts, } = options;
+        preparedContent, parameters, task, preparedPipeline, tools, $executionReport, pipelineIdentification, maxExecutionAttempts, onProgress, } = options;
         const $ongoingTaskResult = {
             $result: null,
             $resultString: null,
@@ -9175,6 +9615,10 @@
                     result: $ongoingTaskResult.$resultString,
                     error: error,
                 });
+                // Report failed attempt
+                onProgress({
+                    errors: [error],
+                });
             }
             finally {
                 if (!isJokerAttempt &&
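
`executeAttempts` now receives an `onProgress` callback through its options and reports each failed attempt with the error attached, so a task's tldr can surface the latest failure while retries continue. A minimal retry loop with this kind of reporting (the `{ errors: [error] }` payload shape is the one shown above; everything else is simplified):

    async function executeWithAttempts({ maxAttempts, attemptCallback, onProgress }) {
        let lastError;
        for (let attempt = 0; attempt < maxAttempts; attempt++) {
            try {
                return await attemptCallback(attempt);
            } catch (error) {
                lastError = error;
                onProgress({ errors: [error] }); // report the failed attempt immediately
            }
        }
        throw lastError;
    }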
@@ -10048,15 +10492,74 @@
             });
         });
     };
-    const pipelineExecutor = (inputParameters) => createTask({
-        taskType: 'EXECUTION',
-        title: pipeline.title,
-        taskProcessCallback(updateOngoingResult) {
-            return pipelineExecutorWithCallback(inputParameters, async (newOngoingResult) => {
-                updateOngoingResult(newOngoingResult);
-            });
-        },
-    });
+    const pipelineExecutor = (inputParameters) => {
+        const startTime = new Date().getTime();
+        return createTask({
+            taskType: 'EXECUTION',
+            title: pipeline.title,
+            taskProcessCallback(updateOngoingResult, updateTldr) {
+                return pipelineExecutorWithCallback(inputParameters, async (newOngoingResult) => {
+                    var _a, _b;
+                    updateOngoingResult(newOngoingResult);
+                    // Calculate and update tldr based on pipeline progress
+                    const cv = newOngoingResult;
+                    // Calculate progress based on parameters resolved vs total parameters
+                    const totalParameters = pipeline.parameters.filter(p => !p.isInput).length;
+                    let resolvedParameters = 0;
+                    let currentTaskTitle = '';
+                    // Get the resolved parameters from output parameters
+                    if (cv === null || cv === void 0 ? void 0 : cv.outputParameters) {
+                        // Count how many output parameters have non-empty values
+                        resolvedParameters = Object.values(cv.outputParameters).filter(value => value !== undefined && value !== null && String(value).trim() !== '').length;
+                    }
+                    // Try to determine current task from execution report
+                    if (((_a = cv === null || cv === void 0 ? void 0 : cv.executionReport) === null || _a === void 0 ? void 0 : _a.promptExecutions) && cv.executionReport.promptExecutions.length > 0) {
+                        const lastExecution = cv.executionReport.promptExecutions[cv.executionReport.promptExecutions.length - 1];
+                        if ((_b = lastExecution === null || lastExecution === void 0 ? void 0 : lastExecution.prompt) === null || _b === void 0 ? void 0 : _b.title) {
+                            currentTaskTitle = lastExecution.prompt.title;
+                        }
+                    }
+                    // Calculate base progress percentage
+                    let percent = totalParameters > 0 ? resolvedParameters / totalParameters : 0;
+                    // Add time-based progress for current task if we haven't completed all parameters
+                    if (resolvedParameters < totalParameters) {
+                        const elapsedMs = new Date().getTime() - startTime;
+                        const estimatedTotalMs = totalParameters * 30 * 1000; // Estimate 30 seconds per parameter
+                        const timeProgress = Math.min(elapsedMs / estimatedTotalMs, 0.9); // Cap at 90% for time-based progress
+                        // If we have time progress but no parameter progress, show time progress
+                        if (percent === 0 && timeProgress > 0) {
+                            percent = Math.min(timeProgress, 0.1); // Show some progress but not more than 10%
+                        }
+                        else if (percent < 1) {
+                            // Add partial progress for current task
+                            const taskProgress = totalParameters > 0 ? (1 / totalParameters) * 0.5 : 0; // 50% of task progress
+                            percent = Math.min(percent + taskProgress, 0.95); // Cap at 95% until fully complete
+                        }
+                    }
+                    // Clamp to [0,1]
+                    percent = Math.min(Math.max(percent, 0), 1);
+                    // Generate message
+                    let message = '';
+                    if (currentTaskTitle) {
+                        message = `Executing: ${currentTaskTitle}`;
+                    }
+                    else if (resolvedParameters === 0) {
+                        message = 'Starting pipeline execution';
+                    }
+                    else if (resolvedParameters < totalParameters) {
+                        message = `Processing pipeline (${resolvedParameters}/${totalParameters} parameters resolved)`;
+                    }
+                    else {
+                        message = 'Completing pipeline execution';
+                    }
+                    updateTldr({
+                        percent: percent,
+                        message,
+                    });
+                });
+            },
+        });
+    };
     // <- TODO: Make types such as there is no need to do `as` for `createTask`
     return pipelineExecutor;
 }
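
The rewritten executor estimates progress from two signals: the share of non-input parameters already resolved, and wall-clock time against a rough budget of 30 seconds per parameter, capped so the bar never reaches 100% before the pipeline actually finishes. The heuristic, extracted from the code above for clarity (the constants are the ones in the diff):

    function estimatePipelineProgress({ resolvedParameters, totalParameters, elapsedMs }) {
        let percent = totalParameters > 0 ? resolvedParameters / totalParameters : 0;
        if (resolvedParameters < totalParameters) {
            const estimatedTotalMs = totalParameters * 30 * 1000; // assume ~30 s per parameter
            const timeProgress = Math.min(elapsedMs / estimatedTotalMs, 0.9);
            if (percent === 0 && timeProgress > 0) {
                percent = Math.min(timeProgress, 0.1); // show movement, but at most 10%
            } else if (percent < 1) {
                const taskProgress = totalParameters > 0 ? (1 / totalParameters) * 0.5 : 0;
                percent = Math.min(percent + taskProgress, 0.95); // hold below 95% until done
            }
        }
        return Math.min(Math.max(percent, 0), 1); // clamp to [0,1]
    }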
@@ -10141,7 +10644,9 @@
         },
     });
     const knowledgeContent = await source.asText();
-    const result = await prepareKnowledgeFromMarkdownExecutor({ knowledgeContent }).asPromise();
+    const result = await prepareKnowledgeFromMarkdownExecutor({ knowledgeContent }).asPromise({
+        isCrashedOnError: true,
+    });
     const { outputParameters } = result;
     const { knowledgePieces: knowledgePiecesRaw } = outputParameters;
     const knowledgeTextPieces = (knowledgePiecesRaw || '').split('\n---\n');
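
Knowledge preparation follows the same crash-on-error pattern, then splits the executor's single string output into individual knowledge pieces on the '\n---\n' separator. A sketch of the splitting step (the separator is from the diff; trimming and dropping empty pieces is an assumption):

    function splitKnowledgePieces(knowledgePiecesRaw) {
        return (knowledgePiecesRaw || '')
            .split('\n---\n')
            .map((piece) => piece.trim())
            .filter((piece) => piece !== '');
    }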
@@ -10165,12 +10670,16 @@
         ];
      */
     try {
-        const titleResult = await prepareTitleExecutor({ knowledgePieceContent }).asPromise();
+        const titleResult = await prepareTitleExecutor({ knowledgePieceContent }).asPromise({
+            isCrashedOnError: true,
+        });
         const { title: titleRaw = 'Untitled' } = titleResult.outputParameters;
         title = spaceTrim__default["default"](titleRaw) /* <- TODO: Maybe do in pipeline */;
         name = titleToName(title);
         // --- Keywords
-        const keywordsResult = await prepareKeywordsExecutor({ knowledgePieceContent }).asPromise();
+        const keywordsResult = await prepareKeywordsExecutor({ knowledgePieceContent }).asPromise({
+            isCrashedOnError: true,
+        });
         const { keywords: keywordsRaw = '' } = keywordsResult.outputParameters;
         keywords = (keywordsRaw || '')
             .split(',')
@@ -12837,31 +13346,23 @@
     return content;
 }
 
+/**
+ * @private internal for `preserve`
+ */
+const _preserved = [];
 /**
  * Does nothing, but preserves the function in the bundle
  * Compiler is tricked into thinking the function is used
  *
  * @param value any function to preserve
  * @returns nothing
- * @private internal function of `JavascriptExecutionTools` and `JavascriptEvalExecutionTools`
- */
-function preserve(func) {
-    // Note: NOT calling the function
-    (async () => {
-        // TODO: [💩] Change to `await forEver` or `forTime(Infinity)`
-        await waitasecond.forTime(100000000);
-        // [1]
-        try {
-            await func();
-        }
-        finally {
-            // do nothing
-        }
-    })();
+ * @private within the repository
+ */
+function $preserve(...value) {
+    _preserved.push(...value);
 }
 /**
- * TODO: Probably remove in favour of `keepImported`
- * TODO: [1] This maybe does memory leak
+ * Note: [💞] Ignore a discrepancy between file name and entity name
  */
 
 // Note: [💎]
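
The old `preserve` pinned a function into the bundle by scheduling a far-future async call, which its own TODOs flagged as a possible memory leak; the new `$preserve` simply pushes references into a module-level array, which equally defeats tree-shaking but stays synchronous and leak-free. Side by side (the "before" is paraphrased here with `setTimeout` standing in for the `waitasecond.forTime` helper):

    // Before (paraphrased): keep a pending timer alive so the bundler sees `func` as used.
    function preserveOld(func) {
        (async () => {
            await new Promise((resolve) => setTimeout(resolve, 100000000));
            try {
                await func();
            } finally {
                /* do nothing */
            }
        })();
    }

    // After: just hold the references; the array itself is never read.
    const _preserved = [];
    function $preserve(...value) {
        _preserved.push(...value);
    }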
@@ -12889,25 +13390,25 @@
         // Note: [💎]
         // Note: Using direct eval, following variables are in same scope as eval call so they are accessible from inside the evaluated script:
         const spaceTrim = (_) => spaceTrim__default["default"](_);
-        preserve(spaceTrim);
+        $preserve(spaceTrim);
         const removeQuotes$1 = removeQuotes;
-        preserve(removeQuotes$1);
+        $preserve(removeQuotes$1);
         const unwrapResult$1 = unwrapResult;
-        preserve(unwrapResult$1);
+        $preserve(unwrapResult$1);
         const trimEndOfCodeBlock$1 = trimEndOfCodeBlock;
-        preserve(trimEndOfCodeBlock$1);
+        $preserve(trimEndOfCodeBlock$1);
         const trimCodeBlock$1 = trimCodeBlock;
-        preserve(trimCodeBlock$1);
+        $preserve(trimCodeBlock$1);
         // TODO: DRY [🍯]
         const trim = (str) => str.trim();
-        preserve(trim);
+        $preserve(trim);
         // TODO: DRY [🍯]
         const reverse = (str) => str.split('').reverse().join('');
-        preserve(reverse);
+        $preserve(reverse);
         const removeEmojis$1 = removeEmojis;
-        preserve(removeEmojis$1);
+        $preserve(removeEmojis$1);
         const prettifyMarkdown$1 = prettifyMarkdown;
-        preserve(prettifyMarkdown$1);
+        $preserve(prettifyMarkdown$1);
         //-------[n12:]---
         const capitalize$1 = capitalize;
         const decapitalize$1 = decapitalize;
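
These local aliases exist because the sandboxed script runs through direct `eval`: direct eval shares the caller's scope, so every `const` declared next to the `eval(...)` call is visible inside the evaluated script, provided the bundler does not strip it as unused, which is exactly what the `$preserve(...)` calls prevent. A toy demonstration of the scoping rule:

    function demoDirectEvalScope() {
        const helper = (s) => s.toUpperCase();
        // Direct eval: `helper` is in scope inside the evaluated code
        const direct = eval('helper("hi")'); // -> 'HI'
        // Indirect eval runs in the global scope, where `helper` does not exist
        const indirect = (0, eval)('typeof helper'); // -> 'undefined'
        return { direct, indirect };
    }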
@@ -12923,18 +13424,18 @@
         // TODO: DRY [🍯]
         Array.from(parseKeywordsFromString(input)).join(', '); /* <- TODO: [🧠] What is the best format comma list, bullet list,...? */
         const normalizeTo_SCREAMING_CASE$1 = normalizeTo_SCREAMING_CASE;
-        preserve(capitalize$1);
-        preserve(decapitalize$1);
-        preserve(nameToUriPart$1);
-        preserve(nameToUriParts$1);
-        preserve(removeDiacritics$1);
-        preserve(normalizeWhitespaces$1);
-        preserve(normalizeToKebabCase$1);
-        preserve(normalizeTo_camelCase$1);
-        preserve(normalizeTo_snake_case$1);
-        preserve(normalizeTo_PascalCase$1);
-        preserve(parseKeywords);
-        preserve(normalizeTo_SCREAMING_CASE$1);
+        $preserve(capitalize$1);
+        $preserve(decapitalize$1);
+        $preserve(nameToUriPart$1);
+        $preserve(nameToUriParts$1);
+        $preserve(removeDiacritics$1);
+        $preserve(normalizeWhitespaces$1);
+        $preserve(normalizeToKebabCase$1);
+        $preserve(normalizeTo_camelCase$1);
+        $preserve(normalizeTo_snake_case$1);
+        $preserve(normalizeTo_PascalCase$1);
+        $preserve(parseKeywords);
+        $preserve(normalizeTo_SCREAMING_CASE$1);
         //-------[/n12]---
         if (!script.includes('return')) {
             script = `return ${script}`;
@@ -16983,7 +17484,7 @@
     // ▶ Create executor - the function that will execute the Pipeline
     const pipelineExecutor = createPipelineExecutor({ pipeline, tools });
     // 🚀▶ Execute the Pipeline
-    const result = await pipelineExecutor(inputParameters).asPromise();
+    const result = await pipelineExecutor(inputParameters).asPromise({ isCrashedOnError: true });
     const { outputParameters } = result;
     const outputParametersLength = Object.keys(outputParameters).length;
     let resultString;
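
Taken together, the public call path in this build is: create an executor from a pipeline plus tools, run it, and await it as a promise that now rejects on failure. A minimal sketch of that path (only `createPipelineExecutor` and `asPromise({ isCrashedOnError: true })` are confirmed by this diff; the pipeline and tools wiring is elided):

    async function runPipeline(pipeline, tools, inputParameters) {
        // ▶ Create executor - the function that will execute the pipeline
        const pipelineExecutor = createPipelineExecutor({ pipeline, tools });
        // 🚀▶ Execute the pipeline; with isCrashedOnError a failure rejects the promise
        // instead of resolving with an error-carrying result
        const result = await pipelineExecutor(inputParameters).asPromise({ isCrashedOnError: true });
        return result.outputParameters;
    }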