@promptbook/core 0.103.0-4 → 0.103.0-40

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (143) hide show
  1. package/README.md +262 -203
  2. package/esm/index.es.js +2459 -183
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/books/index.d.ts +0 -81
  5. package/esm/typings/src/_packages/browser.index.d.ts +6 -0
  6. package/esm/typings/src/_packages/cli.index.d.ts +4 -0
  7. package/esm/typings/src/_packages/components.index.d.ts +12 -8
  8. package/esm/typings/src/_packages/core.index.d.ts +30 -10
  9. package/esm/typings/src/_packages/node.index.d.ts +4 -2
  10. package/esm/typings/src/_packages/types.index.d.ts +18 -2
  11. package/esm/typings/src/_packages/wizard.index.d.ts +4 -0
  12. package/esm/typings/src/book-2.0/agent-source/AgentBasicInformation.d.ts +1 -0
  13. package/esm/typings/src/book-2.0/agent-source/padBook.d.ts +16 -0
  14. package/esm/typings/src/book-components/BookEditor/BookEditor.d.ts +76 -15
  15. package/esm/typings/src/book-components/BookEditor/BookEditorActionbar.d.ts +14 -0
  16. package/esm/typings/src/book-components/BookEditor/BookEditorMonaco.d.ts +5 -0
  17. package/esm/typings/src/book-components/Chat/MarkdownContent/MarkdownContent.d.ts +15 -0
  18. package/esm/typings/src/book-components/Chat/MockedChat/MockedChat.d.ts +5 -0
  19. package/esm/typings/src/book-components/Chat/save/html/htmlSaveFormatDefinition.d.ts +1 -0
  20. package/esm/typings/src/book-components/Chat/save/pdf/pdfSaveFormatDefinition.d.ts +4 -0
  21. package/esm/typings/src/book-components/Qr/BrandedQrCode.d.ts +18 -0
  22. package/esm/typings/src/book-components/Qr/GenericQrCode.d.ts +10 -0
  23. package/esm/typings/src/book-components/Qr/PromptbookQrCode.d.ts +18 -0
  24. package/esm/typings/src/book-components/Qr/useQrCode.d.ts +15 -0
  25. package/esm/typings/src/book-components/_common/Dropdown/Dropdown.d.ts +15 -0
  26. package/esm/typings/src/book-components/_common/Modal/Modal.d.ts +2 -2
  27. package/esm/typings/src/book-components/_common/Tooltip/Tooltip.d.ts +47 -0
  28. package/esm/typings/src/book-components/_common/react-utils/classNames.d.ts +1 -1
  29. package/esm/typings/src/book-components/icons/AboutIcon.d.ts +9 -0
  30. package/esm/typings/src/book-components/icons/CloseIcon.d.ts +4 -8
  31. package/esm/typings/src/book-components/icons/DownloadIcon.d.ts +9 -0
  32. package/esm/typings/src/book-components/icons/ExitFullscreenIcon.d.ts +7 -0
  33. package/esm/typings/src/book-components/icons/FullscreenIcon.d.ts +7 -0
  34. package/esm/typings/src/book-components/icons/MenuIcon.d.ts +12 -0
  35. package/esm/typings/src/cli/cli-commands/_boilerplate.d.ts +2 -1
  36. package/esm/typings/src/cli/cli-commands/about.d.ts +3 -1
  37. package/esm/typings/src/cli/cli-commands/hello.d.ts +2 -1
  38. package/esm/typings/src/cli/cli-commands/list-models.d.ts +2 -1
  39. package/esm/typings/src/cli/cli-commands/list-scrapers.d.ts +2 -1
  40. package/esm/typings/src/cli/cli-commands/login.d.ts +2 -1
  41. package/esm/typings/src/cli/cli-commands/make.d.ts +2 -1
  42. package/esm/typings/src/cli/cli-commands/prettify.d.ts +2 -1
  43. package/esm/typings/src/cli/cli-commands/run.d.ts +2 -1
  44. package/esm/typings/src/cli/cli-commands/{start-server.d.ts → start-agents-server.d.ts} +3 -2
  45. package/esm/typings/src/cli/cli-commands/start-pipelines-server.d.ts +15 -0
  46. package/esm/typings/src/cli/cli-commands/test-command.d.ts +2 -1
  47. package/esm/typings/src/cli/common/$addGlobalOptionsToCommand.d.ts +2 -1
  48. package/esm/typings/src/collection/agent-collection/AgentCollection.d.ts +36 -0
  49. package/esm/typings/src/collection/agent-collection/constructors/AgentCollectionInDirectory.d.ts +88 -0
  50. package/esm/typings/src/collection/{PipelineCollection.d.ts → pipeline-collection/PipelineCollection.d.ts} +7 -3
  51. package/esm/typings/src/collection/{SimplePipelineCollection.d.ts → pipeline-collection/SimplePipelineCollection.d.ts} +5 -5
  52. package/esm/typings/src/collection/{constructors/createCollectionFromDirectory.d.ts → pipeline-collection/constructors/createPipelineCollectionFromDirectory.d.ts} +8 -11
  53. package/esm/typings/src/collection/pipeline-collection/constructors/createPipelineCollectionFromJson.d.ts +13 -0
  54. package/esm/typings/src/collection/{constructors/createCollectionFromPromise.d.ts → pipeline-collection/constructors/createPipelineCollectionFromPromise.d.ts} +6 -5
  55. package/esm/typings/src/collection/{constructors/createCollectionFromUrl.d.ts → pipeline-collection/constructors/createPipelineCollectionFromUrl.d.ts} +3 -3
  56. package/esm/typings/src/collection/{constructors/createSubcollection.d.ts → pipeline-collection/constructors/createPipelineSubcollection.d.ts} +3 -3
  57. package/esm/typings/src/collection/pipeline-collection/pipelineCollectionToJson.d.ts +13 -0
  58. package/esm/typings/src/commands/_common/types/CommandParser.d.ts +4 -5
  59. package/esm/typings/src/config.d.ts +22 -2
  60. package/esm/typings/src/errors/0-index.d.ts +3 -0
  61. package/esm/typings/src/errors/NotAllowed.d.ts +9 -0
  62. package/esm/typings/src/execution/AvailableModel.d.ts +1 -0
  63. package/esm/typings/src/execution/Executables.d.ts +3 -0
  64. package/esm/typings/src/execution/ExecutionTask.d.ts +12 -3
  65. package/esm/typings/src/execution/ExecutionTools.d.ts +5 -0
  66. package/esm/typings/src/execution/FilesystemTools.d.ts +1 -1
  67. package/esm/typings/src/execution/LlmExecutionTools.d.ts +7 -1
  68. package/esm/typings/src/execution/createPipelineExecutor/10-executePipeline.d.ts +5 -0
  69. package/esm/typings/src/execution/createPipelineExecutor/20-executeTask.d.ts +5 -0
  70. package/esm/typings/src/execution/createPipelineExecutor/30-executeFormatSubvalues.d.ts +5 -0
  71. package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +5 -0
  72. package/esm/typings/src/execution/utils/logLlmCall.d.ts +8 -0
  73. package/esm/typings/src/execution/utils/usage-constants.d.ts +4 -124
  74. package/esm/typings/src/high-level-abstractions/_common/HighLevelAbstraction.d.ts +2 -1
  75. package/esm/typings/src/llm-providers/_common/register/$registeredLlmToolsMessage.d.ts +2 -1
  76. package/esm/typings/src/llm-providers/agent/Agent.d.ts +49 -0
  77. package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.d.ts +9 -4
  78. package/esm/typings/src/llm-providers/agent/AgentOptions.d.ts +17 -0
  79. package/esm/typings/src/llm-providers/agent/CreateAgentLlmExecutionToolsOptions.d.ts +16 -0
  80. package/esm/typings/src/llm-providers/agent/createAgentLlmExecutionTools.d.ts +1 -19
  81. package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionTools.d.ts +28 -0
  82. package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionToolsOptions.d.ts +7 -1
  83. package/esm/typings/src/other/templates/getTemplatesPipelineCollection.d.ts +1 -1
  84. package/esm/typings/src/playground/permanent/_boilerplate.d.ts +5 -0
  85. package/esm/typings/src/playground/permanent/agent-with-browser-playground.d.ts +5 -0
  86. package/esm/typings/src/playground/playground.d.ts +0 -3
  87. package/esm/typings/src/playground/playground1.d.ts +2 -0
  88. package/esm/typings/src/remote-server/startRemoteServer.d.ts +4 -1
  89. package/esm/typings/src/remote-server/types/RemoteServerOptions.d.ts +22 -8
  90. package/esm/typings/src/scrapers/_boilerplate/createBoilerplateScraper.d.ts +1 -12
  91. package/esm/typings/src/scrapers/_boilerplate/register-metadata.d.ts +1 -9
  92. package/esm/typings/src/scrapers/document/createDocumentScraper.d.ts +1 -12
  93. package/esm/typings/src/scrapers/document/register-metadata.d.ts +1 -9
  94. package/esm/typings/src/scrapers/document-legacy/createLegacyDocumentScraper.d.ts +1 -12
  95. package/esm/typings/src/scrapers/document-legacy/register-metadata.d.ts +1 -9
  96. package/esm/typings/src/scrapers/markdown/createMarkdownScraper.d.ts +1 -12
  97. package/esm/typings/src/scrapers/markdown/register-metadata.d.ts +1 -9
  98. package/esm/typings/src/scrapers/markitdown/createMarkitdownScraper.d.ts +1 -12
  99. package/esm/typings/src/scrapers/markitdown/register-metadata.d.ts +1 -9
  100. package/esm/typings/src/scrapers/pdf/createPdfScraper.d.ts +1 -12
  101. package/esm/typings/src/scrapers/pdf/register-metadata.d.ts +1 -9
  102. package/esm/typings/src/scrapers/website/createWebsiteScraper.d.ts +1 -12
  103. package/esm/typings/src/scrapers/website/register-metadata.d.ts +1 -9
  104. package/esm/typings/src/storage/env-storage/$EnvStorage.d.ts +2 -1
  105. package/esm/typings/src/transpilers/_common/BookTranspiler.d.ts +29 -0
  106. package/esm/typings/src/transpilers/_common/BookTranspilerOptions.d.ts +18 -0
  107. package/esm/typings/src/transpilers/_common/register/$bookTranspilersRegister.d.ts +15 -0
  108. package/esm/typings/src/transpilers/formatted-book-in-markdown/FormattedBookInMarkdownTranspiler.d.ts +13 -0
  109. package/esm/typings/src/transpilers/formatted-book-in-markdown/register.d.ts +15 -0
  110. package/esm/typings/src/transpilers/openai-sdk/OpenAiSdkTranspiler.d.ts +13 -0
  111. package/esm/typings/src/transpilers/openai-sdk/OpenAiSdkTranspiler.test.d.ts +1 -0
  112. package/esm/typings/src/transpilers/openai-sdk/playground/playground.d.ts +5 -0
  113. package/esm/typings/src/transpilers/openai-sdk/register.d.ts +15 -0
  114. package/esm/typings/src/types/LlmCall.d.ts +20 -0
  115. package/esm/typings/src/types/Updatable.d.ts +19 -0
  116. package/esm/typings/src/types/typeAliases.d.ts +1 -1
  117. package/esm/typings/src/utils/execCommand/$execCommand.d.ts +2 -1
  118. package/esm/typings/src/utils/execCommand/$execCommands.d.ts +2 -1
  119. package/esm/typings/src/utils/files/$induceBookDownload.d.ts +13 -0
  120. package/esm/typings/src/utils/files/$induceFileDownload.d.ts +13 -0
  121. package/esm/typings/src/utils/files/ObjectUrl.d.ts +46 -0
  122. package/esm/typings/src/utils/files/listAllFiles.d.ts +2 -3
  123. package/esm/typings/src/utils/misc/aboutPromptbookInformation.d.ts +21 -0
  124. package/esm/typings/src/utils/misc/injectCssModuleIntoShadowRoot.d.ts +1 -0
  125. package/esm/typings/src/utils/misc/xAboutPromptbookInformation.d.ts +13 -0
  126. package/esm/typings/src/utils/organization/$side_effect.d.ts +7 -0
  127. package/esm/typings/src/utils/serialization/$deepFreeze.d.ts +2 -1
  128. package/esm/typings/src/version.d.ts +1 -1
  129. package/esm/typings/src/wizard/$getCompiledBook.d.ts +1 -2
  130. package/package.json +8 -5
  131. package/umd/index.umd.js +2474 -188
  132. package/umd/index.umd.js.map +1 -1
  133. package/esm/typings/src/book-components/BookEditor/BookEditorInner.d.ts +0 -5
  134. package/esm/typings/src/book-components/BookEditor/BookEditorWrapper.d.ts +0 -9
  135. package/esm/typings/src/book-components/BookEditor/config.d.ts +0 -10
  136. package/esm/typings/src/book-components/Chat/utils/renderMarkdown.d.ts +0 -21
  137. package/esm/typings/src/collection/collectionToJson.d.ts +0 -13
  138. package/esm/typings/src/collection/constructors/createCollectionFromJson.d.ts +0 -13
  139. /package/esm/typings/src/{book-components/Chat/utils/renderMarkdown.test.d.ts → collection/agent-collection/constructors/AgentCollectionInDirectory.test.d.ts} +0 -0
  140. /package/esm/typings/src/collection/{constructors/createCollectionFromDirectory.test.d.ts → pipeline-collection/constructors/createPipelineCollectionFromDirectory.test.d.ts} +0 -0
  141. /package/esm/typings/src/collection/{constructors/createCollectionFromJson.test.d.ts → pipeline-collection/constructors/createPipelineCollectionFromJson.test.d.ts} +0 -0
  142. /package/esm/typings/src/collection/{constructors/createCollectionFromPromise.test.d.ts → pipeline-collection/constructors/createPipelineCollectionFromPromise.test.d.ts} +0 -0
  143. /package/esm/typings/src/collection/{collectionToJson.test.d.ts → pipeline-collection/pipelineCollectionToJson.test.d.ts} +0 -0
package/umd/index.umd.js CHANGED
@@ -1,8 +1,8 @@
1
1
  (function (global, factory) {
2
- typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('spacetrim'), require('crypto'), require('rxjs'), require('waitasecond'), require('crypto-js/enc-hex'), require('crypto-js/sha256'), require('path'), require('crypto-js'), require('mime-types'), require('papaparse'), require('moment'), require('colors')) :
3
- typeof define === 'function' && define.amd ? define(['exports', 'spacetrim', 'crypto', 'rxjs', 'waitasecond', 'crypto-js/enc-hex', 'crypto-js/sha256', 'path', 'crypto-js', 'mime-types', 'papaparse', 'moment', 'colors'], factory) :
4
- (global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global["promptbook-core"] = {}, global.spaceTrim, global.crypto, global.rxjs, global.waitasecond, global.hexEncoder, global.sha256, global.path, global.cryptoJs, global.mimeTypes, global.papaparse, global.moment, global.colors));
5
- })(this, (function (exports, spaceTrim, crypto, rxjs, waitasecond, hexEncoder, sha256, path, cryptoJs, mimeTypes, papaparse, moment, colors) { 'use strict';
2
+ typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('spacetrim'), require('crypto'), require('rxjs'), require('waitasecond'), require('crypto-js/enc-hex'), require('crypto-js/sha256'), require('path'), require('crypto-js'), require('mime-types'), require('papaparse'), require('moment'), require('colors'), require('bottleneck'), require('openai')) :
3
+ typeof define === 'function' && define.amd ? define(['exports', 'spacetrim', 'crypto', 'rxjs', 'waitasecond', 'crypto-js/enc-hex', 'crypto-js/sha256', 'path', 'crypto-js', 'mime-types', 'papaparse', 'moment', 'colors', 'bottleneck', 'openai'], factory) :
4
+ (global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global["promptbook-core"] = {}, global.spaceTrim, global.crypto, global.rxjs, global.waitasecond, global.hexEncoder, global.sha256, global.path, global.cryptoJs, global.mimeTypes, global.papaparse, global.moment, global.colors, global.Bottleneck, global.OpenAI));
5
+ })(this, (function (exports, spaceTrim, crypto, rxjs, waitasecond, hexEncoder, sha256, path, cryptoJs, mimeTypes, papaparse, moment, colors, Bottleneck, OpenAI) { 'use strict';
6
6
 
7
7
  function _interopDefaultLegacy (e) { return e && typeof e === 'object' && 'default' in e ? e : { 'default': e }; }
8
8
 
@@ -11,6 +11,8 @@
11
11
  var sha256__default = /*#__PURE__*/_interopDefaultLegacy(sha256);
12
12
  var moment__default = /*#__PURE__*/_interopDefaultLegacy(moment);
13
13
  var colors__default = /*#__PURE__*/_interopDefaultLegacy(colors);
14
+ var Bottleneck__default = /*#__PURE__*/_interopDefaultLegacy(Bottleneck);
15
+ var OpenAI__default = /*#__PURE__*/_interopDefaultLegacy(OpenAI);
14
16
 
15
17
  // ⚠️ WARNING: This code has been generated so that any manual changes will be overwritten
16
18
  /**
@@ -26,13 +28,13 @@
26
28
  * @generated
27
29
  * @see https://github.com/webgptorg/promptbook
28
30
  */
29
- const PROMPTBOOK_ENGINE_VERSION = '0.103.0-4';
31
+ const PROMPTBOOK_ENGINE_VERSION = '0.103.0-40';
30
32
  /**
31
33
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
32
34
  * Note: [💞] Ignore a discrepancy between file name and entity name
33
35
  */
34
36
 
35
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> 
{knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just 
title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModels",description:"List of available model names together with their descriptions as JSON",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelsRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n```json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpful assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n```\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models 
first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n```json\n{availableModels}\n```\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelsRequirements",format:"JSON",dependentParameterNames:["availableModels","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModels}` List of available model names together with their descriptions as JSON\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelsRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n\\`\\`\\`json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpful assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are 
the available models:\n\n\\`\\`\\`json\n{availableModels}\n\\`\\`\\`\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelsRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- 
Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"},{title:"📊 Curriculum Audit",pipelineUrl:"https://promptbook.studio/promptbook//examples/lsvp-asistent.book",formfactorName:"GENERIC",parameters:[{name:"result",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"prompt",title:"Prompt",content:"Asistent pro LŠVP\n\nPERSONA Jsi asistent pro RVP Lyceum v rámci Národního pedagogického institutu České Republiky\nMETA IMAGE https://edulk.cz/getFile/id:475818/type:large/02%20zna%C4%8Dka%20npi.jpg\nRULE Pokud jsi nejsi jistý, napiš nevím\nKNOWLEDGE ./241129_Lyceum_final.pdf\nCONTEXT Obecně dokážeš řešit libovolné ŠVP, aktuálně řešíš {Školní vzdělávací program LYCEUM}\nRULE Z {Porovnání RVP a ŠVP - postup} je nejdůležitější fáze 3\nKNOWLEDGE {Školní vzdělávací program LYCEUM} ./ŠVP Lyceum - Finance v digitální době.pdf\nKNOWLEDGE @Slovník\n\n**Interní slovník - RVP/ŠVP**\n\n**RVP**\n\nRámcový vzdělávací program pro obor vzdělání Lyceum je dokument na národní úrovni, který formuluje požadavky na školní vzdělávací programy ve formě především očekávaných výsledků učení, kterých mají žáci absolvováním tohoto programu na dané škole dosáhnout.\n\n**ŠVP**\n\nŠkolní vzdělávací program pro obor vzdělání Lyceum je dokument každé jednotlivé školy, který popisuje v jakých vyučovacích předmětech/ vzdělávacích modulech a v jakých ročnících budou požadované očekávané výsledky učení naplněny. 
Zároveň formuluje další očekávané výsledky učení, které naplňují disponibilní část vyučovacího času určeného RVP pro tento obor vzdělání.\n\n**Očekávaný výsledek učení (OVU)**\n\nVyjadřuje jednotlivý požadavek na to, co mají žáci umět na konci vzdělávacího programu, tzn. jejich požadované kompetence. Je vyjádřen formulací, která je uvozena činnostním slovesem a dále obsahuje předmět této činnosti. Formulace je konkretizována resp. doplněna zpravidla formou odrážek vymezením dílčích znalostí, dovedností, postojů, jejichž splnění je předpokladem dosažení OVU jako celku.\n\n_Příklad:_\n\n<div class=\"joplin-table-wrapper\"><table><tbody><tr><th><p><strong>Žák/žákyně řídí realizaci jednoduchého projektu</strong></p></th></tr><tr><td><ul><li>naplánuje aktivity projektu</li></ul></td></tr><tr><td><ul><li>navrhne rozpočet projektu vzhledem k navrženým aktivitám</li></ul></td></tr><tr><td><ul><li>stanoví základní ukazatele a sleduje jejich naplňování</li></ul></td></tr><tr><td><ul><li>vede projektový tým</li></ul></td></tr><tr><td><ul><li>uvede, jak by řešil krizové situace v projektu</li></ul></td></tr><tr><td><ul><li>vyhodnotí úspěšnost projektu</li></ul></td></tr></tbody></table></div>\n\n**Vzdělávací oblasti**\n\nOčekávané výsledky učení jsou v **_RVP členěny do 4 vzdělávacích oblastí_**, které tvoří společný všeobecně vzdělávací základ:\n\n- Osobnostní rozvoj, vzdělávání ke zdraví, bezpečí a produktivnímu pracovnímu životu (kariéře)\n- Komunikační a jazykové vzdělávání\n- Aplikované vzdělávání STEM (Science, Technology, Engeneering, Math), tj. 
přírodní vědy, informatika, technika, matematika\n- Prakticky orientované vzdělávání společenskovědní a humanitní\n\nKaždá vzdělávací oblast se dále člení na okruhy, v jejichž rámci jsou OVU samostatně číslované.\n\n<div class=\"joplin-table-wrapper\"><table><tbody><tr><th rowspan=\"21\"><ul><li>Prakticky orientované vzdělávání společenskovědní a humanitní</li></ul></th><th rowspan=\"21\"><p><strong>Člověk, ekonomie a podnikání</strong></p></th><th rowspan=\"7\"><p><strong>1</strong></p></th><th><p><strong>zpracuje podklady související s podnikáním</strong></p></th></tr><tr><td><p>připraví podnikatelský záměr</p></td></tr><tr><td><p>sestaví zakladatelský rozpočet</p></td></tr><tr><td><p>zkalkuluje cenu zboží nebo služby</p></td></tr><tr><td><p>vysvětlí na příkladu základní povinnosti podnikatele vůči státu a zaměstnancům</p></td></tr><tr><td><p>vede daňovou evidenci</p></td></tr><tr><td><p>vysvětlí na příkladech etiku v podnikání</p></td></tr><tr><td rowspan=\"7\"><p><strong>2</strong></p></td><td><p><strong>řídí realizaci jednoduchého projektu</strong></p></td></tr><tr><td><p>naplánuje aktivity projektu</p></td></tr><tr><td><p>navrhne rozpočet projektu vzhledem k navrženým aktivitám</p></td></tr><tr><td><p>stanoví základní ukazatele a sleduje jejich naplňování</p></td></tr><tr><td><p>vede projektový tým</p></td></tr><tr><td><p>uvede, jak by řešil krizové situace v projektu</p></td></tr><tr><td><p>vyhodnotí úspěšnost projektu</p></td></tr><tr><td rowspan=\"7\"><p><strong>3</strong></p></td><td><p><strong>aplikuje ekonomické teorie v osobním a profesním životě</strong></p></td></tr><tr><td><p>vysvětlí základní ekonomické otázky</p></td></tr><tr><td><p>vysvětí stanovení rovnovážné ceny na dokonalém i nedokonalém trhu</p></td></tr><tr><td><p>charakterizuje výrobní faktory a vysvětlí hranici produkčních možností a náklady obětované příležitosti</p></td></tr><tr><td><p>uvede nejdůležitější makroekonomické pojmy a vliv jejich výše na kvalitu života a podnikání v daném 
státě</p></td></tr><tr><td><p>vysvětlí podstatu inflace a její důsledky na finanční situaci obyvatel a na příkladu ukáže jak se bránit jejím nepříznivým důsledkům</p></td></tr><tr><td><p>uvede hlavní výhody a nevýhody mezinárodního obchodu a vliv ochranářských opatření na ekonomickou situaci dané země</p></td></tr><tr><td></td><td></td><td><p><strong>4</strong></p></td><td><p>Atd.</p></td></tr></tbody></table></div>\n\n**Vyučovací předmět / vzdělávací modul**\n\nOčekávané výsledky učení jsou v **ŠVP** členěny do vyučovacích předmětů nebo vzdělávacích modulů, které jsou dále zařazeny do jednoho nebo více ročníků 4letého studia. Vyučovací předmět / vzdělávací modul tvoří vyučovací jednotku, kde jsou očekávané výsledky učení dále rozpracovány pro potřeby výuky podle následující šablony\n\n| **A. VSTUPNÍ ČÁST** |\n| --- |\n| **1\\. Název** |\n| **2\\. Kód** (kódy by měly být navázány na obory vzdělání a výsledky učení) |\n| **2a) Kategorie vzdělání** - v případě, že nebude součástí kódu |\n| **3\\. Typ vyučovací jednotky** (modul, předmět, stáž apod.) |\n| **4\\. Délka** (počet hodin - dělitelný čtyřmi (optimální modul 16, 32 hodin = týden výuky) |\n| **5\\. Platnost** (datum, od kterého platí) |\n| **6\\. Vstupní předpoklady** (vymezení požadované úrovně vstupních vědomostí a dovedností, které jsou předpokladem úspěšného studia) |\n| |\n| **B. JÁDRO VYUČOVACÍ JEDNOTKY** |\n| **1\\. Charakteristika** (stručná anotace popisující obecné cíle a pojetí) |\n| **2\\. Očekávané výsledky učení a jejich indikátory (převzaté z RVP nebo dále konkretizované)** |\n| **3\\. Podpora rozvoje klíčových kompetencí a základních gramotností** (které klíčové kompetence jsou v rozvíjeny) |\n| **4\\. Obsah vzdělávání** (rozpis učiva) |\n| **5\\. Vzdělávací strategie** (strategie výuky, resp. učební činnosti žáků, které jsou doporučené pro dosažení výsledků) |\n| |\n| **C. VÝSTUPNÍ ČÁST** |\n| **1\\. Způsob ověřování dosažených výsledků** (ve vazbě na jednotlivé výsledky učení) |\n| **2\\. 
Kritéria hodnocení** (co znamená splnění výsledků učení, kdy je splněna celá vyučovací jednotka, kritéria pro známky, příp. procentuální, slovní hodnocení) |\n| **3\\. Doporučená studijní literatura, odkazy na ilustrační zdroje** |\n| **4\\. Poznámky** |\n\n**Soulad OVU RVP a ŠVP**\n\nTento soulad je předmětem zjišťování. Soulad nastává, jestliže jsou očekávané výsledky učení z jednotlivých vzdělávacích oblastí RVP **obsaženy** ve vyučovacích předmětech/ vzdělávacích modulech ŠVP jednotlivých škol, tzn. že v ŠVP se objevuje jejich formulace buď v doslovném nebo podobném znění v jednom nebo více vyučovacích předmětech/ vzdělávacích modulech.\n\n_Příklad souladu:_\n\nRVP ŠVP - komunikace a marketing (SŠ obchodní Č.\n\n| **2** | **řídí realizaci jednoduchého projektu** |\n| --- | --- |\n| naplánuje aktivity projektu |\n| navrhne rozpočet projektu vzhledem k navrženým aktivitám |\n| stanoví základní ukazatele a sleduje jejich naplňování |\n| vede projektový tým |\n| uvede, jak by řešil krizové situace v projektu |\n| vyhodnotí úspěšnost projektu |\n\nKNOWLEDGE {Porovnání RVP a ŠVP - postup}\n\n\n# AUDITNÍ PROTOKOL ŠVP-RVP\n\n# (POPIS KONTROLNÍHO ALGORITMU)\n\nMetodika je určena pro **Kvantifikaci Shody** školního vzdělávacího programu (ŠVP) s Rámcovým vzdělávacím programem (RVP).\n\n## FÁZE 1: VALIDACE DOKUMENTACE\n\n**Cíl:** Ověřit platnost, aktuálnost a strukturu zdrojových dokumentů.\n\n- **RVP Verifikace:** Otevřít aktuální verzi RVP (např. 
RVP ZV/G/SOŠ).\n- **Typová shoda:** Ověřit, že RVP se vztahuje k danému typu školy.\n- **ŠVP Dimenze:** Identifikovat a izolovat relevantní části ŠVP: Profil absolventa, Klíčové kompetence (KK), Vzdělávací oblasti (VO), případně Učební plán (UP).\n- **Verzování:** Potvrdit, že obě verze (RVP a ŠVP) jsou nejnovější a platné (včetně dodatků RVP).\n\n## FÁZE 2: DATABÁZOVÉ MAPOVÁNÍ VÝSTUPŮ (MASTER MATICE)\n\n**Cíl:** Vytvořit systémovou databázi pro křížové porovnání všech povinných komponent RVP se ŠVP.\n\n- **Dekompozice RVP:** Rozložit RVP na základní povinné komponenty: Klíčové kompetence, Vzdělávací oblasti a obory, Očekávané výstupy (OVU), Průřezová témata (PT).\n- **Přiřazovací mapa:** Vytvořit hlavní kontrolní matici (Master Matice) pro záznam vazeb.\n\n| Oblast RVP | Výstup RVP (OVU) | Odpovídající Část ŠVP (Předmět/Ročník) | Konkrétní Tématický Celek v ŠVP | Stav Shody (Protokol) |\n| --- | --- | --- | --- | --- |\n| ... | ... | ... | ... | ... |\n| --- | --- | --- | --- | --- |\n\n## FÁZE 3: ALGORITMICKÁ KONTROLA POKRYTÍ A HLOUBKY\n\n**Cíl:** Posoudit, zda každý povinný výstup RVP je adekvátně reflektován v obsahu ŠVP, a přidělit bodovou hodnotu pro kvantifikaci.\n\n- **Audit OVU:** Projít každý jednotlivý Očekávaný výstup (OVU) z RVP.\n- **Kódování stavu a bodování:** U každého OVU v matici označit stav pokrytí dle následujícího schématu:\n\n| Kód (Protokol) | Popis (Kvalitativní zjištění) | Bodová hodnota (Kvantifikace) |\n| --- | --- | --- |\n| ✅ | Plná shoda (Výstup pokryt v plném rozsahu, odpovídající úrovni RVP) | 1,0 |\n| --- | --- | --- |\n| ⚠️ | Částečná shoda (Formální pokrytí, omezený rozsah, chybná návaznost) | 0,5 |\n| --- | --- | --- |\n| ❌ | Absence (Výstup zcela chybí v obsahu ŠVP) | 0,0 |\n| --- | --- | --- |\n\n- **Defektologie ŠVP:** Identifikovat a zaznamenat deficity ŠVP: Chybějící výstupy (❌), Sémantické překryvy, Přetížení obsahu.\n- **Kvalitativní posun:** Ověřit, zda je formulace výstupů v ŠVP **aktivní, měřitelná a v souladu** s 
úrovní RVP.\n\n## FÁZE 4: STRUKTURÁLNÍ VERIFIKACE NÁVAZNOSTI (VERTIKÁLA/HORIZONTÁLA)\n\n**Cíl:** Zkontrolovat logickou posloupnost a provázanost učiva v rámci ŠVP.\n\n- **Vertikální Kontrola:** Ověřit posloupnost OVU a učiva uvnitř jednoho předmětu/oblasti (postup od jednodušších ke složitějším konceptům napříč ročníky).\n- **Horizontální Kontrola:** Zkontrolovat logické provázání napříč vzdělávacími oblastmi a předměty (např. fyzika ↔ matematika).\n- **PT Integrace:** Audit reálné integrace Průřezových témat (PT) do konkrétních částí obsahu, metod a projektů.\n\n## FÁZE 5: ANALÝZA ŠKOLNÍ PROFILACE A ROZŠÍŘENÍ RVP\n\n**Cíl:** Validovat, že profilace školy je **v souladu** s RVP a nejedná se o **rozpor**.\n\n- **Nekonfliktnost:** Porovnat definovaný Profil absolventa školy s Klíčovými kompetencemi RVP. Profil ŠVP musí RVP rozvíjet, nikoli mu odporovat.\n- **Modularita:** Zkontrolovat, zda volitelné předměty a rozšiřující moduly logicky navazují na vzdělávací oblasti RVP.\n- **Implementace specializace:** Popisně uvést, jak je školní profilace (např. STEM zaměření, projektová výuka) integrována do OVU a kompetencí definovaných RVP.\n\n## FÁZE 6: GENERÁTOR ZÁVĚREČNÉ ZPRÁVY A KVANTIFIKACE\n\n**Cíl:** Syntetizovat výsledky, kvantifikovat soulad a generovat závazné návrhy na korekce.\n\n### 6.1 Kvantifikace Souladu\n\nVypočítat Index shody (IS) na základě bodového hodnocení (Fáze 3):\n\n### 6.2 Interpretace Indexu Shody (IS)\n\nKlasifikace souladu pro standardizované vyhodnocení:\n\n| Interval IS | Klasifikace souladu | Popis |\n| --- | --- | --- |\n| 95-100 % | Výborný soulad | ŠVP plně odpovídá RVP, pouze stylistické nebo formální rozdíly. |\n| --- | --- | --- |\n| 85-94 % | Dobrá shoda | ŠVP pokrývá všechny klíčové výstupy, menší korekce nutné. |\n| --- | --- | --- |\n| 70-84 % | Částečná shoda | Významné nedostatky v některých oblastech, nutná revize obsahu. 
|\n| --- | --- | --- |\n| < 70 % | Kritická neshoda | ŠVP neplní rámcové požadavky, ohrožuje legislativní soulad. |\n| --- | --- | --- |\n\n### 6.3 Doplňkové Indexy\n\nVypočítat následující doplňkové indexy pro detailní kvalitativní analýzu:\n\n- **Index kompetenčního souladu (IKS):** Poměr pokrytí klíčových kompetencí RVP v ŠVP.\n- **Index průřezové integrace (IPI):** Míra reálné integrace průřezových témat do výuky.\n- **Index hloubky pokrytí (IHP):** Procento výstupů, které jsou v ŠVP rozvedeny na konkrétní výukové cíle (měřitelné, aktivní formulace).\n- **Index profilové rozšiřitelnosti (IPR):** Kolik rozšiřujících nebo profilových výstupů přesahuje rámec RVP, aniž by narušily jeho strukturu.\n\n### 6.4 Vizuální výstupy\n\nZajistit generování následujících vizualizací pro Závěrečnou zprávu:\n\n- Graf pokrytí po vzdělávacích oblastech (Sloupcový graf IS pro VO).\n- Pavoukový diagram Klíčových kompetencí (RVP vs. ŠVP).\n- Mapa defektů (Vizualizace ❌ a ⚠️ výstupů).\n\n### 6.5 Struktura Závěrečné Zprávy\n\nZpráva musí být strukturována dle standardizovaného formátu:\n\n| Oddíl | Obsah |\n| --- | --- |\n| A. Identifikace | Název školy, IZO, typ školy, datum revize, zpracovatel, verze ŠVP a RVP. |\n| --- | --- |\n| B. Shrnutí výsledků | Celkový Index Shody (IS), hlavní závěry a doporučení. |\n| --- | --- |\n| C. Kvantitativní analýza | Přehled IS v % dle kategorií OVU / VO / kompetencí. |\n| --- | --- |\n| D. Kvalitativní analýza | Slovní zhodnocení kvality souladu (formulace, obtížnost, integrace PT). |\n| --- | --- |\n| E. Rizikové oblasti | Přehled nalezených defektů (chybějící OVU, přetížení, formální shoda). |\n| --- | --- |\n| F. Návrhy opatření (Korekční plán) | Přesné návrhy změn - **Co, Kde, Kdo** má upravit, včetně termínu. |\n| --- | --- |\n| G. Přílohy | Master Matice (Fáze 2-3), revizní tabulka, výstupní grafy a metriky. 
|\n| --- | --- |\n\n\n\n\n.",resultingParameterName:"result",dependentParameterNames:[]}],personas:[],preparations:[{id:1,promptbookVersion:"0.103.0-3",usage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"Asistent pro LŠVP\n\nPERSONA Jsi asistent pro RVP Lyceum v rámci Národního pedagogického institutu České Republiky\nMETA IMAGE https://edulk.cz/getFile/id:475818/type:large/02%20zna%C4%8Dka%20npi.jpg\nRULE Pokud jsi nejsi jistý, napiš nevím\nKNOWLEDGE ./241129_Lyceum_final.pdf\nCONTEXT Obecně dokážeš řešit libovolné ŠVP, aktuálně řešíš {Školní vzdělávací program LYCEUM}\nRULE Z {Porovnání RVP a ŠVP - postup} je nejdůležitější fáze 3\nKNOWLEDGE {Školní vzdělávací program LYCEUM} ./ŠVP Lyceum - Finance v digitální době.pdf\nKNOWLEDGE @Slovník\n\n**Interní slovník - RVP/ŠVP**\n\n**RVP**\n\nRámcový vzdělávací program pro obor vzdělání Lyceum je dokument na národní úrovni, který formuluje požadavky na školní vzdělávací programy ve formě především očekávaných výsledků učení, kterých mají žáci absolvováním tohoto programu na dané škole dosáhnout.\n\n**ŠVP**\n\nŠkolní vzdělávací program pro obor vzdělání Lyceum je dokument každé jednotlivé školy, který popisuje v jakých vyučovacích předmětech/ vzdělávacích modulech a v jakých ročnících budou požadované očekávané výsledky učení naplněny. Zároveň formuluje další očekávané výsledky učení, které naplňují disponibilní část vyučovacího času určeného RVP pro tento obor vzdělání.\n\n**Očekávaný výsledek učení (OVU)**\n\nVyjadřuje jednotlivý požadavek na to, co mají žáci umět na konci vzdělávacího programu, tzn. jejich požadované kompetence. 
Je vyjádřen formulací, která je uvozena činnostním slovesem a dále obsahuje předmět této činnosti. Formulace je konkretizována resp. doplněna zpravidla formou odrážek vymezením dílčích znalostí, dovedností, postojů, jejichž splnění je předpokladem dosažení OVU jako celku.\n\n_Příklad:_\n\n<div class=\"joplin-table-wrapper\"><table><tbody><tr><th><p><strong>Žák/žákyně řídí realizaci jednoduchého projektu</strong></p></th></tr><tr><td><ul><li>naplánuje aktivity projektu</li></ul></td></tr><tr><td><ul><li>navrhne rozpočet projektu vzhledem k navrženým aktivitám</li></ul></td></tr><tr><td><ul><li>stanoví základní ukazatele a sleduje jejich naplňování</li></ul></td></tr><tr><td><ul><li>vede projektový tým</li></ul></td></tr><tr><td><ul><li>uvede, jak by řešil krizové situace v projektu</li></ul></td></tr><tr><td><ul><li>vyhodnotí úspěšnost projektu</li></ul></td></tr></tbody></table></div>\n\n**Vzdělávací oblasti**\n\nOčekávané výsledky učení jsou v **_RVP členěny do 4 vzdělávacích oblastí_**, které tvoří společný všeobecně vzdělávací základ:\n\n- Osobnostní rozvoj, vzdělávání ke zdraví, bezpečí a produktivnímu pracovnímu životu (kariéře)\n- Komunikační a jazykové vzdělávání\n- Aplikované vzdělávání STEM (Science, Technology, Engeneering, Math), tj. 
přírodní vědy, informatika, technika, matematika\n- Prakticky orientované vzdělávání společenskovědní a humanitní\n\nKaždá vzdělávací oblast se dále člení na okruhy, v jejichž rámci jsou OVU samostatně číslované.\n\n<div class=\"joplin-table-wrapper\"><table><tbody><tr><th rowspan=\"21\"><ul><li>Prakticky orientované vzdělávání společenskovědní a humanitní</li></ul></th><th rowspan=\"21\"><p><strong>Člověk, ekonomie a podnikání</strong></p></th><th rowspan=\"7\"><p><strong>1</strong></p></th><th><p><strong>zpracuje podklady související s podnikáním</strong></p></th></tr><tr><td><p>připraví podnikatelský záměr</p></td></tr><tr><td><p>sestaví zakladatelský rozpočet</p></td></tr><tr><td><p>zkalkuluje cenu zboží nebo služby</p></td></tr><tr><td><p>vysvětlí na příkladu základní povinnosti podnikatele vůči státu a zaměstnancům</p></td></tr><tr><td><p>vede daňovou evidenci</p></td></tr><tr><td><p>vysvětlí na příkladech etiku v podnikání</p></td></tr><tr><td rowspan=\"7\"><p><strong>2</strong></p></td><td><p><strong>řídí realizaci jednoduchého projektu</strong></p></td></tr><tr><td><p>naplánuje aktivity projektu</p></td></tr><tr><td><p>navrhne rozpočet projektu vzhledem k navrženým aktivitám</p></td></tr><tr><td><p>stanoví základní ukazatele a sleduje jejich naplňování</p></td></tr><tr><td><p>vede projektový tým</p></td></tr><tr><td><p>uvede, jak by řešil krizové situace v projektu</p></td></tr><tr><td><p>vyhodnotí úspěšnost projektu</p></td></tr><tr><td rowspan=\"7\"><p><strong>3</strong></p></td><td><p><strong>aplikuje ekonomické teorie v osobním a profesním životě</strong></p></td></tr><tr><td><p>vysvětlí základní ekonomické otázky</p></td></tr><tr><td><p>vysvětí stanovení rovnovážné ceny na dokonalém i nedokonalém trhu</p></td></tr><tr><td><p>charakterizuje výrobní faktory a vysvětlí hranici produkčních možností a náklady obětované příležitosti</p></td></tr><tr><td><p>uvede nejdůležitější makroekonomické pojmy a vliv jejich výše na kvalitu života a podnikání v daném 
státě</p></td></tr><tr><td><p>vysvětlí podstatu inflace a její důsledky na finanční situaci obyvatel a na příkladu ukáže jak se bránit jejím nepříznivým důsledkům</p></td></tr><tr><td><p>uvede hlavní výhody a nevýhody mezinárodního obchodu a vliv ochranářských opatření na ekonomickou situaci dané země</p></td></tr><tr><td></td><td></td><td><p><strong>4</strong></p></td><td><p>Atd.</p></td></tr></tbody></table></div>\n\n**Vyučovací předmět / vzdělávací modul**\n\nOčekávané výsledky učení jsou v **ŠVP** členěny do vyučovacích předmětů nebo vzdělávacích modulů, které jsou dále zařazeny do jednoho nebo více ročníků 4letého studia. Vyučovací předmět / vzdělávací modul tvoří vyučovací jednotku, kde jsou očekávané výsledky učení dále rozpracovány pro potřeby výuky podle následující šablony\n\n| **A. VSTUPNÍ ČÁST** |\n| --- |\n| **1\\. Název** |\n| **2\\. Kód** (kódy by měly být navázány na obory vzdělání a výsledky učení) |\n| **2a) Kategorie vzdělání** - v případě, že nebude součástí kódu |\n| **3\\. Typ vyučovací jednotky** (modul, předmět, stáž apod.) |\n| **4\\. Délka** (počet hodin - dělitelný čtyřmi (optimální modul 16, 32 hodin = týden výuky) |\n| **5\\. Platnost** (datum, od kterého platí) |\n| **6\\. Vstupní předpoklady** (vymezení požadované úrovně vstupních vědomostí a dovedností, které jsou předpokladem úspěšného studia) |\n| |\n| **B. JÁDRO VYUČOVACÍ JEDNOTKY** |\n| **1\\. Charakteristika** (stručná anotace popisující obecné cíle a pojetí) |\n| **2\\. Očekávané výsledky učení a jejich indikátory (převzaté z RVP nebo dále konkretizované)** |\n| **3\\. Podpora rozvoje klíčových kompetencí a základních gramotností** (které klíčové kompetence jsou v rozvíjeny) |\n| **4\\. Obsah vzdělávání** (rozpis učiva) |\n| **5\\. Vzdělávací strategie** (strategie výuky, resp. učební činnosti žáků, které jsou doporučené pro dosažení výsledků) |\n| |\n| **C. VÝSTUPNÍ ČÁST** |\n| **1\\. Způsob ověřování dosažených výsledků** (ve vazbě na jednotlivé výsledky učení) |\n| **2\\. 
Kritéria hodnocení** (co znamená splnění výsledků učení, kdy je splněna celá vyučovací jednotka, kritéria pro známky, příp. procentuální, slovní hodnocení) |\n| **3\\. Doporučená studijní literatura, odkazy na ilustrační zdroje** |\n| **4\\. Poznámky** |\n\n**Soulad OVU RVP a ŠVP**\n\nTento soulad je předmětem zjišťování. Soulad nastává, jestliže jsou očekávané výsledky učení z jednotlivých vzdělávacích oblastí RVP **obsaženy** ve vyučovacích předmětech/ vzdělávacích modulech ŠVP jednotlivých škol, tzn. že v ŠVP se objevuje jejich formulace buď v doslovném nebo podobném znění v jednom nebo více vyučovacích předmětech/ vzdělávacích modulech.\n\n_Příklad souladu:_\n\nRVP ŠVP - komunikace a marketing (SŠ obchodní Č.\n\n| **2** | **řídí realizaci jednoduchého projektu** |\n| --- | --- |\n| naplánuje aktivity projektu |\n| navrhne rozpočet projektu vzhledem k navrženým aktivitám |\n| stanoví základní ukazatele a sleduje jejich naplňování |\n| vede projektový tým |\n| uvede, jak by řešil krizové situace v projektu |\n| vyhodnotí úspěšnost projektu |\n\nKNOWLEDGE {Porovnání RVP a ŠVP - postup}\n\n\n# AUDITNÍ PROTOKOL ŠVP-RVP\n\n# (POPIS KONTROLNÍHO ALGORITMU)\n\nMetodika je určena pro **Kvantifikaci Shody** školního vzdělávacího programu (ŠVP) s Rámcovým vzdělávacím programem (RVP).\n\n## FÁZE 1: VALIDACE DOKUMENTACE\n\n**Cíl:** Ověřit platnost, aktuálnost a strukturu zdrojových dokumentů.\n\n- **RVP Verifikace:** Otevřít aktuální verzi RVP (např. 
RVP ZV/G/SOŠ).\n- **Typová shoda:** Ověřit, že RVP se vztahuje k danému typu školy.\n- **ŠVP Dimenze:** Identifikovat a izolovat relevantní části ŠVP: Profil absolventa, Klíčové kompetence (KK), Vzdělávací oblasti (VO), případně Učební plán (UP).\n- **Verzování:** Potvrdit, že obě verze (RVP a ŠVP) jsou nejnovější a platné (včetně dodatků RVP).\n\n## FÁZE 2: DATABÁZOVÉ MAPOVÁNÍ VÝSTUPŮ (MASTER MATICE)\n\n**Cíl:** Vytvořit systémovou databázi pro křížové porovnání všech povinných komponent RVP se ŠVP.\n\n- **Dekompozice RVP:** Rozložit RVP na základní povinné komponenty: Klíčové kompetence, Vzdělávací oblasti a obory, Očekávané výstupy (OVU), Průřezová témata (PT).\n- **Přiřazovací mapa:** Vytvořit hlavní kontrolní matici (Master Matice) pro záznam vazeb.\n\n| Oblast RVP | Výstup RVP (OVU) | Odpovídající Část ŠVP (Předmět/Ročník) | Konkrétní Tématický Celek v ŠVP | Stav Shody (Protokol) |\n| --- | --- | --- | --- | --- |\n| ... | ... | ... | ... | ... |\n| --- | --- | --- | --- | --- |\n\n## FÁZE 3: ALGORITMICKÁ KONTROLA POKRYTÍ A HLOUBKY\n\n**Cíl:** Posoudit, zda každý povinný výstup RVP je adekvátně reflektován v obsahu ŠVP, a přidělit bodovou hodnotu pro kvantifikaci.\n\n- **Audit OVU:** Projít každý jednotlivý Očekávaný výstup (OVU) z RVP.\n- **Kódování stavu a bodování:** U každého OVU v matici označit stav pokrytí dle následujícího schématu:\n\n| Kód (Protokol) | Popis (Kvalitativní zjištění) | Bodová hodnota (Kvantifikace) |\n| --- | --- | --- |\n| ✅ | Plná shoda (Výstup pokryt v plném rozsahu, odpovídající úrovni RVP) | 1,0 |\n| --- | --- | --- |\n| ⚠️ | Částečná shoda (Formální pokrytí, omezený rozsah, chybná návaznost) | 0,5 |\n| --- | --- | --- |\n| ❌ | Absence (Výstup zcela chybí v obsahu ŠVP) | 0,0 |\n| --- | --- | --- |\n\n- **Defektologie ŠVP:** Identifikovat a zaznamenat deficity ŠVP: Chybějící výstupy (❌), Sémantické překryvy, Přetížení obsahu.\n- **Kvalitativní posun:** Ověřit, zda je formulace výstupů v ŠVP **aktivní, měřitelná a v souladu** s 
úrovní RVP.\n\n## FÁZE 4: STRUKTURÁLNÍ VERIFIKACE NÁVAZNOSTI (VERTIKÁLA/HORIZONTÁLA)\n\n**Cíl:** Zkontrolovat logickou posloupnost a provázanost učiva v rámci ŠVP.\n\n- **Vertikální Kontrola:** Ověřit posloupnost OVU a učiva uvnitř jednoho předmětu/oblasti (postup od jednodušších ke složitějším konceptům napříč ročníky).\n- **Horizontální Kontrola:** Zkontrolovat logické provázání napříč vzdělávacími oblastmi a předměty (např. fyzika ↔ matematika).\n- **PT Integrace:** Audit reálné integrace Průřezových témat (PT) do konkrétních částí obsahu, metod a projektů.\n\n## FÁZE 5: ANALÝZA ŠKOLNÍ PROFILACE A ROZŠÍŘENÍ RVP\n\n**Cíl:** Validovat, že profilace školy je **v souladu** s RVP a nejedná se o **rozpor**.\n\n- **Nekonfliktnost:** Porovnat definovaný Profil absolventa školy s Klíčovými kompetencemi RVP. Profil ŠVP musí RVP rozvíjet, nikoli mu odporovat.\n- **Modularita:** Zkontrolovat, zda volitelné předměty a rozšiřující moduly logicky navazují na vzdělávací oblasti RVP.\n- **Implementace specializace:** Popisně uvést, jak je školní profilace (např. STEM zaměření, projektová výuka) integrována do OVU a kompetencí definovaných RVP.\n\n## FÁZE 6: GENERÁTOR ZÁVĚREČNÉ ZPRÁVY A KVANTIFIKACE\n\n**Cíl:** Syntetizovat výsledky, kvantifikovat soulad a generovat závazné návrhy na korekce.\n\n### 6.1 Kvantifikace Souladu\n\nVypočítat Index shody (IS) na základě bodového hodnocení (Fáze 3):\n\n### 6.2 Interpretace Indexu Shody (IS)\n\nKlasifikace souladu pro standardizované vyhodnocení:\n\n| Interval IS | Klasifikace souladu | Popis |\n| --- | --- | --- |\n| 95-100 % | Výborný soulad | ŠVP plně odpovídá RVP, pouze stylistické nebo formální rozdíly. |\n| --- | --- | --- |\n| 85-94 % | Dobrá shoda | ŠVP pokrývá všechny klíčové výstupy, menší korekce nutné. |\n| --- | --- | --- |\n| 70-84 % | Částečná shoda | Významné nedostatky v některých oblastech, nutná revize obsahu. 
|\n| --- | --- | --- |\n| < 70 % | Kritická neshoda | ŠVP neplní rámcové požadavky, ohrožuje legislativní soulad. |\n| --- | --- | --- |\n\n### 6.3 Doplňkové Indexy\n\nVypočítat následující doplňkové indexy pro detailní kvalitativní analýzu:\n\n- **Index kompetenčního souladu (IKS):** Poměr pokrytí klíčových kompetencí RVP v ŠVP.\n- **Index průřezové integrace (IPI):** Míra reálné integrace průřezových témat do výuky.\n- **Index hloubky pokrytí (IHP):** Procento výstupů, které jsou v ŠVP rozvedeny na konkrétní výukové cíle (měřitelné, aktivní formulace).\n- **Index profilové rozšiřitelnosti (IPR):** Kolik rozšiřujících nebo profilových výstupů přesahuje rámec RVP, aniž by narušily jeho strukturu.\n\n### 6.4 Vizuální výstupy\n\nZajistit generování následujících vizualizací pro Závěrečnou zprávu:\n\n- Graf pokrytí po vzdělávacích oblastech (Sloupcový graf IS pro VO).\n- Pavoukový diagram Klíčových kompetencí (RVP vs. ŠVP).\n- Mapa defektů (Vizualizace ❌ a ⚠️ výstupů).\n\n### 6.5 Struktura Závěrečné Zprávy\n\nZpráva musí být strukturována dle standardizovaného formátu:\n\n| Oddíl | Obsah |\n| --- | --- |\n| A. Identifikace | Název školy, IZO, typ školy, datum revize, zpracovatel, verze ŠVP a RVP. |\n| --- | --- |\n| B. Shrnutí výsledků | Celkový Index Shody (IS), hlavní závěry a doporučení. |\n| --- | --- |\n| C. Kvantitativní analýza | Přehled IS v % dle kategorií OVU / VO / kompetencí. |\n| --- | --- |\n| D. Kvalitativní analýza | Slovní zhodnocení kvality souladu (formulace, obtížnost, integrace PT). |\n| --- | --- |\n| E. Rizikové oblasti | Přehled nalezených defektů (chybějící OVU, přetížení, formální shoda). |\n| --- | --- |\n| F. Návrhy opatření (Korekční plán) | Přesné návrhy změn - **Co, Kde, Kdo** má upravit, včetně termínu. |\n| --- | --- |\n| G. Přílohy | Master Matice (Fáze 2-3), revizní tabulka, výstupní grafy a metriky. |\n| --- | --- |\n\n\n\n\n.\n"}],sourceFile:"./books/examples/lsvp-asistent.book"}];
37
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> 
{knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just 
title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModels",description:"List of available model names together with their descriptions as JSON",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelsRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n```json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpful assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n```\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models 
first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n```json\n{availableModels}\n```\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelsRequirements",format:"JSON",dependentParameterNames:["availableModels","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModels}` List of available model names together with their descriptions as JSON\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelsRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n\\`\\`\\`json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpful assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are 
the available models:\n\n\\`\\`\\`json\n{availableModels}\n\\`\\`\\`\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelsRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- 
Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
36
38
 
37
39
  /**
38
40
  * Checks if value is valid email
@@ -1134,15 +1136,30 @@
1134
1136
  * @public exported from `@promptbook/core`
1135
1137
  */
1136
1138
  const PROMPTBOOK_COLOR = Color.fromHex('#79EAFD');
1137
- // <- TODO: [🧠] Using `Color` here increases the package size approx 3kb, maybe remove it
1139
+ // <- TODO: [🧠][🈵] Using `Color` here increases the package size approx 3kb, maybe remove it
1138
1140
/**
 * Colors for syntax highlighting in the `<BookEditor/>`
 *
 * Note: Keys correspond to book syntax token kinds (title, plain line, commitment,
 *       parameter) - presumably consumed by the editor's highlighter; confirm against
 *       the `<BookEditor/>` tokenizer
 * Note: Hex strings are parsed into `Color` instances once, at module load
 *
 * TODO: [🗽] Unite branding and make single place for it
 *
 * @public exported from `@promptbook/core`
 */
const PROMPTBOOK_SYNTAX_COLORS = {
    TITLE: Color.fromHex('#244EA8'),
    LINE: Color.fromHex('#eeeeee'),
    COMMITMENT: Color.fromHex('#DA0F78'),
    PARAMETER: Color.fromHex('#8e44ad'),
};
// <- TODO: [🧠][🈵] Using `Color` here increases the package size approx 3kb, maybe remove it
1154
+ /**
1155
+ * Chat color of the Promptbook (in chat)
1140
1156
  *
1141
1157
  * TODO: [🗽] Unite branding and make single place for it
1142
1158
  *
1143
1159
  * @public exported from `@promptbook/core`
1144
1160
  */
1145
1161
  const PROMPTBOOK_CHAT_COLOR = PROMPTBOOK_COLOR.then(lighten(0.1)).then(saturate(0.9)).then(grayscale(0.9));
1162
+ // <- TODO: [🧠][🈵] Using `Color` and `lighten`, `saturate`,... here increases the package size approx 3kb, maybe remove it
1146
1163
  /**
1147
1164
  * Color of the user (in chat)
1148
1165
  *
@@ -1151,6 +1168,7 @@
1151
1168
  * @public exported from `@promptbook/core`
1152
1169
  */
1153
1170
  const USER_CHAT_COLOR = Color.fromHex('#1D4ED8');
1171
+ // <- TODO: [🧠][🈵] Using `Color` here increases the package size approx 3kb, maybe remove it
1154
1172
  /**
1155
1173
  * When the title is not provided, the default title is used
1156
1174
  *
@@ -1231,6 +1249,13 @@
1231
1249
  * @public exported from `@promptbook/utils`
1232
1250
  */
1233
1251
  const SMALL_NUMBER = 0.001;
1252
+ // <- TODO: [⏳] Standardize timeouts, Make DEFAULT_TIMEOUT_MS as global constant
1253
+ /**
1254
+ * How many times to retry the connections
1255
+ *
1256
+ * @private within the repository - too low-level in comparison with other `MAX_...`
1257
+ */
1258
+ const CONNECTION_RETRIES_LIMIT = 5;
1234
1259
  /**
1235
1260
  * Short time interval to prevent race conditions in milliseconds
1236
1261
  *
@@ -1286,6 +1311,14 @@
1286
1311
  */
1287
1312
  const DEFAULT_BOOKS_DIRNAME = './books';
1288
1313
  // <- TODO: [🕝] Make also `BOOKS_DIRNAME_ALTERNATIVES`
1314
+ /**
1315
+ * Where to store your agents (also book files)
1316
+ * This is kind of a "src" for your books
1317
+ *
1318
+ * @public exported from `@promptbook/core`
1319
+ */
1320
+ const DEFAULT_AGENTS_DIRNAME = './agents';
1321
+ // <- TODO: [🕝] Make also `AGENTS_DIRNAME_ALTERNATIVES`
1289
1322
  // TODO: Just `.promptbook` in config, hardcode subfolders like `download-cache` or `execution-cache`
1290
1323
  /**
1291
1324
  * Where to store the temporary downloads
@@ -1333,7 +1366,7 @@
1333
1366
  ex-port const WIZARD_APP_ID: string_app_id = 'wizard';
1334
1367
  */
1335
1368
  /**
1336
- * The name of the builded pipeline collection made by CLI `ptbk make` and for lookup in `createCollectionFromDirectory`
1369
+ * The name of the builded pipeline collection made by CLI `ptbk make` and for lookup in `createPipelineCollectionFromDirectory`
1337
1370
  *
1338
1371
  * @public exported from `@promptbook/core`
1339
1372
  */
@@ -2068,6 +2101,12 @@
2068
2101
  * @private within the repository
2069
2102
  */
2070
2103
  const REPLACING_NONCE = 'ptbkauk42kV2dzao34faw7FudQUHYPtW';
2104
+ /**
2105
+ * Nonce which is used as string which is not occurring in normal text
2106
+ *
2107
+ * @private within the repository
2108
+ */
2109
+ const SALT_NONCE = 'ptbkghhewbvruets21t54et5';
2071
2110
  /**
2072
2111
  * Placeholder value indicating a parameter is missing its value.
2073
2112
  *
@@ -2567,7 +2606,7 @@
2567
2606
  * Library of pipelines that groups together pipelines for an application.
2568
2607
  * This implementation is a very thin wrapper around the Array / Map of pipelines.
2569
2608
  *
2570
- * @private internal function of `createCollectionFromJson`, use `createCollectionFromJson` instead
2609
+ * @private internal function of `createPipelineCollectionFromJson`, use `createPipelineCollectionFromJson` instead
2571
2610
  * @see https://github.com/webgptorg/pipeline#pipeline-collection
2572
2611
  */
2573
2612
  class SimplePipelineCollection {
@@ -2577,7 +2616,7 @@
2577
2616
  * @param pipelines Array of pipeline JSON objects to include in the collection
2578
2617
  *
2579
2618
  * Note: During the construction logic of all pipelines are validated
2580
- * Note: It is not recommended to use this constructor directly, use `createCollectionFromJson` *(or other variant)* instead
2619
+ * Note: It is not recommended to use this constructor directly, use `createPipelineCollectionFromJson` *(or other variant)* instead
2581
2620
  */
2582
2621
  constructor(...pipelines) {
2583
2622
  this.collection = new Map();
@@ -2665,16 +2704,16 @@
2665
2704
  }
2666
2705
 
2667
2706
/**
 * Builds a `PipelineCollection` out of the given pipelines (PipelineJson or PipelineString)
 *
 * Note: Functions `pipelineCollectionToJson` and `createPipelineCollectionFromJson` are complementary
 * Note: Syntax, parsing, and logic consistency checks are performed on all sources during build
 *
 * @param promptbooks Pipelines to include in the collection
 * @returns PipelineCollection
 * @public exported from `@promptbook/core`
 */
function createPipelineCollectionFromJson(...promptbooks) {
    // Note: `SimplePipelineCollection` validates every pipeline inside its constructor
    const collection = new SimplePipelineCollection(...promptbooks);
    return collection;
}
  }
2680
2719
 
@@ -2847,6 +2886,19 @@
2847
2886
  }
2848
2887
  }
2849
2888
 
2889
+ /**
2890
+ * This error indicates that promptbook operation is not allowed
2891
+ *
2892
+ * @public exported from `@promptbook/core`
2893
+ */
2894
+ class NotAllowed extends Error {
2895
+ constructor(message) {
2896
+ super(message);
2897
+ this.name = 'NotAllowed';
2898
+ Object.setPrototypeOf(this, NotAllowed.prototype);
2899
+ }
2900
+ }
2901
+
2850
2902
  /**
2851
2903
  * This error type indicates that some part of the code is not implemented yet
2852
2904
  *
@@ -2941,6 +2993,7 @@
2941
2993
  PromptbookFetchError,
2942
2994
  UnexpectedError,
2943
2995
  WrappedError,
2996
+ NotAllowed,
2944
2997
  // TODO: [🪑]> VersionMismatchError,
2945
2998
  };
2946
2999
  /**
@@ -3156,6 +3209,7 @@
3156
3209
  let updatedAt = createdAt;
3157
3210
  const errors = [];
3158
3211
  const warnings = [];
3212
+ const llmCalls = [];
3159
3213
  let currentValue = {};
3160
3214
  let customTldr = null;
3161
3215
  const partialResultSubject = new rxjs.Subject();
@@ -3171,6 +3225,9 @@
3171
3225
  }, (tldrInfo) => {
3172
3226
  customTldr = tldrInfo;
3173
3227
  updatedAt = new Date();
3228
+ }, (llmCall) => {
3229
+ llmCalls.push(llmCall);
3230
+ updatedAt = new Date();
3174
3231
  });
3175
3232
  finalResultPromise
3176
3233
  .catch((error) => {
@@ -3293,7 +3350,7 @@
3293
3350
  }
3294
3351
  return {
3295
3352
  percent: percent,
3296
- message,
3353
+ message: message + ' (!!!fallback)',
3297
3354
  };
3298
3355
  },
3299
3356
  get createdAt() {
@@ -3316,6 +3373,10 @@
3316
3373
  return warnings;
3317
3374
  // <- Note: [1] --||--
3318
3375
  },
3376
+ get llmCalls() {
3377
+ return [...llmCalls, { foo: '!!! bar' }];
3378
+ // <- Note: [1] --||--
3379
+ },
3319
3380
  get currentValue() {
3320
3381
  return currentValue;
3321
3382
  // <- Note: [1] --||--
@@ -4933,7 +4994,7 @@
4933
4994
  let title = pipeline.title;
4934
4995
  if (title === undefined || title === '' || title === DEFAULT_BOOK_TITLE) {
4935
4996
  // TODO: [🌼] In future use `ptbk make` and made getPipelineCollection
4936
- const collection = createCollectionFromJson(...PipelineCollection);
4997
+ const collection = createPipelineCollectionFromJson(...PipelineCollection);
4937
4998
  const prepareTitleExecutor = createPipelineExecutor({
4938
4999
  pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-title.book'),
4939
5000
  tools,
@@ -5716,6 +5777,18 @@
5716
5777
  return replacedTemplates;
5717
5778
  }
5718
5779
 
5780
+ /**
5781
+ * Logs an LLM call with the given report.
5782
+ *
5783
+ * @private internal utility of `createPipelineExecutor`
5784
+ */
5785
+ function logLlmCall(logLlmCall, report) {
5786
+ logLlmCall({
5787
+ modelName: 'model' /* <- TODO: How to get model name from the report */,
5788
+ report,
5789
+ });
5790
+ }
5791
+
5719
5792
  /**
5720
5793
  * Extracts all code blocks from markdown.
5721
5794
  *
@@ -5862,10 +5935,13 @@
5862
5935
  * @public exported from `@promptbook/utils`
5863
5936
  */
5864
5937
function countLines(text) {
    // Empty text has no lines at all
    if (text === '') {
        return 0;
    }
    // Normalize ALL line endings to '\n'
    // Note: `String.prototype.replace` with a string pattern replaces only the FIRST
    //       occurrence, so global regexes are required here - otherwise every CRLF /
    //       CR after the first one survives and skews the count
    text = text.replace(/\r\n/g, '\n');
    text = text.replace(/\r/g, '\n');
    const lines = text.split('\n');
    // Every line counts as at least one standard line; longer lines count
    // proportionally to their length in `CHARACTERS_PER_STANDARD_LINE` units
    return lines.reduce((count, line) => count + Math.max(Math.ceil(line.length / CHARACTERS_PER_STANDARD_LINE), 1), 0);
}
5870
5946
  /**
5871
5947
  * TODO: [🥴] Implement counting in formats - like JSON, CSV, XML,...
@@ -6080,7 +6156,7 @@
6080
6156
  */
6081
6157
  async function executeAttempts(options) {
6082
6158
  const { jokerParameterNames, priority, maxAttempts, // <- Note: [💂]
6083
- preparedContent, parameters, task, preparedPipeline, tools, $executionReport, pipelineIdentification, maxExecutionAttempts, onProgress, } = options;
6159
+ preparedContent, parameters, task, preparedPipeline, tools, $executionReport, pipelineIdentification, maxExecutionAttempts, onProgress, logLlmCall: logLlmCall$1, } = options;
6084
6160
  const $ongoingTaskResult = {
6085
6161
  $result: null,
6086
6162
  $resultString: null,
@@ -6328,14 +6404,10 @@
6328
6404
  });
6329
6405
  }
6330
6406
  finally {
6331
- if (!isJokerAttempt &&
6332
- task.taskType === 'PROMPT_TASK' &&
6333
- $ongoingTaskResult.$prompt
6334
- // <- Note: [2] When some expected parameter is not defined, error will occur in templateParameters
6335
- // In that case we don’t want to make a report about it because it’s not a llm execution error
6336
- ) {
6337
- // TODO: [🧠] Maybe put other taskTypes into report
6338
- $executionReport.promptExecutions.push({
6407
+ if (!isJokerAttempt && task.taskType === 'PROMPT_TASK' && $ongoingTaskResult.$prompt) {
6408
+ // Note: [2] When some expected parameter is not defined, error will occur in templateParameters
6409
+ // In that case we don’t want to make a report about it because it’s not a llm execution error
6410
+ const executionPromptReport = {
6339
6411
  prompt: {
6340
6412
  ...$ongoingTaskResult.$prompt,
6341
6413
  // <- TODO: [🧠] How to pick everyhing except `pipelineUrl`
@@ -6344,7 +6416,11 @@
6344
6416
  error: $ongoingTaskResult.$expectError === null
6345
6417
  ? undefined
6346
6418
  : serializeError($ongoingTaskResult.$expectError),
6347
- });
6419
+ };
6420
+ $executionReport.promptExecutions.push(executionPromptReport);
6421
+ if (logLlmCall$1) {
6422
+ logLlmCall(logLlmCall$1, executionPromptReport);
6423
+ }
6348
6424
  }
6349
6425
  }
6350
6426
  if ($ongoingTaskResult.$expectError !== null && attemptIndex === maxAttempts - 1) {
@@ -6409,9 +6485,9 @@
6409
6485
  * @private internal utility of `createPipelineExecutor`
6410
6486
  */
6411
6487
  async function executeFormatSubvalues(options) {
6412
- const { task, jokerParameterNames, parameters, priority, csvSettings, onProgress, pipelineIdentification } = options;
6488
+ const { task, jokerParameterNames, parameters, priority, csvSettings, onProgress, logLlmCall, pipelineIdentification, } = options;
6413
6489
  if (task.foreach === undefined) {
6414
- return /* not await */ executeAttempts(options);
6490
+ return /* not await */ executeAttempts({ ...options, logLlmCall });
6415
6491
  }
6416
6492
  if (jokerParameterNames.length !== 0) {
6417
6493
  throw new UnexpectedError(spaceTrim__default["default"]((block) => `
@@ -6712,7 +6788,7 @@
6712
6788
  * @private internal utility of `createPipelineExecutor`
6713
6789
  */
6714
6790
  async function executeTask(options) {
6715
- const { currentTask, preparedPipeline, parametersToPass, tools, onProgress, $executionReport, pipelineIdentification, maxExecutionAttempts, maxParallelCount, csvSettings, isVerbose, rootDirname, cacheDirname, intermediateFilesStrategy, isAutoInstalled, isNotPreparedWarningSuppressed, } = options;
6791
+ const { currentTask, preparedPipeline, parametersToPass, tools, onProgress, logLlmCall, $executionReport, pipelineIdentification, maxExecutionAttempts, maxParallelCount, csvSettings, isVerbose, rootDirname, cacheDirname, intermediateFilesStrategy, isAutoInstalled, isNotPreparedWarningSuppressed, } = options;
6716
6792
  const priority = preparedPipeline.tasks.length - preparedPipeline.tasks.indexOf(currentTask);
6717
6793
  // Note: Check consistency of used and dependent parameters which was also done in `validatePipeline`, but it’s good to doublecheck
6718
6794
  const usedParameterNames = extractParameterNamesFromTask(currentTask);
@@ -6791,6 +6867,7 @@
6791
6867
  tools,
6792
6868
  $executionReport,
6793
6869
  onProgress,
6870
+ logLlmCall,
6794
6871
  pipelineIdentification,
6795
6872
  maxExecutionAttempts,
6796
6873
  maxParallelCount,
@@ -6834,6 +6911,29 @@
6834
6911
  $warnings.push(new PipelineExecutionError(spaceTrim.spaceTrim((block) => `
6835
6912
  Parameter \`{${parameter.name}}\` should be an output parameter, but it was not generated during pipeline execution
6836
6913
 
6914
+ Note: This is a warning which happened after the pipeline was executed, and \`{${parameter.name}}\` was not for some reason defined in output parameters
6915
+
6916
+ All parameters:
6917
+ ${block(preparedPipeline.parameters
6918
+ .map(({ name, isInput, isOutput, description }) => {
6919
+ let line = `\`{${name}}\``;
6920
+ if (isInput) {
6921
+ line += ' `[input parameter]`';
6922
+ }
6923
+ if (isOutput) {
6924
+ line += ' `[output parameter]`';
6925
+ }
6926
+ if (parametersToPass[name] === undefined) {
6927
+ line += ` <- Warning: Should be in the output but its not |`;
6928
+ }
6929
+ if (description) {
6930
+ line += ` ${description}`;
6931
+ }
6932
+ return line;
6933
+ })
6934
+ .map((line, index) => `${index + 1}) ${line}`)
6935
+ .join('\n'))}
6936
+
6837
6937
  ${block(pipelineIdentification)}
6838
6938
  `)));
6839
6939
  continue;
@@ -6854,7 +6954,7 @@
6854
6954
  * @private internal utility of `createPipelineExecutor`
6855
6955
  */
6856
6956
  async function executePipeline(options) {
6857
- const { inputParameters, tools, onProgress, pipeline, setPreparedPipeline, pipelineIdentification, maxParallelCount, rootDirname, isVerbose, } = options;
6957
+ const { inputParameters, tools, onProgress, logLlmCall, pipeline, setPreparedPipeline, pipelineIdentification, maxParallelCount, rootDirname, isVerbose, } = options;
6858
6958
  let { preparedPipeline } = options;
6859
6959
  if (preparedPipeline === undefined) {
6860
6960
  preparedPipeline = await preparePipeline(pipeline, tools, {
@@ -7032,6 +7132,7 @@
7032
7132
  onProgress(newOngoingResult);
7033
7133
  }
7034
7134
  },
7135
+ logLlmCall,
7035
7136
  $executionReport: executionReport,
7036
7137
  pipelineIdentification: spaceTrim.spaceTrim((block) => `
7037
7138
  ${block(pipelineIdentification)}
@@ -7155,7 +7256,7 @@
7155
7256
  // <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
7156
7257
  }
7157
7258
  let runCount = 0;
7158
- const pipelineExecutorWithCallback = async (inputParameters, onProgress) => {
7259
+ const pipelineExecutorWithCallback = async (inputParameters, onProgress, logLlmCall) => {
7159
7260
  runCount++;
7160
7261
  return /* not await */ executePipeline({
7161
7262
  pipeline,
@@ -7166,6 +7267,7 @@
7166
7267
  inputParameters,
7167
7268
  tools,
7168
7269
  onProgress,
7270
+ logLlmCall,
7169
7271
  pipelineIdentification: spaceTrim.spaceTrim((block) => `
7170
7272
  ${block(pipelineIdentification)}
7171
7273
  ${runCount === 1 ? '' : `Run #${runCount}`}
@@ -7281,7 +7383,7 @@
7281
7383
  throw new MissingToolsError('LLM tools are required for preparing persona');
7282
7384
  }
7283
7385
  // TODO: [🌼] In future use `ptbk make` and made getPipelineCollection
7284
- const collection = createCollectionFromJson(...PipelineCollection);
7386
+ const collection = createPipelineCollectionFromJson(...PipelineCollection);
7285
7387
  const preparePersonaExecutor = createPipelineExecutor({
7286
7388
  pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book'),
7287
7389
  tools,
@@ -7347,7 +7449,8 @@
7347
7449
  function createEmptyAgentModelRequirements() {
7348
7450
  return {
7349
7451
  systemMessage: '',
7350
- modelName: 'gpt-5',
7452
+ // modelName: 'gpt-5',
7453
+ modelName: 'gemini-2.5-flash-lite',
7351
7454
  temperature: 0.7,
7352
7455
  topP: 0.9,
7353
7456
  topK: 50,
@@ -9765,6 +9868,46 @@
9765
9868
  return mcpServers;
9766
9869
  }
9767
9870
 
9871
/**
 * Number of padding lines to add at the end of the book content
 *
 * @public exported from `@promptbook/core`
 */
const PADDING_LINES = 11;
/**
 * Ensures that the book content ends with at least `PADDING_LINES` trailing empty
 * lines, appending only as many newlines as are actually missing.
 *
 * @public exported from `@promptbook/core`
 */
function padBook(content) {
    // Empty / nullish content becomes pure padding
    if (!content) {
        return '\n'.repeat(PADDING_LINES);
    }
    const contentLines = content.split('\n');
    // Walk backwards counting how many whitespace-only lines already trail the content
    let trailingEmptyCount = 0;
    for (let index = contentLines.length - 1; index >= 0; index--) {
        const currentLine = contentLines[index];
        if (currentLine === undefined) {
            // Note: Unreachable for `split` results, kept only to satisfy TypeScript's noUncheckedIndexedAccess option
            continue;
        }
        if (currentLine.trim() !== '') {
            break;
        }
        trailingEmptyCount++;
    }
    // Already padded enough - return the content untouched
    if (trailingEmptyCount >= PADDING_LINES) {
        return content;
    }
    // Top up with just the missing number of newlines
    return content + '\n'.repeat(PADDING_LINES - trailingEmptyCount);
}
9907
+ /**
9908
+ * TODO: [🧠] Maybe export
9909
+ */
9910
+
9768
9911
  /**
9769
9912
  * Type guard to check if a string is a valid agent source
9770
9913
  *
@@ -9792,49 +9935,38 @@
9792
9935
  *
9793
9936
  * @public exported from `@promptbook/core`
9794
9937
  */
9795
- const DEFAULT_BOOK = validateBook(spaceTrim__default["default"](`
9796
- AI Avatar
9938
+ const DEFAULT_BOOK = padBook(validateBook(spaceTrim__default["default"](`
9939
+ AI Avatar
9797
9940
 
9798
- PERSONA A friendly AI assistant that helps you with your tasks
9799
- `));
9941
+ PERSONA A friendly AI assistant that helps you with your tasks
9942
+ `)));
9943
+ // <- Note: Not using book`...` notation to avoid strange error in jest unit tests `TypeError: (0 , book_notation_1.book) is not a function`
9944
+ // <- TODO: !!! GENESIS_BOOK
9945
+ // <- !!! Buttons into genesis book
9946
+ // <- TODO: !!! createBookBoilerplate and deprecate `DEFAULT_BOOK`
9800
9947
 
9801
9948
  /**
9802
- * Converts PipelineCollection to serialized JSON
9803
- *
9804
- * Note: Functions `collectionToJson` and `createCollectionFromJson` are complementary
9949
+ * Constructs `PipelineCollection` from async sources
9805
9950
  *
9806
- * @public exported from `@promptbook/core`
9807
- */
9808
- async function collectionToJson(collection) {
9809
- const pipelineUrls = await collection.listPipelines();
9810
- const promptbooks = await Promise.all(pipelineUrls.map((url) => collection.getPipelineByUrl(url)));
9811
- return promptbooks;
9812
- }
9813
- /**
9814
- * TODO: [🧠] Maybe clear `sourceFile` or clear when exposing through API or remote server
9815
- */
9816
-
9817
- /**
9818
- * Constructs Promptbook from async sources
9819
9951
  * It can be one of the following:
9820
9952
  * - Promise of array of PipelineJson or PipelineString
9821
9953
  * - Factory function that returns Promise of array of PipelineJson or PipelineString
9822
9954
  *
9823
9955
  * Note: This is useful as internal tool for other constructor functions like
9824
- * `createCollectionFromUrl` or `createCollectionFromDirectory`
9956
+ * `createPipelineCollectionFromUrl` or `createPipelineCollectionFromDirectory`
9825
9957
  * Consider using those functions instead of this one
9826
9958
  *
9827
9959
  * Note: The function does NOT return promise it returns the collection directly which waits for the sources to be resolved
9828
9960
  * when error occurs in given promise or factory function, it is thrown during `listPipelines` or `getPipelineByUrl` call
9829
9961
  *
9830
- * Note: Consider using `createCollectionFromDirectory` or `createCollectionFromUrl`
9962
+ * Note: Consider using `createPipelineCollectionFromDirectory` or `createPipelineCollectionFromUrl`
9831
9963
  *
9832
9964
  * @param promptbookSourcesPromiseOrFactory
9833
9965
  * @returns PipelineCollection
9834
9966
  * @deprecated Do not use, it will became internal tool for other constructor functions
9835
9967
  * @public exported from `@promptbook/core`
9836
9968
  */
9837
- function createCollectionFromPromise(promptbookSourcesPromiseOrFactory) {
9969
+ function createPipelineCollectionFromPromise(promptbookSourcesPromiseOrFactory) {
9838
9970
  let collection = null;
9839
9971
  async function load() {
9840
9972
  if (collection !== null) {
@@ -9845,7 +9977,7 @@
9845
9977
  promptbookSourcesPromiseOrFactory = promptbookSourcesPromiseOrFactory();
9846
9978
  }
9847
9979
  const promptbookSources = await promptbookSourcesPromiseOrFactory;
9848
- collection = createCollectionFromJson(...promptbookSources);
9980
+ collection = createPipelineCollectionFromJson(...promptbookSources);
9849
9981
  }
9850
9982
  async function listPipelines() {
9851
9983
  await load();
@@ -9871,9 +10003,9 @@
9871
10003
  * @returns PipelineCollection
9872
10004
  * @public exported from `@promptbook/core`
9873
10005
  */
9874
- async function createCollectionFromUrl(url, options) {
10006
+ async function createPipelineCollectionFromUrl(url, options) {
9875
10007
  const { isVerbose = exports.DEFAULT_IS_VERBOSE, isLazyLoaded = false } = options || {};
9876
- const collection = createCollectionFromPromise(async () => {
10008
+ const collection = createPipelineCollectionFromPromise(async () => {
9877
10009
  if (isVerbose) {
9878
10010
  console.info(`Creating pipeline collection from url ${url.toString()}`);
9879
10011
  }
@@ -9886,14 +10018,14 @@
9886
10018
  // TODO: [main] !!3 [🏳‍🌈] Allow variant with .json .js and .ts files
9887
10019
  // TODO: [🧠][🏳‍🌈] .js and .ts files should create getter function of the collection
9888
10020
  // TODO: Look at WebGPT "📖 Make Promptbook collection" and https://webgpt.cz/_books.json
9889
- // TODO: Implement via createCollectionFromPromise
10021
+ // TODO: Implement via createPipelineCollectionFromPromise
9890
10022
  }
9891
10023
  /**
9892
10024
  * TODO: [main] !!4 [🧠] Library precompilation and do not mix markdown and json promptbooks
9893
10025
  */
9894
10026
 
9895
10027
  /**
9896
- * Creates PipelineCollection as a subset of another PipelineCollection
10028
+ * Creates `PipelineCollection` as a subset of another `PipelineCollection`
9897
10029
  *
9898
10030
  * Note: You can use any type of collection as a parent collection - local, remote, etc.
9899
10031
  * Note: This is just a thin wrapper / proxy around the parent collection
@@ -9902,7 +10034,7 @@
9902
10034
  * @returns PipelineCollection
9903
10035
  * @public exported from `@promptbook/core`
9904
10036
  */
9905
- function createSubcollection(collection, predicate) {
10037
+ function createPipelineSubcollection(collection, predicate) {
9906
10038
  async function listPipelines() {
9907
10039
  let promptbooks = await collection.listPipelines();
9908
10040
  promptbooks = promptbooks.filter(predicate);
@@ -9936,6 +10068,22 @@
9936
10068
  };
9937
10069
  }
9938
10070
 
10071
/**
 * Serializes a `PipelineCollection` into a plain array of pipeline JSON objects
 *
 * Note: Functions `pipelineCollectionToJson` and `createPipelineCollectionFromJson` are complementary
 *
 * @param collection Collection to serialize
 * @returns Array of pipelines, in the order reported by `listPipelines`
 * @public exported from `@promptbook/core`
 */
async function pipelineCollectionToJson(collection) {
    const urls = await collection.listPipelines();
    // Resolve all pipelines in parallel while preserving the order of `urls`
    return await Promise.all(urls.map((pipelineUrl) => collection.getPipelineByUrl(pipelineUrl)));
}
10083
+ /**
10084
+ * TODO: [🧠] Maybe clear `sourceFile` or clear when exposing through API or remote server
10085
+ */
10086
+
9939
10087
  /**
9940
10088
  * All available task types
9941
10089
  *
@@ -12691,6 +12839,7 @@
12691
12839
  if (!isFlatPipeline(pipelineString)) {
12692
12840
  return pipelineString;
12693
12841
  }
12842
+ pipelineString = spaceTrim__default["default"](pipelineString);
12694
12843
  const pipelineStringLines = pipelineString.split('\n');
12695
12844
  const potentialReturnStatement = pipelineStringLines.pop();
12696
12845
  let returnStatement;
@@ -12725,7 +12874,7 @@
12725
12874
  ${returnStatement}
12726
12875
  `));
12727
12876
  // <- TODO: Maybe use book` notation
12728
- return pipelineString;
12877
+ return padBook(pipelineString);
12729
12878
  }
12730
12879
  /**
12731
12880
  * TODO: Unit test
@@ -14198,10 +14347,10 @@
14198
14347
  var _a, _b;
14199
14348
  const isMetadataAviailable = $llmToolsMetadataRegister
14200
14349
  .list()
14201
- .find(({ packageName, className }) => metadata.packageName === packageName && metadata.className === className);
14350
+ .some(({ packageName, className }) => metadata.packageName === packageName && metadata.className === className);
14202
14351
  const isInstalled = $llmToolsRegister
14203
14352
  .list()
14204
- .find(({ packageName, className }) => metadata.packageName === packageName && metadata.className === className);
14353
+ .some(({ packageName, className }) => metadata.packageName === packageName && metadata.className === className);
14205
14354
  const isFullyConfigured = ((_a = metadata.envVariables) === null || _a === void 0 ? void 0 : _a.every((envVariableName) => env[envVariableName] !== undefined)) || false;
14206
14355
  const isPartiallyConfigured = ((_b = metadata.envVariables) === null || _b === void 0 ? void 0 : _b.some((envVariableName) => env[envVariableName] !== undefined)) || false;
14207
14356
  // <- Note: [🗨]
@@ -14618,6 +14767,98 @@
14618
14767
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
14619
14768
  */
14620
14769
 
14770
+ /**
14771
+ * Restricts an Updatable to a (2) BehaviorSubject variant
14772
+ *
14773
+ * @see Updatable
14774
+ * @private internal utility <- TODO: [🧠] Maybe export from `@promptbook/types`
14775
+ */
14776
+ function asUpdatableSubject(value) {
14777
+ if (value instanceof rxjs.BehaviorSubject) {
14778
+ return value;
14779
+ }
14780
+ else if (Array.isArray(value)) {
14781
+ if (value.length !== 2) {
14782
+ throw new TypeError('`asUpdatableSubject`: Invalid tuple length, expected 2 elements');
14783
+ }
14784
+ if (typeof value[1] !== 'function') {
14785
+ throw new TypeError('`asUpdatableSubject`: Invalid tuple, expected second element to be a function');
14786
+ }
14787
+ const [theValue, setValue] = value;
14788
+ const subject = new rxjs.BehaviorSubject(theValue);
14789
+ subject.subscribe((newValue) => {
14790
+ setValue(newValue);
14791
+ });
14792
+ return subject;
14793
+ }
14794
+ else {
14795
+ return new rxjs.BehaviorSubject(value);
14796
+ }
14797
+ }
14798
+ /**
14799
+ * TODO: [🧠] Maybe `BehaviorSubject` is too heavy for this use case, maybe just tuple `[value,setValue]` is enough
14800
+ */
14801
+
14802
+ /**
14803
+ * Represents one AI Agent
14804
+ *
14805
+ * Note: [🦖] There are several different things in Promptbook:
14806
+ * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
14807
+ * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
14808
+ * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
14809
+ * - `OpenAiAssistantExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities, recommended for usage in `Agent` or `AgentLlmExecutionTools`
14810
+ *
14811
+ * @public exported from `@promptbook/core`
14812
+ */
14813
+ class Agent {
14814
+ /**
14815
+ * Not used in Agent, always returns empty array
14816
+ */
14817
+ get parameters() {
14818
+ return [
14819
+ /* [😰] */
14820
+ ];
14821
+ }
14822
+ constructor(options) {
14823
+ this.options = options;
14824
+ /**
14825
+ * Name of the agent
14826
+ */
14827
+ this.agentName = null;
14828
+ /**
14829
+ * Description of the agent
14830
+ */
14831
+ this.personaDescription = null;
14832
+ /**
14833
+ * Metadata like image or color
14834
+ */
14835
+ this.meta = {};
14836
+ this.agentSource = asUpdatableSubject(options.agentSource);
14837
+ this.agentSource.subscribe((source) => {
14838
+ const { agentName, personaDescription, meta } = parseAgentSource(source);
14839
+ this.agentName = agentName;
14840
+ this.personaDescription = personaDescription;
14841
+ this.meta = { ...this.meta, ...meta };
14842
+ });
14843
+ }
14844
+ /**
14845
+ * Creates LlmExecutionTools which exposes the agent as a model
14846
+ */
14847
+ getLlmExecutionTools() {
14848
+ const llmTools = new AgentLlmExecutionTools({
14849
+ llmTools: getSingleLlmExecutionTools(this.options.executionTools.llm),
14850
+ agentSource: this.agentSource.value, // <- TODO: !!!! Allow to pass BehaviorSubject<string_book> OR refresh llmExecutionTools.callChat on agentSource change
14851
+ });
14852
+ // TODO: !!!! Add `Agent` simple "mocked" learning by appending to agent source
14853
+ // TODO: !!!! Add `Agent` learning by promptbookAgent
14854
+ return llmTools;
14855
+ }
14856
+ }
14857
+ /**
14858
+ * TODO: [🧠][😰]Agent is not working with the parameters, should it be?
14859
+ * TODO: !!! Agent on remote server
14860
+ */
14861
+
14621
14862
  /**
14622
14863
  * Change ellipsis character to three dots `…` -> `...`
14623
14864
  *
@@ -14758,126 +14999,1864 @@
14758
14999
  */
14759
15000
 
14760
15001
  /**
14761
- * Execution Tools for calling LLM models with a predefined agent "soul"
14762
- * This wraps underlying LLM execution tools and applies agent-specific system prompts and requirements
15002
+ * Helper of usage compute
14763
15003
  *
14764
- * @public exported from `@promptbook/core`
15004
+ * @param content the content of prompt or response
15005
+ * @returns part of UsageCounts
15006
+ *
15007
+ * @private internal utility of LlmExecutionTools
14765
15008
  */
14766
- class AgentLlmExecutionTools {
14767
- /**
14768
- * Creates new AgentLlmExecutionTools
14769
- *
14770
- * @param llmTools The underlying LLM execution tools to wrap
14771
- * @param agentSource The agent source string that defines the agent's behavior
14772
- */
14773
- constructor(llmTools, agentSource) {
14774
- this.llmTools = llmTools;
14775
- this.agentSource = agentSource;
14776
- /**
14777
- * Cached model requirements to avoid re-parsing the agent source
14778
- */
14779
- this._cachedModelRequirements = null;
14780
- /**
14781
- * Cached parsed agent information
14782
- */
14783
- this._cachedAgentInfo = null;
14784
- }
14785
- /**
14786
- * Get cached or parse agent information
14787
- */
14788
- getAgentInfo() {
14789
- if (this._cachedAgentInfo === null) {
14790
- this._cachedAgentInfo = parseAgentSource(this.agentSource);
14791
- }
14792
- return this._cachedAgentInfo;
14793
- }
14794
- /**
14795
- * Get cached or create agent model requirements
14796
- */
14797
- async getAgentModelRequirements() {
14798
- if (this._cachedModelRequirements === null) {
14799
- // Get available models from underlying LLM tools for best model selection
14800
- const availableModels = await this.llmTools.listModels();
14801
- this._cachedModelRequirements = await createAgentModelRequirements(this.agentSource, undefined, // Let the function pick the best model
14802
- availableModels);
14803
- }
14804
- return this._cachedModelRequirements;
14805
- }
14806
- get title() {
14807
- const agentInfo = this.getAgentInfo();
14808
- return (agentInfo.agentName || 'Agent');
14809
- }
14810
- get description() {
14811
- const agentInfo = this.getAgentInfo();
14812
- return agentInfo.personaDescription || 'AI Agent with predefined personality and behavior';
14813
- }
14814
- get profile() {
14815
- const agentInfo = this.getAgentInfo();
14816
- if (!agentInfo.agentName) {
14817
- return undefined;
14818
- }
14819
- return {
14820
- name: agentInfo.agentName.toUpperCase().replace(/\s+/g, '_'),
14821
- fullname: agentInfo.agentName,
14822
- color: agentInfo.meta.color || '#6366f1',
14823
- avatarSrc: agentInfo.meta.image,
14824
- };
14825
- }
14826
- checkConfiguration() {
14827
- // Check underlying tools configuration
14828
- return this.llmTools.checkConfiguration();
15009
+ function computeUsageCounts(content) {
15010
+ return {
15011
+ charactersCount: { value: countCharacters(content) },
15012
+ wordsCount: { value: countWords(content) },
15013
+ sentencesCount: { value: countSentences(content) },
15014
+ linesCount: { value: countLines(content) },
15015
+ paragraphsCount: { value: countParagraphs(content) },
15016
+ pagesCount: { value: countPages(content) },
15017
+ };
15018
+ }
15019
+
15020
+ /**
15021
+ * Make UncertainNumber
15022
+ *
15023
+ * @param value value of the uncertain number, if `NaN` or `undefined`, it will be set to 0 and `isUncertain=true`
15024
+ * @param isUncertain if `true`, the value is uncertain, otherwise depends on the value
15025
+ *
15026
+ * @private utility for initializating UncertainNumber
15027
+ */
15028
+ function uncertainNumber(value, isUncertain) {
15029
+ if (value === null || value === undefined || Number.isNaN(value)) {
15030
+ return UNCERTAIN_ZERO_VALUE;
14829
15031
  }
14830
- /**
14831
- * Returns a virtual model name representing the agent behavior
14832
- */
14833
- get modelName() {
14834
- const hash = cryptoJs.SHA256(hexEncoder__default["default"].parse(this.agentSource))
14835
- // <- TODO: [🥬] Encapsulate sha256 to some private utility function
14836
- .toString( /* hex */);
14837
- // <- TODO: [🥬] Make some system for hashes and ids of promptbook
14838
- const agentId = hash.substring(0, 10);
14839
- // <- TODO: [🥬] Make some system for hashes and ids of promptbook
14840
- return (normalizeToKebabCase(this.title) + '-' + agentId);
15032
+ if (isUncertain === true) {
15033
+ return { value, isUncertain };
14841
15034
  }
14842
- listModels() {
14843
- return [
14844
- {
14845
- modelName: this.modelName,
14846
- modelVariant: 'CHAT',
14847
- modelTitle: `${this.title} (Agent Chat Default)`,
14848
- modelDescription: `Chat model with agent behavior: ${this.description}`,
15035
+ return { value };
15036
+ }
15037
+
15038
+ /**
15039
+ * Create price per one token based on the string value found on openai page
15040
+ *
15041
+ * @private within the repository, used only as internal helper for `OPENAI_MODELS`
15042
+ */
15043
+ function pricing(value) {
15044
+ const [price, tokens] = value.split(' / ');
15045
+ return parseFloat(price.replace('$', '')) / parseFloat(tokens.replace('M tokens', '')) / 1000000;
15046
+ }
15047
+
15048
+ /**
15049
+ * List of available OpenAI models with pricing
15050
+ *
15051
+ * Note: Synced with official API docs at 2025-08-20
15052
+ *
15053
+ * @see https://platform.openai.com/docs/models/
15054
+ * @see https://openai.com/api/pricing/
15055
+ * @public exported from `@promptbook/openai`
15056
+ */
15057
+ const OPENAI_MODELS = exportJson({
15058
+ name: 'OPENAI_MODELS',
15059
+ value: [
15060
+ /**/
15061
+ {
15062
+ modelVariant: 'CHAT',
15063
+ modelTitle: 'gpt-5',
15064
+ modelName: 'gpt-5',
15065
+ modelDescription: "OpenAI's most advanced language model with unprecedented reasoning capabilities and 200K context window. Features revolutionary improvements in complex problem-solving, scientific reasoning, and creative tasks. Demonstrates human-level performance across diverse domains with enhanced safety measures and alignment. Represents the next generation of AI with superior understanding, nuanced responses, and advanced multimodal capabilities.",
15066
+ pricing: {
15067
+ prompt: pricing(`$1.25 / 1M tokens`),
15068
+ output: pricing(`$10.00 / 1M tokens`),
14849
15069
  },
14850
- // <- Note: We only list a single "virtual" agent model here as this wrapper only supports chat prompts
14851
- ];
14852
- }
14853
- /**
14854
- * Calls the chat model with agent-specific system prompt and requirements
14855
- */
14856
- async callChatModel(prompt) {
14857
- if (!this.llmTools.callChatModel) {
14858
- throw new Error('Underlying LLM execution tools do not support chat model calls');
14859
- }
14860
- // Ensure we're working with a chat prompt
15070
+ },
15071
+ /**/
15072
+ /**/
15073
+ {
15074
+ modelVariant: 'CHAT',
15075
+ modelTitle: 'gpt-5-mini',
15076
+ modelName: 'gpt-5-mini',
15077
+ modelDescription: 'A faster, cost-efficient version of GPT-5 for well-defined tasks with 200K context window. Maintains core GPT-5 capabilities while offering 5x faster inference and significantly lower costs. Features enhanced instruction following and reduced latency for production applications requiring quick responses with high quality.',
15078
+ pricing: {
15079
+ prompt: pricing(`$0.25 / 1M tokens`),
15080
+ output: pricing(`$2.00 / 1M tokens`),
15081
+ },
15082
+ },
15083
+ /**/
15084
+ /**/
15085
+ {
15086
+ modelVariant: 'CHAT',
15087
+ modelTitle: 'gpt-5-nano',
15088
+ modelName: 'gpt-5-nano',
15089
+ modelDescription: 'The fastest, most cost-efficient version of GPT-5 with 200K context window. Optimized for summarization, classification, and simple reasoning tasks. Features 10x faster inference than base GPT-5 while maintaining good quality for straightforward applications. Ideal for high-volume, cost-sensitive deployments.',
15090
+ pricing: {
15091
+ prompt: pricing(`$0.05 / 1M tokens`),
15092
+ output: pricing(`$0.40 / 1M tokens`),
15093
+ },
15094
+ },
15095
+ /**/
15096
+ /**/
15097
+ {
15098
+ modelVariant: 'CHAT',
15099
+ modelTitle: 'gpt-4.1',
15100
+ modelName: 'gpt-4.1',
15101
+ modelDescription: 'Smartest non-reasoning model with 128K context window. Enhanced version of GPT-4 with improved instruction following, better factual accuracy, and reduced hallucinations. Features advanced function calling capabilities and superior performance on coding tasks. Ideal for applications requiring high intelligence without reasoning overhead.',
15102
+ pricing: {
15103
+ prompt: pricing(`$3.00 / 1M tokens`),
15104
+ output: pricing(`$12.00 / 1M tokens`),
15105
+ },
15106
+ },
15107
+ /**/
15108
+ /**/
15109
+ {
15110
+ modelVariant: 'CHAT',
15111
+ modelTitle: 'gpt-4.1-mini',
15112
+ modelName: 'gpt-4.1-mini',
15113
+ modelDescription: 'Smaller, faster version of GPT-4.1 with 128K context window. Balances intelligence and efficiency with 3x faster inference than base GPT-4.1. Maintains strong capabilities across text generation, reasoning, and coding while offering better cost-performance ratio for most applications.',
15114
+ pricing: {
15115
+ prompt: pricing(`$0.80 / 1M tokens`),
15116
+ output: pricing(`$3.20 / 1M tokens`),
15117
+ },
15118
+ },
15119
+ /**/
15120
+ /**/
15121
+ {
15122
+ modelVariant: 'CHAT',
15123
+ modelTitle: 'gpt-4.1-nano',
15124
+ modelName: 'gpt-4.1-nano',
15125
+ modelDescription: 'Fastest, most cost-efficient version of GPT-4.1 with 128K context window. Optimized for high-throughput applications requiring good quality at minimal cost. Features 5x faster inference than GPT-4.1 while maintaining adequate performance for most general-purpose tasks.',
15126
+ pricing: {
15127
+ prompt: pricing(`$0.20 / 1M tokens`),
15128
+ output: pricing(`$0.80 / 1M tokens`),
15129
+ },
15130
+ },
15131
+ /**/
15132
+ /**/
15133
+ {
15134
+ modelVariant: 'CHAT',
15135
+ modelTitle: 'o3',
15136
+ modelName: 'o3',
15137
+ modelDescription: 'Advanced reasoning model with 128K context window specializing in complex logical, mathematical, and analytical tasks. Successor to o1 with enhanced step-by-step problem-solving capabilities and superior performance on STEM-focused problems. Ideal for professional applications requiring deep analytical thinking and precise reasoning.',
15138
+ pricing: {
15139
+ prompt: pricing(`$15.00 / 1M tokens`),
15140
+ output: pricing(`$60.00 / 1M tokens`),
15141
+ },
15142
+ },
15143
+ /**/
15144
+ /**/
15145
+ {
15146
+ modelVariant: 'CHAT',
15147
+ modelTitle: 'o3-pro',
15148
+ modelName: 'o3-pro',
15149
+ modelDescription: 'Enhanced version of o3 with more compute allocated for better responses on the most challenging problems. Features extended reasoning time and improved accuracy on complex analytical tasks. Designed for applications where maximum reasoning quality is more important than response speed.',
15150
+ pricing: {
15151
+ prompt: pricing(`$30.00 / 1M tokens`),
15152
+ output: pricing(`$120.00 / 1M tokens`),
15153
+ },
15154
+ },
15155
+ /**/
15156
+ /**/
15157
+ {
15158
+ modelVariant: 'CHAT',
15159
+ modelTitle: 'o4-mini',
15160
+ modelName: 'o4-mini',
15161
+ modelDescription: 'Fast, cost-efficient reasoning model with 128K context window. Successor to o1-mini with improved analytical capabilities while maintaining speed advantages. Features enhanced mathematical reasoning and logical problem-solving at significantly lower cost than full reasoning models.',
15162
+ pricing: {
15163
+ prompt: pricing(`$4.00 / 1M tokens`),
15164
+ output: pricing(`$16.00 / 1M tokens`),
15165
+ },
15166
+ },
15167
+ /**/
15168
+ /**/
15169
+ {
15170
+ modelVariant: 'CHAT',
15171
+ modelTitle: 'o3-deep-research',
15172
+ modelName: 'o3-deep-research',
15173
+ modelDescription: 'Most powerful deep research model with 128K context window. Specialized for comprehensive research tasks, literature analysis, and complex information synthesis. Features advanced citation capabilities and enhanced factual accuracy for academic and professional research applications.',
15174
+ pricing: {
15175
+ prompt: pricing(`$25.00 / 1M tokens`),
15176
+ output: pricing(`$100.00 / 1M tokens`),
15177
+ },
15178
+ },
15179
+ /**/
15180
+ /**/
15181
+ {
15182
+ modelVariant: 'CHAT',
15183
+ modelTitle: 'o4-mini-deep-research',
15184
+ modelName: 'o4-mini-deep-research',
15185
+ modelDescription: 'Faster, more affordable deep research model with 128K context window. Balances research capabilities with cost efficiency, offering good performance on literature review, fact-checking, and information synthesis tasks at a more accessible price point.',
15186
+ pricing: {
15187
+ prompt: pricing(`$12.00 / 1M tokens`),
15188
+ output: pricing(`$48.00 / 1M tokens`),
15189
+ },
15190
+ },
15191
+ /**/
15192
+ /*/
15193
+ {
15194
+ modelTitle: 'dall-e-3',
15195
+ modelName: 'dall-e-3',
15196
+ },
15197
+ /**/
15198
+ /*/
15199
+ {
15200
+ modelTitle: 'whisper-1',
15201
+ modelName: 'whisper-1',
15202
+ },
15203
+ /**/
15204
+ /**/
15205
+ {
15206
+ modelVariant: 'COMPLETION',
15207
+ modelTitle: 'davinci-002',
15208
+ modelName: 'davinci-002',
15209
+ modelDescription: 'Legacy completion model with 4K token context window. Excels at complex text generation, creative writing, and detailed content creation with strong contextual understanding. Optimized for instructions requiring nuanced outputs and extended reasoning. Suitable for applications needing high-quality text generation without conversation management.',
15210
+ pricing: {
15211
+ prompt: pricing(`$2.00 / 1M tokens`),
15212
+ output: pricing(`$2.00 / 1M tokens`),
15213
+ },
15214
+ },
15215
+ /**/
15216
+ /*/
15217
+ {
15218
+ modelTitle: 'dall-e-2',
15219
+ modelName: 'dall-e-2',
15220
+ },
15221
+ /**/
15222
+ /**/
15223
+ {
15224
+ modelVariant: 'CHAT',
15225
+ modelTitle: 'gpt-3.5-turbo-16k',
15226
+ modelName: 'gpt-3.5-turbo-16k',
15227
+ modelDescription: 'Extended context GPT-3.5 Turbo with 16K token window. Maintains core capabilities of standard 3.5 Turbo while supporting longer conversations and documents. Features good balance of performance and cost for applications requiring more context than standard 4K models. Effective for document analysis, extended conversations, and multi-step reasoning tasks.',
15228
+ pricing: {
15229
+ prompt: pricing(`$3.00 / 1M tokens`),
15230
+ output: pricing(`$4.00 / 1M tokens`),
15231
+ },
15232
+ },
15233
+ /**/
15234
+ /*/
15235
+ {
15236
+ modelTitle: 'tts-1-hd-1106',
15237
+ modelName: 'tts-1-hd-1106',
15238
+ },
15239
+ /**/
15240
+ /*/
15241
+ {
15242
+ modelTitle: 'tts-1-hd',
15243
+ modelName: 'tts-1-hd',
15244
+ },
15245
+ /**/
15246
+ /**/
15247
+ {
15248
+ modelVariant: 'CHAT',
15249
+ modelTitle: 'gpt-4',
15250
+ modelName: 'gpt-4',
15251
+ modelDescription: 'Powerful language model with 8K context window featuring sophisticated reasoning, instruction-following, and knowledge capabilities. Demonstrates strong performance on complex tasks requiring deep understanding and multi-step reasoning. Excels at code generation, logical analysis, and nuanced content creation. Suitable for advanced applications requiring high-quality outputs.',
15252
+ pricing: {
15253
+ prompt: pricing(`$30.00 / 1M tokens`),
15254
+ output: pricing(`$60.00 / 1M tokens`),
15255
+ },
15256
+ },
15257
+ /**/
15258
+ /**/
15259
+ {
15260
+ modelVariant: 'CHAT',
15261
+ modelTitle: 'gpt-4-32k',
15262
+ modelName: 'gpt-4-32k',
15263
+ modelDescription: 'Extended context version of GPT-4 with 32K token window. Maintains all capabilities of standard GPT-4 while supporting analysis of very lengthy documents, code bases, and conversations. Features enhanced ability to maintain context over long interactions and process detailed information from large inputs. Ideal for document analysis, legal review, and complex problem-solving.',
15264
+ pricing: {
15265
+ prompt: pricing(`$60.00 / 1M tokens`),
15266
+ output: pricing(`$120.00 / 1M tokens`),
15267
+ },
15268
+ },
15269
+ /**/
15270
+ /*/
15271
+ {
15272
+ modelVariant: 'CHAT',
15273
+ modelTitle: 'gpt-4-0613',
15274
+ modelName: 'gpt-4-0613',
15275
+ pricing: {
15276
+ prompt: computeUsage(` / 1M tokens`),
15277
+ output: computeUsage(` / 1M tokens`),
15278
+ },
15279
+ },
15280
+ /**/
15281
+ /**/
15282
+ {
15283
+ modelVariant: 'CHAT',
15284
+ modelTitle: 'gpt-4-turbo-2024-04-09',
15285
+ modelName: 'gpt-4-turbo-2024-04-09',
15286
+ modelDescription: 'Latest stable GPT-4 Turbo from April 2024 with 128K context window. Features enhanced reasoning chains, improved factual accuracy with 40% reduction in hallucinations, and better instruction following compared to earlier versions. Includes advanced function calling capabilities and knowledge up to April 2024. Provides optimal performance for enterprise applications requiring reliability.',
15287
+ pricing: {
15288
+ prompt: pricing(`$10.00 / 1M tokens`),
15289
+ output: pricing(`$30.00 / 1M tokens`),
15290
+ },
15291
+ },
15292
+ /**/
15293
+ /**/
15294
+ {
15295
+ modelVariant: 'CHAT',
15296
+ modelTitle: 'gpt-3.5-turbo-1106',
15297
+ modelName: 'gpt-3.5-turbo-1106',
15298
+ modelDescription: 'November 2023 version of GPT-3.5 Turbo with 16K token context window. Features improved instruction following, more consistent output formatting, and enhanced function calling capabilities. Includes knowledge cutoff from April 2023. Suitable for applications requiring good performance at lower cost than GPT-4 models.',
15299
+ pricing: {
15300
+ prompt: pricing(`$1.00 / 1M tokens`),
15301
+ output: pricing(`$2.00 / 1M tokens`),
15302
+ },
15303
+ },
15304
+ /**/
15305
+ /**/
15306
+ {
15307
+ modelVariant: 'CHAT',
15308
+ modelTitle: 'gpt-4-turbo',
15309
+ modelName: 'gpt-4-turbo',
15310
+ modelDescription: 'More capable and cost-efficient version of GPT-4 with 128K token context window. Features improved instruction following, advanced function calling capabilities, and better performance on coding tasks. Maintains superior reasoning and knowledge while offering substantial cost reduction compared to base GPT-4. Ideal for complex applications requiring extensive context processing.',
15311
+ pricing: {
15312
+ prompt: pricing(`$10.00 / 1M tokens`),
15313
+ output: pricing(`$30.00 / 1M tokens`),
15314
+ },
15315
+ },
15316
+ /**/
15317
+ /**/
15318
+ {
15319
+ modelVariant: 'COMPLETION',
15320
+ modelTitle: 'gpt-3.5-turbo-instruct-0914',
15321
+ modelName: 'gpt-3.5-turbo-instruct-0914',
15322
+ modelDescription: 'September 2023 version of GPT-3.5 Turbo Instruct with 4K context window. Optimized for completion-style instruction following with deterministic responses. Better suited than chat models for applications requiring specific formatted outputs without conversation management. Knowledge cutoff from September 2021.',
15323
+ pricing: {
15324
+ prompt: pricing(`$1.50 / 1M tokens`),
15325
+ output: pricing(`$2.00 / 1M tokens`),
15326
+ },
15327
+ },
15328
+ /**/
15329
+ /**/
15330
+ {
15331
+ modelVariant: 'COMPLETION',
15332
+ modelTitle: 'gpt-3.5-turbo-instruct',
15333
+ modelName: 'gpt-3.5-turbo-instruct',
15334
+ modelDescription: 'Optimized version of GPT-3.5 for completion-style API with 4K token context window. Features strong instruction following with single-turn design rather than multi-turn conversation. Provides more consistent, deterministic outputs compared to chat models. Well-suited for templated content generation and structured text transformation tasks.',
15335
+ pricing: {
15336
+ prompt: pricing(`$1.50 / 1M tokens`),
15337
+ output: pricing(`$2.00 / 1M tokens`),
15338
+ },
15339
+ },
15340
+ /**/
15341
+ /*/
15342
+ {
15343
+ modelTitle: 'tts-1',
15344
+ modelName: 'tts-1',
15345
+ },
15346
+ /**/
15347
+ /**/
15348
+ {
15349
+ modelVariant: 'CHAT',
15350
+ modelTitle: 'gpt-3.5-turbo',
15351
+ modelName: 'gpt-3.5-turbo',
15352
+ modelDescription: 'Latest version of GPT-3.5 Turbo with 4K token default context window (16K available). Features continually improved performance with enhanced instruction following and reduced hallucinations. Offers excellent balance between capability and cost efficiency. Suitable for most general-purpose applications requiring good AI capabilities at reasonable cost.',
15353
+ pricing: {
15354
+ prompt: pricing(`$0.50 / 1M tokens`),
15355
+ output: pricing(`$1.50 / 1M tokens`),
15356
+ },
15357
+ },
15358
+ /**/
15359
+ /**/
15360
+ {
15361
+ modelVariant: 'CHAT',
15362
+ modelTitle: 'gpt-3.5-turbo-0301',
15363
+ modelName: 'gpt-3.5-turbo-0301',
15364
+ modelDescription: 'March 2023 version of GPT-3.5 Turbo with 4K token context window. Legacy model maintained for backward compatibility with specific application behaviors. Features solid conversational abilities and basic instruction following. Knowledge cutoff from September 2021. Suitable for applications explicitly designed for this version.',
15365
+ pricing: {
15366
+ prompt: pricing(`$1.50 / 1M tokens`),
15367
+ output: pricing(`$2.00 / 1M tokens`),
15368
+ },
15369
+ },
15370
+ /**/
15371
+ /**/
15372
+ {
15373
+ modelVariant: 'COMPLETION',
15374
+ modelTitle: 'babbage-002',
15375
+ modelName: 'babbage-002',
15376
+ modelDescription: 'Efficient legacy completion model with 4K context window balancing performance and speed. Features moderate reasoning capabilities with focus on straightforward text generation tasks. Significantly more efficient than davinci models while maintaining adequate quality for many applications. Suitable for high-volume, cost-sensitive text generation needs.',
15377
+ pricing: {
15378
+ prompt: pricing(`$0.40 / 1M tokens`),
15379
+ output: pricing(`$0.40 / 1M tokens`),
15380
+ },
15381
+ },
15382
+ /**/
15383
+ /**/
15384
+ {
15385
+ modelVariant: 'CHAT',
15386
+ modelTitle: 'gpt-4-1106-preview',
15387
+ modelName: 'gpt-4-1106-preview',
15388
+ modelDescription: 'November 2023 preview version of GPT-4 Turbo with 128K token context window. Features improved instruction following, better function calling capabilities, and enhanced reasoning. Includes knowledge cutoff from April 2023. Suitable for complex applications requiring extensive document understanding and sophisticated interactions.',
15389
+ pricing: {
15390
+ prompt: pricing(`$10.00 / 1M tokens`),
15391
+ output: pricing(`$30.00 / 1M tokens`),
15392
+ },
15393
+ },
15394
+ /**/
15395
+ /**/
15396
+ {
15397
+ modelVariant: 'CHAT',
15398
+ modelTitle: 'gpt-4-0125-preview',
15399
+ modelName: 'gpt-4-0125-preview',
15400
+ modelDescription: 'January 2024 preview version of GPT-4 Turbo with 128K token context window. Features improved reasoning capabilities, enhanced tool use, and more reliable function calling. Includes knowledge cutoff from October 2023. Offers better performance on complex logical tasks and more consistent outputs than previous preview versions.',
15401
+ pricing: {
15402
+ prompt: pricing(`$10.00 / 1M tokens`),
15403
+ output: pricing(`$30.00 / 1M tokens`),
15404
+ },
15405
+ },
15406
+ /**/
15407
+ /*/
15408
+ {
15409
+ modelTitle: 'tts-1-1106',
15410
+ modelName: 'tts-1-1106',
15411
+ },
15412
+ /**/
15413
+ /**/
15414
+ {
15415
+ modelVariant: 'CHAT',
15416
+ modelTitle: 'gpt-3.5-turbo-0125',
15417
+ modelName: 'gpt-3.5-turbo-0125',
15418
+ modelDescription: 'January 2024 version of GPT-3.5 Turbo with 16K token context window. Features improved reasoning capabilities, better instruction adherence, and reduced hallucinations compared to previous versions. Includes knowledge cutoff from September 2021. Provides good performance for most general applications at reasonable cost.',
15419
+ pricing: {
15420
+ prompt: pricing(`$0.50 / 1M tokens`),
15421
+ output: pricing(`$1.50 / 1M tokens`),
15422
+ },
15423
+ },
15424
+ /**/
15425
+ /**/
15426
+ {
15427
+ modelVariant: 'CHAT',
15428
+ modelTitle: 'gpt-4-turbo-preview',
15429
+ modelName: 'gpt-4-turbo-preview',
15430
+ modelDescription: 'Preview version of GPT-4 Turbo with 128K token context window that points to the latest development model. Features cutting-edge improvements to instruction following, knowledge representation, and tool use capabilities. Provides access to newest features but may have occasional behavior changes. Best for non-critical applications wanting latest capabilities.',
15431
+ pricing: {
15432
+ prompt: pricing(`$10.00 / 1M tokens`),
15433
+ output: pricing(`$30.00 / 1M tokens`),
15434
+ },
15435
+ },
15436
+ /**/
15437
+ /**/
15438
+ {
15439
+ modelVariant: 'EMBEDDING',
15440
+ modelTitle: 'text-embedding-3-large',
15441
+ modelName: 'text-embedding-3-large',
15442
+ modelDescription: "OpenAI's most capable text embedding model generating 3072-dimensional vectors. Designed for high-quality embeddings for complex similarity tasks, clustering, and information retrieval. Features enhanced cross-lingual capabilities and significantly improved performance on retrieval and classification benchmarks. Ideal for sophisticated RAG systems and semantic search applications.",
15443
+ pricing: {
15444
+ prompt: pricing(`$0.13 / 1M tokens`),
15445
+ output: 0,
15446
+ },
15447
+ },
15448
+ /**/
15449
+ /**/
15450
+ {
15451
+ modelVariant: 'EMBEDDING',
15452
+ modelTitle: 'text-embedding-3-small',
15453
+ modelName: 'text-embedding-3-small',
15454
+ modelDescription: 'Cost-effective embedding model generating 1536-dimensional vectors. Balances quality and efficiency for simpler tasks while maintaining good performance on text similarity and retrieval applications. Offers 20% better quality than ada-002 at significantly lower cost. Ideal for production embedding applications with cost constraints.',
15455
+ pricing: {
15456
+ prompt: pricing(`$0.02 / 1M tokens`),
15457
+ output: 0,
15458
+ },
15459
+ },
15460
+ /**/
15461
+ /**/
15462
+ {
15463
+ modelVariant: 'CHAT',
15464
+ modelTitle: 'gpt-3.5-turbo-0613',
15465
+ modelName: 'gpt-3.5-turbo-0613',
15466
+ modelDescription: "June 2023 version of GPT-3.5 Turbo with 4K token context window. Features function calling capabilities for structured data extraction and API interaction. Includes knowledge cutoff from September 2021. Maintained for applications specifically designed for this version's behaviors and capabilities.",
15467
+ pricing: {
15468
+ prompt: pricing(`$1.50 / 1M tokens`),
15469
+ output: pricing(`$2.00 / 1M tokens`),
15470
+ },
15471
+ },
15472
+ /**/
15473
+ /**/
15474
+ {
15475
+ modelVariant: 'EMBEDDING',
15476
+ modelTitle: 'text-embedding-ada-002',
15477
+ modelName: 'text-embedding-ada-002',
15478
+ modelDescription: 'Legacy text embedding model generating 1536-dimensional vectors suitable for text similarity and retrieval applications. Processes up to 8K tokens per request with consistent embedding quality. While superseded by newer embedding-3 models, still maintains adequate performance for many semantic search and classification tasks.',
15479
+ pricing: {
15480
+ prompt: pricing(`$0.1 / 1M tokens`),
15481
+ output: 0,
15482
+ },
15483
+ },
15484
+ /**/
15485
+ /*/
15486
+ {
15487
+ modelVariant: 'CHAT',
15488
+ modelTitle: 'gpt-4-1106-vision-preview',
15489
+ modelName: 'gpt-4-1106-vision-preview',
15490
+ },
15491
+ /**/
15492
+ /*/
15493
+ {
15494
+ modelVariant: 'CHAT',
15495
+ modelTitle: 'gpt-4-vision-preview',
15496
+ modelName: 'gpt-4-vision-preview',
15497
+ pricing: {
15498
+ prompt: computeUsage(`$10.00 / 1M tokens`),
15499
+ output: computeUsage(`$30.00 / 1M tokens`),
15500
+ },
15501
+ },
15502
+ /**/
15503
+ /**/
15504
+ {
15505
+ modelVariant: 'CHAT',
15506
+ modelTitle: 'gpt-4o-2024-05-13',
15507
+ modelName: 'gpt-4o-2024-05-13',
15508
+ modelDescription: 'May 2024 version of GPT-4o with 128K context window. Features enhanced multimodal capabilities including superior image understanding (up to 20MP), audio processing, and improved reasoning. Optimized for 2x lower latency than GPT-4 Turbo while maintaining high performance. Includes knowledge up to October 2023. Ideal for production applications requiring reliable multimodal capabilities.',
15509
+ pricing: {
15510
+ prompt: pricing(`$5.00 / 1M tokens`),
15511
+ output: pricing(`$15.00 / 1M tokens`),
15512
+ },
15513
+ },
15514
+ /**/
15515
+ /**/
15516
+ {
15517
+ modelVariant: 'CHAT',
15518
+ modelTitle: 'gpt-4o',
15519
+ modelName: 'gpt-4o',
15520
+ modelDescription: "OpenAI's most advanced general-purpose multimodal model with 128K context window. Optimized for balanced performance, speed, and cost with 2x faster responses than GPT-4 Turbo. Features excellent vision processing, audio understanding, reasoning, and text generation quality. Represents optimal balance of capability and efficiency for most advanced applications.",
15521
+ pricing: {
15522
+ prompt: pricing(`$5.00 / 1M tokens`),
15523
+ output: pricing(`$15.00 / 1M tokens`),
15524
+ },
15525
+ },
15526
+ /**/
15527
+ /**/
15528
+ {
15529
+ modelVariant: 'CHAT',
15530
+ modelTitle: 'gpt-4o-mini',
15531
+ modelName: 'gpt-4o-mini',
15532
+ modelDescription: 'Smaller, more cost-effective version of GPT-4o with 128K context window. Maintains impressive capabilities across text, vision, and audio tasks while operating at significantly lower cost. Features 3x faster inference than GPT-4o with good performance on general tasks. Excellent for applications requiring good quality multimodal capabilities at scale.',
15533
+ pricing: {
15534
+ prompt: pricing(`$0.15 / 1M tokens`),
15535
+ output: pricing(`$0.60 / 1M tokens`),
15536
+ },
15537
+ },
15538
+ /**/
15539
+ /**/
15540
+ {
15541
+ modelVariant: 'CHAT',
15542
+ modelTitle: 'o1-preview',
15543
+ modelName: 'o1-preview',
15544
+ modelDescription: 'Advanced reasoning model with 128K context window specializing in complex logical, mathematical, and analytical tasks. Features exceptional step-by-step problem-solving capabilities, advanced mathematical and scientific reasoning, and superior performance on STEM-focused problems. Significantly outperforms GPT-4 on quantitative reasoning benchmarks. Ideal for professional and specialized applications.',
15545
+ pricing: {
15546
+ prompt: pricing(`$15.00 / 1M tokens`),
15547
+ output: pricing(`$60.00 / 1M tokens`),
15548
+ },
15549
+ },
15550
+ /**/
15551
+ /**/
15552
+ {
15553
+ modelVariant: 'CHAT',
15554
+ modelTitle: 'o1-preview-2024-09-12',
15555
+ modelName: 'o1-preview-2024-09-12',
15556
+ modelDescription: 'September 2024 version of O1 preview with 128K context window. Features specialized reasoning capabilities with 30% improvement on mathematical and scientific accuracy over previous versions. Includes enhanced support for formal logic, statistical analysis, and technical domains. Optimized for professional applications requiring precise analytical thinking and rigorous methodologies.',
15557
+ pricing: {
15558
+ prompt: pricing(`$15.00 / 1M tokens`),
15559
+ output: pricing(`$60.00 / 1M tokens`),
15560
+ },
15561
+ },
15562
+ /**/
15563
+ /**/
15564
+ {
15565
+ modelVariant: 'CHAT',
15566
+ modelTitle: 'o1-mini',
15567
+ modelName: 'o1-mini',
15568
+ modelDescription: 'Smaller, cost-effective version of the O1 model with 128K context window. Maintains strong analytical reasoning abilities while reducing computational requirements by 70%. Features good performance on mathematical, logical, and scientific tasks at significantly lower cost than full O1. Excellent for everyday analytical applications that benefit from reasoning focus.',
15569
+ pricing: {
15570
+ prompt: pricing(`$3.00 / 1M tokens`),
15571
+ output: pricing(`$12.00 / 1M tokens`),
15572
+ },
15573
+ },
15574
+ /**/
15575
+ /**/
15576
+ {
15577
+ modelVariant: 'CHAT',
15578
+ modelTitle: 'o1',
15579
+ modelName: 'o1',
15580
+ modelDescription: "OpenAI's advanced reasoning model with 128K context window focusing on logical problem-solving and analytical thinking. Features exceptional performance on quantitative tasks, step-by-step deduction, and complex technical problems. Maintains 95%+ of o1-preview capabilities with production-ready stability. Ideal for scientific computing, financial analysis, and professional applications.",
15581
+ pricing: {
15582
+ prompt: pricing(`$15.00 / 1M tokens`),
15583
+ output: pricing(`$60.00 / 1M tokens`),
15584
+ },
15585
+ },
15586
+ /**/
15587
+ /**/
15588
+ {
15589
+ modelVariant: 'CHAT',
15590
+ modelTitle: 'o3-mini',
15591
+ modelName: 'o3-mini',
15592
+ modelDescription: 'Cost-effective reasoning model with 128K context window optimized for academic and scientific problem-solving. Features efficient performance on STEM tasks with specialized capabilities in mathematics, physics, chemistry, and computer science. Offers 80% of O1 performance on technical domains at significantly lower cost. Ideal for educational applications and research support.',
15593
+ pricing: {
15594
+ prompt: pricing(`$3.00 / 1M tokens`),
15595
+ output: pricing(`$12.00 / 1M tokens`),
15596
+ },
15597
+ },
15598
+ /**/
15599
+ /**/
15600
+ {
15601
+ modelVariant: 'CHAT',
15602
+ modelTitle: 'o1-mini-2024-09-12',
15603
+ modelName: 'o1-mini-2024-09-12',
15604
+ modelDescription: "September 2024 version of O1-mini with 128K context window featuring balanced reasoning capabilities and cost-efficiency. Includes 25% improvement in mathematical accuracy and enhanced performance on coding tasks compared to previous versions. Maintains efficient resource utilization while delivering improved results for analytical applications that don't require the full O1 model.",
15605
+ pricing: {
15606
+ prompt: pricing(`$3.00 / 1M tokens`),
15607
+ output: pricing(`$12.00 / 1M tokens`),
15608
+ },
15609
+ },
15610
+ /**/
15611
+ /**/
15612
+ {
15613
+ modelVariant: 'CHAT',
15614
+ modelTitle: 'gpt-3.5-turbo-16k-0613',
15615
+ modelName: 'gpt-3.5-turbo-16k-0613',
15616
+ modelDescription: "June 2023 version of GPT-3.5 Turbo with extended 16K token context window. Features good handling of longer conversations and documents with improved memory management across extended contexts. Includes knowledge cutoff from September 2021. Maintained for applications specifically designed for this version's behaviors and capabilities.",
15617
+ pricing: {
15618
+ prompt: pricing(`$3.00 / 1M tokens`),
15619
+ output: pricing(`$4.00 / 1M tokens`),
15620
+ },
15621
+ },
15622
+ /**/
15623
+ // <- [🕕]
15624
+ ],
15625
+ });
15626
+ /**
15627
+ * Note: [🤖] Add models of new variant
15628
+ * TODO: [🧠] Some mechanism to propagate uncertainty
15629
+ * TODO: [🎰] Some mechanism to auto-update available models
15630
+ * TODO: [🎰][👮‍♀️] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing
15631
+ * TODO: [🧠][👮‍♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...
15632
+ * @see https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4
15633
+ * @see https://openai.com/api/pricing/
15634
+ * @see /other/playground/playground.ts
15635
+ * TODO: [🍓][💩] Make better
15636
+ * TODO: Change model titles to human eg: "gpt-4-turbo-2024-04-09" -> "GPT-4 Turbo (2024-04-09)"
15637
+ * TODO: [🚸] Not all models are compatible with JSON mode, add this information here and use it
15638
+ * Note: [💞] Ignore a discrepancy between file name and entity name
15639
+ */
15640
+
15641
/**
 * Computes the usage of the OpenAI API based on the response from OpenAI
 *
 * @param promptContent The content of the prompt
 * @param resultContent The content of the result (for embedding prompts or failed prompts pass empty string)
 * @param rawResponse The raw response from OpenAI API
 * @throws {PipelineExecutionError} If the usage is not defined in the response from OpenAI
 * @private internal utility of `OpenAiExecutionTools`
 */
function computeOpenAiUsage(promptContent, // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
resultContent, rawResponse) {
    const { usage, model } = rawResponse;
    if (usage === undefined) {
        throw new PipelineExecutionError('The usage is not defined in the response from OpenAI');
    }
    // Note: `usage` may still be `null` here; guard before reading `prompt_tokens`
    if ((usage == null ? undefined : usage.prompt_tokens) === undefined) {
        throw new PipelineExecutionError('In OpenAI response `usage.prompt_tokens` not defined');
    }
    const inputTokens = usage.prompt_tokens;
    const outputTokens = usage.completion_tokens || 0;
    // Look up pricing info: exact model-name match first, then fall back to a
    // model-family prefix match (in which case the price is marked as uncertain)
    const exactMatch = OPENAI_MODELS.find((candidate) => candidate.modelName === model);
    const prefixMatch = exactMatch !== undefined
        ? undefined
        : OPENAI_MODELS.find((candidate) => (model || SALT_NONCE).startsWith(candidate.modelName));
    const modelInfo = exactMatch !== undefined ? exactMatch : prefixMatch;
    const isUncertain = exactMatch === undefined && prefixMatch !== undefined;
    const price = modelInfo === undefined || modelInfo.pricing === undefined
        ? uncertainNumber() // <- Unknown model or unknown pricing -> price is fully unknown
        : uncertainNumber(inputTokens * modelInfo.pricing.prompt + outputTokens * modelInfo.pricing.output, isUncertain);
    return {
        price,
        input: {
            tokensCount: uncertainNumber(inputTokens),
            ...computeUsageCounts(promptContent),
        },
        output: {
            tokensCount: uncertainNumber(outputTokens),
            ...computeUsageCounts(resultContent),
        },
    };
}
15689
+ /**
15690
+ * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
15691
+ */
15692
+
15693
/**
 * Parses an OpenAI error message to identify which parameter is unsupported
 *
 * @param errorMessage The error message from OpenAI API
 * @returns The parameter name that is unsupported, or null if not an unsupported parameter error
 * @private utility of LLM Tools
 */
function parseUnsupportedParameterError(errorMessage) {
    // Known shapes of OpenAI "unsupported parameter" messages, tried in order:
    // 1) "Unsupported value: 'parameter' does not support ..."
    // 2) "'parameter' of type ... is not supported with this model"
    const knownPatterns = [
        /Unsupported value:\s*'([^']+)'\s*does not support/i,
        /'([^']+)'\s*of type.*is not supported with this model/i,
    ];
    for (const pattern of knownPatterns) {
        const match = errorMessage.match(pattern);
        if (match && match[1]) {
            return match[1];
        }
    }
    return null;
}
15713
/**
 * Creates a copy of model requirements with the specified parameter removed
 *
 * @param modelRequirements Original model requirements (never mutated)
 * @param unsupportedParameter The parameter to remove, as named in the provider's error message
 * @returns New model requirements without the unsupported parameter
 * @private utility of LLM Tools
 */
function removeUnsupportedModelRequirement(modelRequirements, unsupportedParameter) {
    const newRequirements = { ...modelRequirements };
    // Map of parameter names that might appear in error messages to ModelRequirements properties
    const parameterMap = {
        temperature: 'temperature',
        max_tokens: 'maxTokens',
        maxTokens: 'maxTokens',
        seed: 'seed',
    };
    // Note: `unsupportedParameter` comes from an external error message, so use an
    //       own-property check to avoid prototype-chain lookups (e.g. 'toString',
    //       'constructor') resolving to truthy inherited values
    const propertyToRemove = Object.prototype.hasOwnProperty.call(parameterMap, unsupportedParameter)
        ? parameterMap[unsupportedParameter]
        : undefined;
    if (propertyToRemove && propertyToRemove in newRequirements) {
        delete newRequirements[propertyToRemove];
    }
    return newRequirements;
}
15736
/**
 * Checks if an error is an "Unsupported value" error from OpenAI
 * @param error The error to check
 * @returns true if this is an unsupported parameter error
 * @private utility of LLM Tools
 */
function isUnsupportedParameterError(error) {
    const normalizedMessage = error.message.toLowerCase();
    // Substrings that OpenAI uses in its "unsupported parameter" error messages
    const indicators = [
        'unsupported value:',
        'is not supported with this model',
        'does not support',
    ];
    return indicators.some((indicator) => normalizedMessage.includes(indicator));
}
15748
+
15749
+ /**
15750
+ * Execution Tools for calling OpenAI API or other OpenAI compatible provider
15751
+ *
15752
+ * @public exported from `@promptbook/openai`
15753
+ */
15754
+ class OpenAiCompatibleExecutionTools {
15755
+ // Removed retriedUnsupportedParameters and attemptHistory instance fields
15756
+ /**
15757
+ * Creates OpenAI compatible Execution Tools.
15758
+ *
15759
+ * @param options which are relevant are directly passed to the OpenAI compatible client
15760
+ */
15761
+ constructor(options) {
15762
+ this.options = options;
15763
+ /**
15764
+ * OpenAI API client.
15765
+ */
15766
+ this.client = null;
15767
+ // TODO: Allow configuring rate limits via options
15768
+ this.limiter = new Bottleneck__default["default"]({
15769
+ minTime: 60000 / (this.options.maxRequestsPerMinute || DEFAULT_MAX_REQUESTS_PER_MINUTE),
15770
+ });
15771
+ }
15772
+ async getClient() {
15773
+ if (this.client === null) {
15774
+ // Note: Passing only OpenAI relevant options to OpenAI constructor
15775
+ const openAiOptions = { ...this.options };
15776
+ delete openAiOptions.isVerbose;
15777
+ delete openAiOptions.userId;
15778
+ // Enhanced configuration for better ECONNRESET handling
15779
+ const enhancedOptions = {
15780
+ ...openAiOptions,
15781
+ timeout: API_REQUEST_TIMEOUT,
15782
+ maxRetries: CONNECTION_RETRIES_LIMIT,
15783
+ defaultHeaders: {
15784
+ Connection: 'keep-alive',
15785
+ 'Keep-Alive': 'timeout=30, max=100',
15786
+ ...openAiOptions.defaultHeaders,
15787
+ },
15788
+ };
15789
+ this.client = new OpenAI__default["default"](enhancedOptions);
15790
+ }
15791
+ return this.client;
15792
+ }
15793
/**
 * Check the `options` passed to `constructor`
 *
 * Note: Currently this only constructs the client from the options; it does
 *       NOT verify that the API is reachable or that the API key is valid.
 */
async checkConfiguration() {
    await this.getClient();
    // TODO: [🎍] Do here a real check that API is online, working and API key is correct
}
15800
+ /**
15801
+ * List all available OpenAI compatible models that can be used
15802
+ */
15803
+ async listModels() {
15804
+ const client = await this.getClient();
15805
+ const rawModelsList = await client.models.list();
15806
+ const availableModels = rawModelsList.data
15807
+ .sort((a, b) => (a.created > b.created ? 1 : -1))
15808
+ .map((modelFromApi) => {
15809
+ const modelFromList = this.HARDCODED_MODELS.find(({ modelName }) => modelName === modelFromApi.id ||
15810
+ modelName.startsWith(modelFromApi.id) ||
15811
+ modelFromApi.id.startsWith(modelName));
15812
+ if (modelFromList !== undefined) {
15813
+ return modelFromList;
15814
+ }
15815
+ return {
15816
+ modelVariant: 'CHAT',
15817
+ modelTitle: modelFromApi.id,
15818
+ modelName: modelFromApi.id,
15819
+ modelDescription: '',
15820
+ };
15821
+ });
15822
+ return availableModels;
15823
+ }
15824
+ /**
15825
+ * Calls OpenAI compatible API to use a chat model.
15826
+ */
15827
+ async callChatModel(prompt) {
15828
+ // Deep clone prompt and modelRequirements to avoid mutation across calls
15829
+ const clonedPrompt = JSON.parse(JSON.stringify(prompt));
15830
+ // Use local Set for retried parameters to ensure independence and thread safety
15831
+ const retriedUnsupportedParameters = new Set();
15832
+ return this.callChatModelWithRetry(clonedPrompt, clonedPrompt.modelRequirements, [], retriedUnsupportedParameters);
15833
+ }
15834
/**
 * Internal method that handles parameter retry for chat model calls
 *
 * Builds the raw chat-completions request (system message + optional thread +
 * user message), sends it through the rate limiter with network retry, and on
 * an "unsupported parameter" error strips that parameter from the model
 * requirements and recurses. Each model+parameter combination is retried at
 * most once (tracked in `retriedUnsupportedParameters`); every attempt is
 * recorded in `attemptStack` so the final error contains the full history.
 *
 * @param prompt The (already cloned) prompt to execute
 * @param currentModelRequirements Requirements for this attempt; parameters may have been stripped by earlier attempts
 * @param attemptStack Accumulated history of failed attempts, included in the final error message
 * @param retriedUnsupportedParameters Keys `${modelName}-${parameter}` already stripped, to prevent infinite retry loops
 * @throws {PipelineExecutionError} On non-retryable errors or when all retries are exhausted
 */
async callChatModelWithRetry(prompt, currentModelRequirements, attemptStack = [], retriedUnsupportedParameters = new Set()) {
    var _a;
    if (this.options.isVerbose) {
        console.info(`💬 ${this.title} callChatModel call`, { prompt, currentModelRequirements });
    }
    const { content, parameters, format } = prompt;
    const client = await this.getClient();
    // TODO: [☂] Use here more modelRequirements
    if (currentModelRequirements.modelVariant !== 'CHAT') {
        throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
    }
    const modelName = currentModelRequirements.modelName || this.getDefaultChatModel().modelName;
    const modelSettings = {
        model: modelName,
        max_tokens: currentModelRequirements.maxTokens,
        temperature: currentModelRequirements.temperature,
        // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
        // <- Note: [🧆]
    }; // <- TODO: [💩] Guard here types better
    if (format === 'JSON') {
        modelSettings.response_format = {
            type: 'json_object',
        };
    }
    // <- TODO: [🚸] Not all models are compatible with JSON mode
    // > 'response_format' of type 'json_object' is not supported with this model.
    const rawPromptContent = templateParameters(content, { ...parameters, modelName });
    // Convert thread to OpenAI format if present
    // Note: Any role other than 'assistant' is mapped to 'user'
    let threadMessages = [];
    if ('thread' in prompt && Array.isArray(prompt.thread)) {
        threadMessages = prompt.thread.map((msg) => ({
            role: msg.role === 'assistant' ? 'assistant' : 'user',
            content: msg.content,
        }));
    }
    // Message order: optional system message, then thread history, then the current user prompt
    const rawRequest = {
        ...modelSettings,
        messages: [
            ...(currentModelRequirements.systemMessage === undefined
                ? []
                : [
                    {
                        role: 'system',
                        content: currentModelRequirements.systemMessage,
                    },
                ]),
            ...threadMessages,
            {
                role: 'user',
                content: rawPromptContent,
            },
        ],
        user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString(),
    };
    const start = $getCurrentDate();
    if (this.options.isVerbose) {
        console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
    }
    try {
        const rawResponse = await this.limiter
            .schedule(() => this.makeRequestWithNetworkRetry(() => client.chat.completions.create(rawRequest)))
            .catch((error) => {
            assertsError(error);
            if (this.options.isVerbose) {
                console.info(colors__default["default"].bgRed('error'), error);
            }
            throw error;
        });
        if (this.options.isVerbose) {
            console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
        }
        const complete = $getCurrentDate();
        // NOTE(review): "choises"/"choise" below are typos of "choices"/"choice" kept as-is in the runtime error messages
        if (!rawResponse.choices[0]) {
            throw new PipelineExecutionError(`No choises from ${this.title}`);
        }
        if (rawResponse.choices.length > 1) {
            // TODO: This should be maybe only warning
            throw new PipelineExecutionError(`More than one choise from ${this.title}`);
        }
        const resultContent = rawResponse.choices[0].message.content;
        // Note: Usage is computed before the null-content check so the computation uses '' for missing content
        const usage = this.computeUsage(content || '', resultContent || '', rawResponse);
        if (resultContent === null) {
            throw new PipelineExecutionError(`No response message from ${this.title}`);
        }
        return exportJson({
            name: 'promptResult',
            message: `Result of \`OpenAiCompatibleExecutionTools.callChatModel\``,
            order: [],
            value: {
                content: resultContent,
                modelName: rawResponse.model || modelName,
                timing: {
                    start,
                    complete,
                },
                usage,
                rawPromptContent,
                rawRequest,
                rawResponse,
                // <- [🗯]
            },
        });
    }
    catch (error) {
        assertsError(error);
        // Check if this is an unsupported parameter error
        if (!isUnsupportedParameterError(error)) {
            // If we have attemptStack, include it in the error message
            if (attemptStack.length > 0) {
                throw new PipelineExecutionError(`All attempts failed. Attempt history:\n` +
                    attemptStack
                        .map((a, i) => ` ${i + 1}. Model: ${a.modelName}` +
                        (a.unsupportedParameter ? `, Stripped: ${a.unsupportedParameter}` : '') +
                        `, Error: ${a.errorMessage}` +
                        (a.stripped ? ' (stripped and retried)' : ''))
                        .join('\n') +
                    `\nFinal error: ${error.message}`);
            }
            throw error;
        }
        // Parse which parameter is unsupported
        const unsupportedParameter = parseUnsupportedParameterError(error.message);
        if (!unsupportedParameter) {
            if (this.options.isVerbose) {
                console.warn(colors__default["default"].bgYellow('Warning'), 'Could not parse unsupported parameter from error:', error.message);
            }
            throw error;
        }
        // Create a unique key for this model + parameter combination to prevent infinite loops
        const retryKey = `${modelName}-${unsupportedParameter}`;
        if (retriedUnsupportedParameters.has(retryKey)) {
            // Already retried this parameter, throw the error with attemptStack
            attemptStack.push({
                modelName,
                unsupportedParameter,
                errorMessage: error.message,
                stripped: true,
            });
            throw new PipelineExecutionError(`All attempts failed. Attempt history:\n` +
                attemptStack
                    .map((a, i) => ` ${i + 1}. Model: ${a.modelName}` +
                    (a.unsupportedParameter ? `, Stripped: ${a.unsupportedParameter}` : '') +
                    `, Error: ${a.errorMessage}` +
                    (a.stripped ? ' (stripped and retried)' : ''))
                    .join('\n') +
                `\nFinal error: ${error.message}`);
        }
        // Mark this parameter as retried
        retriedUnsupportedParameters.add(retryKey);
        // Log warning in verbose mode
        if (this.options.isVerbose) {
            console.warn(colors__default["default"].bgYellow('Warning'), `Removing unsupported parameter '${unsupportedParameter}' for model '${modelName}' and retrying request`);
        }
        // Add to attemptStack
        attemptStack.push({
            modelName,
            unsupportedParameter,
            errorMessage: error.message,
            stripped: true,
        });
        // Remove the unsupported parameter and retry
        const modifiedModelRequirements = removeUnsupportedModelRequirement(currentModelRequirements, unsupportedParameter);
        return this.callChatModelWithRetry(prompt, modifiedModelRequirements, attemptStack, retriedUnsupportedParameters);
    }
}
16002
+ /**
16003
+ * Calls OpenAI API to use a complete model.
16004
+ */
16005
+ async callCompletionModel(prompt) {
16006
+ // Deep clone prompt and modelRequirements to avoid mutation across calls
16007
+ const clonedPrompt = JSON.parse(JSON.stringify(prompt));
16008
+ const retriedUnsupportedParameters = new Set();
16009
+ return this.callCompletionModelWithRetry(clonedPrompt, clonedPrompt.modelRequirements, [], retriedUnsupportedParameters);
16010
+ }
16011
/**
 * Internal method that handles parameter retry for completion model calls
 *
 * Mirrors `callChatModelWithRetry` for the legacy completions endpoint: builds
 * the raw request, sends it through the rate limiter with network retry, and
 * on an "unsupported parameter" error strips that parameter and recurses.
 * Each model+parameter combination is retried at most once; every failed
 * attempt is recorded in `attemptStack` for the final error message.
 *
 * @param prompt The (already cloned) prompt to execute
 * @param currentModelRequirements Requirements for this attempt; parameters may have been stripped by earlier attempts
 * @param attemptStack Accumulated history of failed attempts, included in the final error message
 * @param retriedUnsupportedParameters Keys `${modelName}-${parameter}` already stripped, to prevent infinite retry loops
 * @throws {PipelineExecutionError} On non-retryable errors or when all retries are exhausted
 */
async callCompletionModelWithRetry(prompt, currentModelRequirements, attemptStack = [], retriedUnsupportedParameters = new Set()) {
    var _a;
    if (this.options.isVerbose) {
        console.info(`🖋 ${this.title} callCompletionModel call`, { prompt, currentModelRequirements });
    }
    const { content, parameters } = prompt;
    const client = await this.getClient();
    // TODO: [☂] Use here more modelRequirements
    if (currentModelRequirements.modelVariant !== 'COMPLETION') {
        throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
    }
    const modelName = currentModelRequirements.modelName || this.getDefaultCompletionModel().modelName;
    const modelSettings = {
        model: modelName,
        max_tokens: currentModelRequirements.maxTokens,
        temperature: currentModelRequirements.temperature,
    };
    const rawPromptContent = templateParameters(content, { ...parameters, modelName });
    const rawRequest = {
        ...modelSettings,
        prompt: rawPromptContent,
        user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString(),
    };
    const start = $getCurrentDate();
    if (this.options.isVerbose) {
        console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
    }
    try {
        const rawResponse = await this.limiter
            .schedule(() => this.makeRequestWithNetworkRetry(() => client.completions.create(rawRequest)))
            .catch((error) => {
            assertsError(error);
            if (this.options.isVerbose) {
                console.info(colors__default["default"].bgRed('error'), error);
            }
            throw error;
        });
        if (this.options.isVerbose) {
            console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
        }
        const complete = $getCurrentDate();
        // NOTE(review): "choises"/"choise" below are typos of "choices"/"choice" kept as-is in the runtime error messages
        if (!rawResponse.choices[0]) {
            throw new PipelineExecutionError(`No choises from ${this.title}`);
        }
        if (rawResponse.choices.length > 1) {
            throw new PipelineExecutionError(`More than one choise from ${this.title}`);
        }
        const resultContent = rawResponse.choices[0].text;
        const usage = this.computeUsage(content || '', resultContent || '', rawResponse);
        return exportJson({
            name: 'promptResult',
            message: `Result of \`OpenAiCompatibleExecutionTools.callCompletionModel\``,
            order: [],
            value: {
                content: resultContent,
                modelName: rawResponse.model || modelName,
                timing: {
                    start,
                    complete,
                },
                usage,
                rawPromptContent,
                rawRequest,
                rawResponse,
            },
        });
    }
    catch (error) {
        assertsError(error);
        // Non-retryable error -> surface it, with the attempt history when there is one
        if (!isUnsupportedParameterError(error)) {
            if (attemptStack.length > 0) {
                throw new PipelineExecutionError(`All attempts failed. Attempt history:\n` +
                    attemptStack
                        .map((a, i) => ` ${i + 1}. Model: ${a.modelName}` +
                        (a.unsupportedParameter ? `, Stripped: ${a.unsupportedParameter}` : '') +
                        `, Error: ${a.errorMessage}` +
                        (a.stripped ? ' (stripped and retried)' : ''))
                        .join('\n') +
                    `\nFinal error: ${error.message}`);
            }
            throw error;
        }
        const unsupportedParameter = parseUnsupportedParameterError(error.message);
        if (!unsupportedParameter) {
            if (this.options.isVerbose) {
                console.warn(colors__default["default"].bgYellow('Warning'), 'Could not parse unsupported parameter from error:', error.message);
            }
            throw error;
        }
        // Retry each model+parameter combination at most once to prevent infinite loops
        const retryKey = `${modelName}-${unsupportedParameter}`;
        if (retriedUnsupportedParameters.has(retryKey)) {
            attemptStack.push({
                modelName,
                unsupportedParameter,
                errorMessage: error.message,
                stripped: true,
            });
            throw new PipelineExecutionError(`All attempts failed. Attempt history:\n` +
                attemptStack
                    .map((a, i) => ` ${i + 1}. Model: ${a.modelName}` +
                    (a.unsupportedParameter ? `, Stripped: ${a.unsupportedParameter}` : '') +
                    `, Error: ${a.errorMessage}` +
                    (a.stripped ? ' (stripped and retried)' : ''))
                    .join('\n') +
                `\nFinal error: ${error.message}`);
        }
        retriedUnsupportedParameters.add(retryKey);
        if (this.options.isVerbose) {
            console.warn(colors__default["default"].bgYellow('Warning'), `Removing unsupported parameter '${unsupportedParameter}' for model '${modelName}' and retrying request`);
        }
        attemptStack.push({
            modelName,
            unsupportedParameter,
            errorMessage: error.message,
            stripped: true,
        });
        // Strip the offending parameter and retry with the reduced requirements
        const modifiedModelRequirements = removeUnsupportedModelRequirement(currentModelRequirements, unsupportedParameter);
        return this.callCompletionModelWithRetry(prompt, modifiedModelRequirements, attemptStack, retriedUnsupportedParameters);
    }
}
16134
+ /**
16135
+ * Calls OpenAI compatible API to use a embedding model
16136
+ */
16137
+ async callEmbeddingModel(prompt) {
16138
+ // Deep clone prompt and modelRequirements to avoid mutation across calls
16139
+ const clonedPrompt = JSON.parse(JSON.stringify(prompt));
16140
+ const retriedUnsupportedParameters = new Set();
16141
+ return this.callEmbeddingModelWithRetry(clonedPrompt, clonedPrompt.modelRequirements, [], retriedUnsupportedParameters);
16142
+ }
16143
/**
 * Internal method that handles parameter retry for embedding model calls
 *
 * Mirrors the chat/completion retry flow for the embeddings endpoint: builds
 * the raw request, sends it through the rate limiter with network retry, and
 * on an "unsupported parameter" error strips that parameter and recurses.
 * Each model+parameter combination is retried at most once; every failed
 * attempt is recorded in `attemptStack` for the final error message.
 *
 * @param prompt The (already cloned) prompt to execute
 * @param currentModelRequirements Requirements for this attempt; parameters may have been stripped by earlier attempts
 * @param attemptStack Accumulated history of failed attempts, included in the final error message
 * @param retriedUnsupportedParameters Keys `${modelName}-${parameter}` already stripped, to prevent infinite retry loops
 * @throws {PipelineExecutionError} On non-retryable errors, unexpected response shape, or when all retries are exhausted
 */
async callEmbeddingModelWithRetry(prompt, currentModelRequirements, attemptStack = [], retriedUnsupportedParameters = new Set()) {
    if (this.options.isVerbose) {
        console.info(`🖋 ${this.title} embedding call`, { prompt, currentModelRequirements });
    }
    const { content, parameters } = prompt;
    const client = await this.getClient();
    if (currentModelRequirements.modelVariant !== 'EMBEDDING') {
        throw new PipelineExecutionError('Use embed only for EMBEDDING variant');
    }
    const modelName = currentModelRequirements.modelName || this.getDefaultEmbeddingModel().modelName;
    const rawPromptContent = templateParameters(content, { ...parameters, modelName });
    const rawRequest = {
        input: rawPromptContent,
        model: modelName,
    };
    const start = $getCurrentDate();
    if (this.options.isVerbose) {
        console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
    }
    try {
        const rawResponse = await this.limiter
            .schedule(() => this.makeRequestWithNetworkRetry(() => client.embeddings.create(rawRequest)))
            .catch((error) => {
            assertsError(error);
            if (this.options.isVerbose) {
                console.info(colors__default["default"].bgRed('error'), error);
            }
            throw error;
        });
        if (this.options.isVerbose) {
            console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
        }
        const complete = $getCurrentDate();
        // Note: A single string input is expected to yield exactly one embedding
        if (rawResponse.data.length !== 1) {
            throw new PipelineExecutionError(`Expected exactly 1 data item in response, got ${rawResponse.data.length}`);
        }
        const resultContent = rawResponse.data[0].embedding;
        // Note: Embeddings have no text output, so '' is passed as the result content
        const usage = this.computeUsage(content || '', '', rawResponse);
        return exportJson({
            name: 'promptResult',
            message: `Result of \`OpenAiCompatibleExecutionTools.callEmbeddingModel\``,
            order: [],
            value: {
                content: resultContent,
                modelName: rawResponse.model || modelName,
                timing: {
                    start,
                    complete,
                },
                usage,
                rawPromptContent,
                rawRequest,
                rawResponse,
            },
        });
    }
    catch (error) {
        assertsError(error);
        // Non-retryable error -> surface it, with the attempt history when there is one
        if (!isUnsupportedParameterError(error)) {
            if (attemptStack.length > 0) {
                throw new PipelineExecutionError(`All attempts failed. Attempt history:\n` +
                    attemptStack
                        .map((a, i) => ` ${i + 1}. Model: ${a.modelName}` +
                        (a.unsupportedParameter ? `, Stripped: ${a.unsupportedParameter}` : '') +
                        `, Error: ${a.errorMessage}` +
                        (a.stripped ? ' (stripped and retried)' : ''))
                        .join('\n') +
                    `\nFinal error: ${error.message}`);
            }
            throw error;
        }
        const unsupportedParameter = parseUnsupportedParameterError(error.message);
        if (!unsupportedParameter) {
            if (this.options.isVerbose) {
                console.warn(colors__default["default"].bgYellow('Warning'), 'Could not parse unsupported parameter from error:', error.message);
            }
            throw error;
        }
        // Retry each model+parameter combination at most once to prevent infinite loops
        const retryKey = `${modelName}-${unsupportedParameter}`;
        if (retriedUnsupportedParameters.has(retryKey)) {
            attemptStack.push({
                modelName,
                unsupportedParameter,
                errorMessage: error.message,
                stripped: true,
            });
            throw new PipelineExecutionError(`All attempts failed. Attempt history:\n` +
                attemptStack
                    .map((a, i) => ` ${i + 1}. Model: ${a.modelName}` +
                    (a.unsupportedParameter ? `, Stripped: ${a.unsupportedParameter}` : '') +
                    `, Error: ${a.errorMessage}` +
                    (a.stripped ? ' (stripped and retried)' : ''))
                    .join('\n') +
                `\nFinal error: ${error.message}`);
        }
        retriedUnsupportedParameters.add(retryKey);
        if (this.options.isVerbose) {
            console.warn(colors__default["default"].bgYellow('Warning'), `Removing unsupported parameter '${unsupportedParameter}' for model '${modelName}' and retrying request`);
        }
        attemptStack.push({
            modelName,
            unsupportedParameter,
            errorMessage: error.message,
            stripped: true,
        });
        // Strip the offending parameter and retry with the reduced requirements
        const modifiedModelRequirements = removeUnsupportedModelRequirement(currentModelRequirements, unsupportedParameter);
        return this.callEmbeddingModelWithRetry(prompt, modifiedModelRequirements, attemptStack, retriedUnsupportedParameters);
    }
}
16255
+ // <- Note: [🤖] callXxxModel
16256
+ /**
16257
+ * Get the model that should be used as default
16258
+ */
16259
+ getDefaultModel(defaultModelName) {
16260
+ // Note: Match exact or prefix for model families
16261
+ const model = this.HARDCODED_MODELS.find(({ modelName }) => modelName === defaultModelName || modelName.startsWith(defaultModelName));
16262
+ if (model === undefined) {
16263
+ throw new PipelineExecutionError(spaceTrim__default["default"]((block) => `
16264
+ Cannot find model in ${this.title} models with name "${defaultModelName}" which should be used as default.
16265
+
16266
+ Available models:
16267
+ ${block(this.HARDCODED_MODELS.map(({ modelName }) => `- "${modelName}"`).join('\n'))}
16268
+
16269
+ Model "${defaultModelName}" is probably not available anymore, not installed, inaccessible or misconfigured.
16270
+
16271
+ `));
16272
+ }
16273
+ return model;
16274
+ }
16275
+ // <- Note: [🤖] getDefaultXxxModel
16276
+ /**
16277
+ * Makes a request with retry logic for network errors like ECONNRESET
16278
+ */
16279
+ async makeRequestWithNetworkRetry(requestFn) {
16280
+ let lastError;
16281
+ for (let attempt = 1; attempt <= CONNECTION_RETRIES_LIMIT; attempt++) {
16282
+ try {
16283
+ return await requestFn();
16284
+ }
16285
+ catch (error) {
16286
+ assertsError(error);
16287
+ lastError = error;
16288
+ // Check if this is a retryable network error
16289
+ const isRetryableError = this.isRetryableNetworkError(error);
16290
+ if (!isRetryableError || attempt === CONNECTION_RETRIES_LIMIT) {
16291
+ if (this.options.isVerbose && this.isRetryableNetworkError(error)) {
16292
+ console.info(colors__default["default"].bgRed('Final network error after retries'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}:`, error);
16293
+ }
16294
+ throw error;
16295
+ }
16296
+ // Calculate exponential backoff delay
16297
+ const baseDelay = 1000; // 1 second
16298
+ const backoffDelay = baseDelay * Math.pow(2, attempt - 1);
16299
+ const jitterDelay = Math.random() * 500; // Add some randomness
16300
+ const totalDelay = backoffDelay + jitterDelay;
16301
+ if (this.options.isVerbose) {
16302
+ console.info(colors__default["default"].bgYellow('Retrying network request'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}, waiting ${Math.round(totalDelay)}ms:`, error.message);
16303
+ }
16304
+ // Wait before retrying
16305
+ await new Promise((resolve) => setTimeout(resolve, totalDelay));
16306
+ }
16307
+ }
16308
+ throw lastError;
16309
+ }
16310
+ /**
16311
+ * Determines if an error is retryable (network-related errors)
16312
+ */
16313
+ isRetryableNetworkError(error) {
16314
+ const errorMessage = error.message.toLowerCase();
16315
+ const errorCode = error.code;
16316
+ // Network connection errors that should be retried
16317
+ const retryableErrors = [
16318
+ 'econnreset',
16319
+ 'enotfound',
16320
+ 'econnrefused',
16321
+ 'etimedout',
16322
+ 'socket hang up',
16323
+ 'network error',
16324
+ 'fetch failed',
16325
+ 'connection reset',
16326
+ 'connection refused',
16327
+ 'timeout',
16328
+ ];
16329
+ // Check error message
16330
+ if (retryableErrors.some((retryableError) => errorMessage.includes(retryableError))) {
16331
+ return true;
16332
+ }
16333
+ // Check error code
16334
+ if (errorCode && retryableErrors.includes(errorCode.toLowerCase())) {
16335
+ return true;
16336
+ }
16337
+ // Check for specific HTTP status codes that are retryable
16338
+ const errorWithStatus = error;
16339
+ const httpStatus = errorWithStatus.status || errorWithStatus.statusCode;
16340
+ if (httpStatus && [429, 500, 502, 503, 504].includes(httpStatus)) {
16341
+ return true;
16342
+ }
16343
+ return false;
16344
+ }
16345
+ }
16346
+ /**
16347
+ * TODO: [🛄] Some way how to re-wrap the errors from `OpenAiCompatibleExecutionTools`
16348
+ * TODO: [🛄] Maybe make custom `OpenAiCompatibleError`
16349
+ * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
16350
+ * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
16351
+ * TODO: [🧠][🦢] Make reverse adapter from LlmExecutionTools to OpenAI-compatible:
16352
+ */
16353
+
16354
/**
 * Profile for OpenAI provider
 *
 * Identity/branding metadata (name, display name, color) used to label this provider, e.g. in chat UIs.
 */
const OPENAI_PROVIDER_PROFILE = {
    name: 'OPENAI',
    fullname: 'OpenAI GPT',
    color: '#10a37f', // <- Note: OpenAI brand green
};
16362
/**
 * Execution Tools for calling OpenAI API
 *
 * Thin specialization of `OpenAiCompatibleExecutionTools`: supplies the OpenAI-specific
 * usage computation, hardcoded model list, provider profile and default model choices.
 *
 * @public exported from `@promptbook/openai`
 */
class OpenAiExecutionTools extends OpenAiCompatibleExecutionTools {
    constructor() {
        super(...arguments);
        /**
         * Computes the usage of the OpenAI API based on the response from OpenAI
         */
        this.computeUsage = computeOpenAiUsage;
        // <- Note: [🤖] getDefaultXxxModel
    }
    /* <- TODO: [🍚] `, Destroyable` */
    get title() {
        return 'OpenAI';
    }
    get description() {
        return 'Use all models provided by OpenAI';
    }
    get profile() {
        return OPENAI_PROVIDER_PROFILE;
    }
    /*
    Note: Commenting this out to avoid circular dependency
    /**
     * Create (sub)tools for calling OpenAI API Assistants
     *
     * @param assistantId Which assistant to use
     * @returns Tools for calling OpenAI API Assistants with same token
     * /
    public createAssistantSubtools(assistantId: string_token): OpenAiAssistantExecutionTools {
        return new OpenAiAssistantExecutionTools({ ...this.options, assistantId });
    }
    */
    /**
     * List all available models (non dynamically)
     *
     * Note: Purpose of this is to provide more information about models than standard listing from API
     */
    get HARDCODED_MODELS() {
        return OPENAI_MODELS;
    }
    /**
     * Default model for chat variant.
     */
    getDefaultChatModel() {
        return this.getDefaultModel('gpt-5');
    }
    /**
     * Default model for completion variant.
     */
    getDefaultCompletionModel() {
        return this.getDefaultModel('gpt-3.5-turbo-instruct');
    }
    /**
     * Default model for embedding variant.
     */
    getDefaultEmbeddingModel() {
        return this.getDefaultModel('text-embedding-3-large');
    }
}
16425
+
16426
+ /**
16427
+ * Execution Tools for calling OpenAI API Assistants
16428
+ *
16429
+ * This is useful for calling OpenAI API with a single assistant, for more wide usage use `OpenAiExecutionTools`.
16430
+ *
16431
+ * Note: [🦖] There are several different things in Promptbook:
16432
+ * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
16433
+ * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
16434
+ * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
16435
+ * - `OpenAiAssistantExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities, recommended for usage in `Agent` or `AgentLlmExecutionTools`
16436
+ *
16437
+ * @public exported from `@promptbook/openai`
16438
+ */
16439
class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
    /**
     * Creates OpenAI Execution Tools.
     *
     * @param options which are relevant are directly passed to the OpenAI client
     * @throws {NotYetImplementedError} when `options.isProxied` is set (proxy mode unsupported here)
     * @throws {NotAllowed} when `assistantId` is `null` and creating new assistants is not allowed
     */
    constructor(options) {
        var _a;
        if (options.isProxied) {
            throw new NotYetImplementedError(`Proxy mode is not yet implemented for OpenAI assistants`);
        }
        super(options);
        // Default before reading the option below; kept for engines without class-field support
        this.isCreatingNewAssistantsAllowed = false;
        this.assistantId = options.assistantId;
        this.isCreatingNewAssistantsAllowed = (_a = options.isCreatingNewAssistantsAllowed) !== null && _a !== void 0 ? _a : false;
        if (this.assistantId === null && !this.isCreatingNewAssistantsAllowed) {
            throw new NotAllowed(`Assistant ID is null and creating new assistants is not allowed - this configuration does not make sense`);
        }
        // <- TODO: !!! `OpenAiAssistantExecutionToolsOptions` - Allow `assistantId: null` together with `isCreatingNewAssistantsAllowed: true`
        // TODO: [👱] Make limiter same as in `OpenAiExecutionTools`
    }
    get title() {
        return 'OpenAI Assistant';
    }
    get description() {
        return 'Use single assistant provided by OpenAI';
    }
    /**
     * Calls OpenAI API to use a chat model.
     *
     * Creates a thread from the prompt (or `prompt.thread`, when present), runs it through
     * the configured assistant via the streaming Assistants API, then validates that exactly
     * one final text message came back and wraps it in a `promptResult`.
     *
     * @param prompt Chat prompt; `modelRequirements.modelVariant` must be `'CHAT'`
     * @returns Frozen prompt-result object with content, timing, usage and raw request/response
     * @throws {PipelineExecutionError} on wrong variant or unexpected response shape
     * @throws {NotYetImplementedError} when unsupported model requirements are specified
     */
    async callChatModel(prompt) {
        var _a, _b, _c;
        if (this.options.isVerbose) {
            console.info('💬 OpenAI callChatModel call', { prompt });
        }
        const { content, parameters, modelRequirements /*, format*/ } = prompt;
        const client = await this.getClient();
        // TODO: [☂] Use here more modelRequirements
        if (modelRequirements.modelVariant !== 'CHAT') {
            throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
        }
        // TODO: [👨‍👨‍👧‍👧] Remove:
        // Note: These requirements are configured on the assistant itself, so specifying them per-call is rejected
        for (const key of ['maxTokens', 'modelName', 'seed', 'temperature']) {
            if (modelRequirements[key] !== undefined) {
                throw new NotYetImplementedError(`In \`OpenAiAssistantExecutionTools\` you cannot specify \`${key}\``);
            }
        }
        /*
        TODO: [👨‍👨‍👧‍👧] Implement all of this for Assistants
        const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
        const modelSettings = {
            model: modelName,

            temperature: modelRequirements.temperature,

            // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
            // <- Note: [🧆]
        } as OpenAI.Chat.Completions.CompletionCreateParamsNonStreaming; // <- TODO: Guard here types better

        if (format === 'JSON') {
            modelSettings.response_format = {
                type: 'json_object',
            };
        }
        */
        // <- TODO: [🚸] Not all models are compatible with JSON mode
        // > 'response_format' of type 'json_object' is not supported with this model.
        const rawPromptContent = templateParameters(content, {
            ...parameters,
            modelName: 'assistant',
            // <- [🧠] What is the best value here
        });
        const rawRequest = {
            // TODO: [👨‍👨‍👧‍👧] ...modelSettings,
            // TODO: [👨‍👨‍👧‍👧][🧠] What about system message for assistants, does it make sense - combination of OpenAI assistants with Promptbook Personas
            assistant_id: this.assistantId,
            thread: {
                // Note: When the prompt carries a conversation thread, replay it; otherwise send a single user message
                messages: 'thread' in prompt &&
                    Array.isArray(prompt.thread)
                    ? prompt.thread.map((msg) => ({
                        role: msg.role === 'assistant' ? 'assistant' : 'user',
                        content: msg.content,
                    }))
                    : [{ role: 'user', content: rawPromptContent }],
            },
            // <- TODO: Add user identification here> user: this.options.user,
        };
        const start = $getCurrentDate();
        let complete;
        if (this.options.isVerbose) {
            console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
        }
        const stream = await client.beta.threads.createAndRunStream(rawRequest);
        // Note: The stream event handlers below are verbose-only logging; the actual result
        //       is collected synchronously afterwards via `stream.finalMessages()`
        stream.on('connect', () => {
            if (this.options.isVerbose) {
                console.info('connect', stream.currentEvent);
            }
        });
        stream.on('messageDelta', (messageDelta) => {
            var _a;
            if (this.options.isVerbose &&
                messageDelta &&
                messageDelta.content &&
                messageDelta.content[0] &&
                messageDelta.content[0].type === 'text') {
                console.info('messageDelta', (_a = messageDelta.content[0].text) === null || _a === void 0 ? void 0 : _a.value);
            }
            // <- TODO: [🐚] Make streaming and running tasks working
        });
        stream.on('messageCreated', (message) => {
            if (this.options.isVerbose) {
                console.info('messageCreated', message);
            }
        });
        stream.on('messageDone', (message) => {
            if (this.options.isVerbose) {
                console.info('messageDone', message);
            }
        });
        const rawResponse = await stream.finalMessages();
        if (this.options.isVerbose) {
            console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
        }
        // Shape validation: exactly one final message with exactly one text content part is expected
        if (rawResponse.length !== 1) {
            throw new PipelineExecutionError(`There is NOT 1 BUT ${rawResponse.length} finalMessages from OpenAI`);
        }
        if (rawResponse[0].content.length !== 1) {
            throw new PipelineExecutionError(`There is NOT 1 BUT ${rawResponse[0].content.length} finalMessages content from OpenAI`);
        }
        if (((_a = rawResponse[0].content[0]) === null || _a === void 0 ? void 0 : _a.type) !== 'text') {
            throw new PipelineExecutionError(`There is NOT 'text' BUT ${(_b = rawResponse[0].content[0]) === null || _b === void 0 ? void 0 : _b.type} finalMessages content type from OpenAI`);
        }
        const resultContent = (_c = rawResponse[0].content[0]) === null || _c === void 0 ? void 0 : _c.text.value;
        // <- TODO: [🧠] There are also annotations, maybe use them
        // eslint-disable-next-line prefer-const
        complete = $getCurrentDate();
        const usage = UNCERTAIN_USAGE;
        // <- TODO: [🥘] Compute real usage for assistant
        // ?> const usage = computeOpenAiUsage(content, resultContent || '', rawResponse);
        if (resultContent === null) {
            throw new PipelineExecutionError('No response message from OpenAI');
        }
        return exportJson({
            name: 'promptResult',
            message: `Result of \`OpenAiAssistantExecutionTools.callChatModel\``,
            order: [],
            value: {
                content: resultContent,
                modelName: 'assistant',
                // <- TODO: [🥘] Detect used model in assistant
                // ?> model: rawResponse.model || modelName,
                timing: {
                    start,
                    complete,
                },
                usage,
                rawPromptContent,
                rawRequest,
                rawResponse,
                // <- [🗯]
            },
        });
    }
    /**
     * NOTE(review): This looks like debug/experimentation code shipped into the bundle:
     * it retrieves and MUTATES a hard-coded assistant id and then awaits
     * `waitasecond.forEver()`, i.e. the returned promise NEVER resolves.
     * Confirm whether it should be removed or gated before release.
     */
    async playground() {
        const client = await this.getClient();
        // List all assistants
        const assistants = await client.beta.assistants.list();
        console.log('!!! Assistants:', assistants);
        // Get details of a specific assistant
        const assistantId = 'asst_MO8fhZf4dGloCfXSHeLcIik0';
        const assistant = await client.beta.assistants.retrieve(assistantId);
        console.log('!!! Assistant Details:', assistant);
        // Update an assistant
        const updatedAssistant = await client.beta.assistants.update(assistantId, {
            name: assistant.name + '(M)',
            description: 'Updated description via Promptbook',
            metadata: {
                [Math.random().toString(36).substring(2, 15)]: new Date().toISOString(),
            },
        });
        console.log('!!! Updated Assistant:', updatedAssistant);
        await waitasecond.forEver();
    }
    /**
     * Creates a brand-new OpenAI assistant and returns fresh tools bound to it
     *
     * NOTE(review): `await this.playground()` below never resolves (see `playground`),
     * which makes the rest of this method unreachable — confirm intent.
     * NOTE(review): `alert(...)` is a browser-only global and throws a ReferenceError in Node.
     *
     * @param options `name` and `instructions` for the new assistant
     * @returns New `OpenAiAssistantExecutionTools` bound to the created assistant
     * @throws {NotAllowed} when `isCreatingNewAssistantsAllowed` is not enabled
     */
    async createNewAssistant(options) {
        if (!this.isCreatingNewAssistantsAllowed) {
            throw new NotAllowed(`Creating new assistants is not allowed. Set \`isCreatingNewAssistantsAllowed: true\` in options to enable this feature.`);
        }
        await this.playground();
        const { name, instructions } = options;
        const client = await this.getClient();
        /*
        TODO: !!!
        async function downloadFile(url: string, folder = './tmp'): Promise<string> {
            const filename = path.basename(url.split('?')[0]);
            const filepath = path.join(folder, filename);

            if (!fs.existsSync(folder)) fs.mkdirSync(folder);

            const res = await fetch(url);
            if (!res.ok) throw new Error(`Download error: ${url}`);
            const buffer = await res.arrayBuffer();
            fs.writeFileSync(filepath, Buffer.from(buffer));
            console.log(`📥 File downloaded: $(unknown)`);

            return filepath;
        }

        async function uploadFileToOpenAI(filepath: string) {
            const file = await client.files.create({
                file: fs.createReadStream(filepath),
                purpose: 'assistants',
            });
            console.log(`⬆️ File uploaded to OpenAI: ${file.filename} (${file.id})`);
            return file;
        }

        // 🌐 URL addresses of files to upload
        const fileUrls = [
            'https://raw.githubusercontent.com/vercel/next.js/canary/packages/next/README.md',
            'https://raw.githubusercontent.com/openai/openai-cookbook/main/examples/How_to_call_the_Assistants_API_with_Node.js.ipynb',
        ];

        // 1️⃣ Download files from URL
        const localFiles = [];
        for (const url of fileUrls) {
            const filepath = await downloadFile(url);
            localFiles.push(filepath);
        }

        // 2️⃣ Upload files to OpenAI
        const uploadedFiles = [];
        for (const filepath of localFiles) {
            const file = await uploadFileToOpenAI(filepath);
            uploadedFiles.push(file.id);
        }
        */
        alert('!!!! Creating new OpenAI assistant');
        // 3️⃣ Create assistant with uploaded files
        const assistant = await client.beta.assistants.create({
            name,
            description: 'Assistant created via Promptbook',
            model: 'gpt-4o',
            instructions,
            tools: [/* TODO: [🧠] Maybe add { type: 'code_interpreter' }, */ { type: 'file_search' }],
            // !!!! file_ids: uploadedFiles,
        });
        console.log(`✅ Assistant created: ${assistant.id}`);
        // TODO: !!!! Try listing existing assistants
        // TODO: !!!! Try marking existing assistants by DISCRIMINANT
        // TODO: !!!! Allow to update and reconnect to existing assistants
        // Note: The returned tools are locked to the freshly created assistant and cannot create further ones
        return new OpenAiAssistantExecutionTools({
            ...this.options,
            isCreatingNewAssistantsAllowed: false,
            assistantId: assistant.id,
        });
    }
    /**
     * Discriminant for type guards
     */
    get discriminant() {
        return DISCRIMINANT;
    }
    /**
     * Type guard to check if given `LlmExecutionTools` are instanceof `OpenAiAssistantExecutionTools`
     *
     * Note: This is useful when you can possibly have multiple versions of `@promptbook/openai` installed
     */
    static isOpenAiAssistantExecutionTools(llmExecutionTools) {
        return llmExecutionTools.discriminant === DISCRIMINANT;
    }
}
16710
/**
 * Discriminant for type guards
 *
 * Note: Compared by `OpenAiAssistantExecutionTools.isOpenAiAssistantExecutionTools` so that
 *       instances are recognized even across multiple installed copies of the package
 *
 * @private const of `OpenAiAssistantExecutionTools`
 */
const DISCRIMINANT = 'OPEN_AI_ASSISTANT_V1';
16716
+ /**
16717
+ * TODO: [🧠][🧙‍♂️] Maybe there can be some wizard for those who want to use just OpenAI
16718
+ * TODO: Maybe make custom OpenAiError
16719
+ * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
16720
+ * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
16721
+ */
16722
+
16723
+ /**
16724
+ * Execution Tools for calling LLM models with a predefined agent "soul"
16725
+ * This wraps underlying LLM execution tools and applies agent-specific system prompts and requirements
16726
+ *
16727
+ * Note: [🦖] There are several different things in Promptbook:
16728
+ * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
16729
+ * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
16730
+ * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
16731
+ * - `OpenAiAssistantExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities, recommended for usage in `Agent` or `AgentLlmExecutionTools`
16732
+ *
16733
+ * @public exported from `@promptbook/core`
16734
+ */
16735
+ class AgentLlmExecutionTools {
16736
+ /**
16737
+ * Creates new AgentLlmExecutionTools
16738
+ *
16739
+ * @param llmTools The underlying LLM execution tools to wrap
16740
+ * @param agentSource The agent source string that defines the agent's behavior
16741
+ */
16742
+ constructor(options) {
16743
+ this.options = options;
16744
+ /**
16745
+ * Cached model requirements to avoid re-parsing the agent source
16746
+ */
16747
+ this._cachedModelRequirements = null;
16748
+ /**
16749
+ * Cached parsed agent information
16750
+ */
16751
+ this._cachedAgentInfo = null;
16752
+ }
16753
+ /**
16754
+ * Get cached or parse agent information
16755
+ */
16756
+ getAgentInfo() {
16757
+ if (this._cachedAgentInfo === null) {
16758
+ this._cachedAgentInfo = parseAgentSource(this.options.agentSource);
16759
+ }
16760
+ return this._cachedAgentInfo;
16761
+ }
16762
    /**
     * Get cached or create agent model requirements
     *
     * On first call, lists the models available from the underlying tools and lets
     * `createAgentModelRequirements` pick the best one; the result is memoized.
     *
     * NOTE(review): The cache stores the resolved value, not the promise — two calls made
     * concurrently before the first resolves will both run `listModels()` and
     * `createAgentModelRequirements()`; confirm whether this duplicated work matters.
     */
    async getAgentModelRequirements() {
        if (this._cachedModelRequirements === null) {
            // Get available models from underlying LLM tools for best model selection
            const availableModels = await this.options.llmTools.listModels();
            this._cachedModelRequirements = await createAgentModelRequirements(this.options.agentSource, undefined, // Let the function pick the best model
            availableModels);
        }
        return this._cachedModelRequirements;
    }
16774
+ get title() {
16775
+ const agentInfo = this.getAgentInfo();
16776
+ return (agentInfo.agentName || 'Agent');
16777
+ }
16778
+ get description() {
16779
+ const agentInfo = this.getAgentInfo();
16780
+ return agentInfo.personaDescription || 'AI Agent with predefined personality and behavior';
16781
+ }
16782
+ get profile() {
16783
+ const agentInfo = this.getAgentInfo();
16784
+ if (!agentInfo.agentName) {
16785
+ return undefined;
16786
+ }
16787
+ return {
16788
+ name: agentInfo.agentName.toUpperCase().replace(/\s+/g, '_'),
16789
+ fullname: agentInfo.agentName,
16790
+ color: agentInfo.meta.color || '#6366f1',
16791
+ avatarSrc: agentInfo.meta.image,
16792
+ };
16793
+ }
16794
    /**
     * Checks that the wrapped LLM tools are configured correctly
     *
     * Delegates directly to the underlying `llmTools` — this wrapper adds no configuration of its own.
     */
    checkConfiguration() {
        // Check underlying tools configuration
        return this.options.llmTools.checkConfiguration();
    }
16798
    /**
     * Returns a virtual model name representing the agent behavior
     *
     * The name is the kebab-cased agent title suffixed with a short id derived from a
     * SHA-256 hash of the agent source, so distinct agents get stable, distinct names.
     */
    get modelName() {
        // NOTE(review): `hexEncoder.parse` expects a hexadecimal string, but `agentSource`
        // is arbitrary book text — presumably a `Utf8` encoder was intended; verify the
        // resulting hash is stable and meaningful for non-hex input.
        const hash = cryptoJs.SHA256(hexEncoder__default["default"].parse(this.options.agentSource))
            // <- TODO: [🥬] Encapsulate sha256 to some private utility function
            .toString( /* hex */);
        // <- TODO: [🥬] Make some system for hashes and ids of promptbook
        const agentId = hash.substring(0, 10);
        // <- TODO: [🥬] Make some system for hashes and ids of promptbook
        return (normalizeToKebabCase(this.title) + '-' + agentId);
    }
16810
+ listModels() {
16811
+ return [
16812
+ {
16813
+ modelName: this.modelName,
16814
+ modelVariant: 'CHAT',
16815
+ modelTitle: `${this.title} (Agent Chat Default)`,
16816
+ modelDescription: `Chat model with agent behavior: ${this.description}`,
16817
+ },
16818
+ // <- Note: We only list a single "virtual" agent model here as this wrapper only supports chat prompts
16819
+ ];
16820
+ }
16821
+ /**
16822
+ * Calls the chat model with agent-specific system prompt and requirements
16823
+ */
16824
+ async callChatModel(prompt) {
16825
+ if (!this.options.llmTools.callChatModel) {
16826
+ throw new Error('Underlying LLM execution tools do not support chat model calls');
16827
+ }
16828
+ // Ensure we're working with a chat prompt
14861
16829
  if (prompt.modelRequirements.modelVariant !== 'CHAT') {
14862
16830
  throw new Error('AgentLlmExecutionTools only supports chat prompts');
14863
16831
  }
14864
- const chatPrompt = prompt;
14865
- // Get agent model requirements (cached with best model selection)
14866
16832
  const modelRequirements = await this.getAgentModelRequirements();
14867
- // Create modified chat prompt with agent system message
14868
- const modifiedChatPrompt = {
14869
- ...chatPrompt,
14870
- modelRequirements: {
14871
- ...chatPrompt.modelRequirements,
14872
- ...modelRequirements,
14873
- // Prepend agent system message to existing system message
14874
- systemMessage: modelRequirements.systemMessage +
14875
- (chatPrompt.modelRequirements.systemMessage
14876
- ? `\n\n${chatPrompt.modelRequirements.systemMessage}`
14877
- : ''),
14878
- },
14879
- };
14880
- const underlyingLlmResult = await this.llmTools.callChatModel(modifiedChatPrompt);
16833
+ const chatPrompt = prompt;
16834
+ let underlyingLlmResult;
16835
+ if (OpenAiAssistantExecutionTools.isOpenAiAssistantExecutionTools(this.options.llmTools)) {
16836
+ // <- TODO: !!! Check also `isCreatingNewAssistantsAllowed` and warn about it
16837
+ const assistant = await this.options.llmTools.createNewAssistant({
16838
+ name: this.title,
16839
+ instructions: modelRequirements.systemMessage,
16840
+ });
16841
+ // <- TODO: !!! Cache the assistant in prepareCache
16842
+ underlyingLlmResult = await assistant.callChatModel(chatPrompt);
16843
+ }
16844
+ else {
16845
+ // Create modified chat prompt with agent system message
16846
+ const modifiedChatPrompt = {
16847
+ ...chatPrompt,
16848
+ modelRequirements: {
16849
+ ...chatPrompt.modelRequirements,
16850
+ ...modelRequirements,
16851
+ // Prepend agent system message to existing system message
16852
+ systemMessage: modelRequirements.systemMessage +
16853
+ (chatPrompt.modelRequirements.systemMessage
16854
+ ? `\n\n${chatPrompt.modelRequirements.systemMessage}`
16855
+ : ''),
16856
+ },
16857
+ };
16858
+ underlyingLlmResult = await this.options.llmTools.callChatModel(modifiedChatPrompt);
16859
+ }
14881
16860
  let content = underlyingLlmResult.content;
14882
16861
  // Note: Cleanup the AI artifacts from the content
14883
16862
  content = humanizeAiText(content);
@@ -14902,15 +16881,11 @@
14902
16881
  * @public exported from `@promptbook/core`
14903
16882
  */
14904
16883
  const createAgentLlmExecutionTools = Object.assign((options) => {
14905
- return new AgentLlmExecutionTools(options.llmTools, options.agentSource);
16884
+ return new AgentLlmExecutionTools(options);
14906
16885
  }, {
14907
16886
  packageName: '@promptbook/core',
14908
16887
  className: 'AgentLlmExecutionTools',
14909
16888
  });
14910
- /**
14911
- * TODO: [🧠] Consider adding validation for agent source format
14912
- * TODO: [🧠] Consider adding options for caching behavior
14913
- */
14914
16889
 
14915
16890
  /**
14916
16891
  * Metadata for Agent LLM execution tools
@@ -15541,7 +17516,7 @@
15541
17516
  \`
15542
17517
  `));
15543
17518
  }
15544
- return bookString;
17519
+ return padBook(bookString);
15545
17520
  }
15546
17521
  /**
15547
17522
  * TODO: [🧠][🈴] Where is the best location for this file
@@ -15871,6 +17846,308 @@
15871
17846
  }
15872
17847
  }
15873
17848
 
17849
+ /**
17850
+ * Register for book transpilers.
17851
+ *
17852
+ * Note: `$` is used to indicate that this interacts with the global scope
17853
+ * @singleton Only one instance of each register is created per build, but there can be more instances across different builds or environments.
17854
+ * @see https://github.com/webgptorg/promptbook/issues/249
17855
+ *
17856
+ * @public exported from `@promptbook/core`
17857
+ */
17858
+ const $bookTranspilersRegister = new $Register('book_transpilers');
17859
+ /**
17860
+ * TODO: [®] DRY Register logic
17861
+ */
17862
+
17863
+ /**
17864
+ * Converts a book into a 1:1 formatted markdown
17865
+ *
17866
+ * @public exported from `@promptbook/core`
17867
+ */
17868
+ const FormattedBookInMarkdownTranspiler = {
17869
+ name: 'formatted-book-in-markdown',
17870
+ title: 'Formatted Book in Markdown',
17871
+ packageName: '@promptbook/core',
17872
+ className: 'FormattedBookInMarkdownTranspiler',
17873
+ transpileBook(book, tools, options) {
17874
+ let lines = book.trim( /* <- Note: Not using `spaceTrim` because its not needed */).split('\n');
17875
+ if (lines[0]) {
17876
+ lines[0] = `**<ins>${lines[0]}</ins>**`;
17877
+ }
17878
+ for (let i = 1; i < lines.length; i++) {
17879
+ let line = lines[i];
17880
+ line = line === null || line === void 0 ? void 0 : line.split('PERSONA').join('**PERSONA**');
17881
+ line = line === null || line === void 0 ? void 0 : line.split('RULE').join('**RULE**');
17882
+ line = line === null || line === void 0 ? void 0 : line.split('META').join('**META**');
17883
+ line = line === null || line === void 0 ? void 0 : line.split('KNOWLEDGE').join('**KNOWLEDGE**');
17884
+ line = line === null || line === void 0 ? void 0 : line.split('ACTION').join('**ACTION**');
17885
+ // <- TODO: !!! Unhardcode these commitments
17886
+ lines[i] = line;
17887
+ }
17888
+ // lines = lines.map((line) => `> ${line}`);
17889
+ lines = lines.map((line) => `${line}<br/>`);
17890
+ return lines.join('\n');
17891
+ },
17892
+ };
17893
+
17894
+ /**
17895
+ * Transpiler to Javascript code using OpenAI SDK.
17896
+ *
17897
+ * @public exported from `@promptbook/core`
17898
+ */
17899
+ const OpenAiSdkTranspiler = {
17900
+ name: 'openai-sdk',
17901
+ title: 'OpenAI SDK',
17902
+ packageName: '@promptbook/core',
17903
+ className: 'OpenAiSdkTranspiler',
17904
+ async transpileBook(book, tools, options) {
17905
+ const { agentName } = await parseAgentSource(book);
17906
+ const modelRequirements = await createAgentModelRequirements(book);
17907
+ const { commitments } = parseAgentSourceWithCommitments(book);
17908
+ const knowledgeCommitments = commitments.filter((commitment) => commitment.type === 'KNOWLEDGE');
17909
+ const directKnowledge = knowledgeCommitments
17910
+ .map((commitment) => commitment.content.trim())
17911
+ .filter((content) => {
17912
+ try {
17913
+ new URL(content);
17914
+ return false;
17915
+ }
17916
+ catch (_a) {
17917
+ return true;
17918
+ }
17919
+ });
17920
+ const knowledgeSources = knowledgeCommitments
17921
+ .map((commitment) => commitment.content.trim())
17922
+ .filter((content) => {
17923
+ try {
17924
+ new URL(content);
17925
+ return true;
17926
+ }
17927
+ catch (_a) {
17928
+ return false;
17929
+ }
17930
+ });
17931
+ const KNOWLEDGE_THRESHOLD = 1000;
17932
+ if (directKnowledge.join('\n').length > KNOWLEDGE_THRESHOLD || knowledgeSources.length > 0) {
17933
+ return spaceTrim__default["default"]((block) => `
17934
+ #!/usr/bin/env node
17935
+
17936
+ import * as dotenv from 'dotenv';
17937
+ dotenv.config({ path: '.env' });
17938
+
17939
+ import { spaceTrim } from '@promptbook/utils';
17940
+ import OpenAI from 'openai';
17941
+ import readline from 'readline';
17942
+ import { Document, VectorStoreIndex, SimpleDirectoryReader } from 'llamaindex';
17943
+
17944
+ // ---- CONFIG ----
17945
+ const client = new OpenAI({
17946
+ apiKey: process.env.OPENAI_API_KEY,
17947
+ });
17948
+
17949
+ // ---- KNOWLEDGE ----
17950
+ const knowledge = ${block(JSON.stringify(directKnowledge, null, 4) /* <- TODO: Use here Promptbook stringify */)};
17951
+ const knowledgeSources = ${block(JSON.stringify(knowledgeSources, null, 4) /* <- TODO: Use here Promptbook stringify */)};
17952
+ let index;
17953
+
17954
+ async function setupKnowledge() {
17955
+ const documents = knowledge.map((text) => new Document({ text }));
17956
+
17957
+ for (const source of knowledgeSources) {
17958
+ try {
17959
+ // Note: SimpleDirectoryReader is a bit of a misnomer, it can read single files
17960
+ const reader = new SimpleDirectoryReader();
17961
+ const sourceDocuments = await reader.loadData(source);
17962
+ documents.push(...sourceDocuments);
17963
+ } catch (error) {
17964
+ console.error(\`Error loading knowledge from \${source}:\`, error);
17965
+ }
17966
+ }
17967
+
17968
+ if (documents.length > 0) {
17969
+ index = await VectorStoreIndex.fromDocuments(documents);
17970
+ console.log('🧠 Knowledge base prepared.');
17971
+ }
17972
+ }
17973
+
17974
+ // ---- CLI SETUP ----
17975
+ const rl = readline.createInterface({
17976
+ input: process.stdin,
17977
+ output: process.stdout,
17978
+ });
17979
+
17980
+ const chatHistory = [
17981
+ {
17982
+ role: 'system',
17983
+ content: spaceTrim(\`
17984
+ ${block(modelRequirements.systemMessage)}
17985
+ \`),
17986
+ },
17987
+ ];
17988
+
17989
+ async function ask(question) {
17990
+ let context = '';
17991
+ if (index) {
17992
+ const retriever = index.asRetriever();
17993
+ const relevantNodes = await retriever.retrieve(question);
17994
+ context = relevantNodes.map((node) => node.getContent()).join('\\n\\n');
17995
+ }
17996
+
17997
+ const userMessage = spaceTrim(\`
17998
+ ${block(spaceTrim__default["default"](`
17999
+ Here is some additional context to help you answer the question:
18000
+ \${context}
18001
+
18002
+ ---
18003
+
18004
+ My question is:
18005
+ \${question}
18006
+ `))}
18007
+ \`);
18008
+
18009
+
18010
+ chatHistory.push({ role: 'user', content: userMessage });
18011
+
18012
+ const response = await client.chat.completions.create({
18013
+ model: 'gpt-4o',
18014
+ messages: chatHistory,
18015
+ temperature: ${modelRequirements.temperature},
18016
+ });
18017
+
18018
+ const answer = response.choices[0].message.content;
18019
+ console.log('\\n🧠 ${agentName}:', answer, '\\n');
18020
+
18021
+ chatHistory.push({ role: 'assistant', content: answer });
18022
+ promptUser();
18023
+ }
18024
+
18025
+ function promptUser() {
18026
+ rl.question('💬 You: ', (input) => {
18027
+ if (input.trim().toLowerCase() === 'exit') {
18028
+ console.log('👋 Bye!');
18029
+ rl.close();
18030
+ return;
18031
+ }
18032
+ ask(input);
18033
+ });
18034
+ }
18035
+
18036
+ (async () => {
18037
+ await setupKnowledge();
18038
+ console.log("🤖 Chat with ${agentName} (type 'exit' to quit)\\n");
18039
+ promptUser();
18040
+ })();
18041
+ `);
18042
+ }
18043
+ const source = spaceTrim__default["default"]((block) => `
18044
+
18045
+ #!/usr/bin/env node
18046
+
18047
+ import * as dotenv from 'dotenv';
18048
+
18049
+ dotenv.config({ path: '.env' });
18050
+
18051
+ import { spaceTrim } from '@promptbook/utils';
18052
+ import OpenAI from 'openai';
18053
+ import readline from 'readline';
18054
+
18055
+ // ---- CONFIG ----
18056
+ const client = new OpenAI({
18057
+ apiKey: process.env.OPENAI_API_KEY,
18058
+ });
18059
+
18060
+ // ---- CLI SETUP ----
18061
+ const rl = readline.createInterface({
18062
+ input: process.stdin,
18063
+ output: process.stdout,
18064
+ });
18065
+
18066
+ const chatHistory = [
18067
+ {
18068
+ role: 'system',
18069
+ content: spaceTrim(\`
18070
+ ${block(modelRequirements.systemMessage)}
18071
+ \`),
18072
+ },
18073
+ ];
18074
+
18075
+ async function ask(question) {
18076
+ chatHistory.push({ role: 'user', content: question });
18077
+
18078
+ const response = await client.chat.completions.create({
18079
+ model: 'gpt-4o',
18080
+ messages: chatHistory,
18081
+ temperature: ${modelRequirements.temperature},
18082
+ });
18083
+
18084
+ const answer = response.choices[0].message.content;
18085
+ console.log('\\n🧠 ${agentName}:', answer, '\\n');
18086
+
18087
+ chatHistory.push({ role: 'assistant', content: answer });
18088
+ promptUser();
18089
+ }
18090
+
18091
+ function promptUser() {
18092
+ rl.question('💬 You: ', (input) => {
18093
+ if (input.trim().toLowerCase() === 'exit') {
18094
+ console.log('👋 Bye!');
18095
+ rl.close();
18096
+ return;
18097
+ }
18098
+ ask(input);
18099
+ });
18100
+ }
18101
+
18102
+ console.log("🤖 Chat with ${agentName} (type 'exit' to quit)\\n");
18103
+ promptUser();
18104
+
18105
+ `);
18106
+ return source;
18107
+ },
18108
+ };
18109
+
18110
+ /**
18111
+ * Provide information about Promptbook, engine version, book language version, servers, ...
18112
+ *
18113
+ * @param options Which information to include
18114
+ * @returns Information about Promptbook in markdown format
18115
+ *
18116
+ * @public exported from `@promptbook/core`
18117
+ */
18118
+ function aboutPromptbookInformation(options) {
18119
+ const { isServersInfoIncluded = true } = options || {};
18120
+ const fullInfoPieces = [];
18121
+ const basicInfo = spaceTrim__default["default"](`
18122
+
18123
+ # ${NAME}
18124
+
18125
+ ${CLAIM}
18126
+
18127
+ - [Promptbook engine version \`${PROMPTBOOK_ENGINE_VERSION}\`](https://github.com/webgptorg/promptbook)
18128
+ - [Book language version \`${BOOK_LANGUAGE_VERSION}\`](https://github.com/webgptorg/book)
18129
+
18130
+ `);
18131
+ fullInfoPieces.push(basicInfo);
18132
+ if (isServersInfoIncluded) {
18133
+ const serversInfo = spaceTrim__default["default"]((block) => `
18134
+
18135
+ ## Servers
18136
+
18137
+ ${block(REMOTE_SERVER_URLS.map(({ title, urls, isAnonymousModeAllowed, description }, index) => `${index + 1}. ${title} ${description}
18138
+ ${isAnonymousModeAllowed ? '🐱‍💻 ' : ''} ${urls.join(', ')}
18139
+ `).join('\n'))}
18140
+ `);
18141
+ fullInfoPieces.push(serversInfo);
18142
+ }
18143
+ const fullInfo = spaceTrim__default["default"](fullInfoPieces.join('\n\n'));
18144
+ return fullInfo;
18145
+ }
18146
+ /**
18147
+ * TODO: [🗽] Unite branding and make single place for it
18148
+ */
18149
+
18150
+ exports.$bookTranspilersRegister = $bookTranspilersRegister;
15874
18151
  exports.$llmToolsMetadataRegister = $llmToolsMetadataRegister;
15875
18152
  exports.$llmToolsRegister = $llmToolsRegister;
15876
18153
  exports.$scrapersMetadataRegister = $scrapersMetadataRegister;
@@ -15879,6 +18156,7 @@
15879
18156
  exports.ADMIN_GITHUB_NAME = ADMIN_GITHUB_NAME;
15880
18157
  exports.API_REQUEST_TIMEOUT = API_REQUEST_TIMEOUT;
15881
18158
  exports.AbstractFormatError = AbstractFormatError;
18159
+ exports.Agent = Agent;
15882
18160
  exports.AgentLlmExecutionTools = AgentLlmExecutionTools;
15883
18161
  exports.AuthenticationError = AuthenticationError;
15884
18162
  exports.BIG_DATASET_TRESHOLD = BIG_DATASET_TRESHOLD;
@@ -15894,6 +18172,7 @@
15894
18172
  exports.CompletionFormfactorDefinition = CompletionFormfactorDefinition;
15895
18173
  exports.CsvFormatError = CsvFormatError;
15896
18174
  exports.CsvFormatParser = CsvFormatParser;
18175
+ exports.DEFAULT_AGENTS_DIRNAME = DEFAULT_AGENTS_DIRNAME;
15897
18176
  exports.DEFAULT_BOOK = DEFAULT_BOOK;
15898
18177
  exports.DEFAULT_BOOKS_DIRNAME = DEFAULT_BOOKS_DIRNAME;
15899
18178
  exports.DEFAULT_BOOK_OUTPUT_PARAMETER_NAME = DEFAULT_BOOK_OUTPUT_PARAMETER_NAME;
@@ -15922,6 +18201,7 @@
15922
18201
  exports.ExpectError = ExpectError;
15923
18202
  exports.FAILED_VALUE_PLACEHOLDER = FAILED_VALUE_PLACEHOLDER;
15924
18203
  exports.FORMFACTOR_DEFINITIONS = FORMFACTOR_DEFINITIONS;
18204
+ exports.FormattedBookInMarkdownTranspiler = FormattedBookInMarkdownTranspiler;
15925
18205
  exports.GENERIC_PIPELINE_INTERFACE = GENERIC_PIPELINE_INTERFACE;
15926
18206
  exports.GeneratorFormfactorDefinition = GeneratorFormfactorDefinition;
15927
18207
  exports.GenericFormfactorDefinition = GenericFormfactorDefinition;
@@ -15939,10 +18219,13 @@
15939
18219
  exports.MultipleLlmExecutionTools = MultipleLlmExecutionTools;
15940
18220
  exports.NAME = NAME;
15941
18221
  exports.NonTaskSectionTypes = NonTaskSectionTypes;
18222
+ exports.NotAllowed = NotAllowed;
15942
18223
  exports.NotFoundError = NotFoundError;
15943
18224
  exports.NotYetImplementedCommitmentDefinition = NotYetImplementedCommitmentDefinition;
15944
18225
  exports.NotYetImplementedError = NotYetImplementedError;
15945
18226
  exports.ORDER_OF_PIPELINE_JSON = ORDER_OF_PIPELINE_JSON;
18227
+ exports.OpenAiSdkTranspiler = OpenAiSdkTranspiler;
18228
+ exports.PADDING_LINES = PADDING_LINES;
15946
18229
  exports.PENDING_VALUE_PLACEHOLDER = PENDING_VALUE_PLACEHOLDER;
15947
18230
  exports.PLAYGROUND_APP_ID = PLAYGROUND_APP_ID;
15948
18231
  exports.PROMPTBOOK_CHAT_COLOR = PROMPTBOOK_CHAT_COLOR;
@@ -15950,6 +18233,7 @@
15950
18233
  exports.PROMPTBOOK_ENGINE_VERSION = PROMPTBOOK_ENGINE_VERSION;
15951
18234
  exports.PROMPTBOOK_ERRORS = PROMPTBOOK_ERRORS;
15952
18235
  exports.PROMPTBOOK_LOGO_URL = PROMPTBOOK_LOGO_URL;
18236
+ exports.PROMPTBOOK_SYNTAX_COLORS = PROMPTBOOK_SYNTAX_COLORS;
15953
18237
  exports.ParseError = ParseError;
15954
18238
  exports.PipelineExecutionError = PipelineExecutionError;
15955
18239
  exports.PipelineLogicError = PipelineLogicError;
@@ -15988,10 +18272,10 @@
15988
18272
  exports._OpenAiMetadataRegistration = _OpenAiMetadataRegistration;
15989
18273
  exports._PdfScraperMetadataRegistration = _PdfScraperMetadataRegistration;
15990
18274
  exports._WebsiteScraperMetadataRegistration = _WebsiteScraperMetadataRegistration;
18275
+ exports.aboutPromptbookInformation = aboutPromptbookInformation;
15991
18276
  exports.addUsage = addUsage;
15992
18277
  exports.book = book;
15993
18278
  exports.cacheLlmTools = cacheLlmTools;
15994
- exports.collectionToJson = collectionToJson;
15995
18279
  exports.compilePipeline = compilePipeline;
15996
18280
  exports.computeCosineSimilarity = computeCosineSimilarity;
15997
18281
  exports.countUsage = countUsage;
@@ -15999,13 +18283,13 @@
15999
18283
  exports.createAgentModelRequirements = createAgentModelRequirements;
16000
18284
  exports.createAgentModelRequirementsWithCommitments = createAgentModelRequirementsWithCommitments;
16001
18285
  exports.createBasicAgentModelRequirements = createBasicAgentModelRequirements;
16002
- exports.createCollectionFromJson = createCollectionFromJson;
16003
- exports.createCollectionFromPromise = createCollectionFromPromise;
16004
- exports.createCollectionFromUrl = createCollectionFromUrl;
16005
18286
  exports.createEmptyAgentModelRequirements = createEmptyAgentModelRequirements;
16006
18287
  exports.createLlmToolsFromConfiguration = createLlmToolsFromConfiguration;
18288
+ exports.createPipelineCollectionFromJson = createPipelineCollectionFromJson;
18289
+ exports.createPipelineCollectionFromPromise = createPipelineCollectionFromPromise;
18290
+ exports.createPipelineCollectionFromUrl = createPipelineCollectionFromUrl;
16007
18291
  exports.createPipelineExecutor = createPipelineExecutor;
16008
- exports.createSubcollection = createSubcollection;
18292
+ exports.createPipelineSubcollection = createPipelineSubcollection;
16009
18293
  exports.embeddingVectorToString = embeddingVectorToString;
16010
18294
  exports.executionReportJsonToString = executionReportJsonToString;
16011
18295
  exports.extractParameterNamesFromTask = extractParameterNamesFromTask;
@@ -16028,9 +18312,11 @@
16028
18312
  exports.limitTotalUsage = limitTotalUsage;
16029
18313
  exports.makeKnowledgeSourceHandler = makeKnowledgeSourceHandler;
16030
18314
  exports.migratePipeline = migratePipeline;
18315
+ exports.padBook = padBook;
16031
18316
  exports.parseAgentSource = parseAgentSource;
16032
18317
  exports.parseParameters = parseParameters;
16033
18318
  exports.parsePipeline = parsePipeline;
18319
+ exports.pipelineCollectionToJson = pipelineCollectionToJson;
16034
18320
  exports.pipelineJsonToString = pipelineJsonToString;
16035
18321
  exports.prepareKnowledgePieces = prepareKnowledgePieces;
16036
18322
  exports.preparePersona = preparePersona;