@promptbook/core 0.103.0-4 → 0.103.0-40
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +262 -203
- package/esm/index.es.js +2459 -183
- package/esm/index.es.js.map +1 -1
- package/esm/typings/books/index.d.ts +0 -81
- package/esm/typings/src/_packages/browser.index.d.ts +6 -0
- package/esm/typings/src/_packages/cli.index.d.ts +4 -0
- package/esm/typings/src/_packages/components.index.d.ts +12 -8
- package/esm/typings/src/_packages/core.index.d.ts +30 -10
- package/esm/typings/src/_packages/node.index.d.ts +4 -2
- package/esm/typings/src/_packages/types.index.d.ts +18 -2
- package/esm/typings/src/_packages/wizard.index.d.ts +4 -0
- package/esm/typings/src/book-2.0/agent-source/AgentBasicInformation.d.ts +1 -0
- package/esm/typings/src/book-2.0/agent-source/padBook.d.ts +16 -0
- package/esm/typings/src/book-components/BookEditor/BookEditor.d.ts +76 -15
- package/esm/typings/src/book-components/BookEditor/BookEditorActionbar.d.ts +14 -0
- package/esm/typings/src/book-components/BookEditor/BookEditorMonaco.d.ts +5 -0
- package/esm/typings/src/book-components/Chat/MarkdownContent/MarkdownContent.d.ts +15 -0
- package/esm/typings/src/book-components/Chat/MockedChat/MockedChat.d.ts +5 -0
- package/esm/typings/src/book-components/Chat/save/html/htmlSaveFormatDefinition.d.ts +1 -0
- package/esm/typings/src/book-components/Chat/save/pdf/pdfSaveFormatDefinition.d.ts +4 -0
- package/esm/typings/src/book-components/Qr/BrandedQrCode.d.ts +18 -0
- package/esm/typings/src/book-components/Qr/GenericQrCode.d.ts +10 -0
- package/esm/typings/src/book-components/Qr/PromptbookQrCode.d.ts +18 -0
- package/esm/typings/src/book-components/Qr/useQrCode.d.ts +15 -0
- package/esm/typings/src/book-components/_common/Dropdown/Dropdown.d.ts +15 -0
- package/esm/typings/src/book-components/_common/Modal/Modal.d.ts +2 -2
- package/esm/typings/src/book-components/_common/Tooltip/Tooltip.d.ts +47 -0
- package/esm/typings/src/book-components/_common/react-utils/classNames.d.ts +1 -1
- package/esm/typings/src/book-components/icons/AboutIcon.d.ts +9 -0
- package/esm/typings/src/book-components/icons/CloseIcon.d.ts +4 -8
- package/esm/typings/src/book-components/icons/DownloadIcon.d.ts +9 -0
- package/esm/typings/src/book-components/icons/ExitFullscreenIcon.d.ts +7 -0
- package/esm/typings/src/book-components/icons/FullscreenIcon.d.ts +7 -0
- package/esm/typings/src/book-components/icons/MenuIcon.d.ts +12 -0
- package/esm/typings/src/cli/cli-commands/_boilerplate.d.ts +2 -1
- package/esm/typings/src/cli/cli-commands/about.d.ts +3 -1
- package/esm/typings/src/cli/cli-commands/hello.d.ts +2 -1
- package/esm/typings/src/cli/cli-commands/list-models.d.ts +2 -1
- package/esm/typings/src/cli/cli-commands/list-scrapers.d.ts +2 -1
- package/esm/typings/src/cli/cli-commands/login.d.ts +2 -1
- package/esm/typings/src/cli/cli-commands/make.d.ts +2 -1
- package/esm/typings/src/cli/cli-commands/prettify.d.ts +2 -1
- package/esm/typings/src/cli/cli-commands/run.d.ts +2 -1
- package/esm/typings/src/cli/cli-commands/{start-server.d.ts → start-agents-server.d.ts} +3 -2
- package/esm/typings/src/cli/cli-commands/start-pipelines-server.d.ts +15 -0
- package/esm/typings/src/cli/cli-commands/test-command.d.ts +2 -1
- package/esm/typings/src/cli/common/$addGlobalOptionsToCommand.d.ts +2 -1
- package/esm/typings/src/collection/agent-collection/AgentCollection.d.ts +36 -0
- package/esm/typings/src/collection/agent-collection/constructors/AgentCollectionInDirectory.d.ts +88 -0
- package/esm/typings/src/collection/{PipelineCollection.d.ts → pipeline-collection/PipelineCollection.d.ts} +7 -3
- package/esm/typings/src/collection/{SimplePipelineCollection.d.ts → pipeline-collection/SimplePipelineCollection.d.ts} +5 -5
- package/esm/typings/src/collection/{constructors/createCollectionFromDirectory.d.ts → pipeline-collection/constructors/createPipelineCollectionFromDirectory.d.ts} +8 -11
- package/esm/typings/src/collection/pipeline-collection/constructors/createPipelineCollectionFromJson.d.ts +13 -0
- package/esm/typings/src/collection/{constructors/createCollectionFromPromise.d.ts → pipeline-collection/constructors/createPipelineCollectionFromPromise.d.ts} +6 -5
- package/esm/typings/src/collection/{constructors/createCollectionFromUrl.d.ts → pipeline-collection/constructors/createPipelineCollectionFromUrl.d.ts} +3 -3
- package/esm/typings/src/collection/{constructors/createSubcollection.d.ts → pipeline-collection/constructors/createPipelineSubcollection.d.ts} +3 -3
- package/esm/typings/src/collection/pipeline-collection/pipelineCollectionToJson.d.ts +13 -0
- package/esm/typings/src/commands/_common/types/CommandParser.d.ts +4 -5
- package/esm/typings/src/config.d.ts +22 -2
- package/esm/typings/src/errors/0-index.d.ts +3 -0
- package/esm/typings/src/errors/NotAllowed.d.ts +9 -0
- package/esm/typings/src/execution/AvailableModel.d.ts +1 -0
- package/esm/typings/src/execution/Executables.d.ts +3 -0
- package/esm/typings/src/execution/ExecutionTask.d.ts +12 -3
- package/esm/typings/src/execution/ExecutionTools.d.ts +5 -0
- package/esm/typings/src/execution/FilesystemTools.d.ts +1 -1
- package/esm/typings/src/execution/LlmExecutionTools.d.ts +7 -1
- package/esm/typings/src/execution/createPipelineExecutor/10-executePipeline.d.ts +5 -0
- package/esm/typings/src/execution/createPipelineExecutor/20-executeTask.d.ts +5 -0
- package/esm/typings/src/execution/createPipelineExecutor/30-executeFormatSubvalues.d.ts +5 -0
- package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +5 -0
- package/esm/typings/src/execution/utils/logLlmCall.d.ts +8 -0
- package/esm/typings/src/execution/utils/usage-constants.d.ts +4 -124
- package/esm/typings/src/high-level-abstractions/_common/HighLevelAbstraction.d.ts +2 -1
- package/esm/typings/src/llm-providers/_common/register/$registeredLlmToolsMessage.d.ts +2 -1
- package/esm/typings/src/llm-providers/agent/Agent.d.ts +49 -0
- package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.d.ts +9 -4
- package/esm/typings/src/llm-providers/agent/AgentOptions.d.ts +17 -0
- package/esm/typings/src/llm-providers/agent/CreateAgentLlmExecutionToolsOptions.d.ts +16 -0
- package/esm/typings/src/llm-providers/agent/createAgentLlmExecutionTools.d.ts +1 -19
- package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionTools.d.ts +28 -0
- package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionToolsOptions.d.ts +7 -1
- package/esm/typings/src/other/templates/getTemplatesPipelineCollection.d.ts +1 -1
- package/esm/typings/src/playground/permanent/_boilerplate.d.ts +5 -0
- package/esm/typings/src/playground/permanent/agent-with-browser-playground.d.ts +5 -0
- package/esm/typings/src/playground/playground.d.ts +0 -3
- package/esm/typings/src/playground/playground1.d.ts +2 -0
- package/esm/typings/src/remote-server/startRemoteServer.d.ts +4 -1
- package/esm/typings/src/remote-server/types/RemoteServerOptions.d.ts +22 -8
- package/esm/typings/src/scrapers/_boilerplate/createBoilerplateScraper.d.ts +1 -12
- package/esm/typings/src/scrapers/_boilerplate/register-metadata.d.ts +1 -9
- package/esm/typings/src/scrapers/document/createDocumentScraper.d.ts +1 -12
- package/esm/typings/src/scrapers/document/register-metadata.d.ts +1 -9
- package/esm/typings/src/scrapers/document-legacy/createLegacyDocumentScraper.d.ts +1 -12
- package/esm/typings/src/scrapers/document-legacy/register-metadata.d.ts +1 -9
- package/esm/typings/src/scrapers/markdown/createMarkdownScraper.d.ts +1 -12
- package/esm/typings/src/scrapers/markdown/register-metadata.d.ts +1 -9
- package/esm/typings/src/scrapers/markitdown/createMarkitdownScraper.d.ts +1 -12
- package/esm/typings/src/scrapers/markitdown/register-metadata.d.ts +1 -9
- package/esm/typings/src/scrapers/pdf/createPdfScraper.d.ts +1 -12
- package/esm/typings/src/scrapers/pdf/register-metadata.d.ts +1 -9
- package/esm/typings/src/scrapers/website/createWebsiteScraper.d.ts +1 -12
- package/esm/typings/src/scrapers/website/register-metadata.d.ts +1 -9
- package/esm/typings/src/storage/env-storage/$EnvStorage.d.ts +2 -1
- package/esm/typings/src/transpilers/_common/BookTranspiler.d.ts +29 -0
- package/esm/typings/src/transpilers/_common/BookTranspilerOptions.d.ts +18 -0
- package/esm/typings/src/transpilers/_common/register/$bookTranspilersRegister.d.ts +15 -0
- package/esm/typings/src/transpilers/formatted-book-in-markdown/FormattedBookInMarkdownTranspiler.d.ts +13 -0
- package/esm/typings/src/transpilers/formatted-book-in-markdown/register.d.ts +15 -0
- package/esm/typings/src/transpilers/openai-sdk/OpenAiSdkTranspiler.d.ts +13 -0
- package/esm/typings/src/transpilers/openai-sdk/OpenAiSdkTranspiler.test.d.ts +1 -0
- package/esm/typings/src/transpilers/openai-sdk/playground/playground.d.ts +5 -0
- package/esm/typings/src/transpilers/openai-sdk/register.d.ts +15 -0
- package/esm/typings/src/types/LlmCall.d.ts +20 -0
- package/esm/typings/src/types/Updatable.d.ts +19 -0
- package/esm/typings/src/types/typeAliases.d.ts +1 -1
- package/esm/typings/src/utils/execCommand/$execCommand.d.ts +2 -1
- package/esm/typings/src/utils/execCommand/$execCommands.d.ts +2 -1
- package/esm/typings/src/utils/files/$induceBookDownload.d.ts +13 -0
- package/esm/typings/src/utils/files/$induceFileDownload.d.ts +13 -0
- package/esm/typings/src/utils/files/ObjectUrl.d.ts +46 -0
- package/esm/typings/src/utils/files/listAllFiles.d.ts +2 -3
- package/esm/typings/src/utils/misc/aboutPromptbookInformation.d.ts +21 -0
- package/esm/typings/src/utils/misc/injectCssModuleIntoShadowRoot.d.ts +1 -0
- package/esm/typings/src/utils/misc/xAboutPromptbookInformation.d.ts +13 -0
- package/esm/typings/src/utils/organization/$side_effect.d.ts +7 -0
- package/esm/typings/src/utils/serialization/$deepFreeze.d.ts +2 -1
- package/esm/typings/src/version.d.ts +1 -1
- package/esm/typings/src/wizard/$getCompiledBook.d.ts +1 -2
- package/package.json +8 -5
- package/umd/index.umd.js +2474 -188
- package/umd/index.umd.js.map +1 -1
- package/esm/typings/src/book-components/BookEditor/BookEditorInner.d.ts +0 -5
- package/esm/typings/src/book-components/BookEditor/BookEditorWrapper.d.ts +0 -9
- package/esm/typings/src/book-components/BookEditor/config.d.ts +0 -10
- package/esm/typings/src/book-components/Chat/utils/renderMarkdown.d.ts +0 -21
- package/esm/typings/src/collection/collectionToJson.d.ts +0 -13
- package/esm/typings/src/collection/constructors/createCollectionFromJson.d.ts +0 -13
- /package/esm/typings/src/{book-components/Chat/utils/renderMarkdown.test.d.ts → collection/agent-collection/constructors/AgentCollectionInDirectory.test.d.ts} +0 -0
- /package/esm/typings/src/collection/{constructors/createCollectionFromDirectory.test.d.ts → pipeline-collection/constructors/createPipelineCollectionFromDirectory.test.d.ts} +0 -0
- /package/esm/typings/src/collection/{constructors/createCollectionFromJson.test.d.ts → pipeline-collection/constructors/createPipelineCollectionFromJson.test.d.ts} +0 -0
- /package/esm/typings/src/collection/{constructors/createCollectionFromPromise.test.d.ts → pipeline-collection/constructors/createPipelineCollectionFromPromise.test.d.ts} +0 -0
- /package/esm/typings/src/collection/{collectionToJson.test.d.ts → pipeline-collection/pipelineCollectionToJson.test.d.ts} +0 -0
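The collection layer was reorganized in this release: the generic `createCollection*` constructors moved under `src/collection/pipeline-collection/` and gained pipeline-scoped names, making room for the new `AgentCollection` API added alongside them. A minimal migration sketch based on the renames listed above; the import path, and the assumption that the signatures are otherwise unchanged, are not confirmed by this diff:

```ts
// Identifier renames taken from the file list above; the entry point
// ('@promptbook/core' here) is an assumption, as these constructors may be
// exported from a different subpackage such as '@promptbook/node'.
import {
    createPipelineCollectionFromDirectory, // was: createCollectionFromDirectory
    createPipelineCollectionFromJson, // was: createCollectionFromJson
    createPipelineCollectionFromPromise, // was: createCollectionFromPromise
    createPipelineCollectionFromUrl, // was: createCollectionFromUrl
    createPipelineSubcollection, // was: createSubcollection
    pipelineCollectionToJson, // was: collectionToJson
} from '@promptbook/core';
```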
package/esm/index.es.js
CHANGED
@@ -1,7 +1,7 @@
 import spaceTrim, { spaceTrim as spaceTrim$1 } from 'spacetrim';
 import { randomBytes } from 'crypto';
-import { Subject } from 'rxjs';
-import { forTime } from 'waitasecond';
+import { Subject, BehaviorSubject } from 'rxjs';
+import { forTime, forEver } from 'waitasecond';
 import hexEncoder from 'crypto-js/enc-hex';
 import sha256 from 'crypto-js/sha256';
 import { basename, join, dirname, isAbsolute } from 'path';
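The first hunk widens two imports: `Subject` becomes `Subject, BehaviorSubject` from rxjs, and `forTime` becomes `forTime, forEver` from waitasecond. The call sites are not shown in this diff, but the practical difference on the rxjs side is that a `BehaviorSubject` holds a current value and replays it to late subscribers, whereas a plain `Subject` only emits to listeners that are already attached. A short sketch of that behavior (variable names are illustrative, not from the package):

```ts
import { Subject, BehaviorSubject } from 'rxjs';

// A plain Subject only delivers values emitted after subscription:
const events = new Subject<string>();
events.next('missed'); // no subscriber yet, so this value is dropped
events.subscribe((value) => console.log('Subject:', value));
events.next('seen'); // logs "Subject: seen"

// A BehaviorSubject keeps the latest value and replays it on subscribe,
// which suits "current state" streams rather than one-off events:
const state = new BehaviorSubject<string>('initial');
state.next('updated');
state.subscribe((value) => console.log('BehaviorSubject:', value)); // logs "BehaviorSubject: updated"
console.log(state.value); // the current value is also readable synchronously: "updated"
```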
@@ -10,6 +10,8 @@ import { lookup, extension } from 'mime-types';
 import { parse, unparse } from 'papaparse';
 import moment from 'moment';
 import colors from 'colors';
+import Bottleneck from 'bottleneck';
+import OpenAI from 'openai';
 
 // ⚠️ WARNING: This code has been generated so that any manual changes will be overwritten
 /**
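The second hunk adds two new runtime dependencies: `bottleneck` (a task scheduler and rate limiter) and a direct import of the `openai` SDK. How `@promptbook/core` wires them together is not visible here, but the usual pattern this pairing enables is throttling chat-completion requests so that bursts of prompts do not trip provider rate limits. A sketch under that assumption (limiter settings and model name are illustrative):

```ts
import Bottleneck from 'bottleneck';
import OpenAI from 'openai';

// Illustrative values, not taken from @promptbook/core:
const limiter = new Bottleneck({ maxConcurrent: 2, minTime: 500 }); // at most 2 in flight, >=500 ms apart
const client = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });

// limiter.schedule() queues the request and resolves with its result:
async function complete(prompt: string): Promise<string | null> {
    const response = await limiter.schedule(() =>
        client.chat.completions.create({
            model: 'gpt-4o',
            messages: [{ role: 'user', content: prompt }],
        }),
    );
    return response.choices[0].message.content;
}
```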
@@ -25,13 +27,13 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.103.0-4';
+const PROMPTBOOK_ENGINE_VERSION = '0.103.0-40';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
  */
 
-
var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModels",description:"List of available model names together with their descriptions as JSON",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelsRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n```json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpful assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n```\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n```json\n{availableModels}\n```\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. 
By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelsRequirements",format:"JSON",dependentParameterNames:["availableModels","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModels}` List of available model names together with their descriptions as JSON\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelsRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n\\`\\`\\`json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpful assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n\\`\\`\\`json\n{availableModels}\n\\`\\`\\`\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelsRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"},{title:"📊 Curriculum Audit",pipelineUrl:"https://promptbook.studio/promptbook//examples/lsvp-asistent.book",formfactorName:"GENERIC",parameters:[{name:"result",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"prompt",title:"Prompt",content:"Asistent pro LŠVP\n\nPERSONA Jsi asistent pro RVP Lyceum v rámci Národního pedagogického institutu České Republiky\nMETA IMAGE https://edulk.cz/getFile/id:475818/type:large/02%20zna%C4%8Dka%20npi.jpg\nRULE Pokud jsi nejsi jistý, napiš nevím\nKNOWLEDGE ./241129_Lyceum_final.pdf\nCONTEXT Obecně dokážeš řešit libovolné ŠVP, aktuálně řešíš {Školní vzdělávací program LYCEUM}\nRULE Z {Porovnání RVP a ŠVP - postup} je nejdůležitější fáze 3\nKNOWLEDGE {Školní vzdělávací program LYCEUM} ./ŠVP Lyceum - Finance v digitální době.pdf\nKNOWLEDGE @Slovník\n\n**Interní slovník - RVP/ŠVP**\n\n**RVP**\n\nRámcový vzdělávací program pro obor vzdělání Lyceum je dokument na národní úrovni, který formuluje požadavky na 
školní vzdělávací programy ve formě především očekávaných výsledků učení, kterých mají žáci absolvováním tohoto programu na dané škole dosáhnout.\n\n**ŠVP**\n\nŠkolní vzdělávací program pro obor vzdělání Lyceum je dokument každé jednotlivé školy, který popisuje v jakých vyučovacích předmětech/ vzdělávacích modulech a v jakých ročnících budou požadované očekávané výsledky učení naplněny. Zároveň formuluje další očekávané výsledky učení, které naplňují disponibilní část vyučovacího času určeného RVP pro tento obor vzdělání.\n\n**Očekávaný výsledek učení (OVU)**\n\nVyjadřuje jednotlivý požadavek na to, co mají žáci umět na konci vzdělávacího programu, tzn. jejich požadované kompetence. Je vyjádřen formulací, která je uvozena činnostním slovesem a dále obsahuje předmět této činnosti. Formulace je konkretizována resp. doplněna zpravidla formou odrážek vymezením dílčích znalostí, dovedností, postojů, jejichž splnění je předpokladem dosažení OVU jako celku.\n\n_Příklad:_\n\n<div class=\"joplin-table-wrapper\"><table><tbody><tr><th><p><strong>Žák/žákyně řídí realizaci jednoduchého projektu</strong></p></th></tr><tr><td><ul><li>naplánuje aktivity projektu</li></ul></td></tr><tr><td><ul><li>navrhne rozpočet projektu vzhledem k navrženým aktivitám</li></ul></td></tr><tr><td><ul><li>stanoví základní ukazatele a sleduje jejich naplňování</li></ul></td></tr><tr><td><ul><li>vede projektový tým</li></ul></td></tr><tr><td><ul><li>uvede, jak by řešil krizové situace v projektu</li></ul></td></tr><tr><td><ul><li>vyhodnotí úspěšnost projektu</li></ul></td></tr></tbody></table></div>\n\n**Vzdělávací oblasti**\n\nOčekávané výsledky učení jsou v **_RVP členěny do 4 vzdělávacích oblastí_**, které tvoří společný všeobecně vzdělávací základ:\n\n- Osobnostní rozvoj, vzdělávání ke zdraví, bezpečí a produktivnímu pracovnímu životu (kariéře)\n- Komunikační a jazykové vzdělávání\n- Aplikované vzdělávání STEM (Science, Technology, Engeneering, Math), tj. 
přírodní vědy, informatika, technika, matematika\n- Prakticky orientované vzdělávání společenskovědní a humanitní\n\nKaždá vzdělávací oblast se dále člení na okruhy, v jejichž rámci jsou OVU samostatně číslované.\n\n<div class=\"joplin-table-wrapper\"><table><tbody><tr><th rowspan=\"21\"><ul><li>Prakticky orientované vzdělávání společenskovědní a humanitní</li></ul></th><th rowspan=\"21\"><p><strong>Člověk, ekonomie a podnikání</strong></p></th><th rowspan=\"7\"><p><strong>1</strong></p></th><th><p><strong>zpracuje podklady související s podnikáním</strong></p></th></tr><tr><td><p>připraví podnikatelský záměr</p></td></tr><tr><td><p>sestaví zakladatelský rozpočet</p></td></tr><tr><td><p>zkalkuluje cenu zboží nebo služby</p></td></tr><tr><td><p>vysvětlí na příkladu základní povinnosti podnikatele vůči státu a zaměstnancům</p></td></tr><tr><td><p>vede daňovou evidenci</p></td></tr><tr><td><p>vysvětlí na příkladech etiku v podnikání</p></td></tr><tr><td rowspan=\"7\"><p><strong>2</strong></p></td><td><p><strong>řídí realizaci jednoduchého projektu</strong></p></td></tr><tr><td><p>naplánuje aktivity projektu</p></td></tr><tr><td><p>navrhne rozpočet projektu vzhledem k navrženým aktivitám</p></td></tr><tr><td><p>stanoví základní ukazatele a sleduje jejich naplňování</p></td></tr><tr><td><p>vede projektový tým</p></td></tr><tr><td><p>uvede, jak by řešil krizové situace v projektu</p></td></tr><tr><td><p>vyhodnotí úspěšnost projektu</p></td></tr><tr><td rowspan=\"7\"><p><strong>3</strong></p></td><td><p><strong>aplikuje ekonomické teorie v osobním a profesním životě</strong></p></td></tr><tr><td><p>vysvětlí základní ekonomické otázky</p></td></tr><tr><td><p>vysvětí stanovení rovnovážné ceny na dokonalém i nedokonalém trhu</p></td></tr><tr><td><p>charakterizuje výrobní faktory a vysvětlí hranici produkčních možností a náklady obětované příležitosti</p></td></tr><tr><td><p>uvede nejdůležitější makroekonomické pojmy a vliv jejich výše na kvalitu života a podnikání v daném státě</p></td></tr><tr><td><p>vysvětlí podstatu inflace a její důsledky na finanční situaci obyvatel a na příkladu ukáže jak se bránit jejím nepříznivým důsledkům</p></td></tr><tr><td><p>uvede hlavní výhody a nevýhody mezinárodního obchodu a vliv ochranářských opatření na ekonomickou situaci dané země</p></td></tr><tr><td></td><td></td><td><p><strong>4</strong></p></td><td><p>Atd.</p></td></tr></tbody></table></div>\n\n**Vyučovací předmět / vzdělávací modul**\n\nOčekávané výsledky učení jsou v **ŠVP** členěny do vyučovacích předmětů nebo vzdělávacích modulů, které jsou dále zařazeny do jednoho nebo více ročníků 4letého studia. Vyučovací předmět / vzdělávací modul tvoří vyučovací jednotku, kde jsou očekávané výsledky učení dále rozpracovány pro potřeby výuky podle následující šablony\n\n| **A. VSTUPNÍ ČÁST** |\n| --- |\n| **1\\. Název** |\n| **2\\. Kód** (kódy by měly být navázány na obory vzdělání a výsledky učení) |\n| **2a) Kategorie vzdělání** - v případě, že nebude součástí kódu |\n| **3\\. Typ vyučovací jednotky** (modul, předmět, stáž apod.) |\n| **4\\. Délka** (počet hodin - dělitelný čtyřmi (optimální modul 16, 32 hodin = týden výuky) |\n| **5\\. Platnost** (datum, od kterého platí) |\n| **6\\. Vstupní předpoklady** (vymezení požadované úrovně vstupních vědomostí a dovedností, které jsou předpokladem úspěšného studia) |\n| |\n| **B. JÁDRO VYUČOVACÍ JEDNOTKY** |\n| **1\\. Charakteristika** (stručná anotace popisující obecné cíle a pojetí) |\n| **2\\. 
Očekávané výsledky učení a jejich indikátory (převzaté z RVP nebo dále konkretizované)** |\n| **3\\. Podpora rozvoje klíčových kompetencí a základních gramotností** (které klíčové kompetence jsou v rozvíjeny) |\n| **4\\. Obsah vzdělávání** (rozpis učiva) |\n| **5\\. Vzdělávací strategie** (strategie výuky, resp. učební činnosti žáků, které jsou doporučené pro dosažení výsledků) |\n| |\n| **C. VÝSTUPNÍ ČÁST** |\n| **1\\. Způsob ověřování dosažených výsledků** (ve vazbě na jednotlivé výsledky učení) |\n| **2\\. Kritéria hodnocení** (co znamená splnění výsledků učení, kdy je splněna celá vyučovací jednotka, kritéria pro známky, příp. procentuální, slovní hodnocení) |\n| **3\\. Doporučená studijní literatura, odkazy na ilustrační zdroje** |\n| **4\\. Poznámky** |\n\n**Soulad OVU RVP a ŠVP**\n\nTento soulad je předmětem zjišťování. Soulad nastává, jestliže jsou očekávané výsledky učení z jednotlivých vzdělávacích oblastí RVP **obsaženy** ve vyučovacích předmětech/ vzdělávacích modulech ŠVP jednotlivých škol, tzn. že v ŠVP se objevuje jejich formulace buď v doslovném nebo podobném znění v jednom nebo více vyučovacích předmětech/ vzdělávacích modulech.\n\n_Příklad souladu:_\n\nRVP ŠVP - komunikace a marketing (SŠ obchodní Č.\n\n| **2** | **řídí realizaci jednoduchého projektu** |\n| --- | --- |\n| naplánuje aktivity projektu |\n| navrhne rozpočet projektu vzhledem k navrženým aktivitám |\n| stanoví základní ukazatele a sleduje jejich naplňování |\n| vede projektový tým |\n| uvede, jak by řešil krizové situace v projektu |\n| vyhodnotí úspěšnost projektu |\n\nKNOWLEDGE {Porovnání RVP a ŠVP - postup}\n\n\n# AUDITNÍ PROTOKOL ŠVP-RVP\n\n# (POPIS KONTROLNÍHO ALGORITMU)\n\nMetodika je určena pro **Kvantifikaci Shody** školního vzdělávacího programu (ŠVP) s Rámcovým vzdělávacím programem (RVP).\n\n## FÁZE 1: VALIDACE DOKUMENTACE\n\n**Cíl:** Ověřit platnost, aktuálnost a strukturu zdrojových dokumentů.\n\n- **RVP Verifikace:** Otevřít aktuální verzi RVP (např. RVP ZV/G/SOŠ).\n- **Typová shoda:** Ověřit, že RVP se vztahuje k danému typu školy.\n- **ŠVP Dimenze:** Identifikovat a izolovat relevantní části ŠVP: Profil absolventa, Klíčové kompetence (KK), Vzdělávací oblasti (VO), případně Učební plán (UP).\n- **Verzování:** Potvrdit, že obě verze (RVP a ŠVP) jsou nejnovější a platné (včetně dodatků RVP).\n\n## FÁZE 2: DATABÁZOVÉ MAPOVÁNÍ VÝSTUPŮ (MASTER MATICE)\n\n**Cíl:** Vytvořit systémovou databázi pro křížové porovnání všech povinných komponent RVP se ŠVP.\n\n- **Dekompozice RVP:** Rozložit RVP na základní povinné komponenty: Klíčové kompetence, Vzdělávací oblasti a obory, Očekávané výstupy (OVU), Průřezová témata (PT).\n- **Přiřazovací mapa:** Vytvořit hlavní kontrolní matici (Master Matice) pro záznam vazeb.\n\n| Oblast RVP | Výstup RVP (OVU) | Odpovídající Část ŠVP (Předmět/Ročník) | Konkrétní Tématický Celek v ŠVP | Stav Shody (Protokol) |\n| --- | --- | --- | --- | --- |\n| ... | ... | ... | ... | ... 
|\n| --- | --- | --- | --- | --- |\n\n## FÁZE 3: ALGORITMICKÁ KONTROLA POKRYTÍ A HLOUBKY\n\n**Cíl:** Posoudit, zda každý povinný výstup RVP je adekvátně reflektován v obsahu ŠVP, a přidělit bodovou hodnotu pro kvantifikaci.\n\n- **Audit OVU:** Projít každý jednotlivý Očekávaný výstup (OVU) z RVP.\n- **Kódování stavu a bodování:** U každého OVU v matici označit stav pokrytí dle následujícího schématu:\n\n| Kód (Protokol) | Popis (Kvalitativní zjištění) | Bodová hodnota (Kvantifikace) |\n| --- | --- | --- |\n| ✅ | Plná shoda (Výstup pokryt v plném rozsahu, odpovídající úrovni RVP) | 1,0 |\n| --- | --- | --- |\n| ⚠️ | Částečná shoda (Formální pokrytí, omezený rozsah, chybná návaznost) | 0,5 |\n| --- | --- | --- |\n| ❌ | Absence (Výstup zcela chybí v obsahu ŠVP) | 0,0 |\n| --- | --- | --- |\n\n- **Defektologie ŠVP:** Identifikovat a zaznamenat deficity ŠVP: Chybějící výstupy (❌), Sémantické překryvy, Přetížení obsahu.\n- **Kvalitativní posun:** Ověřit, zda je formulace výstupů v ŠVP **aktivní, měřitelná a v souladu** s úrovní RVP.\n\n## FÁZE 4: STRUKTURÁLNÍ VERIFIKACE NÁVAZNOSTI (VERTIKÁLA/HORIZONTÁLA)\n\n**Cíl:** Zkontrolovat logickou posloupnost a provázanost učiva v rámci ŠVP.\n\n- **Vertikální Kontrola:** Ověřit posloupnost OVU a učiva uvnitř jednoho předmětu/oblasti (postup od jednodušších ke složitějším konceptům napříč ročníky).\n- **Horizontální Kontrola:** Zkontrolovat logické provázání napříč vzdělávacími oblastmi a předměty (např. fyzika ↔ matematika).\n- **PT Integrace:** Audit reálné integrace Průřezových témat (PT) do konkrétních částí obsahu, metod a projektů.\n\n## FÁZE 5: ANALÝZA ŠKOLNÍ PROFILACE A ROZŠÍŘENÍ RVP\n\n**Cíl:** Validovat, že profilace školy je **v souladu** s RVP a nejedná se o **rozpor**.\n\n- **Nekonfliktnost:** Porovnat definovaný Profil absolventa školy s Klíčovými kompetencemi RVP. Profil ŠVP musí RVP rozvíjet, nikoli mu odporovat.\n- **Modularita:** Zkontrolovat, zda volitelné předměty a rozšiřující moduly logicky navazují na vzdělávací oblasti RVP.\n- **Implementace specializace:** Popisně uvést, jak je školní profilace (např. STEM zaměření, projektová výuka) integrována do OVU a kompetencí definovaných RVP.\n\n## FÁZE 6: GENERÁTOR ZÁVĚREČNÉ ZPRÁVY A KVANTIFIKACE\n\n**Cíl:** Syntetizovat výsledky, kvantifikovat soulad a generovat závazné návrhy na korekce.\n\n### 6.1 Kvantifikace Souladu\n\nVypočítat Index shody (IS) na základě bodového hodnocení (Fáze 3):\n\n### 6.2 Interpretace Indexu Shody (IS)\n\nKlasifikace souladu pro standardizované vyhodnocení:\n\n| Interval IS | Klasifikace souladu | Popis |\n| --- | --- | --- |\n| 95-100 % | Výborný soulad | ŠVP plně odpovídá RVP, pouze stylistické nebo formální rozdíly. |\n| --- | --- | --- |\n| 85-94 % | Dobrá shoda | ŠVP pokrývá všechny klíčové výstupy, menší korekce nutné. |\n| --- | --- | --- |\n| 70-84 % | Částečná shoda | Významné nedostatky v některých oblastech, nutná revize obsahu. |\n| --- | --- | --- |\n| < 70 % | Kritická neshoda | ŠVP neplní rámcové požadavky, ohrožuje legislativní soulad. 
|\n| --- | --- | --- |\n\n### 6.3 Doplňkové Indexy\n\nVypočítat následující doplňkové indexy pro detailní kvalitativní analýzu:\n\n- **Index kompetenčního souladu (IKS):** Poměr pokrytí klíčových kompetencí RVP v ŠVP.\n- **Index průřezové integrace (IPI):** Míra reálné integrace průřezových témat do výuky.\n- **Index hloubky pokrytí (IHP):** Procento výstupů, které jsou v ŠVP rozvedeny na konkrétní výukové cíle (měřitelné, aktivní formulace).\n- **Index profilové rozšiřitelnosti (IPR):** Kolik rozšiřujících nebo profilových výstupů přesahuje rámec RVP, aniž by narušily jeho strukturu.\n\n### 6.4 Vizuální výstupy\n\nZajistit generování následujících vizualizací pro Závěrečnou zprávu:\n\n- Graf pokrytí po vzdělávacích oblastech (Sloupcový graf IS pro VO).\n- Pavoukový diagram Klíčových kompetencí (RVP vs. ŠVP).\n- Mapa defektů (Vizualizace ❌ a ⚠️ výstupů).\n\n### 6.5 Struktura Závěrečné Zprávy\n\nZpráva musí být strukturována dle standardizovaného formátu:\n\n| Oddíl | Obsah |\n| --- | --- |\n| A. Identifikace | Název školy, IZO, typ školy, datum revize, zpracovatel, verze ŠVP a RVP. |\n| --- | --- |\n| B. Shrnutí výsledků | Celkový Index Shody (IS), hlavní závěry a doporučení. |\n| --- | --- |\n| C. Kvantitativní analýza | Přehled IS v % dle kategorií OVU / VO / kompetencí. |\n| --- | --- |\n| D. Kvalitativní analýza | Slovní zhodnocení kvality souladu (formulace, obtížnost, integrace PT). |\n| --- | --- |\n| E. Rizikové oblasti | Přehled nalezených defektů (chybějící OVU, přetížení, formální shoda). |\n| --- | --- |\n| F. Návrhy opatření (Korekční plán) | Přesné návrhy změn - **Co, Kde, Kdo** má upravit, včetně termínu. |\n| --- | --- |\n| G. Přílohy | Master Matice (Fáze 2-3), revizní tabulka, výstupní grafy a metriky. |\n| --- | --- |\n\n\n\n\n.",resultingParameterName:"result",dependentParameterNames:[]}],personas:[],preparations:[{id:1,promptbookVersion:"0.103.0-3",usage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"Asistent pro LŠVP\n\nPERSONA Jsi asistent pro RVP Lyceum v rámci Národního pedagogického institutu České Republiky\nMETA IMAGE https://edulk.cz/getFile/id:475818/type:large/02%20zna%C4%8Dka%20npi.jpg\nRULE Pokud jsi nejsi jistý, napiš nevím\nKNOWLEDGE ./241129_Lyceum_final.pdf\nCONTEXT Obecně dokážeš řešit libovolné ŠVP, aktuálně řešíš {Školní vzdělávací program LYCEUM}\nRULE Z {Porovnání RVP a ŠVP - postup} je nejdůležitější fáze 3\nKNOWLEDGE {Školní vzdělávací program LYCEUM} ./ŠVP Lyceum - Finance v digitální době.pdf\nKNOWLEDGE @Slovník\n\n**Interní slovník - RVP/ŠVP**\n\n**RVP**\n\nRámcový vzdělávací program pro obor vzdělání Lyceum je dokument na národní úrovni, který formuluje požadavky na školní vzdělávací programy ve formě především očekávaných výsledků učení, kterých mají žáci absolvováním tohoto programu na dané škole dosáhnout.\n\n**ŠVP**\n\nŠkolní vzdělávací program pro obor vzdělání Lyceum je dokument každé jednotlivé školy, který popisuje v jakých vyučovacích předmětech/ vzdělávacích modulech a v jakých ročnících budou požadované očekávané výsledky učení naplněny. 
Zároveň formuluje další očekávané výsledky učení, které naplňují disponibilní část vyučovacího času určeného RVP pro tento obor vzdělání.\n\n**Očekávaný výsledek učení (OVU)**\n\nVyjadřuje jednotlivý požadavek na to, co mají žáci umět na konci vzdělávacího programu, tzn. jejich požadované kompetence. Je vyjádřen formulací, která je uvozena činnostním slovesem a dále obsahuje předmět této činnosti. Formulace je konkretizována resp. doplněna zpravidla formou odrážek vymezením dílčích znalostí, dovedností, postojů, jejichž splnění je předpokladem dosažení OVU jako celku.\n\n_Příklad:_\n\n<div class=\"joplin-table-wrapper\"><table><tbody><tr><th><p><strong>Žák/žákyně řídí realizaci jednoduchého projektu</strong></p></th></tr><tr><td><ul><li>naplánuje aktivity projektu</li></ul></td></tr><tr><td><ul><li>navrhne rozpočet projektu vzhledem k navrženým aktivitám</li></ul></td></tr><tr><td><ul><li>stanoví základní ukazatele a sleduje jejich naplňování</li></ul></td></tr><tr><td><ul><li>vede projektový tým</li></ul></td></tr><tr><td><ul><li>uvede, jak by řešil krizové situace v projektu</li></ul></td></tr><tr><td><ul><li>vyhodnotí úspěšnost projektu</li></ul></td></tr></tbody></table></div>\n\n**Vzdělávací oblasti**\n\nOčekávané výsledky učení jsou v **_RVP členěny do 4 vzdělávacích oblastí_**, které tvoří společný všeobecně vzdělávací základ:\n\n- Osobnostní rozvoj, vzdělávání ke zdraví, bezpečí a produktivnímu pracovnímu životu (kariéře)\n- Komunikační a jazykové vzdělávání\n- Aplikované vzdělávání STEM (Science, Technology, Engeneering, Math), tj. přírodní vědy, informatika, technika, matematika\n- Prakticky orientované vzdělávání společenskovědní a humanitní\n\nKaždá vzdělávací oblast se dále člení na okruhy, v jejichž rámci jsou OVU samostatně číslované.\n\n<div class=\"joplin-table-wrapper\"><table><tbody><tr><th rowspan=\"21\"><ul><li>Prakticky orientované vzdělávání společenskovědní a humanitní</li></ul></th><th rowspan=\"21\"><p><strong>Člověk, ekonomie a podnikání</strong></p></th><th rowspan=\"7\"><p><strong>1</strong></p></th><th><p><strong>zpracuje podklady související s podnikáním</strong></p></th></tr><tr><td><p>připraví podnikatelský záměr</p></td></tr><tr><td><p>sestaví zakladatelský rozpočet</p></td></tr><tr><td><p>zkalkuluje cenu zboží nebo služby</p></td></tr><tr><td><p>vysvětlí na příkladu základní povinnosti podnikatele vůči státu a zaměstnancům</p></td></tr><tr><td><p>vede daňovou evidenci</p></td></tr><tr><td><p>vysvětlí na příkladech etiku v podnikání</p></td></tr><tr><td rowspan=\"7\"><p><strong>2</strong></p></td><td><p><strong>řídí realizaci jednoduchého projektu</strong></p></td></tr><tr><td><p>naplánuje aktivity projektu</p></td></tr><tr><td><p>navrhne rozpočet projektu vzhledem k navrženým aktivitám</p></td></tr><tr><td><p>stanoví základní ukazatele a sleduje jejich naplňování</p></td></tr><tr><td><p>vede projektový tým</p></td></tr><tr><td><p>uvede, jak by řešil krizové situace v projektu</p></td></tr><tr><td><p>vyhodnotí úspěšnost projektu</p></td></tr><tr><td rowspan=\"7\"><p><strong>3</strong></p></td><td><p><strong>aplikuje ekonomické teorie v osobním a profesním životě</strong></p></td></tr><tr><td><p>vysvětlí základní ekonomické otázky</p></td></tr><tr><td><p>vysvětí stanovení rovnovážné ceny na dokonalém i nedokonalém trhu</p></td></tr><tr><td><p>charakterizuje výrobní faktory a vysvětlí hranici produkčních možností a náklady obětované příležitosti</p></td></tr><tr><td><p>uvede nejdůležitější makroekonomické pojmy a vliv jejich výše na kvalitu života a 
podnikání v daném státě</p></td></tr><tr><td><p>vysvětlí podstatu inflace a její důsledky na finanční situaci obyvatel a na příkladu ukáže jak se bránit jejím nepříznivým důsledkům</p></td></tr><tr><td><p>uvede hlavní výhody a nevýhody mezinárodního obchodu a vliv ochranářských opatření na ekonomickou situaci dané země</p></td></tr><tr><td></td><td></td><td><p><strong>4</strong></p></td><td><p>Atd.</p></td></tr></tbody></table></div>\n\n**Vyučovací předmět / vzdělávací modul**\n\nOčekávané výsledky učení jsou v **ŠVP** členěny do vyučovacích předmětů nebo vzdělávacích modulů, které jsou dále zařazeny do jednoho nebo více ročníků 4letého studia. Vyučovací předmět / vzdělávací modul tvoří vyučovací jednotku, kde jsou očekávané výsledky učení dále rozpracovány pro potřeby výuky podle následující šablony\n\n| **A. VSTUPNÍ ČÁST** |\n| --- |\n| **1\\. Název** |\n| **2\\. Kód** (kódy by měly být navázány na obory vzdělání a výsledky učení) |\n| **2a) Kategorie vzdělání** - v případě, že nebude součástí kódu |\n| **3\\. Typ vyučovací jednotky** (modul, předmět, stáž apod.) |\n| **4\\. Délka** (počet hodin - dělitelný čtyřmi (optimální modul 16, 32 hodin = týden výuky) |\n| **5\\. Platnost** (datum, od kterého platí) |\n| **6\\. Vstupní předpoklady** (vymezení požadované úrovně vstupních vědomostí a dovedností, které jsou předpokladem úspěšného studia) |\n| |\n| **B. JÁDRO VYUČOVACÍ JEDNOTKY** |\n| **1\\. Charakteristika** (stručná anotace popisující obecné cíle a pojetí) |\n| **2\\. Očekávané výsledky učení a jejich indikátory (převzaté z RVP nebo dále konkretizované)** |\n| **3\\. Podpora rozvoje klíčových kompetencí a základních gramotností** (které klíčové kompetence jsou v rozvíjeny) |\n| **4\\. Obsah vzdělávání** (rozpis učiva) |\n| **5\\. Vzdělávací strategie** (strategie výuky, resp. učební činnosti žáků, které jsou doporučené pro dosažení výsledků) |\n| |\n| **C. VÝSTUPNÍ ČÁST** |\n| **1\\. Způsob ověřování dosažených výsledků** (ve vazbě na jednotlivé výsledky učení) |\n| **2\\. Kritéria hodnocení** (co znamená splnění výsledků učení, kdy je splněna celá vyučovací jednotka, kritéria pro známky, příp. procentuální, slovní hodnocení) |\n| **3\\. Doporučená studijní literatura, odkazy na ilustrační zdroje** |\n| **4\\. Poznámky** |\n\n**Soulad OVU RVP a ŠVP**\n\nTento soulad je předmětem zjišťování. Soulad nastává, jestliže jsou očekávané výsledky učení z jednotlivých vzdělávacích oblastí RVP **obsaženy** ve vyučovacích předmětech/ vzdělávacích modulech ŠVP jednotlivých škol, tzn. že v ŠVP se objevuje jejich formulace buď v doslovném nebo podobném znění v jednom nebo více vyučovacích předmětech/ vzdělávacích modulech.\n\n_Příklad souladu:_\n\nRVP ŠVP - komunikace a marketing (SŠ obchodní Č.\n\n| **2** | **řídí realizaci jednoduchého projektu** |\n| --- | --- |\n| naplánuje aktivity projektu |\n| navrhne rozpočet projektu vzhledem k navrženým aktivitám |\n| stanoví základní ukazatele a sleduje jejich naplňování |\n| vede projektový tým |\n| uvede, jak by řešil krizové situace v projektu |\n| vyhodnotí úspěšnost projektu |\n\nKNOWLEDGE {Porovnání RVP a ŠVP - postup}\n\n\n# AUDITNÍ PROTOKOL ŠVP-RVP\n\n# (POPIS KONTROLNÍHO ALGORITMU)\n\nMetodika je určena pro **Kvantifikaci Shody** školního vzdělávacího programu (ŠVP) s Rámcovým vzdělávacím programem (RVP).\n\n## FÁZE 1: VALIDACE DOKUMENTACE\n\n**Cíl:** Ověřit platnost, aktuálnost a strukturu zdrojových dokumentů.\n\n- **RVP Verifikace:** Otevřít aktuální verzi RVP (např. 
RVP ZV/G/SOŠ).\n- **Typová shoda:** Ověřit, že RVP se vztahuje k danému typu školy.\n- **ŠVP Dimenze:** Identifikovat a izolovat relevantní části ŠVP: Profil absolventa, Klíčové kompetence (KK), Vzdělávací oblasti (VO), případně Učební plán (UP).\n- **Verzování:** Potvrdit, že obě verze (RVP a ŠVP) jsou nejnovější a platné (včetně dodatků RVP).\n\n## FÁZE 2: DATABÁZOVÉ MAPOVÁNÍ VÝSTUPŮ (MASTER MATICE)\n\n**Cíl:** Vytvořit systémovou databázi pro křížové porovnání všech povinných komponent RVP se ŠVP.\n\n- **Dekompozice RVP:** Rozložit RVP na základní povinné komponenty: Klíčové kompetence, Vzdělávací oblasti a obory, Očekávané výstupy (OVU), Průřezová témata (PT).\n- **Přiřazovací mapa:** Vytvořit hlavní kontrolní matici (Master Matice) pro záznam vazeb.\n\n| Oblast RVP | Výstup RVP (OVU) | Odpovídající Část ŠVP (Předmět/Ročník) | Konkrétní Tématický Celek v ŠVP | Stav Shody (Protokol) |\n| --- | --- | --- | --- | --- |\n| ... | ... | ... | ... | ... |\n| --- | --- | --- | --- | --- |\n\n## FÁZE 3: ALGORITMICKÁ KONTROLA POKRYTÍ A HLOUBKY\n\n**Cíl:** Posoudit, zda každý povinný výstup RVP je adekvátně reflektován v obsahu ŠVP, a přidělit bodovou hodnotu pro kvantifikaci.\n\n- **Audit OVU:** Projít každý jednotlivý Očekávaný výstup (OVU) z RVP.\n- **Kódování stavu a bodování:** U každého OVU v matici označit stav pokrytí dle následujícího schématu:\n\n| Kód (Protokol) | Popis (Kvalitativní zjištění) | Bodová hodnota (Kvantifikace) |\n| --- | --- | --- |\n| ✅ | Plná shoda (Výstup pokryt v plném rozsahu, odpovídající úrovni RVP) | 1,0 |\n| --- | --- | --- |\n| ⚠️ | Částečná shoda (Formální pokrytí, omezený rozsah, chybná návaznost) | 0,5 |\n| --- | --- | --- |\n| ❌ | Absence (Výstup zcela chybí v obsahu ŠVP) | 0,0 |\n| --- | --- | --- |\n\n- **Defektologie ŠVP:** Identifikovat a zaznamenat deficity ŠVP: Chybějící výstupy (❌), Sémantické překryvy, Přetížení obsahu.\n- **Kvalitativní posun:** Ověřit, zda je formulace výstupů v ŠVP **aktivní, měřitelná a v souladu** s úrovní RVP.\n\n## FÁZE 4: STRUKTURÁLNÍ VERIFIKACE NÁVAZNOSTI (VERTIKÁLA/HORIZONTÁLA)\n\n**Cíl:** Zkontrolovat logickou posloupnost a provázanost učiva v rámci ŠVP.\n\n- **Vertikální Kontrola:** Ověřit posloupnost OVU a učiva uvnitř jednoho předmětu/oblasti (postup od jednodušších ke složitějším konceptům napříč ročníky).\n- **Horizontální Kontrola:** Zkontrolovat logické provázání napříč vzdělávacími oblastmi a předměty (např. fyzika ↔ matematika).\n- **PT Integrace:** Audit reálné integrace Průřezových témat (PT) do konkrétních částí obsahu, metod a projektů.\n\n## FÁZE 5: ANALÝZA ŠKOLNÍ PROFILACE A ROZŠÍŘENÍ RVP\n\n**Cíl:** Validovat, že profilace školy je **v souladu** s RVP a nejedná se o **rozpor**.\n\n- **Nekonfliktnost:** Porovnat definovaný Profil absolventa školy s Klíčovými kompetencemi RVP. Profil ŠVP musí RVP rozvíjet, nikoli mu odporovat.\n- **Modularita:** Zkontrolovat, zda volitelné předměty a rozšiřující moduly logicky navazují na vzdělávací oblasti RVP.\n- **Implementace specializace:** Popisně uvést, jak je školní profilace (např. 
STEM zaměření, projektová výuka) integrována do OVU a kompetencí definovaných RVP.\n\n## FÁZE 6: GENERÁTOR ZÁVĚREČNÉ ZPRÁVY A KVANTIFIKACE\n\n**Cíl:** Syntetizovat výsledky, kvantifikovat soulad a generovat závazné návrhy na korekce.\n\n### 6.1 Kvantifikace Souladu\n\nVypočítat Index shody (IS) na základě bodového hodnocení (Fáze 3):\n\n### 6.2 Interpretace Indexu Shody (IS)\n\nKlasifikace souladu pro standardizované vyhodnocení:\n\n| Interval IS | Klasifikace souladu | Popis |\n| --- | --- | --- |\n| 95-100 % | Výborný soulad | ŠVP plně odpovídá RVP, pouze stylistické nebo formální rozdíly. |\n| --- | --- | --- |\n| 85-94 % | Dobrá shoda | ŠVP pokrývá všechny klíčové výstupy, menší korekce nutné. |\n| --- | --- | --- |\n| 70-84 % | Částečná shoda | Významné nedostatky v některých oblastech, nutná revize obsahu. |\n| --- | --- | --- |\n| < 70 % | Kritická neshoda | ŠVP neplní rámcové požadavky, ohrožuje legislativní soulad. |\n| --- | --- | --- |\n\n### 6.3 Doplňkové Indexy\n\nVypočítat následující doplňkové indexy pro detailní kvalitativní analýzu:\n\n- **Index kompetenčního souladu (IKS):** Poměr pokrytí klíčových kompetencí RVP v ŠVP.\n- **Index průřezové integrace (IPI):** Míra reálné integrace průřezových témat do výuky.\n- **Index hloubky pokrytí (IHP):** Procento výstupů, které jsou v ŠVP rozvedeny na konkrétní výukové cíle (měřitelné, aktivní formulace).\n- **Index profilové rozšiřitelnosti (IPR):** Kolik rozšiřujících nebo profilových výstupů přesahuje rámec RVP, aniž by narušily jeho strukturu.\n\n### 6.4 Vizuální výstupy\n\nZajistit generování následujících vizualizací pro Závěrečnou zprávu:\n\n- Graf pokrytí po vzdělávacích oblastech (Sloupcový graf IS pro VO).\n- Pavoukový diagram Klíčových kompetencí (RVP vs. ŠVP).\n- Mapa defektů (Vizualizace ❌ a ⚠️ výstupů).\n\n### 6.5 Struktura Závěrečné Zprávy\n\nZpráva musí být strukturována dle standardizovaného formátu:\n\n| Oddíl | Obsah |\n| --- | --- |\n| A. Identifikace | Název školy, IZO, typ školy, datum revize, zpracovatel, verze ŠVP a RVP. |\n| --- | --- |\n| B. Shrnutí výsledků | Celkový Index Shody (IS), hlavní závěry a doporučení. |\n| --- | --- |\n| C. Kvantitativní analýza | Přehled IS v % dle kategorií OVU / VO / kompetencí. |\n| --- | --- |\n| D. Kvalitativní analýza | Slovní zhodnocení kvality souladu (formulace, obtížnost, integrace PT). |\n| --- | --- |\n| E. Rizikové oblasti | Přehled nalezených defektů (chybějící OVU, přetížení, formální shoda). |\n| --- | --- |\n| F. Návrhy opatření (Korekční plán) | Přesné návrhy změn - **Co, Kde, Kdo** má upravit, včetně termínu. |\n| --- | --- |\n| G. Přílohy | Master Matice (Fáze 2-3), revizní tabulka, výstupní grafy a metriky. |\n| --- | --- |\n\n\n\n\n.\n"}],sourceFile:"./books/examples/lsvp-asistent.book"}];
+
var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModels",description:"List of available model names together with their descriptions as JSON",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelsRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n```json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpful assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n```\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n```json\n{availableModels}\n```\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. 
By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelsRequirements",format:"JSON",dependentParameterNames:["availableModels","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModels}` List of available model names together with their descriptions as JSON\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelsRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n\\`\\`\\`json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpful assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n\\`\\`\\`json\n{availableModels}\n\\`\\`\\`\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelsRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
|
|
35
37
|
|
|
36
38
|
/**
|
|
37
39
|
* Checks if value is valid email
|
|
@@ -1133,15 +1135,30 @@ const CLAIM = `Turn your company's scattered knowledge into AI ready books`;
  * @public exported from `@promptbook/core`
  */
 const PROMPTBOOK_COLOR = Color.fromHex('#79EAFD');
-// <- TODO: [🧠] Using `Color` here increases the package size approx 3kb, maybe remove it
+// <- TODO: [🧠][🈵] Using `Color` here increases the package size approx 3kb, maybe remove it
 /**
- *
+ * Colors for syntax highlighting in the `<BookEditor/>`
+ *
+ * TODO: [🗽] Unite branding and make single place for it
+ *
+ * @public exported from `@promptbook/core`
+ */
+const PROMPTBOOK_SYNTAX_COLORS = {
+    TITLE: Color.fromHex('#244EA8'),
+    LINE: Color.fromHex('#eeeeee'),
+    COMMITMENT: Color.fromHex('#DA0F78'),
+    PARAMETER: Color.fromHex('#8e44ad'),
+};
+// <- TODO: [🧠][🈵] Using `Color` here increases the package size approx 3kb, maybe remove it
+/**
+ * Chat color of the Promptbook (in chat)
  *
  * TODO: [🗽] Unite branding and make single place for it
  *
  * @public exported from `@promptbook/core`
  */
 const PROMPTBOOK_CHAT_COLOR = PROMPTBOOK_COLOR.then(lighten(0.1)).then(saturate(0.9)).then(grayscale(0.9));
+// <- TODO: [🧠][🈵] Using `Color` and `lighten`, `saturate`,... here increases the package size approx 3kb, maybe remove it
 /**
  * Color of the user (in chat)
  *
@@ -1150,6 +1167,7 @@ const PROMPTBOOK_CHAT_COLOR = PROMPTBOOK_COLOR.then(lighten(0.1)).then(saturate(
  * @public exported from `@promptbook/core`
  */
 const USER_CHAT_COLOR = Color.fromHex('#1D4ED8');
+// <- TODO: [🧠][🈵] Using `Color` here increases the package size approx 3kb, maybe remove it
 /**
  * When the title is not provided, the default title is used
  *
@@ -1230,6 +1248,13 @@ const VALUE_STRINGS = {
  * @public exported from `@promptbook/utils`
  */
 const SMALL_NUMBER = 0.001;
+// <- TODO: [⏳] Standardize timeouts, Make DEFAULT_TIMEOUT_MS as global constant
+/**
+ * How many times to retry the connections
+ *
+ * @private within the repository - too low-level in comparison with other `MAX_...`
+ */
+const CONNECTION_RETRIES_LIMIT = 5;
 /**
  * Short time interval to prevent race conditions in milliseconds
  *
@@ -1285,6 +1310,14 @@ const DEFAULT_MAX_KNOWLEDGE_SOURCES_SCRAPING_TOTAL = 200;
  */
 const DEFAULT_BOOKS_DIRNAME = './books';
 // <- TODO: [🕝] Make also `BOOKS_DIRNAME_ALTERNATIVES`
+/**
+ * Where to store your agents (also book files)
+ * This is kind of a "src" for your books
+ *
+ * @public exported from `@promptbook/core`
+ */
+const DEFAULT_AGENTS_DIRNAME = './agents';
+// <- TODO: [🕝] Make also `AGENTS_DIRNAME_ALTERNATIVES`
 // TODO: Just `.promptbook` in config, hardcode subfolders like `download-cache` or `execution-cache`
 /**
  * Where to store the temporary downloads
@@ -1332,7 +1365,7 @@ TODO: [🌃]
 ex-port const WIZARD_APP_ID: string_app_id = 'wizard';
 */
 /**
- * The name of the builded pipeline collection made by CLI `ptbk make` and for lookup in `
+ * The name of the builded pipeline collection made by CLI `ptbk make` and for lookup in `createPipelineCollectionFromDirectory`
  *
  * @public exported from `@promptbook/core`
  */
@@ -2067,6 +2100,12 @@ const ORDER_OF_PIPELINE_JSON = [
  * @private within the repository
  */
 const REPLACING_NONCE = 'ptbkauk42kV2dzao34faw7FudQUHYPtW';
+/**
+ * Nonce which is used as string which is not occurring in normal text
+ *
+ * @private within the repository
+ */
+const SALT_NONCE = 'ptbkghhewbvruets21t54et5';
 /**
  * Placeholder value indicating a parameter is missing its value.
  *
@@ -2566,7 +2605,7 @@ function unpreparePipeline(pipeline) {
  * Library of pipelines that groups together pipelines for an application.
  * This implementation is a very thin wrapper around the Array / Map of pipelines.
  *
- * @private internal function of `
+ * @private internal function of `createPipelineCollectionFromJson`, use `createPipelineCollectionFromJson` instead
  * @see https://github.com/webgptorg/pipeline#pipeline-collection
  */
 class SimplePipelineCollection {
@@ -2576,7 +2615,7 @@ class SimplePipelineCollection {
  * @param pipelines Array of pipeline JSON objects to include in the collection
  *
  * Note: During the construction logic of all pipelines are validated
- * Note: It is not recommended to use this constructor directly, use `
+ * Note: It is not recommended to use this constructor directly, use `createPipelineCollectionFromJson` *(or other variant)* instead
  */
 constructor(...pipelines) {
     this.collection = new Map();
@@ -2664,16 +2703,16 @@ class SimplePipelineCollection {
     }
 }
 
 /**
- * Creates PipelineCollection from array of PipelineJson or PipelineString
+ * Creates `PipelineCollection` from array of PipelineJson or PipelineString
  *
- * Note: Functions `
+ * Note: Functions `pipelineCollectionToJson` and `createPipelineCollectionFromJson` are complementary
  * Note: Syntax, parsing, and logic consistency checks are performed on all sources during build
  *
  * @param promptbookSources
 * @returns PipelineCollection
  * @public exported from `@promptbook/core`
  */
-function 
+function createPipelineCollectionFromJson(...promptbooks) {
     return new SimplePipelineCollection(...promptbooks);
 }
 
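This release renames the collection constructors (the truncated old names like `createCollectionFromJson` become `createPipelineCollection…`). A minimal sketch of the renamed JSON constructor, assuming you already hold an array of `PipelineJson` objects (the `pipelinesJson` name is a hypothetical placeholder):

```ts
import { createPipelineCollectionFromJson } from '@promptbook/core';
import type { PipelineJson } from '@promptbook/types';

declare const pipelinesJson: Array<PipelineJson>; // <- hypothetical input

// Syntax, parsing, and logic consistency checks run during construction (see the note above)
const collection = createPipelineCollectionFromJson(...pipelinesJson);

// Lookups are async, as used elsewhere in this diff:
const pipeline = await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-title.book');
```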
@@ -2846,6 +2885,19 @@ class LimitReachedError extends Error {
     }
 }
 
+/**
+ * This error indicates that promptbook operation is not allowed
+ *
+ * @public exported from `@promptbook/core`
+ */
+class NotAllowed extends Error {
+    constructor(message) {
+        super(message);
+        this.name = 'NotAllowed';
+        Object.setPrototypeOf(this, NotAllowed.prototype);
+    }
+}
+
 /**
  * This error type indicates that some part of the code is not implemented yet
  *
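The new `NotAllowed` error is also registered in `PROMPTBOOK_ERRORS` in the next hunk. A small sketch of how it behaves as a custom error class (the guard itself is hypothetical):

```ts
import { NotAllowed } from '@promptbook/core';

function assertWriteAllowed(isReadonly: boolean): void {
    if (isReadonly) {
        throw new NotAllowed('Writing is not allowed in readonly mode'); // <- hypothetical message
    }
}

try {
    assertWriteAllowed(true);
} catch (error) {
    // `Object.setPrototypeOf` in the constructor keeps `instanceof` reliable across transpilation targets
    console.log(error instanceof NotAllowed); // true
    console.log((error as Error).name); // 'NotAllowed'
}
```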
@@ -2940,6 +2992,7 @@ const PROMPTBOOK_ERRORS = {
     PromptbookFetchError,
     UnexpectedError,
     WrappedError,
+    NotAllowed,
     // TODO: [🪑]> VersionMismatchError,
 };
 /**
@@ -3155,6 +3208,7 @@ function createTask(options) {
     let updatedAt = createdAt;
     const errors = [];
     const warnings = [];
+    const llmCalls = [];
     let currentValue = {};
     let customTldr = null;
     const partialResultSubject = new Subject();
@@ -3170,6 +3224,9 @@ function createTask(options) {
     }, (tldrInfo) => {
         customTldr = tldrInfo;
         updatedAt = new Date();
+    }, (llmCall) => {
+        llmCalls.push(llmCall);
+        updatedAt = new Date();
     });
     finalResultPromise
         .catch((error) => {
@@ -3292,7 +3349,7 @@ function createTask(options) {
             }
             return {
                 percent: percent,
-                message,
+                message: message + ' (!!!fallback)',
             };
         },
         get createdAt() {
@@ -3315,6 +3372,10 @@ function createTask(options) {
             return warnings;
             // <- Note: [1] --||--
         },
+        get llmCalls() {
+            return [...llmCalls, { foo: '!!! bar' }];
+            // <- Note: [1] --||--
+        },
         get currentValue() {
             return currentValue;
             // <- Note: [1] --||--
@@ -4932,7 +4993,7 @@ async function preparePipeline(pipeline, tools, options) {
     let title = pipeline.title;
     if (title === undefined || title === '' || title === DEFAULT_BOOK_TITLE) {
         // TODO: [🌼] In future use `ptbk make` and made getPipelineCollection
-        const collection = 
+        const collection = createPipelineCollectionFromJson(...PipelineCollection);
         const prepareTitleExecutor = createPipelineExecutor({
             pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-title.book'),
             tools,
@@ -5715,6 +5776,18 @@ function templateParameters(template, parameters) {
     return replacedTemplates;
 }
 
+/**
+ * Logs an LLM call with the given report.
+ *
+ * @private internal utility of `createPipelineExecutor`
+ */
+function logLlmCall(logLlmCall, report) {
+    logLlmCall({
+        modelName: 'model' /* <- TODO: How to get model name from the report */,
+        report,
+    });
+}
+
 /**
  * Extracts all code blocks from markdown.
  *
@@ -5861,10 +5934,13 @@ const LINES_PER_STANDARD_PAGE = 44;
  * @public exported from `@promptbook/utils`
  */
 function countLines(text) {
+    if (text === '') {
+        return 0;
+    }
     text = text.replace('\r\n', '\n');
     text = text.replace('\r', '\n');
     const lines = text.split('\n');
-    return lines.reduce((count, line) => count + Math.ceil(line.length / CHARACTERS_PER_STANDARD_LINE), 0);
+    return lines.reduce((count, line) => count + Math.max(Math.ceil(line.length / CHARACTERS_PER_STANDARD_LINE), 1), 0);
 }
 /**
  * TODO: [🥴] Implement counting in formats - like JSON, CSV, XML,...
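The `countLines` change has two effects that follow directly from the code above: the empty string stays at zero (the early return guards it against the new `Math.max(…, 1)`), and every line of non-empty text, including blank lines, now counts as at least one standard line:

```ts
countLines('');        // 0 — early return; without it, Math.max(…, 1) would report 1
countLines('a\nb');    // 2
countLines('a\n\nb');  // 3 — the blank middle line previously contributed 0
```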
@@ -6079,7 +6155,7 @@ function validatePromptResult(options) {
  */
 async function executeAttempts(options) {
     const { jokerParameterNames, priority, maxAttempts, // <- Note: [💂]
-    preparedContent, parameters, task, preparedPipeline, tools, $executionReport, pipelineIdentification, maxExecutionAttempts, onProgress, } = options;
+    preparedContent, parameters, task, preparedPipeline, tools, $executionReport, pipelineIdentification, maxExecutionAttempts, onProgress, logLlmCall: logLlmCall$1, } = options;
     const $ongoingTaskResult = {
         $result: null,
         $resultString: null,
@@ -6327,14 +6403,10 @@ async function executeAttempts(options) {
             });
         }
         finally {
-            if (!isJokerAttempt &&
-
-
-
-            // In that case we don’t want to make a report about it because it’s not a llm execution error
-            ) {
-                // TODO: [🧠] Maybe put other taskTypes into report
-                $executionReport.promptExecutions.push({
+            if (!isJokerAttempt && task.taskType === 'PROMPT_TASK' && $ongoingTaskResult.$prompt) {
+                // Note: [2] When some expected parameter is not defined, error will occur in templateParameters
+                // In that case we don’t want to make a report about it because it’s not a llm execution error
+                const executionPromptReport = {
                     prompt: {
                         ...$ongoingTaskResult.$prompt,
                         // <- TODO: [🧠] How to pick everyhing except `pipelineUrl`
@@ -6343,7 +6415,11 @@ async function executeAttempts(options) {
                     error: $ongoingTaskResult.$expectError === null
                         ? undefined
                         : serializeError($ongoingTaskResult.$expectError),
-                }
+                };
+                $executionReport.promptExecutions.push(executionPromptReport);
+                if (logLlmCall$1) {
+                    logLlmCall(logLlmCall$1, executionPromptReport);
+                }
             }
         }
         if ($ongoingTaskResult.$expectError !== null && attemptIndex === maxAttempts - 1) {
@@ -6408,9 +6484,9 @@ async function executeAttempts(options) {
  * @private internal utility of `createPipelineExecutor`
  */
 async function executeFormatSubvalues(options) {
-    const { task, jokerParameterNames, parameters, priority, csvSettings, onProgress, pipelineIdentification } = options;
+    const { task, jokerParameterNames, parameters, priority, csvSettings, onProgress, logLlmCall, pipelineIdentification, } = options;
     if (task.foreach === undefined) {
-        return /* not await */ executeAttempts(options);
+        return /* not await */ executeAttempts({ ...options, logLlmCall });
     }
     if (jokerParameterNames.length !== 0) {
         throw new UnexpectedError(spaceTrim((block) => `
@@ -6711,7 +6787,7 @@ async function getReservedParametersForTask(options) {
  * @private internal utility of `createPipelineExecutor`
  */
 async function executeTask(options) {
-    const { currentTask, preparedPipeline, parametersToPass, tools, onProgress, $executionReport, pipelineIdentification, maxExecutionAttempts, maxParallelCount, csvSettings, isVerbose, rootDirname, cacheDirname, intermediateFilesStrategy, isAutoInstalled, isNotPreparedWarningSuppressed, } = options;
+    const { currentTask, preparedPipeline, parametersToPass, tools, onProgress, logLlmCall, $executionReport, pipelineIdentification, maxExecutionAttempts, maxParallelCount, csvSettings, isVerbose, rootDirname, cacheDirname, intermediateFilesStrategy, isAutoInstalled, isNotPreparedWarningSuppressed, } = options;
     const priority = preparedPipeline.tasks.length - preparedPipeline.tasks.indexOf(currentTask);
     // Note: Check consistency of used and dependent parameters which was also done in `validatePipeline`, but it’s good to doublecheck
     const usedParameterNames = extractParameterNamesFromTask(currentTask);
@@ -6790,6 +6866,7 @@ async function executeTask(options) {
         tools,
         $executionReport,
         onProgress,
+        logLlmCall,
         pipelineIdentification,
         maxExecutionAttempts,
         maxParallelCount,
@@ -6833,6 +6910,29 @@ function filterJustOutputParameters(options) {
             $warnings.push(new PipelineExecutionError(spaceTrim$1((block) => `
                 Parameter \`{${parameter.name}}\` should be an output parameter, but it was not generated during pipeline execution
 
+                Note: This is a warning which happened after the pipeline was executed, and \`{${parameter.name}}\` was not for some reason defined in output parameters
+
+                All parameters:
+                ${block(preparedPipeline.parameters
+            .map(({ name, isInput, isOutput, description }) => {
+            let line = `\`{${name}}\``;
+            if (isInput) {
+                line += ' `[input parameter]`';
+            }
+            if (isOutput) {
+                line += ' `[output parameter]`';
+            }
+            if (parametersToPass[name] === undefined) {
+                line += ` <- Warning: Should be in the output but its not |`;
+            }
+            if (description) {
+                line += ` ${description}`;
+            }
+            return line;
+        })
+            .map((line, index) => `${index + 1}) ${line}`)
+            .join('\n'))}
+
                 ${block(pipelineIdentification)}
             `)));
             continue;
@@ -6853,7 +6953,7 @@ function filterJustOutputParameters(options) {
  * @private internal utility of `createPipelineExecutor`
  */
 async function executePipeline(options) {
-    const { inputParameters, tools, onProgress, pipeline, setPreparedPipeline, pipelineIdentification, maxParallelCount, rootDirname, isVerbose, } = options;
+    const { inputParameters, tools, onProgress, logLlmCall, pipeline, setPreparedPipeline, pipelineIdentification, maxParallelCount, rootDirname, isVerbose, } = options;
     let { preparedPipeline } = options;
     if (preparedPipeline === undefined) {
         preparedPipeline = await preparePipeline(pipeline, tools, {
@@ -7031,6 +7131,7 @@ async function executePipeline(options) {
                 onProgress(newOngoingResult);
             }
         },
+        logLlmCall,
         $executionReport: executionReport,
         pipelineIdentification: spaceTrim$1((block) => `
             ${block(pipelineIdentification)}
@@ -7154,7 +7255,7 @@ function createPipelineExecutor(options) {
         // <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
     }
     let runCount = 0;
-    const pipelineExecutorWithCallback = async (inputParameters, onProgress) => {
+    const pipelineExecutorWithCallback = async (inputParameters, onProgress, logLlmCall) => {
        runCount++;
        return /* not await */ executePipeline({
            pipeline,
@@ -7165,6 +7266,7 @@ function createPipelineExecutor(options) {
             inputParameters,
             tools,
             onProgress,
+            logLlmCall,
             pipelineIdentification: spaceTrim$1((block) => `
                 ${block(pipelineIdentification)}
                 ${runCount === 1 ? '' : `Run #${runCount}`}
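Taken together, the executor hunks above thread an optional `logLlmCall` callback from the public executor signature down through `executePipeline`, `executeTask`, `executeFormatSubvalues`, and `executeAttempts`. A minimal sketch of the new call shape, assuming an already-created executor; the input parameter name is hypothetical:

```ts
const execute = createPipelineExecutor({ pipeline, tools });

const result = await execute(
    { knowledgeContent: 'Some markdown…' },   // input parameters (as before)
    (progress) => console.log(progress),      // onProgress (as before)
    (llmCall) => console.log(llmCall.report), // new third argument: called once per reported LLM execution
);
```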
@@ -7280,7 +7382,7 @@ async function preparePersona(personaDescription, tools, options) {
         throw new MissingToolsError('LLM tools are required for preparing persona');
     }
     // TODO: [🌼] In future use `ptbk make` and made getPipelineCollection
-    const collection = 
+    const collection = createPipelineCollectionFromJson(...PipelineCollection);
     const preparePersonaExecutor = createPipelineExecutor({
         pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book'),
         tools,
@@ -7346,7 +7448,8 @@ async function preparePersona(personaDescription, tools, options) {
 function createEmptyAgentModelRequirements() {
     return {
         systemMessage: '',
-        modelName: 'gpt-5',
+        // modelName: 'gpt-5',
+        modelName: 'gemini-2.5-flash-lite',
         temperature: 0.7,
         topP: 0.9,
         topK: 50,
@@ -9764,6 +9867,46 @@ function extractMcpServers(agentSource) {
     return mcpServers;
 }
 
+/**
+ * Number of padding lines to add at the end of the book content
+ *
+ * @public exported from `@promptbook/core`
+ */
+const PADDING_LINES = 11;
+/**
+ * A function that adds padding to the book content
+ *
+ * @public exported from `@promptbook/core`
+ */
+function padBook(content) {
+    if (!content) {
+        return '\n'.repeat(PADDING_LINES);
+    }
+    const lines = content.split('\n');
+    let trailingEmptyLines = 0;
+    for (let i = lines.length - 1; i >= 0; i--) {
+        const line = lines[i];
+        if (line === undefined) {
+            // Note: This should not happen in reality, but it's here to satisfy TypeScript's noUncheckedIndexedAccess option
+            continue;
+        }
+        if (line.trim() === '') {
+            trailingEmptyLines++;
+        }
+        else {
+            break;
+        }
+    }
+    if (trailingEmptyLines >= PADDING_LINES) {
+        return content;
+    }
+    const linesToAdd = PADDING_LINES - trailingEmptyLines;
+    return (content + '\n'.repeat(linesToAdd));
+}
+/**
+ * TODO: [🧠] Maybe export
+ */
+
 /**
  * Type guard to check if a string is a valid agent source
  *
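`padBook` guarantees at least `PADDING_LINES` (11) trailing blank lines and is idempotent once the padding exists, which the `trailingEmptyLines >= PADDING_LINES` early return makes explicit. A quick sketch of the expected behavior, derived from the function above:

```ts
import { padBook } from '@promptbook/core';

const padded = padBook('AI Avatar\n\nPERSONA A friendly AI assistant');
// `padded` now ends with exactly 11 appended newline characters

console.log(padBook(padded) === padded); // true — already-padded content is returned unchanged
console.log(padBook(''));                // 11 bare newlines for empty input
```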
@@ -9791,49 +9934,38 @@ function validateBook(source) {
  *
  * @public exported from `@promptbook/core`
  */
-const DEFAULT_BOOK = validateBook(spaceTrim(`
-
+const DEFAULT_BOOK = padBook(validateBook(spaceTrim(`
+    AI Avatar
 
-
-
+    PERSONA A friendly AI assistant that helps you with your tasks
+`)));
+// <- Note: Not using book`...` notation to avoid strange error in jest unit tests `TypeError: (0 , book_notation_1.book) is not a function`
+// <- TODO: !!! GENESIS_BOOK
+// <- !!! Buttons into genesis book
+// <- TODO: !!! createBookBoilerplate and deprecate `DEFAULT_BOOK`
 
 /**
- *
+ * Constructs `PipelineCollection` from async sources
  *
- * Note: Functions `collectionToJson` and `createCollectionFromJson` are complementary
- *
- * @public exported from `@promptbook/core`
- */
-async function collectionToJson(collection) {
-    const pipelineUrls = await collection.listPipelines();
-    const promptbooks = await Promise.all(pipelineUrls.map((url) => collection.getPipelineByUrl(url)));
-    return promptbooks;
-}
-/**
- * TODO: [🧠] Maybe clear `sourceFile` or clear when exposing through API or remote server
- */
-
-/**
- * Constructs Promptbook from async sources
  * It can be one of the following:
  * - Promise of array of PipelineJson or PipelineString
  * - Factory function that returns Promise of array of PipelineJson or PipelineString
  *
 * Note: This is useful as internal tool for other constructor functions like
- * `
+ * `createPipelineCollectionFromUrl` or `createPipelineCollectionFromDirectory`
  * Consider using those functions instead of this one
  *
  * Note: The function does NOT return promise it returns the collection directly which waits for the sources to be resolved
  * when error occurs in given promise or factory function, it is thrown during `listPipelines` or `getPipelineByUrl` call
  *
- * Note: Consider using `
+ * Note: Consider using `createPipelineCollectionFromDirectory` or `createPipelineCollectionFromUrl`
  *
  * @param promptbookSourcesPromiseOrFactory
  * @returns PipelineCollection
 * @deprecated Do not use, it will became internal tool for other constructor functions
  * @public exported from `@promptbook/core`
  */
-function 
+function createPipelineCollectionFromPromise(promptbookSourcesPromiseOrFactory) {
     let collection = null;
     async function load() {
         if (collection !== null) {
@@ -9844,7 +9976,7 @@ function createCollectionFromPromise(promptbookSourcesPromiseOrFactory) {
             promptbookSourcesPromiseOrFactory = promptbookSourcesPromiseOrFactory();
         }
         const promptbookSources = await promptbookSourcesPromiseOrFactory;
-        collection = 
+        collection = createPipelineCollectionFromJson(...promptbookSources);
     }
     async function listPipelines() {
         await load();
@@ -9870,9 +10002,9 @@ function createCollectionFromPromise(promptbookSourcesPromiseOrFactory) {
  * @returns PipelineCollection
  * @public exported from `@promptbook/core`
  */
-async function 
+async function createPipelineCollectionFromUrl(url, options) {
     const { isVerbose = DEFAULT_IS_VERBOSE, isLazyLoaded = false } = options || {};
-    const collection = 
+    const collection = createPipelineCollectionFromPromise(async () => {
         if (isVerbose) {
             console.info(`Creating pipeline collection from url ${url.toString()}`);
         }
@@ -9885,14 +10017,14 @@ async function createCollectionFromUrl(url, options) {
     // TODO: [main] !!3 [🏳🌈] Allow variant with .json .js and .ts files
     // TODO: [🧠][🏳🌈] .js and .ts files should create getter function of the collection
     // TODO: Look at WebGPT "📖 Make Promptbook collection" and https://webgpt.cz/_books.json
-    // TODO: Implement via 
+    // TODO: Implement via createPipelineCollectionFromPromise
 }
 /**
  * TODO: [main] !!4 [🧠] Library precompilation and do not mix markdown and json promptbooks
 */
 
 /**
- * Creates PipelineCollection as a subset of another PipelineCollection
+ * Creates `PipelineCollection` as a subset of another `PipelineCollection`
  *
  * Note: You can use any type of collection as a parent collection - local, remote, etc.
  * Note: This is just a thin wrapper / proxy around the parent collection
@@ -9901,7 +10033,7 @@ async function createCollectionFromUrl(url, options) {
  * @returns PipelineCollection
  * @public exported from `@promptbook/core`
  */
-function 
+function createPipelineSubcollection(collection, predicate) {
     async function listPipelines() {
         let promptbooks = await collection.listPipelines();
         promptbooks = promptbooks.filter(predicate);
@@ -9935,6 +10067,22 @@ function createSubcollection(collection, predicate) {
     };
 }
 
+/**
+ * Converts PipelineCollection to serialized JSON
+ *
+ * Note: Functions `pipelineCollectionToJson` and `createPipelineCollectionFromJson` are complementary
+ *
+ * @public exported from `@promptbook/core`
+ */
+async function pipelineCollectionToJson(collection) {
+    const pipelineUrls = await collection.listPipelines();
+    const promptbooks = await Promise.all(pipelineUrls.map((url) => collection.getPipelineByUrl(url)));
+    return promptbooks;
+}
+/**
+ * TODO: [🧠] Maybe clear `sourceFile` or clear when exposing through API or remote server
+ */
+
 /**
  * All available task types
  *
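`collectionToJson` (removed earlier in this diff) reappears here as `pipelineCollectionToJson`; together with `createPipelineCollectionFromJson` it forms a serialize/restore pair, as the note above says. A round-trip sketch, assuming any existing `collection`:

```ts
import { createPipelineCollectionFromJson, pipelineCollectionToJson } from '@promptbook/core';

// Serialize a collection (local, remote, or subcollection) to plain PipelineJson objects…
const pipelinesJson = await pipelineCollectionToJson(collection);

// …and rebuild an equivalent in-memory collection from them:
const restored = createPipelineCollectionFromJson(...pipelinesJson);
```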
@@ -12690,6 +12838,7 @@ function deflatePipeline(pipelineString) {
     if (!isFlatPipeline(pipelineString)) {
         return pipelineString;
     }
+    pipelineString = spaceTrim(pipelineString);
     const pipelineStringLines = pipelineString.split('\n');
     const potentialReturnStatement = pipelineStringLines.pop();
     let returnStatement;
@@ -12724,7 +12873,7 @@ function deflatePipeline(pipelineString) {
         ${returnStatement}
     `));
     // <- TODO: Maybe use book` notation
-    return pipelineString;
+    return padBook(pipelineString);
 }
 /**
  * TODO: Unit test
@@ -14197,10 +14346,10 @@ function $registeredLlmToolsMessage() {
     var _a, _b;
     const isMetadataAviailable = $llmToolsMetadataRegister
         .list()
-        .
+        .some(({ packageName, className }) => metadata.packageName === packageName && metadata.className === className);
     const isInstalled = $llmToolsRegister
         .list()
-        .
+        .some(({ packageName, className }) => metadata.packageName === packageName && metadata.className === className);
     const isFullyConfigured = ((_a = metadata.envVariables) === null || _a === void 0 ? void 0 : _a.every((envVariableName) => env[envVariableName] !== undefined)) || false;
     const isPartiallyConfigured = ((_b = metadata.envVariables) === null || _b === void 0 ? void 0 : _b.some((envVariableName) => env[envVariableName] !== undefined)) || false;
     // <- Note: [🗨]
@@ -14617,6 +14766,98 @@ function limitTotalUsage(llmTools, options = {}) {
  * TODO: [👷♂️] @@@ Manual about construction of llmTools
  */
 
+/**
+ * Restricts an Updatable to a (2) BehaviorSubject variant
+ *
+ * @see Updatable
+ * @private internal utility <- TODO: [🧠] Maybe export from `@promptbook/types`
+ */
+function asUpdatableSubject(value) {
+    if (value instanceof BehaviorSubject) {
+        return value;
+    }
+    else if (Array.isArray(value)) {
+        if (value.length !== 2) {
+            throw new TypeError('`asUpdatableSubject`: Invalid tuple length, expected 2 elements');
+        }
+        if (typeof value[1] !== 'function') {
+            throw new TypeError('`asUpdatableSubject`: Invalid tuple, expected second element to be a function');
+        }
+        const [theValue, setValue] = value;
+        const subject = new BehaviorSubject(theValue);
+        subject.subscribe((newValue) => {
+            setValue(newValue);
+        });
+        return subject;
+    }
+    else {
+        return new BehaviorSubject(value);
+    }
+}
+/**
+ * TODO: [🧠] Maybe `BehaviorSubject` is too heavy for this use case, maybe just tuple `[value,setValue]` is enough
+ */
+
+/**
+ * Represents one AI Agent
+ *
+ * Note: [🦖] There are several different things in Promptbook:
+ * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
+ * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
+ * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
+ * - `OpenAiAssistantExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities, recommended for usage in `Agent` or `AgentLlmExecutionTools`
+ *
+ * @public exported from `@promptbook/core`
+ */
+class Agent {
+    /**
+     * Not used in Agent, always returns empty array
+     */
+    get parameters() {
+        return [
+        /* [😰] */
+        ];
+    }
+    constructor(options) {
+        this.options = options;
+        /**
+         * Name of the agent
+         */
+        this.agentName = null;
+        /**
+         * Description of the agent
+         */
+        this.personaDescription = null;
+        /**
+         * Metadata like image or color
+         */
+        this.meta = {};
+        this.agentSource = asUpdatableSubject(options.agentSource);
+        this.agentSource.subscribe((source) => {
+            const { agentName, personaDescription, meta } = parseAgentSource(source);
+            this.agentName = agentName;
+            this.personaDescription = personaDescription;
+            this.meta = { ...this.meta, ...meta };
+        });
+    }
+    /**
+     * Creates LlmExecutionTools which exposes the agent as a model
+     */
+    getLlmExecutionTools() {
+        const llmTools = new AgentLlmExecutionTools({
+            llmTools: getSingleLlmExecutionTools(this.options.executionTools.llm),
+            agentSource: this.agentSource.value, // <- TODO: !!!! Allow to pass BehaviorSubject<string_book> OR refresh llmExecutionTools.callChat on agentSource change
+        });
+        // TODO: !!!! Add `Agent` simple "mocked" learning by appending to agent source
+        // TODO: !!!! Add `Agent` learning by promptbookAgent
+        return llmTools;
+    }
+}
+/**
+ * TODO: [🧠][😰]Agent is not working with the parameters, should it be?
+ * TODO: !!! Agent on remote server
+ */
+
 /**
  * Change ellipsis character to three dots `…` -> `...`
  *
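The new `Agent` class parses its book source reactively: `asUpdatableSubject` accepts a plain value, a `[value, setValue]` tuple, or a `BehaviorSubject`, so the agent's name, persona, and meta stay in sync with source edits. A construction sketch; the shape follows the constructor above, but the concrete `llm` instance is an assumption:

```ts
import { BehaviorSubject } from 'rxjs';
import { Agent } from '@promptbook/core';

// The source may be updated later; the Agent re-parses name/persona/meta on every emission
const agentSource = new BehaviorSubject(
    'AI Avatar\n\nPERSONA A friendly AI assistant that helps you with your tasks',
);

const agent = new Agent({
    agentSource,
    executionTools: { llm }, // <- assumption: any configured LlmExecutionTools instance
});

const llmTools = agent.getLlmExecutionTools(); // exposes the agent itself as a chat model
```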
@@ -14757,126 +14998,1864 @@ function promptbookifyAiText(text) {
  */
 
 /**
- *
- * This wraps underlying LLM execution tools and applies agent-specific system prompts and requirements
+ * Helper of usage compute
  *
- * @
+ * @param content the content of prompt or response
+ * @returns part of UsageCounts
+ *
+ * @private internal utility of LlmExecutionTools
  */
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    getAgentInfo() {
-        if (this._cachedAgentInfo === null) {
-            this._cachedAgentInfo = parseAgentSource(this.agentSource);
-        }
-        return this._cachedAgentInfo;
-    }
-    /**
-     * Get cached or create agent model requirements
-     */
-    async getAgentModelRequirements() {
-        if (this._cachedModelRequirements === null) {
-            // Get available models from underlying LLM tools for best model selection
-            const availableModels = await this.llmTools.listModels();
-            this._cachedModelRequirements = await createAgentModelRequirements(this.agentSource, undefined, // Let the function pick the best model
-            availableModels);
-        }
-        return this._cachedModelRequirements;
-    }
-    get title() {
-        const agentInfo = this.getAgentInfo();
-        return (agentInfo.agentName || 'Agent');
-    }
-    get description() {
-        const agentInfo = this.getAgentInfo();
-        return agentInfo.personaDescription || 'AI Agent with predefined personality and behavior';
-    }
-    get profile() {
-        const agentInfo = this.getAgentInfo();
-        if (!agentInfo.agentName) {
-            return undefined;
-        }
-        return {
-            name: agentInfo.agentName.toUpperCase().replace(/\s+/g, '_'),
-            fullname: agentInfo.agentName,
-            color: agentInfo.meta.color || '#6366f1',
-            avatarSrc: agentInfo.meta.image,
-        };
-    }
-    checkConfiguration() {
-        // Check underlying tools configuration
-        return this.llmTools.checkConfiguration();
+function computeUsageCounts(content) {
+    return {
+        charactersCount: { value: countCharacters(content) },
+        wordsCount: { value: countWords(content) },
+        sentencesCount: { value: countSentences(content) },
+        linesCount: { value: countLines(content) },
+        paragraphsCount: { value: countParagraphs(content) },
+        pagesCount: { value: countPages(content) },
+    };
+}
+
+/**
+ * Make UncertainNumber
+ *
+ * @param value value of the uncertain number, if `NaN` or `undefined`, it will be set to 0 and `isUncertain=true`
+ * @param isUncertain if `true`, the value is uncertain, otherwise depends on the value
+ *
+ * @private utility for initializating UncertainNumber
+ */
+function uncertainNumber(value, isUncertain) {
+    if (value === null || value === undefined || Number.isNaN(value)) {
+        return UNCERTAIN_ZERO_VALUE;
     }
-
-
-     */
-    get modelName() {
-        const hash = SHA256(hexEncoder.parse(this.agentSource))
-        // <- TODO: [🥬] Encapsulate sha256 to some private utility function
-            .toString( /* hex */);
-        // <- TODO: [🥬] Make some system for hashes and ids of promptbook
-        const agentId = hash.substring(0, 10);
-        // <- TODO: [🥬] Make some system for hashes and ids of promptbook
-        return (normalizeToKebabCase(this.title) + '-' + agentId);
+    if (isUncertain === true) {
+        return { value, isUncertain };
     }
-
-
-
-
-
-
+    return { value };
+}
+
+/**
+ * Create price per one token based on the string value found on openai page
+ *
+ * @private within the repository, used only as internal helper for `OPENAI_MODELS`
+ */
+function pricing(value) {
+    const [price, tokens] = value.split(' / ');
+    return parseFloat(price.replace('$', '')) / parseFloat(tokens.replace('M tokens', '')) / 1000000;
+}
+
+/**
+ * List of available OpenAI models with pricing
+ *
+ * Note: Synced with official API docs at 2025-08-20
+ *
+ * @see https://platform.openai.com/docs/models/
+ * @see https://openai.com/api/pricing/
+ * @public exported from `@promptbook/openai`
+ */
+const OPENAI_MODELS = exportJson({
+    name: 'OPENAI_MODELS',
+    value: [
+        /**/
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'gpt-5',
+            modelName: 'gpt-5',
+            modelDescription: "OpenAI's most advanced language model with unprecedented reasoning capabilities and 200K context window. Features revolutionary improvements in complex problem-solving, scientific reasoning, and creative tasks. Demonstrates human-level performance across diverse domains with enhanced safety measures and alignment. Represents the next generation of AI with superior understanding, nuanced responses, and advanced multimodal capabilities.",
+            pricing: {
+                prompt: pricing(`$1.25 / 1M tokens`),
+                output: pricing(`$10.00 / 1M tokens`),
             },
-
-
-
-
-
-
-
-
-
-
-
-
+        },
+        /**/
+        /**/
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'gpt-5-mini',
+            modelName: 'gpt-5-mini',
+            modelDescription: 'A faster, cost-efficient version of GPT-5 for well-defined tasks with 200K context window. Maintains core GPT-5 capabilities while offering 5x faster inference and significantly lower costs. Features enhanced instruction following and reduced latency for production applications requiring quick responses with high quality.',
+            pricing: {
+                prompt: pricing(`$0.25 / 1M tokens`),
+                output: pricing(`$2.00 / 1M tokens`),
+            },
+        },
+        /**/
+        /**/
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'gpt-5-nano',
+            modelName: 'gpt-5-nano',
+            modelDescription: 'The fastest, most cost-efficient version of GPT-5 with 200K context window. Optimized for summarization, classification, and simple reasoning tasks. Features 10x faster inference than base GPT-5 while maintaining good quality for straightforward applications. Ideal for high-volume, cost-sensitive deployments.',
+            pricing: {
+                prompt: pricing(`$0.05 / 1M tokens`),
+                output: pricing(`$0.40 / 1M tokens`),
+            },
+        },
+        /**/
+        /**/
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'gpt-4.1',
+            modelName: 'gpt-4.1',
+            modelDescription: 'Smartest non-reasoning model with 128K context window. Enhanced version of GPT-4 with improved instruction following, better factual accuracy, and reduced hallucinations. Features advanced function calling capabilities and superior performance on coding tasks. Ideal for applications requiring high intelligence without reasoning overhead.',
+            pricing: {
+                prompt: pricing(`$3.00 / 1M tokens`),
+                output: pricing(`$12.00 / 1M tokens`),
+            },
+        },
+        /**/
+        /**/
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'gpt-4.1-mini',
+            modelName: 'gpt-4.1-mini',
+            modelDescription: 'Smaller, faster version of GPT-4.1 with 128K context window. Balances intelligence and efficiency with 3x faster inference than base GPT-4.1. Maintains strong capabilities across text generation, reasoning, and coding while offering better cost-performance ratio for most applications.',
+            pricing: {
+                prompt: pricing(`$0.80 / 1M tokens`),
+                output: pricing(`$3.20 / 1M tokens`),
+            },
+        },
+        /**/
+        /**/
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'gpt-4.1-nano',
+            modelName: 'gpt-4.1-nano',
+            modelDescription: 'Fastest, most cost-efficient version of GPT-4.1 with 128K context window. Optimized for high-throughput applications requiring good quality at minimal cost. Features 5x faster inference than GPT-4.1 while maintaining adequate performance for most general-purpose tasks.',
+            pricing: {
+                prompt: pricing(`$0.20 / 1M tokens`),
+                output: pricing(`$0.80 / 1M tokens`),
+            },
+        },
+        /**/
+        /**/
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'o3',
+            modelName: 'o3',
+            modelDescription: 'Advanced reasoning model with 128K context window specializing in complex logical, mathematical, and analytical tasks. Successor to o1 with enhanced step-by-step problem-solving capabilities and superior performance on STEM-focused problems. Ideal for professional applications requiring deep analytical thinking and precise reasoning.',
+            pricing: {
+                prompt: pricing(`$15.00 / 1M tokens`),
+                output: pricing(`$60.00 / 1M tokens`),
+            },
+        },
+        /**/
+        /**/
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'o3-pro',
+            modelName: 'o3-pro',
+            modelDescription: 'Enhanced version of o3 with more compute allocated for better responses on the most challenging problems. Features extended reasoning time and improved accuracy on complex analytical tasks. Designed for applications where maximum reasoning quality is more important than response speed.',
+            pricing: {
+                prompt: pricing(`$30.00 / 1M tokens`),
+                output: pricing(`$120.00 / 1M tokens`),
+            },
+        },
+        /**/
+        /**/
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'o4-mini',
+            modelName: 'o4-mini',
+            modelDescription: 'Fast, cost-efficient reasoning model with 128K context window. Successor to o1-mini with improved analytical capabilities while maintaining speed advantages. Features enhanced mathematical reasoning and logical problem-solving at significantly lower cost than full reasoning models.',
+            pricing: {
+                prompt: pricing(`$4.00 / 1M tokens`),
+                output: pricing(`$16.00 / 1M tokens`),
+            },
+        },
+        /**/
+        /**/
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'o3-deep-research',
+            modelName: 'o3-deep-research',
+            modelDescription: 'Most powerful deep research model with 128K context window. Specialized for comprehensive research tasks, literature analysis, and complex information synthesis. Features advanced citation capabilities and enhanced factual accuracy for academic and professional research applications.',
+            pricing: {
+                prompt: pricing(`$25.00 / 1M tokens`),
+                output: pricing(`$100.00 / 1M tokens`),
+            },
+        },
+        /**/
+        /**/
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'o4-mini-deep-research',
+            modelName: 'o4-mini-deep-research',
+            modelDescription: 'Faster, more affordable deep research model with 128K context window. Balances research capabilities with cost efficiency, offering good performance on literature review, fact-checking, and information synthesis tasks at a more accessible price point.',
+            pricing: {
+                prompt: pricing(`$12.00 / 1M tokens`),
+                output: pricing(`$48.00 / 1M tokens`),
+            },
+        },
+        /**/
+        /*/
+        {
+            modelTitle: 'dall-e-3',
+            modelName: 'dall-e-3',
+        },
+        /**/
+        /*/
+        {
+            modelTitle: 'whisper-1',
+            modelName: 'whisper-1',
+        },
+        /**/
+        /**/
+        {
+            modelVariant: 'COMPLETION',
+            modelTitle: 'davinci-002',
+            modelName: 'davinci-002',
+            modelDescription: 'Legacy completion model with 4K token context window. Excels at complex text generation, creative writing, and detailed content creation with strong contextual understanding. Optimized for instructions requiring nuanced outputs and extended reasoning. Suitable for applications needing high-quality text generation without conversation management.',
+            pricing: {
+                prompt: pricing(`$2.00 / 1M tokens`),
+                output: pricing(`$2.00 / 1M tokens`),
+            },
+        },
+        /**/
+        /*/
+        {
+            modelTitle: 'dall-e-2',
+            modelName: 'dall-e-2',
+        },
+        /**/
+        /**/
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'gpt-3.5-turbo-16k',
+            modelName: 'gpt-3.5-turbo-16k',
+            modelDescription: 'Extended context GPT-3.5 Turbo with 16K token window. Maintains core capabilities of standard 3.5 Turbo while supporting longer conversations and documents. Features good balance of performance and cost for applications requiring more context than standard 4K models. Effective for document analysis, extended conversations, and multi-step reasoning tasks.',
+            pricing: {
+                prompt: pricing(`$3.00 / 1M tokens`),
+                output: pricing(`$4.00 / 1M tokens`),
+            },
+        },
+        /**/
+        /*/
+        {
+            modelTitle: 'tts-1-hd-1106',
+            modelName: 'tts-1-hd-1106',
+        },
+        /**/
+        /*/
+        {
+            modelTitle: 'tts-1-hd',
+            modelName: 'tts-1-hd',
+        },
+        /**/
+        /**/
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'gpt-4',
+            modelName: 'gpt-4',
+            modelDescription: 'Powerful language model with 8K context window featuring sophisticated reasoning, instruction-following, and knowledge capabilities. Demonstrates strong performance on complex tasks requiring deep understanding and multi-step reasoning. Excels at code generation, logical analysis, and nuanced content creation. Suitable for advanced applications requiring high-quality outputs.',
+            pricing: {
+                prompt: pricing(`$30.00 / 1M tokens`),
|
|
15253
|
+
output: pricing(`$60.00 / 1M tokens`),
|
|
15254
|
+
},
|
|
15255
|
+
},
|
|
15256
|
+
/**/
|
|
15257
|
+
/**/
|
|
15258
|
+
{
|
|
15259
|
+
modelVariant: 'CHAT',
|
|
15260
|
+
modelTitle: 'gpt-4-32k',
|
|
15261
|
+
modelName: 'gpt-4-32k',
|
|
15262
|
+
modelDescription: 'Extended context version of GPT-4 with 32K token window. Maintains all capabilities of standard GPT-4 while supporting analysis of very lengthy documents, code bases, and conversations. Features enhanced ability to maintain context over long interactions and process detailed information from large inputs. Ideal for document analysis, legal review, and complex problem-solving.',
|
|
15263
|
+
pricing: {
|
|
15264
|
+
prompt: pricing(`$60.00 / 1M tokens`),
|
|
15265
|
+
output: pricing(`$120.00 / 1M tokens`),
|
|
15266
|
+
},
|
|
15267
|
+
},
|
|
15268
|
+
/**/
|
|
15269
|
+
/*/
|
|
15270
|
+
{
|
|
15271
|
+
modelVariant: 'CHAT',
|
|
15272
|
+
modelTitle: 'gpt-4-0613',
|
|
15273
|
+
modelName: 'gpt-4-0613',
|
|
15274
|
+
pricing: {
|
|
15275
|
+
prompt: computeUsage(` / 1M tokens`),
|
|
15276
|
+
output: computeUsage(` / 1M tokens`),
|
|
15277
|
+
},
|
|
15278
|
+
},
|
|
15279
|
+
/**/
|
|
15280
|
+
/**/
|
|
15281
|
+
{
|
|
15282
|
+
modelVariant: 'CHAT',
|
|
15283
|
+
modelTitle: 'gpt-4-turbo-2024-04-09',
|
|
15284
|
+
modelName: 'gpt-4-turbo-2024-04-09',
|
|
15285
|
+
modelDescription: 'Latest stable GPT-4 Turbo from April 2024 with 128K context window. Features enhanced reasoning chains, improved factual accuracy with 40% reduction in hallucinations, and better instruction following compared to earlier versions. Includes advanced function calling capabilities and knowledge up to April 2024. Provides optimal performance for enterprise applications requiring reliability.',
|
|
15286
|
+
pricing: {
|
|
15287
|
+
prompt: pricing(`$10.00 / 1M tokens`),
|
|
15288
|
+
output: pricing(`$30.00 / 1M tokens`),
|
|
15289
|
+
},
|
|
15290
|
+
},
|
|
15291
|
+
/**/
|
|
15292
|
+
/**/
|
|
15293
|
+
{
|
|
15294
|
+
modelVariant: 'CHAT',
|
|
15295
|
+
modelTitle: 'gpt-3.5-turbo-1106',
|
|
15296
|
+
modelName: 'gpt-3.5-turbo-1106',
|
|
15297
|
+
modelDescription: 'November 2023 version of GPT-3.5 Turbo with 16K token context window. Features improved instruction following, more consistent output formatting, and enhanced function calling capabilities. Includes knowledge cutoff from April 2023. Suitable for applications requiring good performance at lower cost than GPT-4 models.',
|
|
15298
|
+
pricing: {
|
|
15299
|
+
prompt: pricing(`$1.00 / 1M tokens`),
|
|
15300
|
+
output: pricing(`$2.00 / 1M tokens`),
|
|
15301
|
+
},
|
|
15302
|
+
},
|
|
15303
|
+
/**/
|
|
15304
|
+
/**/
|
|
15305
|
+
{
|
|
15306
|
+
modelVariant: 'CHAT',
|
|
15307
|
+
modelTitle: 'gpt-4-turbo',
|
|
15308
|
+
modelName: 'gpt-4-turbo',
|
|
15309
|
+
modelDescription: 'More capable and cost-efficient version of GPT-4 with 128K token context window. Features improved instruction following, advanced function calling capabilities, and better performance on coding tasks. Maintains superior reasoning and knowledge while offering substantial cost reduction compared to base GPT-4. Ideal for complex applications requiring extensive context processing.',
|
|
15310
|
+
pricing: {
|
|
15311
|
+
prompt: pricing(`$10.00 / 1M tokens`),
|
|
15312
|
+
output: pricing(`$30.00 / 1M tokens`),
|
|
15313
|
+
},
|
|
15314
|
+
},
|
|
15315
|
+
/**/
|
|
15316
|
+
/**/
|
|
15317
|
+
{
|
|
15318
|
+
modelVariant: 'COMPLETION',
|
|
15319
|
+
modelTitle: 'gpt-3.5-turbo-instruct-0914',
|
|
15320
|
+
modelName: 'gpt-3.5-turbo-instruct-0914',
|
|
15321
|
+
modelDescription: 'September 2023 version of GPT-3.5 Turbo Instruct with 4K context window. Optimized for completion-style instruction following with deterministic responses. Better suited than chat models for applications requiring specific formatted outputs without conversation management. Knowledge cutoff from September 2021.',
|
|
15322
|
+
pricing: {
|
|
15323
|
+
prompt: pricing(`$1.50 / 1M tokens`),
|
|
15324
|
+
output: pricing(`$2.00 / 1M tokens`),
|
|
15325
|
+
},
|
|
15326
|
+
},
|
|
15327
|
+
/**/
|
|
15328
|
+
/**/
|
|
15329
|
+
{
|
|
15330
|
+
modelVariant: 'COMPLETION',
|
|
15331
|
+
modelTitle: 'gpt-3.5-turbo-instruct',
|
|
15332
|
+
modelName: 'gpt-3.5-turbo-instruct',
|
|
15333
|
+
modelDescription: 'Optimized version of GPT-3.5 for completion-style API with 4K token context window. Features strong instruction following with single-turn design rather than multi-turn conversation. Provides more consistent, deterministic outputs compared to chat models. Well-suited for templated content generation and structured text transformation tasks.',
|
|
15334
|
+
pricing: {
|
|
15335
|
+
prompt: pricing(`$1.50 / 1M tokens`),
|
|
15336
|
+
output: pricing(`$2.00 / 1M tokens`),
|
|
15337
|
+
},
|
|
15338
|
+
},
|
|
15339
|
+
/**/
|
|
15340
|
+
/*/
|
|
15341
|
+
{
|
|
15342
|
+
modelTitle: 'tts-1',
|
|
15343
|
+
modelName: 'tts-1',
|
|
15344
|
+
},
|
|
15345
|
+
/**/
|
|
15346
|
+
/**/
|
|
15347
|
+
{
|
|
15348
|
+
modelVariant: 'CHAT',
|
|
15349
|
+
modelTitle: 'gpt-3.5-turbo',
|
|
15350
|
+
modelName: 'gpt-3.5-turbo',
|
|
15351
|
+
modelDescription: 'Latest version of GPT-3.5 Turbo with 4K token default context window (16K available). Features continually improved performance with enhanced instruction following and reduced hallucinations. Offers excellent balance between capability and cost efficiency. Suitable for most general-purpose applications requiring good AI capabilities at reasonable cost.',
|
|
15352
|
+
pricing: {
|
|
15353
|
+
prompt: pricing(`$0.50 / 1M tokens`),
|
|
15354
|
+
output: pricing(`$1.50 / 1M tokens`),
|
|
15355
|
+
},
|
|
15356
|
+
},
|
|
15357
|
+
/**/
|
|
15358
|
+
/**/
|
|
15359
|
+
{
|
|
15360
|
+
modelVariant: 'CHAT',
|
|
15361
|
+
modelTitle: 'gpt-3.5-turbo-0301',
|
|
15362
|
+
modelName: 'gpt-3.5-turbo-0301',
|
|
15363
|
+
modelDescription: 'March 2023 version of GPT-3.5 Turbo with 4K token context window. Legacy model maintained for backward compatibility with specific application behaviors. Features solid conversational abilities and basic instruction following. Knowledge cutoff from September 2021. Suitable for applications explicitly designed for this version.',
|
|
15364
|
+
pricing: {
|
|
15365
|
+
prompt: pricing(`$1.50 / 1M tokens`),
|
|
15366
|
+
output: pricing(`$2.00 / 1M tokens`),
|
|
15367
|
+
},
|
|
15368
|
+
},
|
|
15369
|
+
/**/
|
|
15370
|
+
/**/
|
|
15371
|
+
{
|
|
15372
|
+
modelVariant: 'COMPLETION',
|
|
15373
|
+
modelTitle: 'babbage-002',
|
|
15374
|
+
modelName: 'babbage-002',
|
|
15375
|
+
modelDescription: 'Efficient legacy completion model with 4K context window balancing performance and speed. Features moderate reasoning capabilities with focus on straightforward text generation tasks. Significantly more efficient than davinci models while maintaining adequate quality for many applications. Suitable for high-volume, cost-sensitive text generation needs.',
|
|
15376
|
+
pricing: {
|
|
15377
|
+
prompt: pricing(`$0.40 / 1M tokens`),
|
|
15378
|
+
output: pricing(`$0.40 / 1M tokens`),
|
|
15379
|
+
},
|
|
15380
|
+
},
|
|
15381
|
+
/**/
|
|
15382
|
+
/**/
|
|
15383
|
+
{
|
|
15384
|
+
modelVariant: 'CHAT',
|
|
15385
|
+
modelTitle: 'gpt-4-1106-preview',
|
|
15386
|
+
modelName: 'gpt-4-1106-preview',
|
|
15387
|
+
modelDescription: 'November 2023 preview version of GPT-4 Turbo with 128K token context window. Features improved instruction following, better function calling capabilities, and enhanced reasoning. Includes knowledge cutoff from April 2023. Suitable for complex applications requiring extensive document understanding and sophisticated interactions.',
|
|
15388
|
+
pricing: {
|
|
15389
|
+
prompt: pricing(`$10.00 / 1M tokens`),
|
|
15390
|
+
output: pricing(`$30.00 / 1M tokens`),
|
|
15391
|
+
},
|
|
15392
|
+
},
|
|
15393
|
+
/**/
|
|
15394
|
+
/**/
|
|
15395
|
+
{
|
|
15396
|
+
modelVariant: 'CHAT',
|
|
15397
|
+
modelTitle: 'gpt-4-0125-preview',
|
|
15398
|
+
modelName: 'gpt-4-0125-preview',
|
|
15399
|
+
modelDescription: 'January 2024 preview version of GPT-4 Turbo with 128K token context window. Features improved reasoning capabilities, enhanced tool use, and more reliable function calling. Includes knowledge cutoff from October 2023. Offers better performance on complex logical tasks and more consistent outputs than previous preview versions.',
|
|
15400
|
+
pricing: {
|
|
15401
|
+
prompt: pricing(`$10.00 / 1M tokens`),
|
|
15402
|
+
output: pricing(`$30.00 / 1M tokens`),
|
|
15403
|
+
},
|
|
15404
|
+
},
|
|
15405
|
+
/**/
|
|
15406
|
+
/*/
|
|
15407
|
+
{
|
|
15408
|
+
modelTitle: 'tts-1-1106',
|
|
15409
|
+
modelName: 'tts-1-1106',
|
|
15410
|
+
},
|
|
15411
|
+
/**/
|
|
15412
|
+
/**/
|
|
15413
|
+
{
|
|
15414
|
+
modelVariant: 'CHAT',
|
|
15415
|
+
modelTitle: 'gpt-3.5-turbo-0125',
|
|
15416
|
+
modelName: 'gpt-3.5-turbo-0125',
|
|
15417
|
+
modelDescription: 'January 2024 version of GPT-3.5 Turbo with 16K token context window. Features improved reasoning capabilities, better instruction adherence, and reduced hallucinations compared to previous versions. Includes knowledge cutoff from September 2021. Provides good performance for most general applications at reasonable cost.',
|
|
15418
|
+
pricing: {
|
|
15419
|
+
prompt: pricing(`$0.50 / 1M tokens`),
|
|
15420
|
+
output: pricing(`$1.50 / 1M tokens`),
|
|
15421
|
+
},
|
|
15422
|
+
},
|
|
15423
|
+
/**/
|
|
15424
|
+
/**/
|
|
15425
|
+
{
|
|
15426
|
+
modelVariant: 'CHAT',
|
|
15427
|
+
modelTitle: 'gpt-4-turbo-preview',
|
|
15428
|
+
modelName: 'gpt-4-turbo-preview',
|
|
15429
|
+
modelDescription: 'Preview version of GPT-4 Turbo with 128K token context window that points to the latest development model. Features cutting-edge improvements to instruction following, knowledge representation, and tool use capabilities. Provides access to newest features but may have occasional behavior changes. Best for non-critical applications wanting latest capabilities.',
|
|
15430
|
+
pricing: {
|
|
15431
|
+
prompt: pricing(`$10.00 / 1M tokens`),
|
|
15432
|
+
output: pricing(`$30.00 / 1M tokens`),
|
|
15433
|
+
},
|
|
15434
|
+
},
|
|
15435
|
+
/**/
|
|
15436
|
+
/**/
|
|
15437
|
+
{
|
|
15438
|
+
modelVariant: 'EMBEDDING',
|
|
15439
|
+
modelTitle: 'text-embedding-3-large',
|
|
15440
|
+
modelName: 'text-embedding-3-large',
|
|
15441
|
+
modelDescription: "OpenAI's most capable text embedding model generating 3072-dimensional vectors. Designed for high-quality embeddings for complex similarity tasks, clustering, and information retrieval. Features enhanced cross-lingual capabilities and significantly improved performance on retrieval and classification benchmarks. Ideal for sophisticated RAG systems and semantic search applications.",
|
|
15442
|
+
pricing: {
|
|
15443
|
+
prompt: pricing(`$0.13 / 1M tokens`),
|
|
15444
|
+
output: 0,
|
|
15445
|
+
},
|
|
15446
|
+
},
|
|
15447
|
+
/**/
|
|
15448
|
+
/**/
|
|
15449
|
+
{
|
|
15450
|
+
modelVariant: 'EMBEDDING',
|
|
15451
|
+
modelTitle: 'text-embedding-3-small',
|
|
15452
|
+
modelName: 'text-embedding-3-small',
|
|
15453
|
+
modelDescription: 'Cost-effective embedding model generating 1536-dimensional vectors. Balances quality and efficiency for simpler tasks while maintaining good performance on text similarity and retrieval applications. Offers 20% better quality than ada-002 at significantly lower cost. Ideal for production embedding applications with cost constraints.',
|
|
15454
|
+
pricing: {
|
|
15455
|
+
prompt: pricing(`$0.02 / 1M tokens`),
|
|
15456
|
+
output: 0,
|
|
15457
|
+
},
|
|
15458
|
+
},
|
|
15459
|
+
/**/
|
|
15460
|
+
/**/
|
|
15461
|
+
{
|
|
15462
|
+
modelVariant: 'CHAT',
|
|
15463
|
+
modelTitle: 'gpt-3.5-turbo-0613',
|
|
15464
|
+
modelName: 'gpt-3.5-turbo-0613',
|
|
15465
|
+
modelDescription: "June 2023 version of GPT-3.5 Turbo with 4K token context window. Features function calling capabilities for structured data extraction and API interaction. Includes knowledge cutoff from September 2021. Maintained for applications specifically designed for this version's behaviors and capabilities.",
|
|
15466
|
+
pricing: {
|
|
15467
|
+
prompt: pricing(`$1.50 / 1M tokens`),
|
|
15468
|
+
output: pricing(`$2.00 / 1M tokens`),
|
|
15469
|
+
},
|
|
15470
|
+
},
|
|
15471
|
+
/**/
|
|
15472
|
+
/**/
|
|
15473
|
+
{
|
|
15474
|
+
modelVariant: 'EMBEDDING',
|
|
15475
|
+
modelTitle: 'text-embedding-ada-002',
|
|
15476
|
+
modelName: 'text-embedding-ada-002',
|
|
15477
|
+
modelDescription: 'Legacy text embedding model generating 1536-dimensional vectors suitable for text similarity and retrieval applications. Processes up to 8K tokens per request with consistent embedding quality. While superseded by newer embedding-3 models, still maintains adequate performance for many semantic search and classification tasks.',
|
|
15478
|
+
pricing: {
|
|
15479
|
+
prompt: pricing(`$0.1 / 1M tokens`),
|
|
15480
|
+
output: 0,
|
|
15481
|
+
},
|
|
15482
|
+
},
|
|
15483
|
+
/**/
|
|
15484
|
+
/*/
|
|
15485
|
+
{
|
|
15486
|
+
modelVariant: 'CHAT',
|
|
15487
|
+
modelTitle: 'gpt-4-1106-vision-preview',
|
|
15488
|
+
modelName: 'gpt-4-1106-vision-preview',
|
|
15489
|
+
},
|
|
15490
|
+
/**/
|
|
15491
|
+
/*/
|
|
15492
|
+
{
|
|
15493
|
+
modelVariant: 'CHAT',
|
|
15494
|
+
modelTitle: 'gpt-4-vision-preview',
|
|
15495
|
+
modelName: 'gpt-4-vision-preview',
|
|
15496
|
+
pricing: {
|
|
15497
|
+
prompt: computeUsage(`$10.00 / 1M tokens`),
|
|
15498
|
+
output: computeUsage(`$30.00 / 1M tokens`),
|
|
15499
|
+
},
|
|
15500
|
+
},
|
|
15501
|
+
/**/
|
|
15502
|
+
/**/
|
|
15503
|
+
{
|
|
15504
|
+
modelVariant: 'CHAT',
|
|
15505
|
+
modelTitle: 'gpt-4o-2024-05-13',
|
|
15506
|
+
modelName: 'gpt-4o-2024-05-13',
|
|
15507
|
+
modelDescription: 'May 2024 version of GPT-4o with 128K context window. Features enhanced multimodal capabilities including superior image understanding (up to 20MP), audio processing, and improved reasoning. Optimized for 2x lower latency than GPT-4 Turbo while maintaining high performance. Includes knowledge up to October 2023. Ideal for production applications requiring reliable multimodal capabilities.',
|
|
15508
|
+
pricing: {
|
|
15509
|
+
prompt: pricing(`$5.00 / 1M tokens`),
|
|
15510
|
+
output: pricing(`$15.00 / 1M tokens`),
|
|
15511
|
+
},
|
|
15512
|
+
},
|
|
15513
|
+
/**/
|
|
15514
|
+
/**/
|
|
15515
|
+
{
|
|
15516
|
+
modelVariant: 'CHAT',
|
|
15517
|
+
modelTitle: 'gpt-4o',
|
|
15518
|
+
modelName: 'gpt-4o',
|
|
15519
|
+
modelDescription: "OpenAI's most advanced general-purpose multimodal model with 128K context window. Optimized for balanced performance, speed, and cost with 2x faster responses than GPT-4 Turbo. Features excellent vision processing, audio understanding, reasoning, and text generation quality. Represents optimal balance of capability and efficiency for most advanced applications.",
|
|
15520
|
+
pricing: {
|
|
15521
|
+
prompt: pricing(`$5.00 / 1M tokens`),
|
|
15522
|
+
output: pricing(`$15.00 / 1M tokens`),
|
|
15523
|
+
},
|
|
15524
|
+
},
|
|
15525
|
+
/**/
|
|
15526
|
+
/**/
|
|
15527
|
+
{
|
|
15528
|
+
modelVariant: 'CHAT',
|
|
15529
|
+
modelTitle: 'gpt-4o-mini',
|
|
15530
|
+
modelName: 'gpt-4o-mini',
|
|
15531
|
+
modelDescription: 'Smaller, more cost-effective version of GPT-4o with 128K context window. Maintains impressive capabilities across text, vision, and audio tasks while operating at significantly lower cost. Features 3x faster inference than GPT-4o with good performance on general tasks. Excellent for applications requiring good quality multimodal capabilities at scale.',
|
|
15532
|
+
pricing: {
|
|
15533
|
+
prompt: pricing(`$0.15 / 1M tokens`),
|
|
15534
|
+
output: pricing(`$0.60 / 1M tokens`),
|
|
15535
|
+
},
|
|
15536
|
+
},
|
|
15537
|
+
/**/
|
|
15538
|
+
/**/
|
|
15539
|
+
{
|
|
15540
|
+
modelVariant: 'CHAT',
|
|
15541
|
+
modelTitle: 'o1-preview',
|
|
15542
|
+
modelName: 'o1-preview',
|
|
15543
|
+
modelDescription: 'Advanced reasoning model with 128K context window specializing in complex logical, mathematical, and analytical tasks. Features exceptional step-by-step problem-solving capabilities, advanced mathematical and scientific reasoning, and superior performance on STEM-focused problems. Significantly outperforms GPT-4 on quantitative reasoning benchmarks. Ideal for professional and specialized applications.',
|
|
15544
|
+
pricing: {
|
|
15545
|
+
prompt: pricing(`$15.00 / 1M tokens`),
|
|
15546
|
+
output: pricing(`$60.00 / 1M tokens`),
|
|
15547
|
+
},
|
|
15548
|
+
},
|
|
15549
|
+
/**/
|
|
15550
|
+
/**/
|
|
15551
|
+
{
|
|
15552
|
+
modelVariant: 'CHAT',
|
|
15553
|
+
modelTitle: 'o1-preview-2024-09-12',
|
|
15554
|
+
modelName: 'o1-preview-2024-09-12',
|
|
15555
|
+
modelDescription: 'September 2024 version of O1 preview with 128K context window. Features specialized reasoning capabilities with 30% improvement on mathematical and scientific accuracy over previous versions. Includes enhanced support for formal logic, statistical analysis, and technical domains. Optimized for professional applications requiring precise analytical thinking and rigorous methodologies.',
|
|
15556
|
+
pricing: {
|
|
15557
|
+
prompt: pricing(`$15.00 / 1M tokens`),
|
|
15558
|
+
output: pricing(`$60.00 / 1M tokens`),
|
|
15559
|
+
},
|
|
15560
|
+
},
|
|
15561
|
+
/**/
|
|
15562
|
+
/**/
|
|
15563
|
+
{
|
|
15564
|
+
modelVariant: 'CHAT',
|
|
15565
|
+
modelTitle: 'o1-mini',
|
|
15566
|
+
modelName: 'o1-mini',
|
|
15567
|
+
modelDescription: 'Smaller, cost-effective version of the O1 model with 128K context window. Maintains strong analytical reasoning abilities while reducing computational requirements by 70%. Features good performance on mathematical, logical, and scientific tasks at significantly lower cost than full O1. Excellent for everyday analytical applications that benefit from reasoning focus.',
|
|
15568
|
+
pricing: {
|
|
15569
|
+
prompt: pricing(`$3.00 / 1M tokens`),
|
|
15570
|
+
output: pricing(`$12.00 / 1M tokens`),
|
|
15571
|
+
},
|
|
15572
|
+
},
|
|
15573
|
+
/**/
|
|
15574
|
+
/**/
|
|
15575
|
+
{
|
|
15576
|
+
modelVariant: 'CHAT',
|
|
15577
|
+
modelTitle: 'o1',
|
|
15578
|
+
modelName: 'o1',
|
|
15579
|
+
modelDescription: "OpenAI's advanced reasoning model with 128K context window focusing on logical problem-solving and analytical thinking. Features exceptional performance on quantitative tasks, step-by-step deduction, and complex technical problems. Maintains 95%+ of o1-preview capabilities with production-ready stability. Ideal for scientific computing, financial analysis, and professional applications.",
|
|
15580
|
+
pricing: {
|
|
15581
|
+
prompt: pricing(`$15.00 / 1M tokens`),
|
|
15582
|
+
output: pricing(`$60.00 / 1M tokens`),
|
|
15583
|
+
},
|
|
15584
|
+
},
|
|
15585
|
+
/**/
|
|
15586
|
+
/**/
|
|
15587
|
+
{
|
|
15588
|
+
modelVariant: 'CHAT',
|
|
15589
|
+
modelTitle: 'o3-mini',
|
|
15590
|
+
modelName: 'o3-mini',
|
|
15591
|
+
modelDescription: 'Cost-effective reasoning model with 128K context window optimized for academic and scientific problem-solving. Features efficient performance on STEM tasks with specialized capabilities in mathematics, physics, chemistry, and computer science. Offers 80% of O1 performance on technical domains at significantly lower cost. Ideal for educational applications and research support.',
|
|
15592
|
+
pricing: {
|
|
15593
|
+
prompt: pricing(`$3.00 / 1M tokens`),
|
|
15594
|
+
output: pricing(`$12.00 / 1M tokens`),
|
|
15595
|
+
},
|
|
15596
|
+
},
|
|
15597
|
+
/**/
|
|
15598
|
+
/**/
|
|
15599
|
+
{
|
|
15600
|
+
modelVariant: 'CHAT',
|
|
15601
|
+
modelTitle: 'o1-mini-2024-09-12',
|
|
15602
|
+
modelName: 'o1-mini-2024-09-12',
|
|
15603
|
+
modelDescription: "September 2024 version of O1-mini with 128K context window featuring balanced reasoning capabilities and cost-efficiency. Includes 25% improvement in mathematical accuracy and enhanced performance on coding tasks compared to previous versions. Maintains efficient resource utilization while delivering improved results for analytical applications that don't require the full O1 model.",
|
|
15604
|
+
pricing: {
|
|
15605
|
+
prompt: pricing(`$3.00 / 1M tokens`),
|
|
15606
|
+
output: pricing(`$12.00 / 1M tokens`),
|
|
15607
|
+
},
|
|
15608
|
+
},
|
|
15609
|
+
/**/
|
|
15610
|
+
/**/
|
|
15611
|
+
{
|
|
15612
|
+
modelVariant: 'CHAT',
|
|
15613
|
+
modelTitle: 'gpt-3.5-turbo-16k-0613',
|
|
15614
|
+
modelName: 'gpt-3.5-turbo-16k-0613',
|
|
15615
|
+
modelDescription: "June 2023 version of GPT-3.5 Turbo with extended 16K token context window. Features good handling of longer conversations and documents with improved memory management across extended contexts. Includes knowledge cutoff from September 2021. Maintained for applications specifically designed for this version's behaviors and capabilities.",
|
|
15616
|
+
pricing: {
|
|
15617
|
+
prompt: pricing(`$3.00 / 1M tokens`),
|
|
15618
|
+
output: pricing(`$4.00 / 1M tokens`),
|
|
15619
|
+
},
|
|
15620
|
+
},
|
|
15621
|
+
/**/
|
|
15622
|
+
// <- [🕕]
|
|
15623
|
+
],
|
|
15624
|
+
});
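Editor's note: the `pricing(...)` calls above take a human-readable rate string. A minimal sketch of the conversion this helper presumably performs (the real implementation is defined earlier in this bundle; the `pricingSketch` name is hypothetical):

// Hypothetical illustration, not the bundled helper: convert `$X / 1M tokens` into a per-token rate.
function pricingSketch(rate) {
    const match = /^\$([0-9.]+) \/ 1M tokens$/.exec(rate);
    if (match === null) {
        throw new Error(`Unexpected pricing format: ${rate}`);
    }
    return Number(match[1]) / 1_000_000; // <- price of a single token
}

pricingSketch(`$0.80 / 1M tokens`); // -> 0.0000008 per token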
+/**
+ * Note: [🤖] Add models of new variant
+ * TODO: [🧠] Some mechanism to propagate unsureness
+ * TODO: [🎰] Some mechanism to auto-update available models
+ * TODO: [🎰][👮‍♀️] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing
+ * TODO: [🧠][👮‍♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...
+ * @see https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4
+ * @see https://openai.com/api/pricing/
+ * @see /other/playground/playground.ts
+ * TODO: [🍓][💩] Make better
+ * TODO: Change model titles to human eg: "gpt-4-turbo-2024-04-09" -> "GPT-4 Turbo (2024-04-09)"
+ * TODO: [🚸] Not all models are compatible with JSON mode, add this information here and use it
+ * Note: [💞] Ignore a discrepancy between file name and entity name
+ */
+
+/**
+ * Computes the usage of the OpenAI API based on the response from OpenAI
+ *
+ * @param promptContent The content of the prompt
+ * @param resultContent The content of the result (for embedding prompts or failed prompts pass empty string)
+ * @param rawResponse The raw response from OpenAI API
+ * @throws {PipelineExecutionError} If the usage is not defined in the response from OpenAI
+ * @private internal utility of `OpenAiExecutionTools`
+ */
+function computeOpenAiUsage(promptContent, // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
+resultContent, rawResponse) {
+    var _a, _b;
+    if (rawResponse.usage === undefined) {
+        throw new PipelineExecutionError('The usage is not defined in the response from OpenAI');
+    }
+    if (((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.prompt_tokens) === undefined) {
+        throw new PipelineExecutionError('In OpenAI response `usage.prompt_tokens` not defined');
+    }
+    const inputTokens = rawResponse.usage.prompt_tokens;
+    const outputTokens = ((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completion_tokens) || 0;
+    let isUncertain = false;
+    let modelInfo = OPENAI_MODELS.find((model) => model.modelName === rawResponse.model);
+    if (modelInfo === undefined) {
+        // Note: Model is not in the list of known models, fallback to the family of the models and mark price as uncertain
+        modelInfo = OPENAI_MODELS.find((model) => (rawResponse.model || SALT_NONCE).startsWith(model.modelName));
+        if (modelInfo !== undefined) {
+            isUncertain = true;
+        }
+    }
+    let price;
+    if (modelInfo === undefined || modelInfo.pricing === undefined) {
+        price = uncertainNumber();
+    }
+    else {
+        price = uncertainNumber(inputTokens * modelInfo.pricing.prompt + outputTokens * modelInfo.pricing.output, isUncertain);
+    }
+    return {
+        price,
+        input: {
+            tokensCount: uncertainNumber(rawResponse.usage.prompt_tokens),
+            ...computeUsageCounts(promptContent),
+        },
+        output: {
+            tokensCount: uncertainNumber(outputTokens),
+            ...computeUsageCounts(resultContent),
+        },
+    };
+}
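Editor's note: a worked example of the price arithmetic above, using the `gpt-4o` rates from `OPENAI_MODELS` and a hypothetical minimal response object (only the fields the function actually reads):

const usage = computeOpenAiUsage('Hello', 'Hi there!', {
    model: 'gpt-4o',
    usage: { prompt_tokens: 1000, completion_tokens: 500 },
});
// price = 1000 * ($5.00 / 1M) + 500 * ($15.00 / 1M) = $0.005 + $0.0075 = $0.0125,
// wrapped in uncertainNumber(0.0125) because 'gpt-4o' matches the model list exactly.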
+/**
+ * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
+ */
+
+/**
+ * Parses an OpenAI error message to identify which parameter is unsupported
+ *
+ * @param errorMessage The error message from OpenAI API
+ * @returns The parameter name that is unsupported, or null if not an unsupported parameter error
+ * @private utility of LLM Tools
+ */
+function parseUnsupportedParameterError(errorMessage) {
+    // Pattern to match "Unsupported value: 'parameter' does not support ..."
+    const unsupportedValueMatch = errorMessage.match(/Unsupported value:\s*'([^']+)'\s*does not support/i);
+    if (unsupportedValueMatch === null || unsupportedValueMatch === void 0 ? void 0 : unsupportedValueMatch[1]) {
+        return unsupportedValueMatch[1];
+    }
+    // Pattern to match "'parameter' of type ... is not supported with this model"
+    const parameterTypeMatch = errorMessage.match(/'([^']+)'\s*of type.*is not supported with this model/i);
+    if (parameterTypeMatch === null || parameterTypeMatch === void 0 ? void 0 : parameterTypeMatch[1]) {
+        return parameterTypeMatch[1];
+    }
+    return null;
+}
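Editor's note: the two regexes above cover the two message shapes this failure takes; with sample messages (illustrative, not captured API output):

parseUnsupportedParameterError("Unsupported value: 'temperature' does not support 0.2 with this model.");
// -> 'temperature' (first pattern)
parseUnsupportedParameterError("'response_format' of type 'json_object' is not supported with this model.");
// -> 'response_format' (second pattern)
parseUnsupportedParameterError('Rate limit exceeded.');
// -> null (not an unsupported-parameter error)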
+/**
+ * Creates a copy of model requirements with the specified parameter removed
+ *
+ * @param modelRequirements Original model requirements
+ * @param unsupportedParameter The parameter to remove
+ * @returns New model requirements without the unsupported parameter
+ * @private utility of LLM Tools
+ */
+function removeUnsupportedModelRequirement(modelRequirements, unsupportedParameter) {
+    const newRequirements = { ...modelRequirements };
+    // Map of parameter names that might appear in error messages to ModelRequirements properties
+    const parameterMap = {
+        temperature: 'temperature',
+        max_tokens: 'maxTokens',
+        maxTokens: 'maxTokens',
+        seed: 'seed',
+    };
+    const propertyToRemove = parameterMap[unsupportedParameter];
+    if (propertyToRemove && propertyToRemove in newRequirements) {
+        delete newRequirements[propertyToRemove];
+    }
+    return newRequirements;
+}
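Editor's note: the map translates the wire-format spelling (`max_tokens`) onto the camel-cased requirement (`maxTokens`), so stripping works regardless of which spelling the error message uses; for example:

removeUnsupportedModelRequirement(
    { modelVariant: 'CHAT', modelName: 'o1-mini', maxTokens: 1024, temperature: 0.7 },
    'max_tokens', // <- spelling as it appears in the OpenAI error message
);
// -> { modelVariant: 'CHAT', modelName: 'o1-mini', temperature: 0.7 }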
+/**
+ * Checks if an error is an "Unsupported value" error from OpenAI
+ * @param error The error to check
+ * @returns true if this is an unsupported parameter error
+ * @private utility of LLM Tools
+ */
+function isUnsupportedParameterError(error) {
+    const errorMessage = error.message.toLowerCase();
+    return (errorMessage.includes('unsupported value:') ||
+        errorMessage.includes('is not supported with this model') ||
+        errorMessage.includes('does not support'));
+}
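Editor's note: this predicate is the cheap pre-filter before the regex parsing above; any of the three substrings marks the error as a stripping candidate (sample errors, not captured API output):

isUnsupportedParameterError(new Error("Unsupported value: 'temperature' does not support 0.2."));
// -> true, so callers try parseUnsupportedParameterError + removeUnsupportedModelRequirement
isUnsupportedParameterError(new Error('Incorrect API key provided.'));
// -> false, so the error is re-thrown as-is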
|
|
15747
|
+
|
|
15748
|
+
/**
|
|
15749
|
+
* Execution Tools for calling OpenAI API or other OpenAI compatible provider
|
|
15750
|
+
*
|
|
15751
|
+
* @public exported from `@promptbook/openai`
|
|
15752
|
+
*/
|
|
15753
|
+
class OpenAiCompatibleExecutionTools {
|
|
15754
|
+
// Removed retriedUnsupportedParameters and attemptHistory instance fields
|
|
15755
|
+
/**
|
|
15756
|
+
* Creates OpenAI compatible Execution Tools.
|
|
15757
|
+
*
|
|
15758
|
+
* @param options which are relevant are directly passed to the OpenAI compatible client
|
|
15759
|
+
*/
|
|
15760
|
+
constructor(options) {
|
|
15761
|
+
this.options = options;
|
|
15762
|
+
/**
|
|
15763
|
+
* OpenAI API client.
|
|
15764
|
+
*/
|
|
15765
|
+
this.client = null;
|
|
15766
|
+
// TODO: Allow configuring rate limits via options
|
|
15767
|
+
this.limiter = new Bottleneck({
|
|
15768
|
+
minTime: 60000 / (this.options.maxRequestsPerMinute || DEFAULT_MAX_REQUESTS_PER_MINUTE),
|
|
15769
|
+
});
|
|
15770
|
+
}
|
|
15771
|
+
async getClient() {
|
|
15772
|
+
if (this.client === null) {
|
|
15773
|
+
// Note: Passing only OpenAI relevant options to OpenAI constructor
|
|
15774
|
+
const openAiOptions = { ...this.options };
|
|
15775
|
+
delete openAiOptions.isVerbose;
|
|
15776
|
+
delete openAiOptions.userId;
|
|
15777
|
+
// Enhanced configuration for better ECONNRESET handling
|
|
15778
|
+
const enhancedOptions = {
|
|
15779
|
+
...openAiOptions,
|
|
15780
|
+
timeout: API_REQUEST_TIMEOUT,
|
|
15781
|
+
maxRetries: CONNECTION_RETRIES_LIMIT,
|
|
15782
|
+
defaultHeaders: {
|
|
15783
|
+
Connection: 'keep-alive',
|
|
15784
|
+
'Keep-Alive': 'timeout=30, max=100',
|
|
15785
|
+
...openAiOptions.defaultHeaders,
|
|
15786
|
+
},
|
|
15787
|
+
};
|
|
15788
|
+
this.client = new OpenAI(enhancedOptions);
|
|
15789
|
+
}
|
|
15790
|
+
return this.client;
|
|
15791
|
+
}
|
|
15792
|
+
/**
|
|
15793
|
+
* Check the `options` passed to `constructor`
|
|
15794
|
+
*/
|
|
15795
|
+
async checkConfiguration() {
|
|
15796
|
+
await this.getClient();
|
|
15797
|
+
// TODO: [🎍] Do here a real check that API is online, working and API key is correct
|
|
15798
|
+
}
|
|
15799
|
+
/**
|
|
15800
|
+
* List all available OpenAI compatible models that can be used
|
|
15801
|
+
*/
|
|
15802
|
+
async listModels() {
|
|
15803
|
+
const client = await this.getClient();
|
|
15804
|
+
const rawModelsList = await client.models.list();
|
|
15805
|
+
const availableModels = rawModelsList.data
|
|
15806
|
+
.sort((a, b) => (a.created > b.created ? 1 : -1))
|
|
15807
|
+
.map((modelFromApi) => {
|
|
15808
|
+
const modelFromList = this.HARDCODED_MODELS.find(({ modelName }) => modelName === modelFromApi.id ||
|
|
15809
|
+
modelName.startsWith(modelFromApi.id) ||
|
|
15810
|
+
modelFromApi.id.startsWith(modelName));
|
|
15811
|
+
if (modelFromList !== undefined) {
|
|
15812
|
+
return modelFromList;
|
|
15813
|
+
}
|
|
15814
|
+
return {
|
|
15815
|
+
modelVariant: 'CHAT',
|
|
15816
|
+
modelTitle: modelFromApi.id,
|
|
15817
|
+
modelName: modelFromApi.id,
|
|
15818
|
+
modelDescription: '',
|
|
15819
|
+
};
|
|
15820
|
+
});
|
|
15821
|
+
return availableModels;
|
|
15822
|
+
}
|
|
15823
|
+
/**
|
|
15824
|
+
* Calls OpenAI compatible API to use a chat model.
|
|
15825
|
+
*/
|
|
15826
|
+
async callChatModel(prompt) {
|
|
15827
|
+
// Deep clone prompt and modelRequirements to avoid mutation across calls
|
|
15828
|
+
const clonedPrompt = JSON.parse(JSON.stringify(prompt));
|
|
15829
|
+
// Use local Set for retried parameters to ensure independence and thread safety
|
|
15830
|
+
const retriedUnsupportedParameters = new Set();
|
|
15831
|
+
return this.callChatModelWithRetry(clonedPrompt, clonedPrompt.modelRequirements, [], retriedUnsupportedParameters);
|
|
15832
|
+
}
|
|
15833
|
+
/**
|
|
15834
|
+
* Internal method that handles parameter retry for chat model calls
|
|
15835
|
+
*/
|
|
15836
|
+
async callChatModelWithRetry(prompt, currentModelRequirements, attemptStack = [], retriedUnsupportedParameters = new Set()) {
|
|
15837
|
+
var _a;
|
|
15838
|
+
if (this.options.isVerbose) {
|
|
15839
|
+
console.info(`💬 ${this.title} callChatModel call`, { prompt, currentModelRequirements });
|
|
15840
|
+
}
|
|
15841
|
+
const { content, parameters, format } = prompt;
|
|
15842
|
+
const client = await this.getClient();
|
|
15843
|
+
// TODO: [☂] Use here more modelRequirements
|
|
15844
|
+
if (currentModelRequirements.modelVariant !== 'CHAT') {
|
|
15845
|
+
throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
|
|
15846
|
+
}
|
|
15847
|
+
const modelName = currentModelRequirements.modelName || this.getDefaultChatModel().modelName;
|
|
15848
|
+
const modelSettings = {
|
|
15849
|
+
model: modelName,
|
|
15850
|
+
max_tokens: currentModelRequirements.maxTokens,
|
|
15851
|
+
temperature: currentModelRequirements.temperature,
|
|
15852
|
+
// <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
|
|
15853
|
+
// <- Note: [🧆]
|
|
15854
|
+
}; // <- TODO: [💩] Guard here types better
|
|
15855
|
+
if (format === 'JSON') {
|
|
15856
|
+
modelSettings.response_format = {
|
|
15857
|
+
type: 'json_object',
|
|
15858
|
+
};
|
|
15859
|
+
}
|
|
15860
|
+
// <- TODO: [🚸] Not all models are compatible with JSON mode
|
|
15861
|
+
// > 'response_format' of type 'json_object' is not supported with this model.
|
|
15862
|
+
const rawPromptContent = templateParameters(content, { ...parameters, modelName });
|
|
15863
|
+
// Convert thread to OpenAI format if present
|
|
15864
|
+
let threadMessages = [];
|
|
15865
|
+
if ('thread' in prompt && Array.isArray(prompt.thread)) {
|
|
15866
|
+
threadMessages = prompt.thread.map((msg) => ({
|
|
15867
|
+
role: msg.role === 'assistant' ? 'assistant' : 'user',
|
|
15868
|
+
content: msg.content,
|
|
15869
|
+
}));
|
|
15870
|
+
}
|
|
15871
|
+
const rawRequest = {
|
|
15872
|
+
...modelSettings,
|
|
15873
|
+
messages: [
|
|
15874
|
+
...(currentModelRequirements.systemMessage === undefined
|
|
15875
|
+
? []
|
|
15876
|
+
: [
|
|
15877
|
+
{
|
|
15878
|
+
role: 'system',
|
|
15879
|
+
content: currentModelRequirements.systemMessage,
|
|
15880
|
+
},
|
|
15881
|
+
]),
|
|
15882
|
+
...threadMessages,
|
|
15883
|
+
{
|
|
15884
|
+
role: 'user',
|
|
15885
|
+
content: rawPromptContent,
|
|
15886
|
+
},
|
|
15887
|
+
],
|
|
15888
|
+
user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString(),
|
|
15889
|
+
};
|
|
15890
|
+
const start = $getCurrentDate();
|
|
15891
|
+
if (this.options.isVerbose) {
|
|
15892
|
+
console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
|
|
15893
|
+
}
|
|
15894
|
+
try {
|
|
15895
|
+
const rawResponse = await this.limiter
|
|
15896
|
+
.schedule(() => this.makeRequestWithNetworkRetry(() => client.chat.completions.create(rawRequest)))
|
|
15897
|
+
.catch((error) => {
|
|
15898
|
+
assertsError(error);
|
|
15899
|
+
if (this.options.isVerbose) {
|
|
15900
|
+
console.info(colors.bgRed('error'), error);
|
|
15901
|
+
}
|
|
15902
|
+
throw error;
|
|
15903
|
+
});
|
|
15904
|
+
if (this.options.isVerbose) {
|
|
15905
|
+
console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
|
|
15906
|
+
}
|
|
15907
|
+
const complete = $getCurrentDate();
|
|
15908
|
+
if (!rawResponse.choices[0]) {
|
|
15909
|
+
throw new PipelineExecutionError(`No choises from ${this.title}`);
|
|
15910
|
+
}
|
|
15911
|
+
if (rawResponse.choices.length > 1) {
|
|
15912
|
+
// TODO: This should be maybe only warning
|
|
15913
|
+
throw new PipelineExecutionError(`More than one choise from ${this.title}`);
|
|
15914
|
+
}
|
|
15915
|
+
const resultContent = rawResponse.choices[0].message.content;
|
|
15916
|
+
const usage = this.computeUsage(content || '', resultContent || '', rawResponse);
|
|
15917
|
+
if (resultContent === null) {
|
|
15918
|
+
throw new PipelineExecutionError(`No response message from ${this.title}`);
|
|
15919
|
+
}
|
|
15920
|
+
return exportJson({
|
|
15921
|
+
name: 'promptResult',
|
|
15922
|
+
message: `Result of \`OpenAiCompatibleExecutionTools.callChatModel\``,
|
|
15923
|
+
order: [],
|
|
15924
|
+
value: {
|
|
15925
|
+
content: resultContent,
|
|
15926
|
+
modelName: rawResponse.model || modelName,
|
|
15927
|
+
timing: {
|
|
15928
|
+
start,
|
|
15929
|
+
complete,
|
|
15930
|
+
},
|
|
15931
|
+
usage,
|
|
15932
|
+
rawPromptContent,
|
|
15933
|
+
rawRequest,
|
|
15934
|
+
rawResponse,
|
|
15935
|
+
// <- [🗯]
|
|
15936
|
+
},
|
|
15937
|
+
});
|
|
15938
|
+
}
|
|
15939
|
+
catch (error) {
|
|
15940
|
+
assertsError(error);
|
|
15941
|
+
// Check if this is an unsupported parameter error
|
|
15942
|
+
if (!isUnsupportedParameterError(error)) {
|
|
15943
|
+
// If we have attemptStack, include it in the error message
|
|
15944
|
+
if (attemptStack.length > 0) {
|
|
15945
|
+
throw new PipelineExecutionError(`All attempts failed. Attempt history:\n` +
|
|
15946
|
+
attemptStack
|
|
15947
|
+
.map((a, i) => ` ${i + 1}. Model: ${a.modelName}` +
|
|
15948
|
+
(a.unsupportedParameter ? `, Stripped: ${a.unsupportedParameter}` : '') +
|
|
15949
|
+
`, Error: ${a.errorMessage}` +
|
|
15950
|
+
(a.stripped ? ' (stripped and retried)' : ''))
|
|
15951
|
+
.join('\n') +
|
|
15952
|
+
`\nFinal error: ${error.message}`);
|
|
15953
|
+
}
|
|
15954
|
+
throw error;
|
|
15955
|
+
}
|
|
15956
|
+
// Parse which parameter is unsupported
|
|
15957
|
+
const unsupportedParameter = parseUnsupportedParameterError(error.message);
|
|
15958
|
+
if (!unsupportedParameter) {
|
|
15959
|
+
if (this.options.isVerbose) {
|
|
15960
|
+
console.warn(colors.bgYellow('Warning'), 'Could not parse unsupported parameter from error:', error.message);
|
|
15961
|
+
}
|
|
15962
|
+
throw error;
|
|
15963
|
+
}
|
|
15964
|
+
// Create a unique key for this model + parameter combination to prevent infinite loops
|
|
15965
|
+
const retryKey = `${modelName}-${unsupportedParameter}`;
|
|
15966
|
+
if (retriedUnsupportedParameters.has(retryKey)) {
|
|
15967
|
+
// Already retried this parameter, throw the error with attemptStack
|
|
15968
|
+
attemptStack.push({
|
|
15969
|
+
modelName,
|
|
15970
|
+
unsupportedParameter,
|
|
15971
|
+
errorMessage: error.message,
|
|
15972
|
+
stripped: true,
|
|
15973
|
+
});
|
|
15974
|
+
throw new PipelineExecutionError(`All attempts failed. Attempt history:\n` +
|
|
15975
|
+
attemptStack
|
|
15976
|
+
.map((a, i) => ` ${i + 1}. Model: ${a.modelName}` +
|
|
15977
|
+
(a.unsupportedParameter ? `, Stripped: ${a.unsupportedParameter}` : '') +
|
|
15978
|
+
`, Error: ${a.errorMessage}` +
|
|
15979
|
+
(a.stripped ? ' (stripped and retried)' : ''))
|
|
15980
|
+
.join('\n') +
|
|
15981
|
+
`\nFinal error: ${error.message}`);
|
|
15982
|
+
}
|
|
15983
|
+
// Mark this parameter as retried
|
|
15984
|
+
retriedUnsupportedParameters.add(retryKey);
|
|
15985
|
+
// Log warning in verbose mode
|
|
15986
|
+
if (this.options.isVerbose) {
|
|
15987
|
+
console.warn(colors.bgYellow('Warning'), `Removing unsupported parameter '${unsupportedParameter}' for model '${modelName}' and retrying request`);
|
|
15988
|
+
}
|
|
15989
|
+
// Add to attemptStack
|
|
15990
|
+
attemptStack.push({
|
|
15991
|
+
modelName,
|
|
15992
|
+
unsupportedParameter,
|
|
15993
|
+
errorMessage: error.message,
|
|
15994
|
+
stripped: true,
|
|
15995
|
+
});
|
|
15996
|
+
// Remove the unsupported parameter and retry
|
|
15997
|
+
const modifiedModelRequirements = removeUnsupportedModelRequirement(currentModelRequirements, unsupportedParameter);
|
|
15998
|
+
return this.callChatModelWithRetry(prompt, modifiedModelRequirements, attemptStack, retriedUnsupportedParameters);
|
|
15999
|
+
}
|
|
16000
|
+
}
|
|
16001
|
+
/**
|
|
16002
|
+
* Calls OpenAI API to use a complete model.
|
|
16003
|
+
*/
|
|
16004
|
+
async callCompletionModel(prompt) {
|
|
16005
|
+
// Deep clone prompt and modelRequirements to avoid mutation across calls
|
|
16006
|
+
const clonedPrompt = JSON.parse(JSON.stringify(prompt));
|
|
16007
|
+
const retriedUnsupportedParameters = new Set();
|
|
16008
|
+
return this.callCompletionModelWithRetry(clonedPrompt, clonedPrompt.modelRequirements, [], retriedUnsupportedParameters);
|
|
16009
|
+
}
|
|
16010
|
+
/**
|
|
16011
|
+
* Internal method that handles parameter retry for completion model calls
|
|
16012
|
+
*/
|
|
16013
|
+
async callCompletionModelWithRetry(prompt, currentModelRequirements, attemptStack = [], retriedUnsupportedParameters = new Set()) {
|
|
16014
|
+
var _a;
|
|
16015
|
+
if (this.options.isVerbose) {
|
|
16016
|
+
console.info(`🖋 ${this.title} callCompletionModel call`, { prompt, currentModelRequirements });
|
|
16017
|
+
}
|
|
16018
|
+
const { content, parameters } = prompt;
|
|
16019
|
+
const client = await this.getClient();
|
|
16020
|
+
// TODO: [☂] Use here more modelRequirements
|
|
16021
|
+
if (currentModelRequirements.modelVariant !== 'COMPLETION') {
|
|
16022
|
+
throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
|
|
16023
|
+
}
|
|
16024
|
+
const modelName = currentModelRequirements.modelName || this.getDefaultCompletionModel().modelName;
|
|
16025
|
+
const modelSettings = {
|
|
16026
|
+
model: modelName,
|
|
16027
|
+
max_tokens: currentModelRequirements.maxTokens,
|
|
16028
|
+
temperature: currentModelRequirements.temperature,
|
|
16029
|
+
};
|
|
16030
|
+
const rawPromptContent = templateParameters(content, { ...parameters, modelName });
|
|
16031
|
+
const rawRequest = {
|
|
16032
|
+
...modelSettings,
|
|
16033
|
+
prompt: rawPromptContent,
|
|
16034
|
+
user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString(),
|
|
16035
|
+
};
|
|
16036
|
+
const start = $getCurrentDate();
|
|
16037
|
+
if (this.options.isVerbose) {
|
|
16038
|
+
console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
|
|
16039
|
+
}
|
|
16040
|
+
try {
|
|
16041
|
+
const rawResponse = await this.limiter
|
|
16042
|
+
.schedule(() => this.makeRequestWithNetworkRetry(() => client.completions.create(rawRequest)))
|
|
16043
|
+
.catch((error) => {
|
|
16044
|
+
assertsError(error);
|
|
16045
|
+
if (this.options.isVerbose) {
|
|
16046
|
+
console.info(colors.bgRed('error'), error);
|
|
16047
|
+
}
|
|
16048
|
+
throw error;
|
|
16049
|
+
});
|
|
16050
|
+
if (this.options.isVerbose) {
|
|
16051
|
+
console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
|
|
16052
|
+
}
|
|
16053
|
+
const complete = $getCurrentDate();
|
|
16054
|
+
if (!rawResponse.choices[0]) {
|
|
16055
|
+
throw new PipelineExecutionError(`No choises from ${this.title}`);
|
|
16056
|
+
}
|
|
16057
|
+
if (rawResponse.choices.length > 1) {
|
|
16058
|
+
throw new PipelineExecutionError(`More than one choise from ${this.title}`);
|
|
16059
|
+
}
|
|
16060
|
+
const resultContent = rawResponse.choices[0].text;
|
|
16061
|
+
const usage = this.computeUsage(content || '', resultContent || '', rawResponse);
|
|
16062
|
+
return exportJson({
|
|
16063
|
+
name: 'promptResult',
|
|
16064
|
+
message: `Result of \`OpenAiCompatibleExecutionTools.callCompletionModel\``,
|
|
16065
|
+
order: [],
|
|
16066
|
+
value: {
|
|
16067
|
+
content: resultContent,
|
|
16068
|
+
modelName: rawResponse.model || modelName,
|
|
16069
|
+
timing: {
|
|
16070
|
+
start,
|
|
16071
|
+
complete,
|
|
16072
|
+
},
|
|
16073
|
+
usage,
|
|
16074
|
+
rawPromptContent,
|
|
16075
|
+
rawRequest,
|
|
16076
|
+
rawResponse,
|
|
16077
|
+
},
|
|
16078
|
+
});
|
|
16079
|
+
}
|
|
16080
|
+
catch (error) {
|
|
16081
|
+
assertsError(error);
|
|
16082
|
+
if (!isUnsupportedParameterError(error)) {
|
|
16083
|
+
if (attemptStack.length > 0) {
|
|
16084
|
+
throw new PipelineExecutionError(`All attempts failed. Attempt history:\n` +
|
|
16085
|
+
attemptStack
|
|
16086
|
+
.map((a, i) => ` ${i + 1}. Model: ${a.modelName}` +
|
|
16087
|
+
(a.unsupportedParameter ? `, Stripped: ${a.unsupportedParameter}` : '') +
|
|
16088
|
+
`, Error: ${a.errorMessage}` +
|
|
16089
|
+
(a.stripped ? ' (stripped and retried)' : ''))
|
|
16090
|
+
.join('\n') +
|
|
16091
|
+
`\nFinal error: ${error.message}`);
|
|
16092
|
+
}
|
|
16093
|
+
throw error;
|
|
16094
|
+
}
|
|
16095
|
+
const unsupportedParameter = parseUnsupportedParameterError(error.message);
|
|
16096
|
+
if (!unsupportedParameter) {
|
|
16097
|
+
if (this.options.isVerbose) {
|
|
16098
|
+
console.warn(colors.bgYellow('Warning'), 'Could not parse unsupported parameter from error:', error.message);
|
|
16099
|
+
}
|
|
16100
|
+
throw error;
|
|
16101
|
+
}
|
|
16102
|
+
const retryKey = `${modelName}-${unsupportedParameter}`;
|
|
16103
|
+
if (retriedUnsupportedParameters.has(retryKey)) {
|
|
16104
|
+
attemptStack.push({
|
|
16105
|
+
modelName,
|
|
16106
|
+
unsupportedParameter,
|
|
16107
|
+
errorMessage: error.message,
|
|
16108
|
+
stripped: true,
|
|
16109
|
+
});
|
|
16110
|
+
throw new PipelineExecutionError(`All attempts failed. Attempt history:\n` +
|
|
16111
|
+
attemptStack
|
|
16112
|
+
.map((a, i) => ` ${i + 1}. Model: ${a.modelName}` +
|
|
16113
|
+
(a.unsupportedParameter ? `, Stripped: ${a.unsupportedParameter}` : '') +
|
|
16114
|
+
`, Error: ${a.errorMessage}` +
|
|
16115
|
+
(a.stripped ? ' (stripped and retried)' : ''))
|
|
16116
|
+
.join('\n') +
|
|
16117
|
+
`\nFinal error: ${error.message}`);
|
|
16118
|
+
}
|
|
16119
|
+
retriedUnsupportedParameters.add(retryKey);
|
|
16120
|
+
if (this.options.isVerbose) {
|
|
16121
|
+
console.warn(colors.bgYellow('Warning'), `Removing unsupported parameter '${unsupportedParameter}' for model '${modelName}' and retrying request`);
|
|
16122
|
+
}
|
|
16123
|
+
attemptStack.push({
|
|
16124
|
+
modelName,
|
|
16125
|
+
unsupportedParameter,
|
|
16126
|
+
errorMessage: error.message,
|
|
16127
|
+
stripped: true,
|
|
16128
|
+
});
|
|
16129
|
+
const modifiedModelRequirements = removeUnsupportedModelRequirement(currentModelRequirements, unsupportedParameter);
|
|
16130
|
+
return this.callCompletionModelWithRetry(prompt, modifiedModelRequirements, attemptStack, retriedUnsupportedParameters);
|
|
16131
|
+
}
|
|
16132
|
+
}
|
|
16133
|
+
/**
|
|
16134
|
+
* Calls OpenAI compatible API to use a embedding model
|
|
16135
|
+
*/
|
|
16136
|
+
async callEmbeddingModel(prompt) {
|
|
16137
|
+
// Deep clone prompt and modelRequirements to avoid mutation across calls
|
|
16138
|
+
const clonedPrompt = JSON.parse(JSON.stringify(prompt));
|
|
16139
|
+
const retriedUnsupportedParameters = new Set();
|
|
16140
|
+
return this.callEmbeddingModelWithRetry(clonedPrompt, clonedPrompt.modelRequirements, [], retriedUnsupportedParameters);
|
|
16141
|
+
}
|
|
16142
|
+
/**
|
|
16143
|
+
* Internal method that handles parameter retry for embedding model calls
|
|
16144
|
+
*/
|
|
16145
|
+
async callEmbeddingModelWithRetry(prompt, currentModelRequirements, attemptStack = [], retriedUnsupportedParameters = new Set()) {
|
|
16146
|
+
if (this.options.isVerbose) {
|
|
16147
|
+
console.info(`🖋 ${this.title} embedding call`, { prompt, currentModelRequirements });
|
|
16148
|
+
}
|
|
16149
|
+
const { content, parameters } = prompt;
|
|
16150
|
+
const client = await this.getClient();
|
|
16151
|
+
if (currentModelRequirements.modelVariant !== 'EMBEDDING') {
|
|
16152
|
+
throw new PipelineExecutionError('Use embed only for EMBEDDING variant');
|
|
16153
|
+
}
|
|
16154
|
+
const modelName = currentModelRequirements.modelName || this.getDefaultEmbeddingModel().modelName;
|
|
16155
|
+
const rawPromptContent = templateParameters(content, { ...parameters, modelName });
|
|
16156
|
+
const rawRequest = {
|
|
16157
|
+
input: rawPromptContent,
|
|
16158
|
+
model: modelName,
|
|
16159
|
+
};
|
|
16160
|
+
const start = $getCurrentDate();
|
|
16161
|
+
if (this.options.isVerbose) {
|
|
16162
|
+
console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
|
|
16163
|
+
}
|
|
16164
|
+
try {
|
|
16165
|
+
const rawResponse = await this.limiter
|
|
16166
|
+
.schedule(() => this.makeRequestWithNetworkRetry(() => client.embeddings.create(rawRequest)))
|
|
16167
|
+
.catch((error) => {
|
|
16168
|
+
assertsError(error);
|
|
16169
|
+
if (this.options.isVerbose) {
|
|
16170
|
+
console.info(colors.bgRed('error'), error);
|
|
16171
|
+
}
|
|
16172
|
+
throw error;
|
|
16173
|
+
});
|
|
16174
|
+
if (this.options.isVerbose) {
|
|
16175
|
+
console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
|
|
16176
|
+
}
|
|
16177
|
+
const complete = $getCurrentDate();
|
|
16178
|
+
if (rawResponse.data.length !== 1) {
|
|
16179
|
+
throw new PipelineExecutionError(`Expected exactly 1 data item in response, got ${rawResponse.data.length}`);
|
|
16180
|
+
}
|
|
16181
|
+
const resultContent = rawResponse.data[0].embedding;
|
|
16182
|
+
const usage = this.computeUsage(content || '', '', rawResponse);
|
|
16183
|
+
return exportJson({
|
|
16184
|
+
name: 'promptResult',
|
|
16185
|
+
message: `Result of \`OpenAiCompatibleExecutionTools.callEmbeddingModel\``,
|
|
16186
|
+
order: [],
|
|
16187
|
+
value: {
|
|
16188
|
+
content: resultContent,
|
|
16189
|
+
modelName: rawResponse.model || modelName,
|
|
16190
|
+
timing: {
|
|
16191
|
+
start,
|
|
16192
|
+
complete,
|
|
16193
|
+
},
|
|
16194
|
+
usage,
|
|
16195
|
+
rawPromptContent,
|
|
16196
|
+
rawRequest,
|
|
16197
|
+
rawResponse,
|
|
16198
|
+
},
|
|
16199
|
+
});
|
|
16200
|
+
}
|
|
16201
|
+
catch (error) {
|
|
16202
|
+
assertsError(error);
|
|
16203
|
+
if (!isUnsupportedParameterError(error)) {
|
|
16204
|
+
if (attemptStack.length > 0) {
|
|
16205
|
+
+                    throw new PipelineExecutionError(`All attempts failed. Attempt history:\n` +
+                        attemptStack
+                            .map((a, i) => ` ${i + 1}. Model: ${a.modelName}` +
+                                (a.unsupportedParameter ? `, Stripped: ${a.unsupportedParameter}` : '') +
+                                `, Error: ${a.errorMessage}` +
+                                (a.stripped ? ' (stripped and retried)' : ''))
+                            .join('\n') +
+                        `\nFinal error: ${error.message}`);
+                }
+                throw error;
+            }
+            const unsupportedParameter = parseUnsupportedParameterError(error.message);
+            if (!unsupportedParameter) {
+                if (this.options.isVerbose) {
+                    console.warn(colors.bgYellow('Warning'), 'Could not parse unsupported parameter from error:', error.message);
+                }
+                throw error;
+            }
+            const retryKey = `${modelName}-${unsupportedParameter}`;
+            if (retriedUnsupportedParameters.has(retryKey)) {
+                attemptStack.push({
+                    modelName,
+                    unsupportedParameter,
+                    errorMessage: error.message,
+                    stripped: true,
+                });
+                throw new PipelineExecutionError(`All attempts failed. Attempt history:\n` +
+                    attemptStack
+                        .map((a, i) => ` ${i + 1}. Model: ${a.modelName}` +
+                            (a.unsupportedParameter ? `, Stripped: ${a.unsupportedParameter}` : '') +
+                            `, Error: ${a.errorMessage}` +
+                            (a.stripped ? ' (stripped and retried)' : ''))
+                        .join('\n') +
+                    `\nFinal error: ${error.message}`);
+            }
+            retriedUnsupportedParameters.add(retryKey);
+            if (this.options.isVerbose) {
+                console.warn(colors.bgYellow('Warning'), `Removing unsupported parameter '${unsupportedParameter}' for model '${modelName}' and retrying request`);
+            }
+            attemptStack.push({
+                modelName,
+                unsupportedParameter,
+                errorMessage: error.message,
+                stripped: true,
+            });
+            const modifiedModelRequirements = removeUnsupportedModelRequirement(currentModelRequirements, unsupportedParameter);
+            return this.callEmbeddingModelWithRetry(prompt, modifiedModelRequirements, attemptStack, retriedUnsupportedParameters);
+        }
+    }
+    // <- Note: [🤖] callXxxModel
+    /**
+     * Get the model that should be used as default
+     */
+    getDefaultModel(defaultModelName) {
+        // Note: Match exact or prefix for model families
+        const model = this.HARDCODED_MODELS.find(({ modelName }) => modelName === defaultModelName || modelName.startsWith(defaultModelName));
+        if (model === undefined) {
+            throw new PipelineExecutionError(spaceTrim((block) => `
+                Cannot find model in ${this.title} models with name "${defaultModelName}" which should be used as default.
+
+                Available models:
+                ${block(this.HARDCODED_MODELS.map(({ modelName }) => `- "${modelName}"`).join('\n'))}
+
+                Model "${defaultModelName}" is probably not available anymore, not installed, inaccessible or misconfigured.
+
+            `));
+        }
+        return model;
+    }
+    // <- Note: [🤖] getDefaultXxxModel
+    /**
+     * Makes a request with retry logic for network errors like ECONNRESET
+     */
+    async makeRequestWithNetworkRetry(requestFn) {
+        let lastError;
+        for (let attempt = 1; attempt <= CONNECTION_RETRIES_LIMIT; attempt++) {
+            try {
+                return await requestFn();
+            }
+            catch (error) {
+                assertsError(error);
+                lastError = error;
+                // Check if this is a retryable network error
+                const isRetryableError = this.isRetryableNetworkError(error);
+                if (!isRetryableError || attempt === CONNECTION_RETRIES_LIMIT) {
+                    if (this.options.isVerbose && this.isRetryableNetworkError(error)) {
+                        console.info(colors.bgRed('Final network error after retries'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}:`, error);
+                    }
+                    throw error;
+                }
+                // Calculate exponential backoff delay
+                const baseDelay = 1000; // 1 second
+                const backoffDelay = baseDelay * Math.pow(2, attempt - 1);
+                const jitterDelay = Math.random() * 500; // Add some randomness
+                const totalDelay = backoffDelay + jitterDelay;
+                if (this.options.isVerbose) {
+                    console.info(colors.bgYellow('Retrying network request'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}, waiting ${Math.round(totalDelay)}ms:`, error.message);
+                }
+                // Wait before retrying
+                await new Promise((resolve) => setTimeout(resolve, totalDelay));
+            }
+        }
+        throw lastError;
+    }
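
For orientation, the backoff above works out to roughly 1 s, 2 s, 4 s, ... between attempts, plus up to half a second of jitter. A standalone sketch of the same schedule (the retry limit of 3 is assumed here for illustration; the real value is the `CONNECTION_RETRIES_LIMIT` constant defined elsewhere in this bundle):

```js
// Sketch of the exponential backoff schedule used above (limit of 3 is assumed)
const CONNECTION_RETRIES_LIMIT = 3;
for (let attempt = 1; attempt <= CONNECTION_RETRIES_LIMIT; attempt++) {
    const backoffDelay = 1000 * Math.pow(2, attempt - 1); // 1000ms, 2000ms, 4000ms
    const jitterDelay = Math.random() * 500; // de-synchronizes many clients retrying at once
    console.log(`attempt ${attempt}: wait ~${Math.round(backoffDelay + jitterDelay)}ms`);
}
```
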
+    /**
+     * Determines if an error is retryable (network-related errors)
+     */
+    isRetryableNetworkError(error) {
+        const errorMessage = error.message.toLowerCase();
+        const errorCode = error.code;
+        // Network connection errors that should be retried
+        const retryableErrors = [
+            'econnreset',
+            'enotfound',
+            'econnrefused',
+            'etimedout',
+            'socket hang up',
+            'network error',
+            'fetch failed',
+            'connection reset',
+            'connection refused',
+            'timeout',
+        ];
+        // Check error message
+        if (retryableErrors.some((retryableError) => errorMessage.includes(retryableError))) {
+            return true;
+        }
+        // Check error code
+        if (errorCode && retryableErrors.includes(errorCode.toLowerCase())) {
+            return true;
+        }
+        // Check for specific HTTP status codes that are retryable
+        const errorWithStatus = error;
+        const httpStatus = errorWithStatus.status || errorWithStatus.statusCode;
+        if (httpStatus && [429, 500, 502, 503, 504].includes(httpStatus)) {
+            return true;
+        }
+        return false;
+    }
+}
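
To see what the classifier above treats as retryable: it checks message substrings, Node-style `code` values, and HTTP status codes, in that order. A few illustrative errors (invented for the example, not taken from the package):

```js
// Illustrative errors; `tools` stands for any instance exposing isRetryableNetworkError
const reset = Object.assign(new Error('read ECONNRESET'), { code: 'ECONNRESET' });
const overloaded = Object.assign(new Error('Service Unavailable'), { status: 503 });
const badKey = Object.assign(new Error('Incorrect API key provided'), { status: 401 });
// tools.isRetryableNetworkError(reset) -> true (message and code both match 'econnreset')
// tools.isRetryableNetworkError(overloaded) -> true (HTTP 503 is in the retryable status list)
// tools.isRetryableNetworkError(badKey) -> false (auth errors fail fast, no retry)
```
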
+/**
+ * TODO: [🛄] Some way how to re-wrap the errors from `OpenAiCompatibleExecutionTools`
+ * TODO: [🛄] Maybe make custom `OpenAiCompatibleError`
+ * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
+ * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
+ * TODO: [🧠][🦢] Make reverse adapter from LlmExecutionTools to OpenAI-compatible:
+ */
+
+/**
+ * Profile for OpenAI provider
+ */
+const OPENAI_PROVIDER_PROFILE = {
+    name: 'OPENAI',
+    fullname: 'OpenAI GPT',
+    color: '#10a37f',
+};
+/**
+ * Execution Tools for calling OpenAI API
+ *
+ * @public exported from `@promptbook/openai`
+ */
+class OpenAiExecutionTools extends OpenAiCompatibleExecutionTools {
+    constructor() {
+        super(...arguments);
+        /**
+         * Computes the usage of the OpenAI API based on the response from OpenAI
+         */
+        this.computeUsage = computeOpenAiUsage;
+        // <- Note: [🤖] getDefaultXxxModel
+    }
+    /* <- TODO: [🍚] `, Destroyable` */
+    get title() {
+        return 'OpenAI';
+    }
+    get description() {
+        return 'Use all models provided by OpenAI';
+    }
+    get profile() {
+        return OPENAI_PROVIDER_PROFILE;
+    }
+    /*
+    Note: Commenting this out to avoid circular dependency
+    /**
+     * Create (sub)tools for calling OpenAI API Assistants
+     *
+     * @param assistantId Which assistant to use
+     * @returns Tools for calling OpenAI API Assistants with same token
+     * /
+    public createAssistantSubtools(assistantId: string_token): OpenAiAssistantExecutionTools {
+        return new OpenAiAssistantExecutionTools({ ...this.options, assistantId });
+    }
+    */
+    /**
+     * List all available models (non dynamically)
+     *
+     * Note: Purpose of this is to provide more information about models than standard listing from API
+     */
+    get HARDCODED_MODELS() {
+        return OPENAI_MODELS;
+    }
+    /**
+     * Default model for chat variant.
+     */
+    getDefaultChatModel() {
+        return this.getDefaultModel('gpt-5');
+    }
+    /**
+     * Default model for completion variant.
+     */
+    getDefaultCompletionModel() {
+        return this.getDefaultModel('gpt-3.5-turbo-instruct');
+    }
+    /**
+     * Default model for embedding variant.
+     */
+    getDefaultEmbeddingModel() {
+        return this.getDefaultModel('text-embedding-3-large');
+    }
+}
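
A minimal usage sketch of `OpenAiExecutionTools`; the options shape (`apiKey`, `isVerbose`) is assumed from the OpenAI-compatible base class and the wider Promptbook API, not shown in this diff:

```js
// Hypothetical usage; the constructor options come from the OpenAI-compatible base class
const openAiTools = new OpenAiExecutionTools({ apiKey: process.env.OPENAI_API_KEY, isVerbose: false });
console.info(openAiTools.title); // 'OpenAI'
console.info(openAiTools.getDefaultChatModel().modelName); // the 'gpt-5' family entry from OPENAI_MODELS
```
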
+
+/**
+ * Execution Tools for calling OpenAI API Assistants
+ *
+ * This is useful for calling OpenAI API with a single assistant, for more wide usage use `OpenAiExecutionTools`.
+ *
+ * Note: [🦖] There are several different things in Promptbook:
+ * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
+ * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
+ * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
+ * - `OpenAiAssistantExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities, recommended for usage in `Agent` or `AgentLlmExecutionTools`
+ *
+ * @public exported from `@promptbook/openai`
+ */
+class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
+    /**
+     * Creates OpenAI Execution Tools.
+     *
+     * @param options which are relevant are directly passed to the OpenAI client
+     */
+    constructor(options) {
+        var _a;
+        if (options.isProxied) {
+            throw new NotYetImplementedError(`Proxy mode is not yet implemented for OpenAI assistants`);
+        }
+        super(options);
+        this.isCreatingNewAssistantsAllowed = false;
+        this.assistantId = options.assistantId;
+        this.isCreatingNewAssistantsAllowed = (_a = options.isCreatingNewAssistantsAllowed) !== null && _a !== void 0 ? _a : false;
+        if (this.assistantId === null && !this.isCreatingNewAssistantsAllowed) {
+            throw new NotAllowed(`Assistant ID is null and creating new assistants is not allowed - this configuration does not make sense`);
+        }
+        // <- TODO: !!! `OpenAiAssistantExecutionToolsOptions` - Allow `assistantId: null` together with `isCreatingNewAssistantsAllowed: true`
+        // TODO: [👱] Make limiter same as in `OpenAiExecutionTools`
+    }
+    get title() {
+        return 'OpenAI Assistant';
+    }
+    get description() {
+        return 'Use single assistant provided by OpenAI';
+    }
+    /**
+     * Calls OpenAI API to use a chat model.
+     */
+    async callChatModel(prompt) {
+        var _a, _b, _c;
+        if (this.options.isVerbose) {
+            console.info('💬 OpenAI callChatModel call', { prompt });
+        }
+        const { content, parameters, modelRequirements /*, format*/ } = prompt;
+        const client = await this.getClient();
+        // TODO: [☂] Use here more modelRequirements
+        if (modelRequirements.modelVariant !== 'CHAT') {
+            throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
+        }
+        // TODO: [👨‍👨‍👧‍👧] Remove:
+        for (const key of ['maxTokens', 'modelName', 'seed', 'temperature']) {
+            if (modelRequirements[key] !== undefined) {
+                throw new NotYetImplementedError(`In \`OpenAiAssistantExecutionTools\` you cannot specify \`${key}\``);
+            }
+        }
+        /*
+        TODO: [👨‍👨‍👧‍👧] Implement all of this for Assistants
+        const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
+        const modelSettings = {
+            model: modelName,
+
+            temperature: modelRequirements.temperature,
+
+            // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
+            // <- Note: [🧆]
+        } as OpenAI.Chat.Completions.CompletionCreateParamsNonStreaming; // <- TODO: Guard here types better
+
+        if (format === 'JSON') {
+            modelSettings.response_format = {
+                type: 'json_object',
+            };
+        }
+        */
+        // <- TODO: [🚸] Not all models are compatible with JSON mode
+        // > 'response_format' of type 'json_object' is not supported with this model.
+        const rawPromptContent = templateParameters(content, {
+            ...parameters,
+            modelName: 'assistant',
+            // <- [🧠] What is the best value here
+        });
+        const rawRequest = {
+            // TODO: [👨‍👨‍👧‍👧] ...modelSettings,
+            // TODO: [👨‍👨‍👧‍👧][🧠] What about system message for assistants, does it make sense - combination of OpenAI assistants with Promptbook Personas
+            assistant_id: this.assistantId,
+            thread: {
+                messages: 'thread' in prompt &&
+                    Array.isArray(prompt.thread)
+                    ? prompt.thread.map((msg) => ({
+                        role: msg.role === 'assistant' ? 'assistant' : 'user',
+                        content: msg.content,
+                    }))
+                    : [{ role: 'user', content: rawPromptContent }],
+            },
+            // <- TODO: Add user identification here> user: this.options.user,
+        };
+        const start = $getCurrentDate();
+        let complete;
+        if (this.options.isVerbose) {
+            console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
+        }
+        const stream = await client.beta.threads.createAndRunStream(rawRequest);
+        stream.on('connect', () => {
+            if (this.options.isVerbose) {
+                console.info('connect', stream.currentEvent);
+            }
+        });
+        stream.on('messageDelta', (messageDelta) => {
+            var _a;
+            if (this.options.isVerbose &&
+                messageDelta &&
+                messageDelta.content &&
+                messageDelta.content[0] &&
+                messageDelta.content[0].type === 'text') {
+                console.info('messageDelta', (_a = messageDelta.content[0].text) === null || _a === void 0 ? void 0 : _a.value);
+            }
+            // <- TODO: [🐚] Make streaming and running tasks working
+        });
+        stream.on('messageCreated', (message) => {
+            if (this.options.isVerbose) {
+                console.info('messageCreated', message);
+            }
+        });
+        stream.on('messageDone', (message) => {
+            if (this.options.isVerbose) {
+                console.info('messageDone', message);
+            }
+        });
+        const rawResponse = await stream.finalMessages();
+        if (this.options.isVerbose) {
+            console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
+        }
+        if (rawResponse.length !== 1) {
+            throw new PipelineExecutionError(`There is NOT 1 BUT ${rawResponse.length} finalMessages from OpenAI`);
+        }
+        if (rawResponse[0].content.length !== 1) {
+            throw new PipelineExecutionError(`There is NOT 1 BUT ${rawResponse[0].content.length} finalMessages content from OpenAI`);
+        }
+        if (((_a = rawResponse[0].content[0]) === null || _a === void 0 ? void 0 : _a.type) !== 'text') {
+            throw new PipelineExecutionError(`There is NOT 'text' BUT ${(_b = rawResponse[0].content[0]) === null || _b === void 0 ? void 0 : _b.type} finalMessages content type from OpenAI`);
+        }
+        const resultContent = (_c = rawResponse[0].content[0]) === null || _c === void 0 ? void 0 : _c.text.value;
+        // <- TODO: [🧠] There are also annotations, maybe use them
+        // eslint-disable-next-line prefer-const
+        complete = $getCurrentDate();
+        const usage = UNCERTAIN_USAGE;
+        // <- TODO: [🥘] Compute real usage for assistant
+        // ?> const usage = computeOpenAiUsage(content, resultContent || '', rawResponse);
+        if (resultContent === null) {
+            throw new PipelineExecutionError('No response message from OpenAI');
+        }
+        return exportJson({
+            name: 'promptResult',
+            message: `Result of \`OpenAiAssistantExecutionTools.callChatModel\``,
+            order: [],
+            value: {
+                content: resultContent,
+                modelName: 'assistant',
+                // <- TODO: [🥘] Detect used model in assistant
+                // ?> model: rawResponse.model || modelName,
+                timing: {
+                    start,
+                    complete,
+                },
+                usage,
+                rawPromptContent,
+                rawRequest,
+                rawResponse,
+                // <- [🗯]
+            },
+        });
+    }
+    async playground() {
+        const client = await this.getClient();
+        // List all assistants
+        const assistants = await client.beta.assistants.list();
+        console.log('!!! Assistants:', assistants);
+        // Get details of a specific assistant
+        const assistantId = 'asst_MO8fhZf4dGloCfXSHeLcIik0';
+        const assistant = await client.beta.assistants.retrieve(assistantId);
+        console.log('!!! Assistant Details:', assistant);
+        // Update an assistant
+        const updatedAssistant = await client.beta.assistants.update(assistantId, {
+            name: assistant.name + '(M)',
+            description: 'Updated description via Promptbook',
+            metadata: {
+                [Math.random().toString(36).substring(2, 15)]: new Date().toISOString(),
+            },
+        });
+        console.log('!!! Updated Assistant:', updatedAssistant);
+        await forEver();
+    }
+    async createNewAssistant(options) {
+        if (!this.isCreatingNewAssistantsAllowed) {
+            throw new NotAllowed(`Creating new assistants is not allowed. Set \`isCreatingNewAssistantsAllowed: true\` in options to enable this feature.`);
+        }
+        await this.playground();
+        const { name, instructions } = options;
+        const client = await this.getClient();
+        /*
+        TODO: !!!
+        async function downloadFile(url: string, folder = './tmp'): Promise<string> {
+            const filename = path.basename(url.split('?')[0]);
+            const filepath = path.join(folder, filename);
+
+            if (!fs.existsSync(folder)) fs.mkdirSync(folder);
+
+            const res = await fetch(url);
+            if (!res.ok) throw new Error(`Download error: ${url}`);
+            const buffer = await res.arrayBuffer();
+            fs.writeFileSync(filepath, Buffer.from(buffer));
+            console.log(`📥 File downloaded: ${filename}`);
+
+            return filepath;
+        }
+
+        async function uploadFileToOpenAI(filepath: string) {
+            const file = await client.files.create({
+                file: fs.createReadStream(filepath),
+                purpose: 'assistants',
+            });
+            console.log(`⬆️ File uploaded to OpenAI: ${file.filename} (${file.id})`);
+            return file;
+        }
+
+        // 🌐 URL addresses of files to upload
+        const fileUrls = [
+            'https://raw.githubusercontent.com/vercel/next.js/canary/packages/next/README.md',
+            'https://raw.githubusercontent.com/openai/openai-cookbook/main/examples/How_to_call_the_Assistants_API_with_Node.js.ipynb',
+        ];
+
+        // 1️⃣ Download files from URL
+        const localFiles = [];
+        for (const url of fileUrls) {
+            const filepath = await downloadFile(url);
+            localFiles.push(filepath);
+        }
+
+        // 2️⃣ Upload files to OpenAI
+        const uploadedFiles = [];
+        for (const filepath of localFiles) {
+            const file = await uploadFileToOpenAI(filepath);
+            uploadedFiles.push(file.id);
+        }
+        */
+        alert('!!!! Creating new OpenAI assistant');
+        // 3️⃣ Create assistant with uploaded files
+        const assistant = await client.beta.assistants.create({
+            name,
+            description: 'Assistant created via Promptbook',
+            model: 'gpt-4o',
+            instructions,
+            tools: [/* TODO: [🧠] Maybe add { type: 'code_interpreter' }, */ { type: 'file_search' }],
+            // !!!! file_ids: uploadedFiles,
+        });
+        console.log(`✅ Assistant created: ${assistant.id}`);
+        // TODO: !!!! Try listing existing assistants
+        // TODO: !!!! Try marking existing assistants by DISCRIMINANT
+        // TODO: !!!! Allow to update and reconnect to existing assistants
+        return new OpenAiAssistantExecutionTools({
+            ...this.options,
+            isCreatingNewAssistantsAllowed: false,
+            assistantId: assistant.id,
+        });
+    }
+    /**
+     * Discriminant for type guards
+     */
+    get discriminant() {
+        return DISCRIMINANT;
+    }
+    /**
+     * Type guard to check if given `LlmExecutionTools` are instanceof `OpenAiAssistantExecutionTools`
+     *
+     * Note: This is useful when you can possibly have multiple versions of `@promptbook/openai` installed
+     */
+    static isOpenAiAssistantExecutionTools(llmExecutionTools) {
+        return llmExecutionTools.discriminant === DISCRIMINANT;
+    }
+}
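
The static type guard above deliberately compares a string discriminant rather than using `instanceof`, so it keeps working when two copies of `@promptbook/openai` coexist in one dependency tree (each copy has its own class identity, but both carry the same `DISCRIMINANT`). A sketch, with an invented options object:

```js
// Sketch only; the options are illustrative, not the full documented shape
const assistantTools = new OpenAiAssistantExecutionTools({
    apiKey: process.env.OPENAI_API_KEY, // assumed option name from the base class
    assistantId: 'asst_xxxxxxxxxxxx', // an existing assistant id
});
console.log(OpenAiAssistantExecutionTools.isOpenAiAssistantExecutionTools(assistantTools)); // true
// An instance from a different installed copy of the package would also pass,
// because only the discriminant string is compared, never the class identity.
```
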
+/**
+ * Discriminant for type guards
+ *
+ * @private const of `OpenAiAssistantExecutionTools`
+ */
+const DISCRIMINANT = 'OPEN_AI_ASSISTANT_V1';
+/**
+ * TODO: [🧠][🧙‍♂️] Maybe there can be some wizard for those who want to use just OpenAI
+ * TODO: Maybe make custom OpenAiError
+ * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
+ * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
+ */
+
+/**
+ * Execution Tools for calling LLM models with a predefined agent "soul"
+ * This wraps underlying LLM execution tools and applies agent-specific system prompts and requirements
+ *
+ * Note: [🦖] There are several different things in Promptbook:
+ * - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
+ * - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
+ * - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
+ * - `OpenAiAssistantExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities, recommended for usage in `Agent` or `AgentLlmExecutionTools`
+ *
+ * @public exported from `@promptbook/core`
+ */
+class AgentLlmExecutionTools {
+    /**
+     * Creates new AgentLlmExecutionTools
+     *
+     * @param llmTools The underlying LLM execution tools to wrap
+     * @param agentSource The agent source string that defines the agent's behavior
+     */
+    constructor(options) {
+        this.options = options;
+        /**
+         * Cached model requirements to avoid re-parsing the agent source
+         */
+        this._cachedModelRequirements = null;
+        /**
+         * Cached parsed agent information
+         */
+        this._cachedAgentInfo = null;
+    }
+    /**
+     * Get cached or parse agent information
+     */
+    getAgentInfo() {
+        if (this._cachedAgentInfo === null) {
+            this._cachedAgentInfo = parseAgentSource(this.options.agentSource);
+        }
+        return this._cachedAgentInfo;
+    }
+    /**
+     * Get cached or create agent model requirements
+     */
+    async getAgentModelRequirements() {
+        if (this._cachedModelRequirements === null) {
+            // Get available models from underlying LLM tools for best model selection
+            const availableModels = await this.options.llmTools.listModels();
+            this._cachedModelRequirements = await createAgentModelRequirements(this.options.agentSource, undefined, // Let the function pick the best model
+            availableModels);
+        }
+        return this._cachedModelRequirements;
+    }
+    get title() {
+        const agentInfo = this.getAgentInfo();
+        return (agentInfo.agentName || 'Agent');
+    }
+    get description() {
+        const agentInfo = this.getAgentInfo();
+        return agentInfo.personaDescription || 'AI Agent with predefined personality and behavior';
+    }
+    get profile() {
+        const agentInfo = this.getAgentInfo();
+        if (!agentInfo.agentName) {
+            return undefined;
+        }
+        return {
+            name: agentInfo.agentName.toUpperCase().replace(/\s+/g, '_'),
+            fullname: agentInfo.agentName,
+            color: agentInfo.meta.color || '#6366f1',
+            avatarSrc: agentInfo.meta.image,
+        };
+    }
+    checkConfiguration() {
+        // Check underlying tools configuration
+        return this.options.llmTools.checkConfiguration();
+    }
+    /**
+     * Returns a virtual model name representing the agent behavior
+     */
+    get modelName() {
+        const hash = SHA256(hexEncoder.parse(this.options.agentSource))
+            // <- TODO: [🥬] Encapsulate sha256 to some private utility function
+            .toString( /* hex */);
+        // <- TODO: [🥬] Make some system for hashes and ids of promptbook
+        const agentId = hash.substring(0, 10);
+        // <- TODO: [🥬] Make some system for hashes and ids of promptbook
+        return (normalizeToKebabCase(this.title) + '-' + agentId);
+    }
+    listModels() {
+        return [
+            {
+                modelName: this.modelName,
+                modelVariant: 'CHAT',
+                modelTitle: `${this.title} (Agent Chat Default)`,
+                modelDescription: `Chat model with agent behavior: ${this.description}`,
+            },
+            // <- Note: We only list a single "virtual" agent model here as this wrapper only supports chat prompts
+        ];
+    }
+    /**
+     * Calls the chat model with agent-specific system prompt and requirements
+     */
+    async callChatModel(prompt) {
+        if (!this.options.llmTools.callChatModel) {
+            throw new Error('Underlying LLM execution tools do not support chat model calls');
+        }
+        // Ensure we're working with a chat prompt
+        if (prompt.modelRequirements.modelVariant !== 'CHAT') {
             throw new Error('AgentLlmExecutionTools only supports chat prompts');
         }
-        const chatPrompt = prompt;
-        // Get agent model requirements (cached with best model selection)
         const modelRequirements = await this.getAgentModelRequirements();
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        const chatPrompt = prompt;
+        let underlyingLlmResult;
+        if (OpenAiAssistantExecutionTools.isOpenAiAssistantExecutionTools(this.options.llmTools)) {
+            // <- TODO: !!! Check also `isCreatingNewAssistantsAllowed` and warn about it
+            const assistant = await this.options.llmTools.createNewAssistant({
+                name: this.title,
+                instructions: modelRequirements.systemMessage,
+            });
+            // <- TODO: !!! Cache the assistant in prepareCache
+            underlyingLlmResult = await assistant.callChatModel(chatPrompt);
+        }
+        else {
+            // Create modified chat prompt with agent system message
+            const modifiedChatPrompt = {
+                ...chatPrompt,
+                modelRequirements: {
+                    ...chatPrompt.modelRequirements,
+                    ...modelRequirements,
+                    // Prepend agent system message to existing system message
+                    systemMessage: modelRequirements.systemMessage +
+                        (chatPrompt.modelRequirements.systemMessage
+                            ? `\n\n${chatPrompt.modelRequirements.systemMessage}`
+                            : ''),
+                },
+            };
+            underlyingLlmResult = await this.options.llmTools.callChatModel(modifiedChatPrompt);
+        }
         let content = underlyingLlmResult.content;
         // Note: Cleanup the AI artifacts from the content
         content = humanizeAiText(content);
@@ -14901,15 +16880,11 @@ class AgentLlmExecutionTools {
  * @public exported from `@promptbook/core`
  */
 const createAgentLlmExecutionTools = Object.assign((options) => {
-    return new AgentLlmExecutionTools(options
+    return new AgentLlmExecutionTools(options);
 }, {
     packageName: '@promptbook/core',
     className: 'AgentLlmExecutionTools',
 });
-/**
- * TODO: [🧠] Consider adding validation for agent source format
- * TODO: [🧠] Consider adding options for caching behavior
- */
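
A sketch of wiring the factory above to a concrete provider. The `llmTools`/`agentSource` option names follow the constructor JSDoc earlier in this diff; the agent source itself is invented for the example. Note that `callChatModel` prepends the agent's system message to any system message already on the prompt:

```js
// Hypothetical example; the agent source content is invented
const agentTools = createAgentLlmExecutionTools({
    llmTools: new OpenAiExecutionTools({ apiKey: process.env.OPENAI_API_KEY }),
    agentSource: book`
        Blanka
        PERSONA A friendly support agent
    `,
});
console.info(agentTools.title); // 'Blanka', parsed from the agent source
console.info(agentTools.modelName); // e.g. 'blanka-1a2b3c4d5e' (kebab-cased title + 10-char SHA256 prefix)
```
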
 
 /**
  * Metadata for Agent LLM execution tools
@@ -15540,7 +17515,7 @@ function book(strings, ...values) {
             `
         `));
     }
-    return bookString;
+    return padBook(bookString);
 }
 /**
  * TODO: [🧠][🈴] Where is the best location for this file
@@ -15870,5 +17845,306 @@ class PrefixStorage {
     }
 }
 
-
+/**
+ * Register for book transpilers.
+ *
+ * Note: `$` is used to indicate that this interacts with the global scope
+ * @singleton Only one instance of each register is created per build, but there can be more instances across different builds or environments.
+ * @see https://github.com/webgptorg/promptbook/issues/249
+ *
+ * @public exported from `@promptbook/core`
+ */
+const $bookTranspilersRegister = new $Register('book_transpilers');
+/**
+ * TODO: [®] DRY Register logic
+ */
+
+/**
+ * Converts a book into a 1:1 formatted markdown
+ *
+ * @public exported from `@promptbook/core`
+ */
+const FormattedBookInMarkdownTranspiler = {
+    name: 'formatted-book-in-markdown',
+    title: 'Formatted Book in Markdown',
+    packageName: '@promptbook/core',
+    className: 'FormattedBookInMarkdownTranspiler',
+    transpileBook(book, tools, options) {
+        let lines = book.trim( /* <- Note: Not using `spaceTrim` because its not needed */).split('\n');
+        if (lines[0]) {
+            lines[0] = `**<ins>${lines[0]}</ins>**`;
+        }
+        for (let i = 1; i < lines.length; i++) {
+            let line = lines[i];
+            line = line === null || line === void 0 ? void 0 : line.split('PERSONA').join('**PERSONA**');
+            line = line === null || line === void 0 ? void 0 : line.split('RULE').join('**RULE**');
+            line = line === null || line === void 0 ? void 0 : line.split('META').join('**META**');
+            line = line === null || line === void 0 ? void 0 : line.split('KNOWLEDGE').join('**KNOWLEDGE**');
+            line = line === null || line === void 0 ? void 0 : line.split('ACTION').join('**ACTION**');
+            // <- TODO: !!! Unhardcode these commitments
+            lines[i] = line;
+        }
+        // lines = lines.map((line) => `> ${line}`);
+        lines = lines.map((line) => `${line}<br/>`);
+        return lines.join('\n');
+    },
+};
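
To make the transformation above concrete: the first line becomes bold and underlined, known commitment keywords on later lines are bolded, and every line gets an explicit `<br/>`. Input and expected output for an invented two-line book:

```js
// Hypothetical input for FormattedBookInMarkdownTranspiler.transpileBook
const input = 'Blanka\nPERSONA A friendly support agent';
// Expected output, following the logic above:
// **<ins>Blanka</ins>**<br/>
// **PERSONA** A friendly support agent<br/>
```
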
+
+/**
+ * Transpiler to Javascript code using OpenAI SDK.
+ *
+ * @public exported from `@promptbook/core`
+ */
+const OpenAiSdkTranspiler = {
+    name: 'openai-sdk',
+    title: 'OpenAI SDK',
+    packageName: '@promptbook/core',
+    className: 'OpenAiSdkTranspiler',
+    async transpileBook(book, tools, options) {
+        const { agentName } = await parseAgentSource(book);
+        const modelRequirements = await createAgentModelRequirements(book);
+        const { commitments } = parseAgentSourceWithCommitments(book);
+        const knowledgeCommitments = commitments.filter((commitment) => commitment.type === 'KNOWLEDGE');
+        const directKnowledge = knowledgeCommitments
+            .map((commitment) => commitment.content.trim())
+            .filter((content) => {
+            try {
+                new URL(content);
+                return false;
+            }
+            catch (_a) {
+                return true;
+            }
+        });
+        const knowledgeSources = knowledgeCommitments
+            .map((commitment) => commitment.content.trim())
+            .filter((content) => {
+            try {
+                new URL(content);
+                return true;
+            }
+            catch (_a) {
+                return false;
+            }
+        });
+        const KNOWLEDGE_THRESHOLD = 1000;
+        if (directKnowledge.join('\n').length > KNOWLEDGE_THRESHOLD || knowledgeSources.length > 0) {
+            return spaceTrim((block) => `
+                #!/usr/bin/env node
+
+                import * as dotenv from 'dotenv';
+                dotenv.config({ path: '.env' });
+
+                import { spaceTrim } from '@promptbook/utils';
+                import OpenAI from 'openai';
+                import readline from 'readline';
+                import { Document, VectorStoreIndex, SimpleDirectoryReader } from 'llamaindex';
+
+                // ---- CONFIG ----
+                const client = new OpenAI({
+                    apiKey: process.env.OPENAI_API_KEY,
+                });
+
+                // ---- KNOWLEDGE ----
+                const knowledge = ${block(JSON.stringify(directKnowledge, null, 4) /* <- TODO: Use here Promptbook stringify */)};
+                const knowledgeSources = ${block(JSON.stringify(knowledgeSources, null, 4) /* <- TODO: Use here Promptbook stringify */)};
+                let index;
+
+                async function setupKnowledge() {
+                    const documents = knowledge.map((text) => new Document({ text }));
+
+                    for (const source of knowledgeSources) {
+                        try {
+                            // Note: SimpleDirectoryReader is a bit of a misnomer, it can read single files
+                            const reader = new SimpleDirectoryReader();
+                            const sourceDocuments = await reader.loadData(source);
+                            documents.push(...sourceDocuments);
+                        } catch (error) {
+                            console.error(\`Error loading knowledge from \${source}:\`, error);
+                        }
+                    }
+
+                    if (documents.length > 0) {
+                        index = await VectorStoreIndex.fromDocuments(documents);
+                        console.log('🧠 Knowledge base prepared.');
+                    }
+                }
+
+                // ---- CLI SETUP ----
+                const rl = readline.createInterface({
+                    input: process.stdin,
+                    output: process.stdout,
+                });
+
+                const chatHistory = [
+                    {
+                        role: 'system',
+                        content: spaceTrim(\`
+                            ${block(modelRequirements.systemMessage)}
+                        \`),
+                    },
+                ];
+
+                async function ask(question) {
+                    let context = '';
+                    if (index) {
+                        const retriever = index.asRetriever();
+                        const relevantNodes = await retriever.retrieve(question);
+                        context = relevantNodes.map((node) => node.getContent()).join('\\n\\n');
+                    }
+
+                    const userMessage = spaceTrim(\`
+                        ${block(spaceTrim(`
+                            Here is some additional context to help you answer the question:
+                            \${context}
+
+                            ---
+
+                            My question is:
+                            \${question}
+                        `))}
+                    \`);
+
+
+                    chatHistory.push({ role: 'user', content: userMessage });
+
+                    const response = await client.chat.completions.create({
+                        model: 'gpt-4o',
+                        messages: chatHistory,
+                        temperature: ${modelRequirements.temperature},
+                    });
+
+                    const answer = response.choices[0].message.content;
+                    console.log('\\n🧠 ${agentName}:', answer, '\\n');
+
+                    chatHistory.push({ role: 'assistant', content: answer });
+                    promptUser();
+                }
+
+                function promptUser() {
+                    rl.question('💬 You: ', (input) => {
+                        if (input.trim().toLowerCase() === 'exit') {
+                            console.log('👋 Bye!');
+                            rl.close();
+                            return;
+                        }
+                        ask(input);
+                    });
+                }
+
+                (async () => {
+                    await setupKnowledge();
+                    console.log("🤖 Chat with ${agentName} (type 'exit' to quit)\\n");
+                    promptUser();
+                })();
+            `);
+        }
+        const source = spaceTrim((block) => `
+
+            #!/usr/bin/env node
+
+            import * as dotenv from 'dotenv';
+
+            dotenv.config({ path: '.env' });
+
+            import { spaceTrim } from '@promptbook/utils';
+            import OpenAI from 'openai';
+            import readline from 'readline';
+
+            // ---- CONFIG ----
+            const client = new OpenAI({
+                apiKey: process.env.OPENAI_API_KEY,
+            });
+
+            // ---- CLI SETUP ----
+            const rl = readline.createInterface({
+                input: process.stdin,
+                output: process.stdout,
+            });
+
+            const chatHistory = [
+                {
+                    role: 'system',
+                    content: spaceTrim(\`
+                        ${block(modelRequirements.systemMessage)}
+                    \`),
+                },
+            ];
+
+            async function ask(question) {
+                chatHistory.push({ role: 'user', content: question });
+
+                const response = await client.chat.completions.create({
+                    model: 'gpt-4o',
+                    messages: chatHistory,
+                    temperature: ${modelRequirements.temperature},
+                });
+
+                const answer = response.choices[0].message.content;
+                console.log('\\n🧠 ${agentName}:', answer, '\\n');
+
+                chatHistory.push({ role: 'assistant', content: answer });
+                promptUser();
+            }
+
+            function promptUser() {
+                rl.question('💬 You: ', (input) => {
+                    if (input.trim().toLowerCase() === 'exit') {
+                        console.log('👋 Bye!');
+                        rl.close();
+                        return;
+                    }
+                    ask(input);
+                });
+            }
+
+            console.log("🤖 Chat with ${agentName} (type 'exit' to quit)\\n");
+            promptUser();
+
+        `);
+        return source;
+    },
+};
+
+/**
+ * Provide information about Promptbook, engine version, book language version, servers, ...
+ *
+ * @param options Which information to include
+ * @returns Information about Promptbook in markdown format
+ *
+ * @public exported from `@promptbook/core`
+ */
+function aboutPromptbookInformation(options) {
+    const { isServersInfoIncluded = true } = options || {};
+    const fullInfoPieces = [];
+    const basicInfo = spaceTrim(`
+
+        # ${NAME}
+
+        ${CLAIM}
+
+        - [Promptbook engine version \`${PROMPTBOOK_ENGINE_VERSION}\`](https://github.com/webgptorg/promptbook)
+        - [Book language version \`${BOOK_LANGUAGE_VERSION}\`](https://github.com/webgptorg/book)
+
+    `);
+    fullInfoPieces.push(basicInfo);
+    if (isServersInfoIncluded) {
+        const serversInfo = spaceTrim((block) => `
+
+            ## Servers
+
+            ${block(REMOTE_SERVER_URLS.map(({ title, urls, isAnonymousModeAllowed, description }, index) => `${index + 1}. ${title} ${description}
+            ${isAnonymousModeAllowed ? '🐱‍💻 ' : ''} ${urls.join(', ')}
+            `).join('\n'))}
+        `);
+        fullInfoPieces.push(serversInfo);
+    }
+    const fullInfo = spaceTrim(fullInfoPieces.join('\n\n'));
+    return fullInfo;
+}
+/**
+ * TODO: [🗽] Unite branding and make single place for it
+ */
+
+export { $bookTranspilersRegister, $llmToolsMetadataRegister, $llmToolsRegister, $scrapersMetadataRegister, $scrapersRegister, ADMIN_EMAIL, ADMIN_GITHUB_NAME, API_REQUEST_TIMEOUT, AbstractFormatError, Agent, AgentLlmExecutionTools, AuthenticationError, BIG_DATASET_TRESHOLD, BOOK_LANGUAGE_VERSION, BlackholeStorage, BoilerplateError, BoilerplateFormfactorDefinition, CLAIM, CLI_APP_ID, CallbackInterfaceTools, ChatbotFormfactorDefinition, CollectionError, CompletionFormfactorDefinition, CsvFormatError, CsvFormatParser, DEFAULT_AGENTS_DIRNAME, DEFAULT_BOOK, DEFAULT_BOOKS_DIRNAME, DEFAULT_BOOK_OUTPUT_PARAMETER_NAME, DEFAULT_BOOK_TITLE, DEFAULT_CSV_SETTINGS, DEFAULT_DOWNLOAD_CACHE_DIRNAME, DEFAULT_EXECUTION_CACHE_DIRNAME, DEFAULT_GET_PIPELINE_COLLECTION_FUNCTION_NAME, DEFAULT_INTERMEDIATE_FILES_STRATEGY, DEFAULT_IS_AUTO_INSTALLED, DEFAULT_IS_VERBOSE, DEFAULT_MAX_EXECUTION_ATTEMPTS, DEFAULT_MAX_FILE_SIZE, DEFAULT_MAX_KNOWLEDGE_SOURCES_SCRAPING_DEPTH, DEFAULT_MAX_KNOWLEDGE_SOURCES_SCRAPING_TOTAL, DEFAULT_MAX_PARALLEL_COUNT, DEFAULT_MAX_REQUESTS_PER_MINUTE, DEFAULT_PIPELINE_COLLECTION_BASE_FILENAME, DEFAULT_PROMPT_TASK_TITLE, DEFAULT_REMOTE_SERVER_URL, DEFAULT_SCRAPE_CACHE_DIRNAME, DEFAULT_TASK_SIMULATED_DURATION_MS, DEFAULT_TASK_TITLE, EXPECTATION_UNITS, EnvironmentMismatchError, ExecutionReportStringOptionsDefaults, ExpectError, FAILED_VALUE_PLACEHOLDER, FORMFACTOR_DEFINITIONS, FormattedBookInMarkdownTranspiler, GENERIC_PIPELINE_INTERFACE, GeneratorFormfactorDefinition, GenericFormfactorDefinition, ImageGeneratorFormfactorDefinition, KnowledgeScrapeError, LimitReachedError, MANDATORY_CSV_SETTINGS, MAX_FILENAME_LENGTH, MODEL_ORDERS, MODEL_TRUST_LEVELS, MODEL_VARIANTS, MatcherFormfactorDefinition, MemoryStorage, MissingToolsError, MultipleLlmExecutionTools, NAME, NonTaskSectionTypes, NotAllowed, NotFoundError, NotYetImplementedCommitmentDefinition, NotYetImplementedError, ORDER_OF_PIPELINE_JSON, OpenAiSdkTranspiler, PADDING_LINES, PENDING_VALUE_PLACEHOLDER, PLAYGROUND_APP_ID, PROMPTBOOK_CHAT_COLOR, PROMPTBOOK_COLOR, PROMPTBOOK_ENGINE_VERSION, PROMPTBOOK_ERRORS, PROMPTBOOK_LOGO_URL, PROMPTBOOK_SYNTAX_COLORS, ParseError, PipelineExecutionError, PipelineLogicError, PipelineUrlError, PrefixStorage, PromptbookFetchError, REMOTE_SERVER_URLS, RESERVED_PARAMETER_NAMES, SET_IS_VERBOSE, SectionTypes, SheetsFormfactorDefinition, TaskTypes, TextFormatParser, TranslatorFormfactorDefinition, UNCERTAIN_USAGE, UNCERTAIN_ZERO_VALUE, USER_CHAT_COLOR, UnexpectedError, WrappedError, ZERO_USAGE, ZERO_VALUE, _AgentMetadata, _AgentRegistration, _AnthropicClaudeMetadataRegistration, _AzureOpenAiMetadataRegistration, _BoilerplateScraperMetadataRegistration, _DeepseekMetadataRegistration, _DocumentScraperMetadataRegistration, _GoogleMetadataRegistration, _LegacyDocumentScraperMetadataRegistration, _MarkdownScraperMetadataRegistration, _MarkitdownScraperMetadataRegistration, _OllamaMetadataRegistration, _OpenAiAssistantMetadataRegistration, _OpenAiCompatibleMetadataRegistration, _OpenAiMetadataRegistration, _PdfScraperMetadataRegistration, _WebsiteScraperMetadataRegistration, aboutPromptbookInformation, addUsage, book, cacheLlmTools, compilePipeline, computeCosineSimilarity, countUsage, createAgentLlmExecutionTools, createAgentModelRequirements, createAgentModelRequirementsWithCommitments, createBasicAgentModelRequirements, createEmptyAgentModelRequirements, createLlmToolsFromConfiguration, createPipelineCollectionFromJson, createPipelineCollectionFromPromise, createPipelineCollectionFromUrl, createPipelineExecutor, createPipelineSubcollection, embeddingVectorToString, executionReportJsonToString, extractParameterNamesFromTask, filterModels, generatePlaceholderAgentProfileImageUrl, getAllCommitmentDefinitions, getAllCommitmentTypes, getCommitmentDefinition, getPipelineInterface, getSingleLlmExecutionTools, identificationToPromptbookToken, isCommitmentSupported, isPassingExpectations, isPipelineImplementingInterface, isPipelineInterfacesEqual, isPipelinePrepared, isValidBook, isValidPipelineString, joinLlmExecutionTools, limitTotalUsage, makeKnowledgeSourceHandler, migratePipeline, padBook, parseAgentSource, parseParameters, parsePipeline, pipelineCollectionToJson, pipelineJsonToString, prepareKnowledgePieces, preparePersona, preparePipeline, prettifyPipelineString, promptbookFetch, promptbookTokenToIdentification, unpreparePipeline, usageToHuman, usageToWorktime, validateBook, validatePipeline, validatePipelineString };
 //# sourceMappingURL=index.es.js.map