@promptbook/markdown-utils 0.94.0 → 0.98.0-10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +6 -2
- package/esm/index.es.js +153 -92
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/anthropic-claude.index.d.ts +2 -2
- package/esm/typings/src/_packages/cli.index.d.ts +4 -0
- package/esm/typings/src/_packages/core.index.d.ts +2 -0
- package/esm/typings/src/_packages/openai.index.d.ts +10 -0
- package/esm/typings/src/_packages/types.index.d.ts +14 -4
- package/esm/typings/src/_packages/{wizzard.index.d.ts → wizard.index.d.ts} +6 -2
- package/esm/typings/src/cli/cli-commands/prettify.d.ts +1 -1
- package/esm/typings/src/cli/cli-commands/test-command.d.ts +1 -1
- package/esm/typings/src/config.d.ts +1 -1
- package/esm/typings/src/conversion/archive/loadArchive.d.ts +1 -1
- package/esm/typings/src/conversion/archive/saveArchive.d.ts +2 -2
- package/esm/typings/src/conversion/prettify/renderPipelineMermaidOptions.d.ts +1 -1
- package/esm/typings/src/dialogs/callback/CallbackInterfaceTools.d.ts +1 -1
- package/esm/typings/src/execution/AbstractTaskResult.d.ts +2 -2
- package/esm/typings/src/execution/createPipelineExecutor/$OngoingTaskResult.d.ts +8 -0
- package/esm/typings/src/execution/createPipelineExecutor/00-CreatePipelineExecutorOptions.d.ts +1 -1
- package/esm/typings/src/execution/execution-report/ExecutionPromptReportJson.d.ts +2 -2
- package/esm/typings/src/execution/translation/automatic-translate/translateMessages.d.ts +1 -1
- package/esm/typings/src/execution/utils/validatePromptResult.d.ts +53 -0
- package/esm/typings/src/llm-providers/_common/register/{$provideLlmToolsForWizzardOrCli.d.ts → $provideLlmToolsForWizardOrCli.d.ts} +2 -2
- package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +3 -3
- package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions.d.ts +2 -2
- package/esm/typings/src/llm-providers/anthropic-claude/register-configuration.d.ts +1 -1
- package/esm/typings/src/llm-providers/anthropic-claude/register-constructor.d.ts +1 -1
- package/esm/typings/src/llm-providers/azure-openai/register-configuration.d.ts +1 -1
- package/esm/typings/src/llm-providers/azure-openai/register-constructor.d.ts +1 -1
- package/esm/typings/src/llm-providers/deepseek/register-configuration.d.ts +1 -1
- package/esm/typings/src/llm-providers/deepseek/register-constructor.d.ts +1 -1
- package/esm/typings/src/llm-providers/google/register-configuration.d.ts +1 -1
- package/esm/typings/src/llm-providers/google/register-constructor.d.ts +1 -1
- package/esm/typings/src/llm-providers/ollama/register-configuration.d.ts +1 -1
- package/esm/typings/src/llm-providers/ollama/register-constructor.d.ts +1 -1
- package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionTools.d.ts +1 -1
- package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionToolsOptions.d.ts +2 -2
- package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts +4 -4
- package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionToolsOptions.d.ts +52 -0
- package/esm/typings/src/llm-providers/openai/OpenAiExecutionToolsOptions.d.ts +3 -5
- package/esm/typings/src/llm-providers/openai/createOpenAiCompatibleExecutionTools.d.ts +74 -0
- package/esm/typings/src/llm-providers/openai/register-configuration.d.ts +13 -2
- package/esm/typings/src/llm-providers/openai/register-constructor.d.ts +16 -2
- package/esm/typings/src/remote-server/socket-types/listModels/PromptbookServer_ListModels_Request.d.ts +1 -1
- package/esm/typings/src/scrapers/_boilerplate/createBoilerplateScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/_boilerplate/register-constructor.d.ts +1 -1
- package/esm/typings/src/scrapers/_boilerplate/register-metadata.d.ts +2 -2
- package/esm/typings/src/scrapers/_common/prepareKnowledgePieces.d.ts +1 -1
- package/esm/typings/src/scrapers/_common/register/ScraperAndConverterMetadata.d.ts +1 -1
- package/esm/typings/src/scrapers/document/createDocumentScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/document/register-constructor.d.ts +1 -1
- package/esm/typings/src/scrapers/document/register-metadata.d.ts +2 -2
- package/esm/typings/src/scrapers/document-legacy/createLegacyDocumentScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/document-legacy/register-constructor.d.ts +1 -1
- package/esm/typings/src/scrapers/document-legacy/register-metadata.d.ts +2 -2
- package/esm/typings/src/scrapers/markdown/createMarkdownScraper.d.ts +1 -4
- package/esm/typings/src/scrapers/markdown/register-constructor.d.ts +1 -1
- package/esm/typings/src/scrapers/markdown/register-metadata.d.ts +2 -2
- package/esm/typings/src/scrapers/markitdown/createMarkitdownScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/markitdown/register-constructor.d.ts +1 -1
- package/esm/typings/src/scrapers/markitdown/register-metadata.d.ts +2 -2
- package/esm/typings/src/scrapers/pdf/createPdfScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/pdf/register-constructor.d.ts +1 -1
- package/esm/typings/src/scrapers/pdf/register-metadata.d.ts +2 -2
- package/esm/typings/src/scrapers/website/createWebsiteScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/website/register-constructor.d.ts +1 -1
- package/esm/typings/src/scrapers/website/register-metadata.d.ts +2 -2
- package/esm/typings/src/types/typeAliases.d.ts +1 -1
- package/esm/typings/src/utils/files/listAllFiles.d.ts +1 -1
- package/esm/typings/src/version.d.ts +1 -1
- package/esm/typings/src/{wizzard → wizard}/$getCompiledBook.d.ts +2 -2
- package/esm/typings/src/{wizzard/wizzard.d.ts → wizard/wizard.d.ts} +6 -6
- package/package.json +1 -13
- package/umd/index.umd.js +153 -92
- package/umd/index.umd.js.map +1 -1
package/umd/index.umd.js
CHANGED

@@ -25,7 +25,7 @@
 * @generated
 * @see https://github.com/webgptorg/promptbook
 */
-const PROMPTBOOK_ENGINE_VERSION = '0.
+const PROMPTBOOK_ENGINE_VERSION = '0.98.0-10';
 /**
 * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
 * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -304,7 +304,7 @@
 *
 * @public exported from `@promptbook/core`
 */
-const DEFAULT_MAX_EXECUTION_ATTEMPTS =
+const DEFAULT_MAX_EXECUTION_ATTEMPTS = 7; // <- TODO: [🤹♂️]
 // <- TODO: [🕝] Make also `BOOKS_DIRNAME_ALTERNATIVES`
 // TODO: Just `.promptbook` in config, hardcode subfolders like `download-cache` or `execution-cache`
 /**
@@ -552,7 +552,7 @@
 function keepUnused(...valuesToKeep) {
 }
 
-
var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModels",description:"List of available model names together with their descriptions as JSON",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelsRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n```json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n```\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n```json\n{availableModels}\n```\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. 
By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelsRequirements",format:"JSON",dependentParameterNames:["availableModels","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModels}` List of available model names together with their descriptions as JSON\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelsRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n\\`\\`\\`json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n\\`\\`\\`json\n{availableModels}\n\\`\\`\\`\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelsRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
+
var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModels",description:"List of available model names together with their descriptions as JSON",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelsRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n```json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpful assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n```\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n```json\n{availableModels}\n```\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. 
By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelsRequirements",format:"JSON",dependentParameterNames:["availableModels","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModels}` List of available model names together with their descriptions as JSON\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelsRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n\\`\\`\\`json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpful assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n\\`\\`\\`json\n{availableModels}\n\\`\\`\\`\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelsRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
 
 /**
 * Checks if value is valid email
@@ -701,7 +701,7 @@
 });
 }
 catch (error) {
-// TODO: [🟥] Detect browser / node and make it
+// TODO: [🟥] Detect browser / node and make it colorful
 console.error('There was an error with prettifying the markdown, using the original as the fallback', {
 error,
 html: content,
@@ -974,7 +974,7 @@
 else {
 for (const [subName, subValue] of Object.entries(value)) {
 if (subValue === undefined) {
-// Note: undefined in object is serializable - it is just
+// Note: undefined in object is serializable - it is just omitted
 continue;
 }
 checkSerializableAsJson({ name: `${name}.${subName}`, value: subValue, message });
@@ -1664,7 +1664,7 @@
 
 Note: You have probably forgotten to run "ptbk make" to update the collection
 Note: Pipelines with the same URL are not allowed
-Only
+Only exception is when the pipelines are identical
 
 `));
 }
@@ -2087,7 +2087,7 @@
 throw new Error(spaceTrim__default["default"]((block) => `
 ${block(error.message)}
 
-The JSON text:
+The expected JSON text:
 ${block(value)}
 `));
 }
@@ -2458,12 +2458,12 @@
 get title() {
 return `${llmTools.title} (+usage)`;
 // <- TODO: [🧈] Maybe standartize the suffix when wrapping `LlmExecutionTools` up
-// <- TODO: [🧈][🧠] Does it make
+// <- TODO: [🧈][🧠] Does it make sense to suffix "(+usage)"?
 },
 get description() {
 return `${llmTools.description} (+usage)`;
 // <- TODO: [🧈] Maybe standartize the suffix when wrapping `LlmExecutionTools` up
-// <- TODO: [🧈][🧠] Does it make
+// <- TODO: [🧈][🧠] Does it make sense to suffix "(+usage)"?
 },
 checkConfiguration() {
 return /* not await */ llmTools.checkConfiguration();
@@ -2690,13 +2690,13 @@
 
 Technically, it's not an error, but it's probably not what you want because it does not make sense to use Promptbook without language models.
 `);
-// TODO: [🟥] Detect browser / node and make it
+// TODO: [🟥] Detect browser / node and make it colorful
 console.warn(warningMessage);
 // <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
 /*
 return {
 async listModels() {
-// TODO: [🟥] Detect browser / node and make it
+// TODO: [🟥] Detect browser / node and make it colorful
 console.warn(
 spaceTrim(
 (block) => `
@@ -2972,17 +2972,17 @@
 * Mixes registered scrapers from $scrapersMetadataRegister and $scrapersRegister
 */
 const all = [];
-for (const { packageName, className, mimeTypes, documentationUrl,
+for (const { packageName, className, mimeTypes, documentationUrl, isAvailableInBrowser, } of $scrapersMetadataRegister.list()) {
 if (all.some((item) => item.packageName === packageName && item.className === className)) {
 continue;
 }
-all.push({ packageName, className, mimeTypes, documentationUrl,
+all.push({ packageName, className, mimeTypes, documentationUrl, isAvailableInBrowser });
 }
-for (const { packageName, className, mimeTypes, documentationUrl,
+for (const { packageName, className, mimeTypes, documentationUrl, isAvailableInBrowser, } of $scrapersRegister.list()) {
 if (all.some((item) => item.packageName === packageName && item.className === className)) {
 continue;
 }
-all.push({ packageName, className, mimeTypes, documentationUrl,
+all.push({ packageName, className, mimeTypes, documentationUrl, isAvailableInBrowser });
 }
 for (const { metadata } of availableScrapers) {
 all.push(metadata);
@@ -2994,8 +2994,8 @@
 const isInstalled = $scrapersRegister
 .list()
 .find(({ packageName, className }) => metadata.packageName === packageName && metadata.className === className);
-const
-return { ...metadata, isMetadataAviailable, isInstalled,
+const isAvailableInTools = availableScrapers.some(({ metadata: { packageName, className } }) => metadata.packageName === packageName && metadata.className === className);
+return { ...metadata, isMetadataAviailable, isInstalled, isAvailableInTools };
 });
 if (metadata.length === 0) {
 return spaceTrim__default["default"](`
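
A hedged sketch (illustrative, not part of the published diff) of one merged scraper-status record assembled above: the registry metadata plus the availability flags this version threads through. The MarkdownScraper values appear later in this diff; the remaining flag values are hypothetical.

const exampleScraperStatus = {
    packageName: '@promptbook/markdown-utils',
    className: 'MarkdownScraper',
    mimeTypes: ['text/markdown', 'text/plain'],
    documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/@@',
    isAvailableInBrowser: true, // <- new flag carried through both registers in this version
    isMetadataAviailable: true, // <- spelling as in the bundle; hypothetical value
    isInstalled: true, // <- hypothetical: whether the scraper package is installed
    isAvailableInTools: true, // <- new flag derived from `availableScrapers` above
};
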
@@ -3008,7 +3008,7 @@
 return spaceTrim__default["default"]((block) => `
 Available scrapers are:
 ${block(metadata
-.map(({ packageName, className, isMetadataAviailable, isInstalled, mimeTypes,
+.map(({ packageName, className, isMetadataAviailable, isInstalled, mimeTypes, isAvailableInBrowser, isAvailableInTools, }, i) => {
 const more = [];
 // TODO: [🧠] Maybe use `documentationUrl`
 if (isMetadataAviailable) {
@@ -3017,16 +3017,16 @@
 if (isInstalled) {
 more.push(`🟩 Installed`);
 } // not else
-if (
+if (isAvailableInTools) {
 more.push(`🟦 Available in tools`);
 } // not else
 if (!isMetadataAviailable && isInstalled) {
 more.push(`When no metadata registered but scraper is installed, it is an unexpected behavior`);
 } // not else
-if (!isInstalled &&
+if (!isInstalled && isAvailableInTools) {
 more.push(`When the scraper is not installed but available in tools, it is an unexpected compatibility behavior`);
 } // not else
-if (!
+if (!isAvailableInBrowser) {
 more.push(`Not usable in browser`);
 }
 const moreText = more.length === 0 ? '' : ` *(${more.join('; ')})*`;
@@ -3753,7 +3753,7 @@
 /**
 * TODO: [🧊] In future one preparation can take data from previous preparation and save tokens and time
 * Put `knowledgePieces` into `PrepareKnowledgeOptions`
-* TODO: [🪂] More than max things can run in parallel by
+* TODO: [🪂] More than max things can run in parallel by accident [1,[2a,2b,_],[3a,3b,_]]
 * TODO: [🧠][❎] Do here proper M:N mapping
 * [x] One source can make multiple pieces
 * [ ] One piece can have multiple sources
@@ -4792,6 +4792,68 @@
 * Note: [💝] and [🤠] are interconnected together
 */
 
+/**
+* Validates a prompt result against expectations and format requirements.
+* This function provides a common abstraction for result validation that can be used
+* by both execution logic and caching logic to ensure consistency.
+*
+* @param options - The validation options including result string, expectations, and format
+* @returns Validation result with processed string and validity status
+* @private internal function of `createPipelineExecutor` and `cacheLlmTools`
+*/
+function validatePromptResult(options) {
+const { resultString, expectations, format } = options;
+let processedResultString = resultString;
+let validationError;
+try {
+// TODO: [💝] Unite object for expecting amount and format
+if (format) {
+if (format === 'JSON') {
+if (!isValidJsonString(processedResultString)) {
+// TODO: [🏢] Do more universally via `FormatParser`
+try {
+processedResultString = extractJsonBlock(processedResultString);
+}
+catch (error) {
+keepUnused(error);
+throw new ExpectError(spaceTrim.spaceTrim((block) => `
+Expected valid JSON string
+
+The expected JSON text:
+${block(processedResultString)}
+`));
+}
+}
+}
+else {
+throw new UnexpectedError(`Unknown format "${format}"`);
+}
+}
+// TODO: [💝] Unite object for expecting amount and format
+if (expectations) {
+checkExpectations(expectations, processedResultString);
+}
+return {
+isValid: true,
+processedResultString,
+};
+}
+catch (error) {
+if (error instanceof ExpectError) {
+validationError = error;
+}
+else {
+// Re-throw non-ExpectError errors (like UnexpectedError)
+throw error;
+}
+return {
+isValid: false,
+processedResultString,
+error: validationError,
+};
+}
+}
+
 /**
 * Executes a pipeline task with multiple attempts, including joker and retry logic. Handles different task types
 * (prompt, script, dialog, etc.), applies postprocessing, checks expectations, and updates the execution report.
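
A hedged usage sketch (illustrative, not part of the bundle): the new internal helper takes the raw model output together with the task's expectations and format, and returns either the (possibly post-processed) string or the `ExpectError` explaining the failure. The variable `rawModelOutput` is hypothetical; the call shape mirrors how `createPipelineExecutor` uses the helper later in this diff.

const validationResult = validatePromptResult({
    resultString: rawModelOutput, // <- hypothetical: string returned by the LLM attempt
    expectations: { words: { min: 1, max: 8 } }, // <- illustrative expectations
    format: 'JSON',
});
if (!validationResult.isValid) {
    throw validationResult.error; // <- an `ExpectError`, as before the refactor
}
const resultString = validationResult.processedResultString; // <- may differ from the input, e.g. after JSON block extraction
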
@@ -4809,17 +4871,18 @@
 $resultString: null,
 $expectError: null,
 $scriptPipelineExecutionErrors: [],
+$failedResults: [], // Track all failed attempts
 };
 // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
 const _llms = arrayableToArray(tools.llm);
 const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
-attempts: for (let
-const isJokerAttempt =
-const jokerParameterName = jokerParameterNames[jokerParameterNames.length +
+attempts: for (let attemptIndex = -jokerParameterNames.length; attemptIndex < maxAttempts; attemptIndex++) {
+const isJokerAttempt = attemptIndex < 0;
+const jokerParameterName = jokerParameterNames[jokerParameterNames.length + attemptIndex];
 // TODO: [🧠][🍭] JOKERS, EXPECTATIONS, POSTPROCESSING and FOREACH
 if (isJokerAttempt && !jokerParameterName) {
 throw new UnexpectedError(spaceTrim.spaceTrim((block) => `
-Joker not found in attempt ${
+Joker not found in attempt ${attemptIndex}
 
 ${block(pipelineIdentification)}
 `));
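
A short illustrative sketch (not from the bundle) of the rewritten attempt loop: joker attempts now get negative indices, regular attempts count up from 0, and `isJokerAttempt` is true exactly for the negative ones. The parameter names and `maxAttempts` value here are hypothetical.

const jokerParameterNames = ['jokerA', 'jokerB']; // <- hypothetical joker parameters
const maxAttempts = 3;
for (let attemptIndex = -jokerParameterNames.length; attemptIndex < maxAttempts; attemptIndex++) {
    const isJokerAttempt = attemptIndex < 0;
    const jokerParameterName = jokerParameterNames[jokerParameterNames.length + attemptIndex];
    // Logs -2/'jokerA' and -1/'jokerB' for the joker attempts, then 0, 1, 2 with `jokerParameterName` undefined
    console.info({ attemptIndex, isJokerAttempt, jokerParameterName });
}
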
@@ -5017,35 +5080,18 @@
 }
 }
 // TODO: [💝] Unite object for expecting amount and format
-
-
-
-
-
-
-
-
-
-throw new ExpectError(spaceTrim.spaceTrim((block) => `
-Expected valid JSON string
-
-${block(
-/*<- Note: No need for `pipelineIdentification`, it will be catched and added later */ '')}
-`));
-}
-}
-}
-else {
-throw new UnexpectedError(spaceTrim.spaceTrim((block) => `
-Unknown format "${task.format}"
-
-${block(pipelineIdentification)}
-`));
+// Use the common validation function for both format and expectations
+if (task.format || task.expectations) {
+const validationResult = validatePromptResult({
+resultString: $ongoingTaskResult.$resultString || '',
+expectations: task.expectations,
+format: task.format,
+});
+if (!validationResult.isValid) {
+throw validationResult.error;
 }
-
-
-if (task.expectations) {
-checkExpectations(task.expectations, $ongoingTaskResult.$resultString || '');
+// Update the result string in case format processing modified it (e.g., JSON extraction)
+$ongoingTaskResult.$resultString = validationResult.processedResultString;
 }
 break attempts;
 }
@@ -5054,6 +5100,15 @@
 throw error;
 }
 $ongoingTaskResult.$expectError = error;
+// Store each failed attempt
+if (!Array.isArray($ongoingTaskResult.$failedResults)) {
+$ongoingTaskResult.$failedResults = [];
+}
+$ongoingTaskResult.$failedResults.push({
+attemptIndex,
+result: $ongoingTaskResult.$resultString,
+error: error,
+});
 }
 finally {
 if (!isJokerAttempt &&
@@ -5075,35 +5130,41 @@
 });
 }
 }
-if ($ongoingTaskResult.$expectError !== null &&
+if ($ongoingTaskResult.$expectError !== null && attemptIndex === maxAttempts - 1) {
+// Note: Create a summary of all failures
+const failuresSummary = $ongoingTaskResult.$failedResults
+.map((failure) => spaceTrim.spaceTrim((block) => {
+var _a, _b;
+return `
+Attempt ${failure.attemptIndex + 1}:
+Error ${((_a = failure.error) === null || _a === void 0 ? void 0 : _a.name) || ''}:
+${block((_b = failure.error) === null || _b === void 0 ? void 0 : _b.message.split('\n').map((line) => `> ${line}`).join('\n'))}
+
+Result:
+${block(failure.result === null
+? 'null'
+: spaceTrim.spaceTrim(failure.result)
+.split('\n')
+.map((line) => `> ${line}`)
+.join('\n'))}
+`;
+}))
+.join('\n\n---\n\n');
 throw new PipelineExecutionError(spaceTrim.spaceTrim((block) => {
-var _a
+var _a;
 return `
 LLM execution failed ${maxExecutionAttempts}x
 
 ${block(pipelineIdentification)}
 
----
 The Prompt:
 ${block((((_a = $ongoingTaskResult.$prompt) === null || _a === void 0 ? void 0 : _a.content) || '')
 .split('\n')
 .map((line) => `> ${line}`)
 .join('\n'))}
 
-
-${block(
-.split('\n')
-.map((line) => `> ${line}`)
-.join('\n'))}
-
-Last result:
-${block($ongoingTaskResult.$resultString === null
-? 'null'
-: spaceTrim.spaceTrim($ongoingTaskResult.$resultString)
-.split('\n')
-.map((line) => `> ${line}`)
-.join('\n'))}
----
+All Failed Attempts:
+${block(failuresSummary)}
 `;
 }));
 }
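
A simplified, dependency-free sketch (illustrative only; the real code above relies on `spaceTrim` and `ExpectError`) of how the new `failuresSummary` text is rendered from hypothetical failure records:

const failedResults = [
    { attemptIndex: 0, result: 'Not a JSON at all', error: new Error('Expected valid JSON string') },
    { attemptIndex: 1, result: null, error: new Error('Expected valid JSON string') },
];
const failuresSummary = failedResults
    .map((failure) =>
        [
            `Attempt ${failure.attemptIndex + 1}:`,
            `Error ${failure.error?.name || ''}:`,
            failure.error?.message.split('\n').map((line) => `> ${line}`).join('\n'),
            '',
            'Result:',
            failure.result === null ? 'null' : failure.result.split('\n').map((line) => `> ${line}`).join('\n'),
        ].join('\n'),
    )
    .join('\n\n---\n\n');
console.info(failuresSummary); // <- roughly the text that fills the "All Failed Attempts" section of the thrown PipelineExecutionError
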
@@ -5323,10 +5384,10 @@
 */
 async function getKnowledgeForTask(options) {
 const { tools, preparedPipeline, task, parameters } = options;
-const
-const
+const firstKnowledgePiece = preparedPipeline.knowledgePieces[0];
+const firstKnowledgeIndex = firstKnowledgePiece === null || firstKnowledgePiece === void 0 ? void 0 : firstKnowledgePiece.index[0];
 // <- TODO: Do not use just first knowledge piece and first index to determine embedding model, use also keyword search
-if (
+if (firstKnowledgePiece === undefined || firstKnowledgeIndex === undefined) {
 return ''; // <- Note: Np knowledge present, return empty string
 }
 try {
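
For readability (illustrative only): the transpiled null checks above are the down-leveled form of optional chaining, reading the first index entry of the first prepared knowledge piece; the embedding model name shown is a hypothetical example value.

const firstKnowledgePiece = preparedPipeline.knowledgePieces[0];
const firstKnowledgeIndex = firstKnowledgePiece?.index[0];
// e.g. firstKnowledgeIndex?.modelName === 'text-embedding-3-small' (whichever embedding model prepared the piece)
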
@@ -5337,7 +5398,7 @@
 title: 'Knowledge Search',
 modelRequirements: {
 modelVariant: 'EMBEDDING',
-modelName:
+modelName: firstKnowledgeIndex.modelName,
 },
 content: task.content,
 parameters,
@@ -5345,7 +5406,7 @@
 const taskEmbeddingResult = await llmTools.callEmbeddingModel(taskEmbeddingPrompt);
 const knowledgePiecesWithRelevance = preparedPipeline.knowledgePieces.map((knowledgePiece) => {
 const { index } = knowledgePiece;
-const knowledgePieceIndex = index.find((i) => i.modelName ===
+const knowledgePieceIndex = index.find((i) => i.modelName === firstKnowledgeIndex.modelName);
 // <- TODO: Do not use just first knowledge piece and first index to determine embedding model
 if (knowledgePieceIndex === undefined) {
 return {
@@ -5366,8 +5427,8 @@
 task,
 taskEmbeddingPrompt,
 taskEmbeddingResult,
-
-
+firstKnowledgePiece,
+firstKnowledgeIndex,
 knowledgePiecesWithRelevance,
 knowledgePiecesSorted,
 knowledgePiecesLimited,
@@ -5436,7 +5497,7 @@
 * @private internal utility of `createPipelineExecutor`
 */
 async function executeTask(options) {
-const { currentTask, preparedPipeline, parametersToPass, tools, onProgress, $executionReport, pipelineIdentification, maxExecutionAttempts, maxParallelCount, csvSettings, isVerbose, rootDirname, cacheDirname, intermediateFilesStrategy, isAutoInstalled,
+const { currentTask, preparedPipeline, parametersToPass, tools, onProgress, $executionReport, pipelineIdentification, maxExecutionAttempts, maxParallelCount, csvSettings, isVerbose, rootDirname, cacheDirname, intermediateFilesStrategy, isAutoInstalled, isNotPreparedWarningSuppressed, } = options;
 const priority = preparedPipeline.tasks.length - preparedPipeline.tasks.indexOf(currentTask);
 // Note: Check consistency of used and dependent parameters which was also done in `validatePipeline`, but it’s good to doublecheck
 const usedParameterNames = extractParameterNamesFromTask(currentTask);
@@ -5524,7 +5585,7 @@
 cacheDirname,
 intermediateFilesStrategy,
 isAutoInstalled,
-
+isNotPreparedWarningSuppressed,
 });
 await onProgress({
 outputParameters: {
@@ -5619,7 +5680,7 @@
 }
 return exportJson({
 name: `executionReport`,
-message: `
+message: `Unsuccessful PipelineExecutorResult (with missing parameter {${parameter.name}}) PipelineExecutorResult`,
 order: [],
 value: {
 isSuccessful: false,
@@ -5656,7 +5717,7 @@
 return exportJson({
 name: 'pipelineExecutorResult',
 message: spaceTrim.spaceTrim((block) => `
-
+Unsuccessful PipelineExecutorResult (with extra parameter {${parameter.name}}) PipelineExecutorResult
 
 ${block(pipelineIdentification)}
 `),
@@ -5797,7 +5858,7 @@
 }
 return exportJson({
 name: 'pipelineExecutorResult',
-message: `
+message: `Unsuccessful PipelineExecutorResult (with misc errors) PipelineExecutorResult`,
 order: [],
 value: {
 isSuccessful: false,
@@ -5848,7 +5909,7 @@
 * @public exported from `@promptbook/core`
 */
 function createPipelineExecutor(options) {
-const { pipeline, tools, maxExecutionAttempts = DEFAULT_MAX_EXECUTION_ATTEMPTS, maxParallelCount = DEFAULT_MAX_PARALLEL_COUNT, csvSettings = DEFAULT_CSV_SETTINGS, isVerbose = DEFAULT_IS_VERBOSE,
+const { pipeline, tools, maxExecutionAttempts = DEFAULT_MAX_EXECUTION_ATTEMPTS, maxParallelCount = DEFAULT_MAX_PARALLEL_COUNT, csvSettings = DEFAULT_CSV_SETTINGS, isVerbose = DEFAULT_IS_VERBOSE, isNotPreparedWarningSuppressed = false, cacheDirname = DEFAULT_SCRAPE_CACHE_DIRNAME, intermediateFilesStrategy = DEFAULT_INTERMEDIATE_FILES_STRATEGY, isAutoInstalled = DEFAULT_IS_AUTO_INSTALLED, rootDirname = null, } = options;
 validatePipeline(pipeline);
 const pipelineIdentification = (() => {
 // Note: This is a 😐 implementation of [🚞]
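
A hedged usage sketch (not from the bundle): the newly surfaced `isNotPreparedWarningSuppressed` option can be passed to `createPipelineExecutor` to silence the "Pipeline is not prepared" warning shown in the next hunk; `pipeline` and `tools` stand for values obtained in the usual way.

const pipelineExecutor = createPipelineExecutor({
    pipeline,
    tools,
    isNotPreparedWarningSuppressed: true, // <- default is `false`
});
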
@@ -5865,7 +5926,7 @@
 if (isPipelinePrepared(pipeline)) {
 preparedPipeline = pipeline;
 }
-else if (
+else if (isNotPreparedWarningSuppressed !== true) {
 console.warn(spaceTrim.spaceTrim((block) => `
 Pipeline is not prepared
 
@@ -5898,7 +5959,7 @@
 maxParallelCount,
 csvSettings,
 isVerbose,
-
+isNotPreparedWarningSuppressed,
 rootDirname,
 cacheDirname,
 intermediateFilesStrategy,
@@ -5907,7 +5968,7 @@
 assertsError(error);
 return exportJson({
 name: 'pipelineExecutorResult',
-message: `
+message: `Unsuccessful PipelineExecutorResult, last catch`,
 order: [],
 value: {
 isSuccessful: false,
@@ -5945,7 +6006,7 @@
 className: 'MarkdownScraper',
 mimeTypes: ['text/markdown', 'text/plain'],
 documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/@@',
-
+isAvailableInBrowser: true,
 // <- Note: [🌏] This is the only scraper which makes sense to be available in the browser, for scraping non-markdown sources in the browser use a remote server
 requiredExecutables: [],
 }); /* <- Note: [🤛] */
@@ -5955,7 +6016,7 @@
 * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available known scrapers
 *
 * @public exported from `@promptbook/core`
-* @public exported from `@promptbook/
+* @public exported from `@promptbook/wizard`
 * @public exported from `@promptbook/cli`
 */
 $scrapersMetadataRegister.register(markdownScraperMetadata);
@@ -6054,7 +6115,7 @@
 }
 // ---
 if (!llmTools.callEmbeddingModel) {
-// TODO: [🟥] Detect browser / node and make it
+// TODO: [🟥] Detect browser / node and make it colorful
 console.error('No callEmbeddingModel function provided');
 }
 else {
@@ -6080,7 +6141,7 @@
 if (!(error instanceof PipelineExecutionError)) {
 throw error;
 }
-// TODO: [🟥] Detect browser / node and make it
+// TODO: [🟥] Detect browser / node and make it colorful
 console.error(error, "<- Note: This error is not critical to prepare the pipeline, just knowledge pieces won't have embeddings");
 }
 return {
@@ -6118,7 +6179,7 @@
 * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available known scrapers
 *
 * @public exported from `@promptbook/markdown-utils`
-* @public exported from `@promptbook/
+* @public exported from `@promptbook/wizard`
 * @public exported from `@promptbook/cli`
 */
 const _MarkdownScraperRegistration = $scrapersRegister.register(createMarkdownScraper);