@promptbook/legacy-documents 0.94.0 → 0.98.0-10
This diff shows the changes between publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the package contents as they appear in their respective public registries.
- package/README.md +6 -2
- package/esm/index.es.js +228 -167
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/anthropic-claude.index.d.ts +2 -2
- package/esm/typings/src/_packages/cli.index.d.ts +4 -0
- package/esm/typings/src/_packages/core.index.d.ts +2 -0
- package/esm/typings/src/_packages/openai.index.d.ts +10 -0
- package/esm/typings/src/_packages/types.index.d.ts +14 -4
- package/esm/typings/src/_packages/{wizzard.index.d.ts → wizard.index.d.ts} +6 -2
- package/esm/typings/src/cli/cli-commands/prettify.d.ts +1 -1
- package/esm/typings/src/cli/cli-commands/test-command.d.ts +1 -1
- package/esm/typings/src/config.d.ts +1 -1
- package/esm/typings/src/conversion/archive/loadArchive.d.ts +1 -1
- package/esm/typings/src/conversion/archive/saveArchive.d.ts +2 -2
- package/esm/typings/src/conversion/prettify/renderPipelineMermaidOptions.d.ts +1 -1
- package/esm/typings/src/dialogs/callback/CallbackInterfaceTools.d.ts +1 -1
- package/esm/typings/src/execution/AbstractTaskResult.d.ts +2 -2
- package/esm/typings/src/execution/createPipelineExecutor/$OngoingTaskResult.d.ts +8 -0
- package/esm/typings/src/execution/createPipelineExecutor/00-CreatePipelineExecutorOptions.d.ts +1 -1
- package/esm/typings/src/execution/execution-report/ExecutionPromptReportJson.d.ts +2 -2
- package/esm/typings/src/execution/translation/automatic-translate/translateMessages.d.ts +1 -1
- package/esm/typings/src/execution/utils/validatePromptResult.d.ts +53 -0
- package/esm/typings/src/llm-providers/_common/register/{$provideLlmToolsForWizzardOrCli.d.ts → $provideLlmToolsForWizardOrCli.d.ts} +2 -2
- package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +3 -3
- package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions.d.ts +2 -2
- package/esm/typings/src/llm-providers/anthropic-claude/register-configuration.d.ts +1 -1
- package/esm/typings/src/llm-providers/anthropic-claude/register-constructor.d.ts +1 -1
- package/esm/typings/src/llm-providers/azure-openai/register-configuration.d.ts +1 -1
- package/esm/typings/src/llm-providers/azure-openai/register-constructor.d.ts +1 -1
- package/esm/typings/src/llm-providers/deepseek/register-configuration.d.ts +1 -1
- package/esm/typings/src/llm-providers/deepseek/register-constructor.d.ts +1 -1
- package/esm/typings/src/llm-providers/google/register-configuration.d.ts +1 -1
- package/esm/typings/src/llm-providers/google/register-constructor.d.ts +1 -1
- package/esm/typings/src/llm-providers/ollama/register-configuration.d.ts +1 -1
- package/esm/typings/src/llm-providers/ollama/register-constructor.d.ts +1 -1
- package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionTools.d.ts +1 -1
- package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionToolsOptions.d.ts +2 -2
- package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts +4 -4
- package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionToolsOptions.d.ts +52 -0
- package/esm/typings/src/llm-providers/openai/OpenAiExecutionToolsOptions.d.ts +3 -5
- package/esm/typings/src/llm-providers/openai/createOpenAiCompatibleExecutionTools.d.ts +74 -0
- package/esm/typings/src/llm-providers/openai/register-configuration.d.ts +13 -2
- package/esm/typings/src/llm-providers/openai/register-constructor.d.ts +16 -2
- package/esm/typings/src/remote-server/socket-types/listModels/PromptbookServer_ListModels_Request.d.ts +1 -1
- package/esm/typings/src/scrapers/_boilerplate/createBoilerplateScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/_boilerplate/register-constructor.d.ts +1 -1
- package/esm/typings/src/scrapers/_boilerplate/register-metadata.d.ts +2 -2
- package/esm/typings/src/scrapers/_common/prepareKnowledgePieces.d.ts +1 -1
- package/esm/typings/src/scrapers/_common/register/ScraperAndConverterMetadata.d.ts +1 -1
- package/esm/typings/src/scrapers/document/createDocumentScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/document/register-constructor.d.ts +1 -1
- package/esm/typings/src/scrapers/document/register-metadata.d.ts +2 -2
- package/esm/typings/src/scrapers/document-legacy/createLegacyDocumentScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/document-legacy/register-constructor.d.ts +1 -1
- package/esm/typings/src/scrapers/document-legacy/register-metadata.d.ts +2 -2
- package/esm/typings/src/scrapers/markdown/createMarkdownScraper.d.ts +1 -4
- package/esm/typings/src/scrapers/markdown/register-constructor.d.ts +1 -1
- package/esm/typings/src/scrapers/markdown/register-metadata.d.ts +2 -2
- package/esm/typings/src/scrapers/markitdown/createMarkitdownScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/markitdown/register-constructor.d.ts +1 -1
- package/esm/typings/src/scrapers/markitdown/register-metadata.d.ts +2 -2
- package/esm/typings/src/scrapers/pdf/createPdfScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/pdf/register-constructor.d.ts +1 -1
- package/esm/typings/src/scrapers/pdf/register-metadata.d.ts +2 -2
- package/esm/typings/src/scrapers/website/createWebsiteScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/website/register-constructor.d.ts +1 -1
- package/esm/typings/src/scrapers/website/register-metadata.d.ts +2 -2
- package/esm/typings/src/types/typeAliases.d.ts +1 -1
- package/esm/typings/src/utils/files/listAllFiles.d.ts +1 -1
- package/esm/typings/src/version.d.ts +1 -1
- package/esm/typings/src/{wizzard → wizard}/$getCompiledBook.d.ts +2 -2
- package/esm/typings/src/{wizzard/wizzard.d.ts → wizard/wizard.d.ts} +6 -6
- package/package.json +2 -14
- package/umd/index.umd.js +228 -167
- package/umd/index.umd.js.map +1 -1
package/esm/index.es.js
CHANGED
@@ -28,7 +28,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
 * @generated
 * @see https://github.com/webgptorg/promptbook
 */
-const PROMPTBOOK_ENGINE_VERSION = '0.
+const PROMPTBOOK_ENGINE_VERSION = '0.98.0-10';
 /**
 * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
 * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -177,7 +177,7 @@ const DEFAULT_MAX_PARALLEL_COUNT = 5; // <- TODO: [🤹♂️]
 *
 * @public exported from `@promptbook/core`
 */
-const DEFAULT_MAX_EXECUTION_ATTEMPTS =
+const DEFAULT_MAX_EXECUTION_ATTEMPTS = 7; // <- TODO: [🤹♂️]
 // <- TODO: [🕝] Make also `BOOKS_DIRNAME_ALTERNATIVES`
 // TODO: Just `.promptbook` in config, hardcode subfolders like `download-cache` or `execution-cache`
 /**
@@ -1054,7 +1054,7 @@ async function getScraperIntermediateSource(source, options) {
 * Note: [🟢] Code in this file should never be never released in packages that could be imported into browser environment
 */

var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModels",description:"List of available model names together with their descriptions as JSON",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelsRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n```json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n```\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n```json\n{availableModels}\n```\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. 
By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelsRequirements",format:"JSON",dependentParameterNames:["availableModels","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModels}` List of available model names together with their descriptions as JSON\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelsRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n\\`\\`\\`json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n\\`\\`\\`json\n{availableModels}\n\\`\\`\\`\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelsRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
+
var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModels",description:"List of available model names together with their descriptions as JSON",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelsRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n```json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpful assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n```\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n```json\n{availableModels}\n```\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. 
By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelsRequirements",format:"JSON",dependentParameterNames:["availableModels","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModels}` List of available model names together with their descriptions as JSON\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelsRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n\\`\\`\\`json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpful assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n\\`\\`\\`json\n{availableModels}\n\\`\\`\\`\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelsRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];

 /**
 * Checks if value is valid email
@@ -1211,7 +1211,7 @@ function prettifyMarkdown(content) {
 });
 }
 catch (error) {
-// TODO: [🟥] Detect browser / node and make it
+// TODO: [🟥] Detect browser / node and make it colorful
 console.error('There was an error with prettifying the markdown, using the original as the fallback', {
 error,
 html: content,
@@ -1493,7 +1493,7 @@ function checkSerializableAsJson(options) {
 else {
 for (const [subName, subValue] of Object.entries(value)) {
 if (subValue === undefined) {
-// Note: undefined in object is serializable - it is just
+// Note: undefined in object is serializable - it is just omitted
 continue;
 }
 checkSerializableAsJson({ name: `${name}.${subName}`, value: subValue, message });
@@ -2183,7 +2183,7 @@ class SimplePipelineCollection {

 Note: You have probably forgotten to run "ptbk make" to update the collection
 Note: Pipelines with the same URL are not allowed
-Only
+Only exception is when the pipelines are identical

 `));
 }
@@ -2562,7 +2562,7 @@ function jsonParse(value) {
 throw new Error(spaceTrim$1((block) => `
 ${block(error.message)}

-The JSON text:
+The expected JSON text:
 ${block(value)}
 `));
 }
@@ -2933,12 +2933,12 @@ function countUsage(llmTools) {
 get title() {
 return `${llmTools.title} (+usage)`;
 // <- TODO: [🧈] Maybe standartize the suffix when wrapping `LlmExecutionTools` up
-// <- TODO: [🧈][🧠] Does it make
+// <- TODO: [🧈][🧠] Does it make sense to suffix "(+usage)"?
 },
 get description() {
 return `${llmTools.description} (+usage)`;
 // <- TODO: [🧈] Maybe standartize the suffix when wrapping `LlmExecutionTools` up
-// <- TODO: [🧈][🧠] Does it make
+// <- TODO: [🧈][🧠] Does it make sense to suffix "(+usage)"?
 },
 checkConfiguration() {
 return /* not await */ llmTools.checkConfiguration();
@@ -3165,13 +3165,13 @@ function joinLlmExecutionTools(...llmExecutionTools) {

 Technically, it's not an error, but it's probably not what you want because it does not make sense to use Promptbook without language models.
 `);
-// TODO: [🟥] Detect browser / node and make it
+// TODO: [🟥] Detect browser / node and make it colorful
 console.warn(warningMessage);
 // <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
 /*
 return {
 async listModels() {
-// TODO: [🟥] Detect browser / node and make it
+// TODO: [🟥] Detect browser / node and make it colorful
 console.warn(
 spaceTrim(
 (block) => `
@@ -3447,17 +3447,17 @@ function $registeredScrapersMessage(availableScrapers) {
 * Mixes registered scrapers from $scrapersMetadataRegister and $scrapersRegister
 */
 const all = [];
-for (const { packageName, className, mimeTypes, documentationUrl,
+for (const { packageName, className, mimeTypes, documentationUrl, isAvailableInBrowser, } of $scrapersMetadataRegister.list()) {
 if (all.some((item) => item.packageName === packageName && item.className === className)) {
 continue;
 }
-all.push({ packageName, className, mimeTypes, documentationUrl,
+all.push({ packageName, className, mimeTypes, documentationUrl, isAvailableInBrowser });
 }
-for (const { packageName, className, mimeTypes, documentationUrl,
+for (const { packageName, className, mimeTypes, documentationUrl, isAvailableInBrowser, } of $scrapersRegister.list()) {
 if (all.some((item) => item.packageName === packageName && item.className === className)) {
 continue;
 }
-all.push({ packageName, className, mimeTypes, documentationUrl,
+all.push({ packageName, className, mimeTypes, documentationUrl, isAvailableInBrowser });
 }
 for (const { metadata } of availableScrapers) {
 all.push(metadata);
@@ -3469,8 +3469,8 @@ function $registeredScrapersMessage(availableScrapers) {
 const isInstalled = $scrapersRegister
 .list()
 .find(({ packageName, className }) => metadata.packageName === packageName && metadata.className === className);
-const
-return { ...metadata, isMetadataAviailable, isInstalled,
+const isAvailableInTools = availableScrapers.some(({ metadata: { packageName, className } }) => metadata.packageName === packageName && metadata.className === className);
+return { ...metadata, isMetadataAviailable, isInstalled, isAvailableInTools };
 });
 if (metadata.length === 0) {
 return spaceTrim$1(`
@@ -3483,7 +3483,7 @@ function $registeredScrapersMessage(availableScrapers) {
 return spaceTrim$1((block) => `
 Available scrapers are:
 ${block(metadata
-.map(({ packageName, className, isMetadataAviailable, isInstalled, mimeTypes,
+.map(({ packageName, className, isMetadataAviailable, isInstalled, mimeTypes, isAvailableInBrowser, isAvailableInTools, }, i) => {
 const more = [];
 // TODO: [🧠] Maybe use `documentationUrl`
 if (isMetadataAviailable) {
@@ -3492,16 +3492,16 @@ function $registeredScrapersMessage(availableScrapers) {
 if (isInstalled) {
 more.push(`🟩 Installed`);
 } // not else
-if (
+if (isAvailableInTools) {
 more.push(`🟦 Available in tools`);
 } // not else
 if (!isMetadataAviailable && isInstalled) {
 more.push(`When no metadata registered but scraper is installed, it is an unexpected behavior`);
 } // not else
-if (!isInstalled &&
+if (!isInstalled && isAvailableInTools) {
 more.push(`When the scraper is not installed but available in tools, it is an unexpected compatibility behavior`);
 } // not else
-if (!
+if (!isAvailableInBrowser) {
 more.push(`Not usable in browser`);
 }
 const moreText = more.length === 0 ? '' : ` *(${more.join('; ')})*`;
@@ -3831,7 +3831,7 @@ TODO: [🧊] This is how it can look in future
 /**
 * TODO: [🧊] In future one preparation can take data from previous preparation and save tokens and time
 * Put `knowledgePieces` into `PrepareKnowledgeOptions`
-* TODO: [🪂] More than max things can run in parallel by
+* TODO: [🪂] More than max things can run in parallel by accident [1,[2a,2b,_],[3a,3b,_]]
 * TODO: [🧠][❎] Do here proper M:N mapping
 * [x] One source can make multiple pieces
 * [ ] One piece can have multiple sources
@@ -4639,6 +4639,77 @@ function mapAvailableToExpectedParameters(options) {
 return mappedParameters;
 }

+/**
+* Replaces parameters in template with values from parameters object
+*
+* Note: This function is not places strings into string,
+* It's more complex and can handle this operation specifically for LLM models
+*
+* @param template the template with parameters in {curly} braces
+* @param parameters the object with parameters
+* @returns the template with replaced parameters
+* @throws {PipelineExecutionError} if parameter is not defined, not closed, or not opened
+* @public exported from `@promptbook/utils`
+*/
+function templateParameters(template, parameters) {
+for (const [parameterName, parameterValue] of Object.entries(parameters)) {
+if (parameterValue === RESERVED_PARAMETER_MISSING_VALUE) {
+throw new UnexpectedError(`Parameter \`{${parameterName}}\` has missing value`);
+}
+else if (parameterValue === RESERVED_PARAMETER_RESTRICTED) {
+// TODO: [🍵]
+throw new UnexpectedError(`Parameter \`{${parameterName}}\` is restricted to use`);
+}
+}
+let replacedTemplates = template;
+let match;
+let loopLimit = LOOP_LIMIT;
+while ((match = /^(?<precol>.*){(?<parameterName>\w+)}(.*)/m /* <- Not global */
+.exec(replacedTemplates))) {
+if (loopLimit-- < 0) {
+throw new LimitReachedError('Loop limit reached during parameters replacement in `templateParameters`');
+}
+const precol = match.groups.precol;
+const parameterName = match.groups.parameterName;
+if (parameterName === '') {
+// Note: Skip empty placeholders. It's used to avoid confusion with JSON-like strings
+continue;
+}
+if (parameterName.indexOf('{') !== -1 || parameterName.indexOf('}') !== -1) {
+throw new PipelineExecutionError('Parameter is already opened or not closed');
+}
+if (parameters[parameterName] === undefined) {
+throw new PipelineExecutionError(`Parameter \`{${parameterName}}\` is not defined`);
+}
+let parameterValue = parameters[parameterName];
+if (parameterValue === undefined) {
+throw new PipelineExecutionError(`Parameter \`{${parameterName}}\` is not defined`);
+}
+parameterValue = valueToString(parameterValue);
+// Escape curly braces in parameter values to prevent prompt-injection
+parameterValue = parameterValue.replace(/[{}]/g, '\\$&');
+if (parameterValue.includes('\n') && /^\s*\W{0,3}\s*$/.test(precol)) {
+parameterValue = parameterValue
+.split('\n')
+.map((line, index) => (index === 0 ? line : `${precol}${line}`))
+.join('\n');
+}
+replacedTemplates =
+replacedTemplates.substring(0, match.index + precol.length) +
+parameterValue +
+replacedTemplates.substring(match.index + precol.length + parameterName.length + 2);
+}
+// [💫] Check if there are parameters that are not closed properly
+if (/{\w+$/.test(replacedTemplates)) {
+throw new PipelineExecutionError('Parameter is not closed');
+}
+// [💫] Check if there are parameters that are not opened properly
+if (/^\w+}/.test(replacedTemplates)) {
+throw new PipelineExecutionError('Parameter is not opened');
+}
+return replacedTemplates;
+}
+
 /**
 * Extracts all code blocks from markdown.
 *
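The hunk above only relocates `templateParameters`; per its JSDoc it stays `@public exported from `@promptbook/utils``. A minimal usage sketch based solely on the signature and behaviour shown above (the import path is assumed from that annotation, the parameter values are made up for illustration):

```js
// Sketch, assuming `templateParameters` is re-exported by @promptbook/utils as its JSDoc states
import { templateParameters } from '@promptbook/utils';

// Fills {curly} placeholders from the parameters object; throws PipelineExecutionError
// when a referenced parameter is missing or a brace is left unbalanced.
const greeting = templateParameters('Hello {name}, welcome to {place}!', {
    name: 'Alice',
    place: 'Promptbook',
});

console.log(greeting); // 'Hello Alice, welcome to Promptbook!'
```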
@@ -4741,77 +4812,6 @@ function extractJsonBlock(markdown) {
 * TODO: [🏢] Make this logic part of `JsonFormatParser` or `isValidJsonString`
 */

-/**
-* Replaces parameters in template with values from parameters object
-*
-* Note: This function is not places strings into string,
-* It's more complex and can handle this operation specifically for LLM models
-*
-* @param template the template with parameters in {curly} braces
-* @param parameters the object with parameters
-* @returns the template with replaced parameters
-* @throws {PipelineExecutionError} if parameter is not defined, not closed, or not opened
-* @public exported from `@promptbook/utils`
-*/
-function templateParameters(template, parameters) {
-for (const [parameterName, parameterValue] of Object.entries(parameters)) {
-if (parameterValue === RESERVED_PARAMETER_MISSING_VALUE) {
-throw new UnexpectedError(`Parameter \`{${parameterName}}\` has missing value`);
-}
-else if (parameterValue === RESERVED_PARAMETER_RESTRICTED) {
-// TODO: [🍵]
-throw new UnexpectedError(`Parameter \`{${parameterName}}\` is restricted to use`);
-}
-}
-let replacedTemplates = template;
-let match;
-let loopLimit = LOOP_LIMIT;
-while ((match = /^(?<precol>.*){(?<parameterName>\w+)}(.*)/m /* <- Not global */
-.exec(replacedTemplates))) {
-if (loopLimit-- < 0) {
-throw new LimitReachedError('Loop limit reached during parameters replacement in `templateParameters`');
-}
-const precol = match.groups.precol;
-const parameterName = match.groups.parameterName;
-if (parameterName === '') {
-// Note: Skip empty placeholders. It's used to avoid confusion with JSON-like strings
-continue;
-}
-if (parameterName.indexOf('{') !== -1 || parameterName.indexOf('}') !== -1) {
-throw new PipelineExecutionError('Parameter is already opened or not closed');
-}
-if (parameters[parameterName] === undefined) {
-throw new PipelineExecutionError(`Parameter \`{${parameterName}}\` is not defined`);
-}
-let parameterValue = parameters[parameterName];
-if (parameterValue === undefined) {
-throw new PipelineExecutionError(`Parameter \`{${parameterName}}\` is not defined`);
-}
-parameterValue = valueToString(parameterValue);
-// Escape curly braces in parameter values to prevent prompt-injection
-parameterValue = parameterValue.replace(/[{}]/g, '\\$&');
-if (parameterValue.includes('\n') && /^\s*\W{0,3}\s*$/.test(precol)) {
-parameterValue = parameterValue
-.split('\n')
-.map((line, index) => (index === 0 ? line : `${precol}${line}`))
-.join('\n');
-}
-replacedTemplates =
-replacedTemplates.substring(0, match.index + precol.length) +
-parameterValue +
-replacedTemplates.substring(match.index + precol.length + parameterName.length + 2);
-}
-// [💫] Check if there are parameters that are not closed properly
-if (/{\w+$/.test(replacedTemplates)) {
-throw new PipelineExecutionError('Parameter is not closed');
-}
-// [💫] Check if there are parameters that are not opened properly
-if (/^\w+}/.test(replacedTemplates)) {
-throw new PipelineExecutionError('Parameter is not opened');
-}
-return replacedTemplates;
-}
-
 /**
 * Counts number of characters in the text
 *
@@ -4972,6 +4972,68 @@ function checkExpectations(expectations, value) {
 * Note: [💝] and [🤠] are interconnected together
 */

+/**
+* Validates a prompt result against expectations and format requirements.
+* This function provides a common abstraction for result validation that can be used
+* by both execution logic and caching logic to ensure consistency.
+*
+* @param options - The validation options including result string, expectations, and format
+* @returns Validation result with processed string and validity status
+* @private internal function of `createPipelineExecutor` and `cacheLlmTools`
+*/
+function validatePromptResult(options) {
+const { resultString, expectations, format } = options;
+let processedResultString = resultString;
+let validationError;
+try {
+// TODO: [💝] Unite object for expecting amount and format
+if (format) {
+if (format === 'JSON') {
+if (!isValidJsonString(processedResultString)) {
+// TODO: [🏢] Do more universally via `FormatParser`
+try {
+processedResultString = extractJsonBlock(processedResultString);
+}
+catch (error) {
+keepUnused(error);
+throw new ExpectError(spaceTrim((block) => `
+Expected valid JSON string
+
+The expected JSON text:
+${block(processedResultString)}
+`));
+}
+}
+}
+else {
+throw new UnexpectedError(`Unknown format "${format}"`);
+}
+}
+// TODO: [💝] Unite object for expecting amount and format
+if (expectations) {
+checkExpectations(expectations, processedResultString);
+}
+return {
+isValid: true,
+processedResultString,
+};
+}
+catch (error) {
+if (error instanceof ExpectError) {
+validationError = error;
+}
+else {
+// Re-throw non-ExpectError errors (like UnexpectedError)
+throw error;
+}
+return {
+isValid: false,
+processedResultString,
+error: validationError,
+};
+}
+}
+
 /**
 * Executes a pipeline task with multiple attempts, including joker and retry logic. Handles different task types
 * (prompt, script, dialog, etc.), applies postprocessing, checks expectations, and updates the execution report.
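The new `validatePromptResult` is marked `@private internal function of `createPipelineExecutor` and `cacheLlmTools``, so the sketch below only illustrates the option and result shapes visible in this hunk rather than a supported public API; the expectation object mirrors the `expectations: { words: { min: 1, max: 8 } }` shape used by the embedded PipelineCollection, and the sample values are assumptions:

```js
// Illustrative only: validatePromptResult is private to the engine, so this mirrors
// how createPipelineExecutor calls it internally.
const validationResult = validatePromptResult({
    resultString: '{ "modelName": "gpt-4o" }',
    expectations: { words: { min: 1, max: 8 } },
    format: 'JSON',
});

if (validationResult.isValid) {
    // For FORMAT JSON the processed string can differ from the input,
    // e.g. when the JSON had to be pulled out of a fenced code block.
    console.log(validationResult.processedResultString);
} else {
    // On failure the ExpectError describing the violated expectation is returned, not thrown.
    throw validationResult.error;
}
```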
@@ -4989,17 +5051,18 @@ async function executeAttempts(options) {
 $resultString: null,
 $expectError: null,
 $scriptPipelineExecutionErrors: [],
+$failedResults: [], // Track all failed attempts
 };
 // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
 const _llms = arrayableToArray(tools.llm);
 const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
-attempts: for (let
-const isJokerAttempt =
-const jokerParameterName = jokerParameterNames[jokerParameterNames.length +
+attempts: for (let attemptIndex = -jokerParameterNames.length; attemptIndex < maxAttempts; attemptIndex++) {
+const isJokerAttempt = attemptIndex < 0;
+const jokerParameterName = jokerParameterNames[jokerParameterNames.length + attemptIndex];
 // TODO: [🧠][🍭] JOKERS, EXPECTATIONS, POSTPROCESSING and FOREACH
 if (isJokerAttempt && !jokerParameterName) {
 throw new UnexpectedError(spaceTrim((block) => `
-Joker not found in attempt ${
+Joker not found in attempt ${attemptIndex}

 ${block(pipelineIdentification)}
 `));
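For readability, the renamed loop counter can be traced like this; the concrete values below are assumptions for illustration, not taken from the package:

```js
// Sketch of the attempt numbering introduced above (values assumed for illustration).
const jokerParameterNames = ['draftAnswer']; // one joker parameter
const maxAttempts = 3; // the engine default is DEFAULT_MAX_EXECUTION_ATTEMPTS (now 7)

// attemptIndex runs -1, 0, 1, 2: negative indices are the joker attempts
// (driven by jokerParameterName), non-negative ones are regular execution attempts.
for (let attemptIndex = -jokerParameterNames.length; attemptIndex < maxAttempts; attemptIndex++) {
    const isJokerAttempt = attemptIndex < 0;
    const jokerParameterName = jokerParameterNames[jokerParameterNames.length + attemptIndex];
    console.log(attemptIndex, isJokerAttempt ? `joker {${jokerParameterName}}` : 'llm attempt');
}
```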
@@ -5197,35 +5260,18 @@ async function executeAttempts(options) {
 }
 }
 // TODO: [💝] Unite object for expecting amount and format
-
-
-
-
-
-
-
-
-
-throw new ExpectError(spaceTrim((block) => `
-Expected valid JSON string
-
-${block(
-/*<- Note: No need for `pipelineIdentification`, it will be catched and added later */ '')}
-`));
-}
-}
-}
-else {
-throw new UnexpectedError(spaceTrim((block) => `
-Unknown format "${task.format}"
-
-${block(pipelineIdentification)}
-`));
+// Use the common validation function for both format and expectations
+if (task.format || task.expectations) {
+const validationResult = validatePromptResult({
+resultString: $ongoingTaskResult.$resultString || '',
+expectations: task.expectations,
+format: task.format,
+});
+if (!validationResult.isValid) {
+throw validationResult.error;
 }
-
-
-if (task.expectations) {
-checkExpectations(task.expectations, $ongoingTaskResult.$resultString || '');
+// Update the result string in case format processing modified it (e.g., JSON extraction)
+$ongoingTaskResult.$resultString = validationResult.processedResultString;
 }
 break attempts;
 }
@@ -5234,6 +5280,15 @@ async function executeAttempts(options) {
 throw error;
 }
 $ongoingTaskResult.$expectError = error;
+// Store each failed attempt
+if (!Array.isArray($ongoingTaskResult.$failedResults)) {
+$ongoingTaskResult.$failedResults = [];
+}
+$ongoingTaskResult.$failedResults.push({
+attemptIndex,
+result: $ongoingTaskResult.$resultString,
+error: error,
+});
 }
 finally {
 if (!isJokerAttempt &&
@@ -5255,35 +5310,41 @@ async function executeAttempts(options) {
|
|
|
5255
5310
|
});
|
|
5256
5311
|
}
|
|
5257
5312
|
}
|
|
5258
|
-
if ($ongoingTaskResult.$expectError !== null &&
|
|
5313
|
+
if ($ongoingTaskResult.$expectError !== null && attemptIndex === maxAttempts - 1) {
|
|
5314
|
+
// Note: Create a summary of all failures
|
|
5315
|
+
const failuresSummary = $ongoingTaskResult.$failedResults
|
|
5316
|
+
.map((failure) => spaceTrim((block) => {
|
|
5317
|
+
var _a, _b;
|
|
5318
|
+
return `
|
|
5319
|
+
Attempt ${failure.attemptIndex + 1}:
|
|
5320
|
+
Error ${((_a = failure.error) === null || _a === void 0 ? void 0 : _a.name) || ''}:
|
|
5321
|
+
${block((_b = failure.error) === null || _b === void 0 ? void 0 : _b.message.split('\n').map((line) => `> ${line}`).join('\n'))}
|
|
5322
|
+
|
|
5323
|
+
Result:
|
|
5324
|
+
${block(failure.result === null
|
|
5325
|
+
? 'null'
|
|
5326
|
+
: spaceTrim(failure.result)
|
|
5327
|
+
.split('\n')
|
|
5328
|
+
.map((line) => `> ${line}`)
|
|
5329
|
+
.join('\n'))}
|
|
5330
|
+
`;
|
|
5331
|
+
}))
|
|
5332
|
+
.join('\n\n---\n\n');
|
|
5259
5333
|
throw new PipelineExecutionError(spaceTrim((block) => {
|
|
5260
|
-
var _a
|
|
5334
|
+
var _a;
|
|
5261
5335
|
return `
|
|
5262
5336
|
LLM execution failed ${maxExecutionAttempts}x
|
|
5263
5337
|
|
|
5264
5338
|
${block(pipelineIdentification)}
|
|
5265
5339
|
|
|
5266
|
-
---
|
|
5267
5340
|
The Prompt:
|
|
5268
5341
|
${block((((_a = $ongoingTaskResult.$prompt) === null || _a === void 0 ? void 0 : _a.content) || '')
|
|
5269
5342
|
.split('\n')
|
|
5270
5343
|
.map((line) => `> ${line}`)
|
|
5271
5344
|
.join('\n'))}
|
|
5272
5345
|
|
|
5273
|
-
|
|
5274
|
-
${block(
|
|
5275
|
-
.split('\n')
|
|
5276
|
-
.map((line) => `> ${line}`)
|
|
5277
|
-
.join('\n'))}
|
|
5278
|
-
|
|
5279
|
-
Last result:
|
|
5280
|
-
${block($ongoingTaskResult.$resultString === null
|
|
5281
|
-
? 'null'
|
|
5282
|
-
: spaceTrim($ongoingTaskResult.$resultString)
|
|
5283
|
-
.split('\n')
|
|
5284
|
-
.map((line) => `> ${line}`)
|
|
5285
|
-
.join('\n'))}
|
|
5286
|
-
---
|
|
5346
|
+
All Failed Attempts:
|
|
5347
|
+
${block(failuresSummary)}
|
|
5287
5348
|
`;
|
|
5288
5349
|
}));
|
|
5289
5350
|
}
|
|
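With this change the executor keeps every failed attempt on `$ongoingTaskResult.$failedResults` as `{ attemptIndex, result, error }` and, once the final attempt has failed, folds them into one `PipelineExecutionError` whose message lists all attempts instead of only the last result. A rough standalone sketch of that summary format; the sample failures are invented and plain string joining stands in for the package's `spaceTrim` helper:

```js
// Rough sketch of the failure summary assembled above; the sample data is invented.
const failedResults = [
    { attemptIndex: 0, result: 'Sorry, I cannot do that.', error: new Error('Expected valid JSON string') },
    { attemptIndex: 1, result: null, error: new Error('Prompt result is empty') },
];

const failuresSummary = failedResults
    .map((failure) =>
        [
            `Attempt ${failure.attemptIndex + 1}:`,
            `Error ${failure.error?.name || ''}:`,
            // Quote the error message and the raw result line by line, as the executor does
            ...(failure.error?.message || '').split('\n').map((line) => `> ${line}`),
            '',
            'Result:',
            failure.result === null
                ? 'null'
                : failure.result
                      .split('\n')
                      .map((line) => `> ${line}`)
                      .join('\n'),
        ].join('\n'),
    )
    .join('\n\n---\n\n');

console.log(failuresSummary);
// Attempt 1:
// Error Error:
// > Expected valid JSON string
// ...
```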
```diff
@@ -5503,10 +5564,10 @@ function knowledgePiecesToString(knowledgePieces) {
  */
  async function getKnowledgeForTask(options) {
  const { tools, preparedPipeline, task, parameters } = options;
- const
- const
+ const firstKnowledgePiece = preparedPipeline.knowledgePieces[0];
+ const firstKnowledgeIndex = firstKnowledgePiece === null || firstKnowledgePiece === void 0 ? void 0 : firstKnowledgePiece.index[0];
  // <- TODO: Do not use just first knowledge piece and first index to determine embedding model, use also keyword search
- if (
+ if (firstKnowledgePiece === undefined || firstKnowledgeIndex === undefined) {
  return ''; // <- Note: Np knowledge present, return empty string
  }
  try {
@@ -5517,7 +5578,7 @@ async function getKnowledgeForTask(options) {
  title: 'Knowledge Search',
  modelRequirements: {
  modelVariant: 'EMBEDDING',
- modelName:
+ modelName: firstKnowledgeIndex.modelName,
  },
  content: task.content,
  parameters,
@@ -5525,7 +5586,7 @@ async function getKnowledgeForTask(options) {
  const taskEmbeddingResult = await llmTools.callEmbeddingModel(taskEmbeddingPrompt);
  const knowledgePiecesWithRelevance = preparedPipeline.knowledgePieces.map((knowledgePiece) => {
  const { index } = knowledgePiece;
- const knowledgePieceIndex = index.find((i) => i.modelName ===
+ const knowledgePieceIndex = index.find((i) => i.modelName === firstKnowledgeIndex.modelName);
  // <- TODO: Do not use just first knowledge piece and first index to determine embedding model
  if (knowledgePieceIndex === undefined) {
  return {
@@ -5546,8 +5607,8 @@ async function getKnowledgeForTask(options) {
  task,
  taskEmbeddingPrompt,
  taskEmbeddingResult,
-
-
+ firstKnowledgePiece,
+ firstKnowledgeIndex,
  knowledgePiecesWithRelevance,
  knowledgePiecesSorted,
  knowledgePiecesLimited,
```
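In `getKnowledgeForTask`, the embedding model is now taken from the first knowledge piece's first index entry (`firstKnowledgeIndex.modelName`), the function returns an empty string early when there is no knowledge or no index, and the same model name is used to pick the comparable index entry on every other piece. A condensed sketch of that selection step; the `knowledgePieces` sample and its `position` field are invented, only `index[].modelName` mirrors the diff:

```js
// Condensed sketch of the model-selection logic above; sample data is invented.
const knowledgePieces = [
    { content: 'Piece A', index: [{ modelName: 'text-embedding-3-small', position: [0.1, 0.2] }] },
    { content: 'Piece B', index: [{ modelName: 'text-embedding-3-small', position: [0.3, 0.1] }] },
];

const firstKnowledgePiece = knowledgePieces[0];
const firstKnowledgeIndex = firstKnowledgePiece?.index[0];

if (firstKnowledgePiece === undefined || firstKnowledgeIndex === undefined) {
    console.log(''); // <- no knowledge present, nothing to search
} else {
    // Every piece is matched against the same embedding model as the first index entry
    const comparable = knowledgePieces
        .map((knowledgePiece) => knowledgePiece.index.find((i) => i.modelName === firstKnowledgeIndex.modelName))
        .filter((index) => index !== undefined);
    console.log(comparable.length); // 2
}
```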
```diff
@@ -5616,7 +5677,7 @@ async function getReservedParametersForTask(options) {
  * @private internal utility of `createPipelineExecutor`
  */
  async function executeTask(options) {
- const { currentTask, preparedPipeline, parametersToPass, tools, onProgress, $executionReport, pipelineIdentification, maxExecutionAttempts, maxParallelCount, csvSettings, isVerbose, rootDirname, cacheDirname, intermediateFilesStrategy, isAutoInstalled,
+ const { currentTask, preparedPipeline, parametersToPass, tools, onProgress, $executionReport, pipelineIdentification, maxExecutionAttempts, maxParallelCount, csvSettings, isVerbose, rootDirname, cacheDirname, intermediateFilesStrategy, isAutoInstalled, isNotPreparedWarningSuppressed, } = options;
  const priority = preparedPipeline.tasks.length - preparedPipeline.tasks.indexOf(currentTask);
  // Note: Check consistency of used and dependent parameters which was also done in `validatePipeline`, but it’s good to doublecheck
  const usedParameterNames = extractParameterNamesFromTask(currentTask);
@@ -5704,7 +5765,7 @@ async function executeTask(options) {
  cacheDirname,
  intermediateFilesStrategy,
  isAutoInstalled,
-
+ isNotPreparedWarningSuppressed,
  });
  await onProgress({
  outputParameters: {
@@ -5799,7 +5860,7 @@ async function executePipeline(options) {
  }
  return exportJson({
  name: `executionReport`,
- message: `
+ message: `Unsuccessful PipelineExecutorResult (with missing parameter {${parameter.name}}) PipelineExecutorResult`,
  order: [],
  value: {
  isSuccessful: false,
@@ -5836,7 +5897,7 @@ async function executePipeline(options) {
  return exportJson({
  name: 'pipelineExecutorResult',
  message: spaceTrim((block) => `
-
+ Unsuccessful PipelineExecutorResult (with extra parameter {${parameter.name}}) PipelineExecutorResult

  ${block(pipelineIdentification)}
  `),
@@ -5977,7 +6038,7 @@ async function executePipeline(options) {
  }
  return exportJson({
  name: 'pipelineExecutorResult',
- message: `
+ message: `Unsuccessful PipelineExecutorResult (with misc errors) PipelineExecutorResult`,
  order: [],
  value: {
  isSuccessful: false,
@@ -6028,7 +6089,7 @@ async function executePipeline(options) {
  * @public exported from `@promptbook/core`
  */
  function createPipelineExecutor(options) {
- const { pipeline, tools, maxExecutionAttempts = DEFAULT_MAX_EXECUTION_ATTEMPTS, maxParallelCount = DEFAULT_MAX_PARALLEL_COUNT, csvSettings = DEFAULT_CSV_SETTINGS, isVerbose = DEFAULT_IS_VERBOSE,
+ const { pipeline, tools, maxExecutionAttempts = DEFAULT_MAX_EXECUTION_ATTEMPTS, maxParallelCount = DEFAULT_MAX_PARALLEL_COUNT, csvSettings = DEFAULT_CSV_SETTINGS, isVerbose = DEFAULT_IS_VERBOSE, isNotPreparedWarningSuppressed = false, cacheDirname = DEFAULT_SCRAPE_CACHE_DIRNAME, intermediateFilesStrategy = DEFAULT_INTERMEDIATE_FILES_STRATEGY, isAutoInstalled = DEFAULT_IS_AUTO_INSTALLED, rootDirname = null, } = options;
  validatePipeline(pipeline);
  const pipelineIdentification = (() => {
  // Note: This is a 😐 implementation of [🚞]
@@ -6045,7 +6106,7 @@ function createPipelineExecutor(options) {
  if (isPipelinePrepared(pipeline)) {
  preparedPipeline = pipeline;
  }
- else if (
+ else if (isNotPreparedWarningSuppressed !== true) {
  console.warn(spaceTrim((block) => `
  Pipeline is not prepared

```
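`createPipelineExecutor` now accepts `isNotPreparedWarningSuppressed` (default `false`) and threads it through to `executeTask`; when the flag is not set and the pipeline is not prepared, the executor prints the "Pipeline is not prepared" warning shown above. A usage sketch, assuming the `@promptbook/core` export named in the JSDoc above; the `pipeline` and `tools` values are placeholders:

```js
// Usage sketch only: the two placeholder values would come from your own setup,
// and only options visible in this diff are passed.
import { createPipelineExecutor } from '@promptbook/core';

const pipeline = {}; // <- a pipeline JSON, possibly not yet prepared
const tools = {}; // <- your execution tools (LLM provider, scrapers, ...)

const pipelineExecutor = createPipelineExecutor({
    pipeline,
    tools,
    isNotPreparedWarningSuppressed: true, // <- new option: silences the "Pipeline is not prepared" warning
});
```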
```diff
@@ -6078,7 +6139,7 @@ function createPipelineExecutor(options) {
  maxParallelCount,
  csvSettings,
  isVerbose,
-
+ isNotPreparedWarningSuppressed,
  rootDirname,
  cacheDirname,
  intermediateFilesStrategy,
@@ -6087,7 +6148,7 @@ function createPipelineExecutor(options) {
  assertsError(error);
  return exportJson({
  name: 'pipelineExecutorResult',
- message: `
+ message: `Unsuccessful PipelineExecutorResult, last catch`,
  order: [],
  value: {
  isSuccessful: false,
```
```diff
@@ -6125,7 +6186,7 @@ const markdownScraperMetadata = $deepFreeze({
  className: 'MarkdownScraper',
  mimeTypes: ['text/markdown', 'text/plain'],
  documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/@@',
-
+ isAvailableInBrowser: true,
  // <- Note: [🌏] This is the only scraper which makes sense to be available in the browser, for scraping non-markdown sources in the browser use a remote server
  requiredExecutables: [],
  }); /* <- Note: [🤛] */
@@ -6135,7 +6196,7 @@ const markdownScraperMetadata = $deepFreeze({
  * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available known scrapers
  *
  * @public exported from `@promptbook/core`
- * @public exported from `@promptbook/
+ * @public exported from `@promptbook/wizard`
  * @public exported from `@promptbook/cli`
  */
  $scrapersMetadataRegister.register(markdownScraperMetadata);
@@ -6234,7 +6295,7 @@ class MarkdownScraper {
  }
  // ---
  if (!llmTools.callEmbeddingModel) {
- // TODO: [🟥] Detect browser / node and make it
+ // TODO: [🟥] Detect browser / node and make it colorful
  console.error('No callEmbeddingModel function provided');
  }
  else {
@@ -6260,7 +6321,7 @@ class MarkdownScraper {
  if (!(error instanceof PipelineExecutionError)) {
  throw error;
  }
- // TODO: [🟥] Detect browser / node and make it
+ // TODO: [🟥] Detect browser / node and make it colorful
  console.error(error, "<- Note: This error is not critical to prepare the pipeline, just knowledge pieces won't have embeddings");
  }
  return {
@@ -6291,7 +6352,7 @@ const documentScraperMetadata = $deepFreeze({
  className: 'DocumentScraper',
  mimeTypes: ['application/vnd.openxmlformats-officedocument.wordprocessingml.document'],
  documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/@@',
-
+ isAvailableInBrowser: false,
  // <- Note: [🌏] Only `MarkdownScraper` makes sense to be available in the browser, for scraping non-markdown sources in the browser use a remote server
  requiredExecutables: ['Pandoc'],
  }); /* <- Note: [🤛] */
@@ -6301,7 +6362,7 @@ const documentScraperMetadata = $deepFreeze({
  * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available known scrapers
  *
  * @public exported from `@promptbook/core`
- * @public exported from `@promptbook/
+ * @public exported from `@promptbook/wizard`
  * @public exported from `@promptbook/cli`
  */
  $scrapersMetadataRegister.register(documentScraperMetadata);
@@ -6427,7 +6488,7 @@ const legacyDocumentScraperMetadata = $deepFreeze({
  className: 'LegacyDocumentScraper',
  mimeTypes: ['application/msword', 'text/rtf'],
  documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/@@',
-
+ isAvailableInBrowser: false,
  // <- Note: [🌏] Only `MarkdownScraper` makes sense to be available in the browser, for scraping non-markdown sources in the browser use a remote server
  requiredExecutables: [
  'Pandoc',
@@ -6441,7 +6502,7 @@ const legacyDocumentScraperMetadata = $deepFreeze({
  * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available known scrapers
  *
  * @public exported from `@promptbook/core`
- * @public exported from `@promptbook/
+ * @public exported from `@promptbook/wizard`
  * @public exported from `@promptbook/cli`
  */
  $scrapersMetadataRegister.register(legacyDocumentScraperMetadata);
@@ -6602,7 +6663,7 @@ const createLegacyDocumentScraper = Object.assign((tools, options) => {
  * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available known scrapers
  *
  * @public exported from `@promptbook/legacy-documents`
- * @public exported from `@promptbook/
+ * @public exported from `@promptbook/wizard`
  * @public exported from `@promptbook/cli`
  */
  const _LegacyDocumentScraperRegistration = $scrapersRegister.register(createLegacyDocumentScraper);
```