@promptbook/legacy-documents 0.94.0 → 0.98.0-10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +6 -2
- package/esm/index.es.js +228 -167
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/anthropic-claude.index.d.ts +2 -2
- package/esm/typings/src/_packages/cli.index.d.ts +4 -0
- package/esm/typings/src/_packages/core.index.d.ts +2 -0
- package/esm/typings/src/_packages/openai.index.d.ts +10 -0
- package/esm/typings/src/_packages/types.index.d.ts +14 -4
- package/esm/typings/src/_packages/{wizzard.index.d.ts → wizard.index.d.ts} +6 -2
- package/esm/typings/src/cli/cli-commands/prettify.d.ts +1 -1
- package/esm/typings/src/cli/cli-commands/test-command.d.ts +1 -1
- package/esm/typings/src/config.d.ts +1 -1
- package/esm/typings/src/conversion/archive/loadArchive.d.ts +1 -1
- package/esm/typings/src/conversion/archive/saveArchive.d.ts +2 -2
- package/esm/typings/src/conversion/prettify/renderPipelineMermaidOptions.d.ts +1 -1
- package/esm/typings/src/dialogs/callback/CallbackInterfaceTools.d.ts +1 -1
- package/esm/typings/src/execution/AbstractTaskResult.d.ts +2 -2
- package/esm/typings/src/execution/createPipelineExecutor/$OngoingTaskResult.d.ts +8 -0
- package/esm/typings/src/execution/createPipelineExecutor/00-CreatePipelineExecutorOptions.d.ts +1 -1
- package/esm/typings/src/execution/execution-report/ExecutionPromptReportJson.d.ts +2 -2
- package/esm/typings/src/execution/translation/automatic-translate/translateMessages.d.ts +1 -1
- package/esm/typings/src/execution/utils/validatePromptResult.d.ts +53 -0
- package/esm/typings/src/llm-providers/_common/register/{$provideLlmToolsForWizzardOrCli.d.ts → $provideLlmToolsForWizardOrCli.d.ts} +2 -2
- package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +3 -3
- package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions.d.ts +2 -2
- package/esm/typings/src/llm-providers/anthropic-claude/register-configuration.d.ts +1 -1
- package/esm/typings/src/llm-providers/anthropic-claude/register-constructor.d.ts +1 -1
- package/esm/typings/src/llm-providers/azure-openai/register-configuration.d.ts +1 -1
- package/esm/typings/src/llm-providers/azure-openai/register-constructor.d.ts +1 -1
- package/esm/typings/src/llm-providers/deepseek/register-configuration.d.ts +1 -1
- package/esm/typings/src/llm-providers/deepseek/register-constructor.d.ts +1 -1
- package/esm/typings/src/llm-providers/google/register-configuration.d.ts +1 -1
- package/esm/typings/src/llm-providers/google/register-constructor.d.ts +1 -1
- package/esm/typings/src/llm-providers/ollama/register-configuration.d.ts +1 -1
- package/esm/typings/src/llm-providers/ollama/register-constructor.d.ts +1 -1
- package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionTools.d.ts +1 -1
- package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionToolsOptions.d.ts +2 -2
- package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts +4 -4
- package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionToolsOptions.d.ts +52 -0
- package/esm/typings/src/llm-providers/openai/OpenAiExecutionToolsOptions.d.ts +3 -5
- package/esm/typings/src/llm-providers/openai/createOpenAiCompatibleExecutionTools.d.ts +74 -0
- package/esm/typings/src/llm-providers/openai/register-configuration.d.ts +13 -2
- package/esm/typings/src/llm-providers/openai/register-constructor.d.ts +16 -2
- package/esm/typings/src/remote-server/socket-types/listModels/PromptbookServer_ListModels_Request.d.ts +1 -1
- package/esm/typings/src/scrapers/_boilerplate/createBoilerplateScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/_boilerplate/register-constructor.d.ts +1 -1
- package/esm/typings/src/scrapers/_boilerplate/register-metadata.d.ts +2 -2
- package/esm/typings/src/scrapers/_common/prepareKnowledgePieces.d.ts +1 -1
- package/esm/typings/src/scrapers/_common/register/ScraperAndConverterMetadata.d.ts +1 -1
- package/esm/typings/src/scrapers/document/createDocumentScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/document/register-constructor.d.ts +1 -1
- package/esm/typings/src/scrapers/document/register-metadata.d.ts +2 -2
- package/esm/typings/src/scrapers/document-legacy/createLegacyDocumentScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/document-legacy/register-constructor.d.ts +1 -1
- package/esm/typings/src/scrapers/document-legacy/register-metadata.d.ts +2 -2
- package/esm/typings/src/scrapers/markdown/createMarkdownScraper.d.ts +1 -4
- package/esm/typings/src/scrapers/markdown/register-constructor.d.ts +1 -1
- package/esm/typings/src/scrapers/markdown/register-metadata.d.ts +2 -2
- package/esm/typings/src/scrapers/markitdown/createMarkitdownScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/markitdown/register-constructor.d.ts +1 -1
- package/esm/typings/src/scrapers/markitdown/register-metadata.d.ts +2 -2
- package/esm/typings/src/scrapers/pdf/createPdfScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/pdf/register-constructor.d.ts +1 -1
- package/esm/typings/src/scrapers/pdf/register-metadata.d.ts +2 -2
- package/esm/typings/src/scrapers/website/createWebsiteScraper.d.ts +1 -1
- package/esm/typings/src/scrapers/website/register-constructor.d.ts +1 -1
- package/esm/typings/src/scrapers/website/register-metadata.d.ts +2 -2
- package/esm/typings/src/types/typeAliases.d.ts +1 -1
- package/esm/typings/src/utils/files/listAllFiles.d.ts +1 -1
- package/esm/typings/src/version.d.ts +1 -1
- package/esm/typings/src/{wizzard → wizard}/$getCompiledBook.d.ts +2 -2
- package/esm/typings/src/{wizzard/wizzard.d.ts → wizard/wizard.d.ts} +6 -6
- package/package.json +2 -14
- package/umd/index.umd.js +228 -167
- package/umd/index.umd.js.map +1 -1
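Reviewer note on the file list above: the `wizzard` → `wizard` rename shows up in the `_packages` typings, in the `src/wizzard` → `src/wizard` folder, and in the register helper `$provideLlmToolsForWizzardOrCli` → `$provideLlmToolsForWizardOrCli`. A minimal migration sketch follows; the import specifiers are assumptions inferred from the renamed `.d.ts` paths, not confirmed exports of this release:

```js
// Hypothetical migration sketch, entry-point names inferred from the renamed .d.ts paths (not verified).
// 0.94.0 (old spelling):
// import { wizzard } from '@promptbook/wizzard';
// 0.98.0-10 (new spelling):
// import { wizard } from '@promptbook/wizard';
```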
package/umd/index.umd.js
CHANGED
@@ -26,7 +26,7 @@
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
- const PROMPTBOOK_ENGINE_VERSION = '0.94.0';
+ const PROMPTBOOK_ENGINE_VERSION = '0.98.0-10';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -175,7 +175,7 @@
  *
  * @public exported from `@promptbook/core`
  */
- const DEFAULT_MAX_EXECUTION_ATTEMPTS =
+ const DEFAULT_MAX_EXECUTION_ATTEMPTS = 7; // <- TODO: [🤹♂️]
 // <- TODO: [🕝] Make also `BOOKS_DIRNAME_ALTERNATIVES`
 // TODO: Just `.promptbook` in config, hardcode subfolders like `download-cache` or `execution-cache`
 /**
@@ -1052,7 +1052,7 @@
  * Note: [🟢] Code in this file should never be never released in packages that could be imported into browser environment
  */

-
var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModels",description:"List of available model names together with their descriptions as JSON",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelsRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n```json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n```\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n```json\n{availableModels}\n```\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. 
By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelsRequirements",format:"JSON",dependentParameterNames:["availableModels","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModels}` List of available model names together with their descriptions as JSON\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelsRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n\\`\\`\\`json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n\\`\\`\\`json\n{availableModels}\n\\`\\`\\`\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelsRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
+
var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModels",description:"List of available model names together with their descriptions as JSON",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelsRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n```json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpful assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n```\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n```json\n{availableModels}\n```\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. 
By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelsRequirements",format:"JSON",dependentParameterNames:["availableModels","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModels}` List of available model names together with their descriptions as JSON\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelsRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n\\`\\`\\`json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpful assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n\\`\\`\\`json\n{availableModels}\n\\`\\`\\`\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelsRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];

 /**
  * Checks if value is valid email
@@ -1209,7 +1209,7 @@
 });
 }
 catch (error) {
- // TODO: [🟥] Detect browser / node and make it
+ // TODO: [🟥] Detect browser / node and make it colorful
 console.error('There was an error with prettifying the markdown, using the original as the fallback', {
 error,
 html: content,
@@ -1491,7 +1491,7 @@
 else {
 for (const [subName, subValue] of Object.entries(value)) {
 if (subValue === undefined) {
- // Note: undefined in object is serializable - it is just
+ // Note: undefined in object is serializable - it is just omitted
 continue;
 }
 checkSerializableAsJson({ name: `${name}.${subName}`, value: subValue, message });
@@ -2181,7 +2181,7 @@

 Note: You have probably forgotten to run "ptbk make" to update the collection
 Note: Pipelines with the same URL are not allowed
- Only
+ Only exception is when the pipelines are identical

 `));
 }
@@ -2560,7 +2560,7 @@
 throw new Error(spaceTrim__default["default"]((block) => `
 ${block(error.message)}

- The JSON text:
+ The expected JSON text:
 ${block(value)}
 `));
 }
@@ -2931,12 +2931,12 @@
 get title() {
 return `${llmTools.title} (+usage)`;
 // <- TODO: [🧈] Maybe standartize the suffix when wrapping `LlmExecutionTools` up
- // <- TODO: [🧈][🧠] Does it make
+ // <- TODO: [🧈][🧠] Does it make sense to suffix "(+usage)"?
 },
 get description() {
 return `${llmTools.description} (+usage)`;
 // <- TODO: [🧈] Maybe standartize the suffix when wrapping `LlmExecutionTools` up
- // <- TODO: [🧈][🧠] Does it make
+ // <- TODO: [🧈][🧠] Does it make sense to suffix "(+usage)"?
 },
 checkConfiguration() {
 return /* not await */ llmTools.checkConfiguration();
@@ -3163,13 +3163,13 @@

 Technically, it's not an error, but it's probably not what you want because it does not make sense to use Promptbook without language models.
 `);
- // TODO: [🟥] Detect browser / node and make it
+ // TODO: [🟥] Detect browser / node and make it colorful
 console.warn(warningMessage);
 // <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
 /*
 return {
 async listModels() {
- // TODO: [🟥] Detect browser / node and make it
+ // TODO: [🟥] Detect browser / node and make it colorful
 console.warn(
 spaceTrim(
 (block) => `
@@ -3445,17 +3445,17 @@
 * Mixes registered scrapers from $scrapersMetadataRegister and $scrapersRegister
 */
 const all = [];
- for (const { packageName, className, mimeTypes, documentationUrl,
+ for (const { packageName, className, mimeTypes, documentationUrl, isAvailableInBrowser, } of $scrapersMetadataRegister.list()) {
 if (all.some((item) => item.packageName === packageName && item.className === className)) {
 continue;
 }
- all.push({ packageName, className, mimeTypes, documentationUrl,
+ all.push({ packageName, className, mimeTypes, documentationUrl, isAvailableInBrowser });
 }
- for (const { packageName, className, mimeTypes, documentationUrl,
+ for (const { packageName, className, mimeTypes, documentationUrl, isAvailableInBrowser, } of $scrapersRegister.list()) {
 if (all.some((item) => item.packageName === packageName && item.className === className)) {
 continue;
 }
- all.push({ packageName, className, mimeTypes, documentationUrl,
+ all.push({ packageName, className, mimeTypes, documentationUrl, isAvailableInBrowser });
 }
 for (const { metadata } of availableScrapers) {
 all.push(metadata);
@@ -3467,8 +3467,8 @@
 const isInstalled = $scrapersRegister
 .list()
 .find(({ packageName, className }) => metadata.packageName === packageName && metadata.className === className);
- const
- return { ...metadata, isMetadataAviailable, isInstalled,
+ const isAvailableInTools = availableScrapers.some(({ metadata: { packageName, className } }) => metadata.packageName === packageName && metadata.className === className);
+ return { ...metadata, isMetadataAviailable, isInstalled, isAvailableInTools };
 });
 if (metadata.length === 0) {
 return spaceTrim__default["default"](`
@@ -3481,7 +3481,7 @@
 return spaceTrim__default["default"]((block) => `
 Available scrapers are:
 ${block(metadata
- .map(({ packageName, className, isMetadataAviailable, isInstalled, mimeTypes,
+ .map(({ packageName, className, isMetadataAviailable, isInstalled, mimeTypes, isAvailableInBrowser, isAvailableInTools, }, i) => {
 const more = [];
 // TODO: [🧠] Maybe use `documentationUrl`
 if (isMetadataAviailable) {
@@ -3490,16 +3490,16 @@
 if (isInstalled) {
 more.push(`🟩 Installed`);
 } // not else
- if (
+ if (isAvailableInTools) {
 more.push(`🟦 Available in tools`);
 } // not else
 if (!isMetadataAviailable && isInstalled) {
 more.push(`When no metadata registered but scraper is installed, it is an unexpected behavior`);
 } // not else
- if (!isInstalled &&
+ if (!isInstalled && isAvailableInTools) {
 more.push(`When the scraper is not installed but available in tools, it is an unexpected compatibility behavior`);
 } // not else
- if (!
+ if (!isAvailableInBrowser) {
 more.push(`Not usable in browser`);
 }
 const moreText = more.length === 0 ? '' : ` *(${more.join('; ')})*`;
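The three hunks above thread a new `isAvailableInBrowser` flag and a computed `isAvailableInTools` flag through the scraper listing. As a readability aid, the badge logic from this hunk can be read as the following standalone sketch (not library code; the `isMetadataAviailable` branch is cut off by the hunk boundary, so its badge text is omitted here):

```js
// Standalone paraphrase of the badge logic shown above (sketch only, not library code).
function describeScraperAvailability({ isMetadataAviailable, isInstalled, isAvailableInTools, isAvailableInBrowser }) {
    const more = [];
    if (isInstalled) {
        more.push(`🟩 Installed`);
    } // not else
    if (isAvailableInTools) {
        more.push(`🟦 Available in tools`);
    } // not else
    if (!isMetadataAviailable && isInstalled) {
        more.push(`When no metadata registered but scraper is installed, it is an unexpected behavior`);
    } // not else
    if (!isInstalled && isAvailableInTools) {
        more.push(`When the scraper is not installed but available in tools, it is an unexpected compatibility behavior`);
    } // not else
    if (!isAvailableInBrowser) {
        more.push(`Not usable in browser`);
    }
    return more.length === 0 ? '' : ` *(${more.join('; ')})*`;
}
```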
@@ -3829,7 +3829,7 @@
 /**
 * TODO: [🧊] In future one preparation can take data from previous preparation and save tokens and time
 * Put `knowledgePieces` into `PrepareKnowledgeOptions`
- * TODO: [🪂] More than max things can run in parallel by
+ * TODO: [🪂] More than max things can run in parallel by accident [1,[2a,2b,_],[3a,3b,_]]
 * TODO: [🧠][❎] Do here proper M:N mapping
 * [x] One source can make multiple pieces
 * [ ] One piece can have multiple sources
@@ -4637,6 +4637,77 @@
 return mappedParameters;
 }

+ /**
+ * Replaces parameters in template with values from parameters object
+ *
+ * Note: This function is not places strings into string,
+ * It's more complex and can handle this operation specifically for LLM models
+ *
+ * @param template the template with parameters in {curly} braces
+ * @param parameters the object with parameters
+ * @returns the template with replaced parameters
+ * @throws {PipelineExecutionError} if parameter is not defined, not closed, or not opened
+ * @public exported from `@promptbook/utils`
+ */
+ function templateParameters(template, parameters) {
+ for (const [parameterName, parameterValue] of Object.entries(parameters)) {
+ if (parameterValue === RESERVED_PARAMETER_MISSING_VALUE) {
+ throw new UnexpectedError(`Parameter \`{${parameterName}}\` has missing value`);
+ }
+ else if (parameterValue === RESERVED_PARAMETER_RESTRICTED) {
+ // TODO: [🍵]
+ throw new UnexpectedError(`Parameter \`{${parameterName}}\` is restricted to use`);
+ }
+ }
+ let replacedTemplates = template;
+ let match;
+ let loopLimit = LOOP_LIMIT;
+ while ((match = /^(?<precol>.*){(?<parameterName>\w+)}(.*)/m /* <- Not global */
+ .exec(replacedTemplates))) {
+ if (loopLimit-- < 0) {
+ throw new LimitReachedError('Loop limit reached during parameters replacement in `templateParameters`');
+ }
+ const precol = match.groups.precol;
+ const parameterName = match.groups.parameterName;
+ if (parameterName === '') {
+ // Note: Skip empty placeholders. It's used to avoid confusion with JSON-like strings
+ continue;
+ }
+ if (parameterName.indexOf('{') !== -1 || parameterName.indexOf('}') !== -1) {
+ throw new PipelineExecutionError('Parameter is already opened or not closed');
+ }
+ if (parameters[parameterName] === undefined) {
+ throw new PipelineExecutionError(`Parameter \`{${parameterName}}\` is not defined`);
+ }
+ let parameterValue = parameters[parameterName];
+ if (parameterValue === undefined) {
+ throw new PipelineExecutionError(`Parameter \`{${parameterName}}\` is not defined`);
+ }
+ parameterValue = valueToString(parameterValue);
+ // Escape curly braces in parameter values to prevent prompt-injection
+ parameterValue = parameterValue.replace(/[{}]/g, '\\$&');
+ if (parameterValue.includes('\n') && /^\s*\W{0,3}\s*$/.test(precol)) {
+ parameterValue = parameterValue
+ .split('\n')
+ .map((line, index) => (index === 0 ? line : `${precol}${line}`))
+ .join('\n');
+ }
+ replacedTemplates =
+ replacedTemplates.substring(0, match.index + precol.length) +
+ parameterValue +
+ replacedTemplates.substring(match.index + precol.length + parameterName.length + 2);
+ }
+ // [💫] Check if there are parameters that are not closed properly
+ if (/{\w+$/.test(replacedTemplates)) {
+ throw new PipelineExecutionError('Parameter is not closed');
+ }
+ // [💫] Check if there are parameters that are not opened properly
+ if (/^\w+}/.test(replacedTemplates)) {
+ throw new PipelineExecutionError('Parameter is not opened');
+ }
+ return replacedTemplates;
+ }
+
 /**
 * Extracts all code blocks from markdown.
 *
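For orientation, a small usage sketch of the hoisted `templateParameters` helper (its JSDoc marks it as exported from `@promptbook/utils`); the template and values below are illustrative only and the import path is assumed from that JSDoc:

```js
// Illustrative only; behavior follows the function body shown above.
import { templateParameters } from '@promptbook/utils'; // <- assumption: named ESM export

const template = 'Hello {name}, summarize:\n> {document}';
const result = templateParameters(template, {
    name: 'Alice',
    document: 'First line\nSecond line',
});
// Roughly: 'Hello Alice, summarize:\n> First line\n> Second line'
// Lines after the first of a multi-line value are re-prefixed with the text before the placeholder ("> " here),
// `{` and `}` inside values are escaped to prevent prompt injection,
// and a missing parameter throws PipelineExecutionError.
```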
@@ -4739,77 +4810,6 @@
 * TODO: [🏢] Make this logic part of `JsonFormatParser` or `isValidJsonString`
 */

- /**
- * Replaces parameters in template with values from parameters object
- *
- * Note: This function is not places strings into string,
- * It's more complex and can handle this operation specifically for LLM models
- *
- * @param template the template with parameters in {curly} braces
- * @param parameters the object with parameters
- * @returns the template with replaced parameters
- * @throws {PipelineExecutionError} if parameter is not defined, not closed, or not opened
- * @public exported from `@promptbook/utils`
- */
- function templateParameters(template, parameters) {
- for (const [parameterName, parameterValue] of Object.entries(parameters)) {
- if (parameterValue === RESERVED_PARAMETER_MISSING_VALUE) {
- throw new UnexpectedError(`Parameter \`{${parameterName}}\` has missing value`);
- }
- else if (parameterValue === RESERVED_PARAMETER_RESTRICTED) {
- // TODO: [🍵]
- throw new UnexpectedError(`Parameter \`{${parameterName}}\` is restricted to use`);
- }
- }
- let replacedTemplates = template;
- let match;
- let loopLimit = LOOP_LIMIT;
- while ((match = /^(?<precol>.*){(?<parameterName>\w+)}(.*)/m /* <- Not global */
- .exec(replacedTemplates))) {
- if (loopLimit-- < 0) {
- throw new LimitReachedError('Loop limit reached during parameters replacement in `templateParameters`');
- }
- const precol = match.groups.precol;
- const parameterName = match.groups.parameterName;
- if (parameterName === '') {
- // Note: Skip empty placeholders. It's used to avoid confusion with JSON-like strings
- continue;
- }
- if (parameterName.indexOf('{') !== -1 || parameterName.indexOf('}') !== -1) {
- throw new PipelineExecutionError('Parameter is already opened or not closed');
- }
- if (parameters[parameterName] === undefined) {
- throw new PipelineExecutionError(`Parameter \`{${parameterName}}\` is not defined`);
- }
- let parameterValue = parameters[parameterName];
- if (parameterValue === undefined) {
- throw new PipelineExecutionError(`Parameter \`{${parameterName}}\` is not defined`);
- }
- parameterValue = valueToString(parameterValue);
- // Escape curly braces in parameter values to prevent prompt-injection
- parameterValue = parameterValue.replace(/[{}]/g, '\\$&');
- if (parameterValue.includes('\n') && /^\s*\W{0,3}\s*$/.test(precol)) {
- parameterValue = parameterValue
- .split('\n')
- .map((line, index) => (index === 0 ? line : `${precol}${line}`))
- .join('\n');
- }
- replacedTemplates =
- replacedTemplates.substring(0, match.index + precol.length) +
- parameterValue +
- replacedTemplates.substring(match.index + precol.length + parameterName.length + 2);
- }
- // [💫] Check if there are parameters that are not closed properly
- if (/{\w+$/.test(replacedTemplates)) {
- throw new PipelineExecutionError('Parameter is not closed');
- }
- // [💫] Check if there are parameters that are not opened properly
- if (/^\w+}/.test(replacedTemplates)) {
- throw new PipelineExecutionError('Parameter is not opened');
- }
- return replacedTemplates;
- }
-
 /**
 * Counts number of characters in the text
 *
@@ -4970,6 +4970,68 @@
 * Note: [💝] and [🤠] are interconnected together
 */

+ /**
+ * Validates a prompt result against expectations and format requirements.
+ * This function provides a common abstraction for result validation that can be used
+ * by both execution logic and caching logic to ensure consistency.
+ *
+ * @param options - The validation options including result string, expectations, and format
+ * @returns Validation result with processed string and validity status
+ * @private internal function of `createPipelineExecutor` and `cacheLlmTools`
+ */
+ function validatePromptResult(options) {
+ const { resultString, expectations, format } = options;
+ let processedResultString = resultString;
+ let validationError;
+ try {
+ // TODO: [💝] Unite object for expecting amount and format
+ if (format) {
+ if (format === 'JSON') {
+ if (!isValidJsonString(processedResultString)) {
+ // TODO: [🏢] Do more universally via `FormatParser`
+ try {
+ processedResultString = extractJsonBlock(processedResultString);
+ }
+ catch (error) {
+ keepUnused(error);
+ throw new ExpectError(spaceTrim.spaceTrim((block) => `
+ Expected valid JSON string
+
+ The expected JSON text:
+ ${block(processedResultString)}
+ `));
+ }
+ }
+ }
+ else {
+ throw new UnexpectedError(`Unknown format "${format}"`);
+ }
+ }
+ // TODO: [💝] Unite object for expecting amount and format
+ if (expectations) {
+ checkExpectations(expectations, processedResultString);
+ }
+ return {
+ isValid: true,
+ processedResultString,
+ };
+ }
+ catch (error) {
+ if (error instanceof ExpectError) {
+ validationError = error;
+ }
+ else {
+ // Re-throw non-ExpectError errors (like UnexpectedError)
+ throw error;
+ }
+ return {
+ isValid: false,
+ processedResultString,
+ error: validationError,
+ };
+ }
+ }
+
 /**
 * Executes a pipeline task with multiple attempts, including joker and retry logic. Handles different task types
 * (prompt, script, dialog, etc.), applies postprocessing, checks expectations, and updates the execution report.
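`validatePromptResult` is marked `@private`, so it is not part of the public surface; the sketch below only illustrates the contract the executor and the LLM cache now share, with made-up inputs:

```js
// Illustrative only; validatePromptResult is @private and the sample inputs are invented.
const validationResult = validatePromptResult({
    resultString: 'Sure! Here is the JSON:\n```json\n{ "title": "Example" }\n```',
    expectations: { words: { min: 1, max: 8 } },
    format: 'JSON',
});
if (validationResult.isValid) {
    // For FORMAT JSON the result is run through extractJsonBlock when it is not already valid JSON
    // (assumption: extractJsonBlock pulls the fenced JSON out of the surrounding text).
    console.log(validationResult.processedResultString);
} else {
    // ExpectError instances are captured and returned instead of thrown; other errors are re-thrown.
    console.error(validationResult.error);
}
```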
@@ -4987,17 +5049,18 @@
 $resultString: null,
 $expectError: null,
 $scriptPipelineExecutionErrors: [],
+ $failedResults: [], // Track all failed attempts
 };
 // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
 const _llms = arrayableToArray(tools.llm);
 const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
- attempts: for (let
- const isJokerAttempt =
- const jokerParameterName = jokerParameterNames[jokerParameterNames.length +
+ attempts: for (let attemptIndex = -jokerParameterNames.length; attemptIndex < maxAttempts; attemptIndex++) {
+ const isJokerAttempt = attemptIndex < 0;
+ const jokerParameterName = jokerParameterNames[jokerParameterNames.length + attemptIndex];
 // TODO: [🧠][🍭] JOKERS, EXPECTATIONS, POSTPROCESSING and FOREACH
 if (isJokerAttempt && !jokerParameterName) {
 throw new UnexpectedError(spaceTrim.spaceTrim((block) => `
- Joker not found in attempt ${
+ Joker not found in attempt ${attemptIndex}

 ${block(pipelineIdentification)}
 `));
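The reworked loop header makes the joker/retry bookkeeping explicit: joker attempts use negative indices and regular attempts count up from 0. A tiny standalone sketch of the same indexing, with made-up values:

```js
// Standalone sketch of the attempt indexing used above (not library code).
const jokerParameterNames = ['fallbackAnswer'];
const maxAttempts = 3;
for (let attemptIndex = -jokerParameterNames.length; attemptIndex < maxAttempts; attemptIndex++) {
    const isJokerAttempt = attemptIndex < 0;
    const jokerParameterName = jokerParameterNames[jokerParameterNames.length + attemptIndex];
    console.log({ attemptIndex, isJokerAttempt, jokerParameterName });
    // -> { attemptIndex: -1, isJokerAttempt: true,  jokerParameterName: 'fallbackAnswer' }
    // -> { attemptIndex:  0, isJokerAttempt: false, jokerParameterName: undefined }
    // -> { attemptIndex:  1, isJokerAttempt: false, jokerParameterName: undefined }
    // -> { attemptIndex:  2, isJokerAttempt: false, jokerParameterName: undefined }
}
```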
@@ -5195,35 +5258,18 @@
 }
 }
 // TODO: [💝] Unite object for expecting amount and format
-
-
-
-
-
-
-
-
-
- throw new ExpectError(spaceTrim.spaceTrim((block) => `
- Expected valid JSON string
-
- ${block(
- /*<- Note: No need for `pipelineIdentification`, it will be catched and added later */ '')}
- `));
- }
- }
- }
- else {
- throw new UnexpectedError(spaceTrim.spaceTrim((block) => `
- Unknown format "${task.format}"
-
- ${block(pipelineIdentification)}
- `));
+ // Use the common validation function for both format and expectations
+ if (task.format || task.expectations) {
+ const validationResult = validatePromptResult({
+ resultString: $ongoingTaskResult.$resultString || '',
+ expectations: task.expectations,
+ format: task.format,
+ });
+ if (!validationResult.isValid) {
+ throw validationResult.error;
 }
-
-
- if (task.expectations) {
- checkExpectations(task.expectations, $ongoingTaskResult.$resultString || '');
+ // Update the result string in case format processing modified it (e.g., JSON extraction)
+ $ongoingTaskResult.$resultString = validationResult.processedResultString;
 }
 break attempts;
 }
@@ -5232,6 +5278,15 @@
 throw error;
 }
 $ongoingTaskResult.$expectError = error;
+ // Store each failed attempt
+ if (!Array.isArray($ongoingTaskResult.$failedResults)) {
+ $ongoingTaskResult.$failedResults = [];
+ }
+ $ongoingTaskResult.$failedResults.push({
+ attemptIndex,
+ result: $ongoingTaskResult.$resultString,
+ error: error,
+ });
 }
 finally {
 if (!isJokerAttempt &&
@@ -5253,35 +5308,41 @@
  });
  }
  }
- if ($ongoingTaskResult.$expectError !== null &&
+ if ($ongoingTaskResult.$expectError !== null && attemptIndex === maxAttempts - 1) {
+ // Note: Create a summary of all failures
+ const failuresSummary = $ongoingTaskResult.$failedResults
+ .map((failure) => spaceTrim.spaceTrim((block) => {
+ var _a, _b;
+ return `
+ Attempt ${failure.attemptIndex + 1}:
+ Error ${((_a = failure.error) === null || _a === void 0 ? void 0 : _a.name) || ''}:
+ ${block((_b = failure.error) === null || _b === void 0 ? void 0 : _b.message.split('\n').map((line) => `> ${line}`).join('\n'))}
+
+ Result:
+ ${block(failure.result === null
+ ? 'null'
+ : spaceTrim.spaceTrim(failure.result)
+ .split('\n')
+ .map((line) => `> ${line}`)
+ .join('\n'))}
+ `;
+ }))
+ .join('\n\n---\n\n');
  throw new PipelineExecutionError(spaceTrim.spaceTrim((block) => {
- var _a
+ var _a;
  return `
  LLM execution failed ${maxExecutionAttempts}x

  ${block(pipelineIdentification)}

- ---
  The Prompt:
  ${block((((_a = $ongoingTaskResult.$prompt) === null || _a === void 0 ? void 0 : _a.content) || '')
  .split('\n')
  .map((line) => `> ${line}`)
  .join('\n'))}

-
- ${block(
- .split('\n')
- .map((line) => `> ${line}`)
- .join('\n'))}
-
- Last result:
- ${block($ongoingTaskResult.$resultString === null
- ? 'null'
- : spaceTrim.spaceTrim($ongoingTaskResult.$resultString)
- .split('\n')
- .map((line) => `> ${line}`)
- .join('\n'))}
- ---
+ All Failed Attempts:
+ ${block(failuresSummary)}
  `;
  }));
  }
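Note: On the final attempt the stored failures are folded into one `PipelineExecutionError` message instead of reporting only the last result. A sketch of that summarising step with plain sample data; it assumes the `spacetrim` package's named `spaceTrim` export (the bundle above uses it through a namespace import), and the executor state around it is omitted.

    // Every recorded failure becomes a quoted block; blocks are joined with `---` separators.
    import { spaceTrim } from 'spacetrim'; // <- assumption: named export of the `spacetrim` package

    const failedResults = [
        { attemptIndex: 0, result: 'first answer', error: new Error('Too short') },
        { attemptIndex: 1, result: null, error: new Error('Invalid JSON') },
    ];

    const failuresSummary = failedResults
        .map((failure) =>
            spaceTrim(
                (block) => `
                    Attempt ${failure.attemptIndex + 1}:
                    Error ${failure.error?.name || ''}:
                    ${block((failure.error?.message || '').split('\n').map((line) => `> ${line}`).join('\n'))}

                    Result:
                    ${block(failure.result === null ? 'null' : failure.result.split('\n').map((line) => `> ${line}`).join('\n'))}
                `,
            ),
        )
        .join('\n\n---\n\n');

    console.log(failuresSummary);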
@@ -5501,10 +5562,10 @@
  */
  async function getKnowledgeForTask(options) {
  const { tools, preparedPipeline, task, parameters } = options;
- const
- const
+ const firstKnowledgePiece = preparedPipeline.knowledgePieces[0];
+ const firstKnowledgeIndex = firstKnowledgePiece === null || firstKnowledgePiece === void 0 ? void 0 : firstKnowledgePiece.index[0];
  // <- TODO: Do not use just first knowledge piece and first index to determine embedding model, use also keyword search
- if (
+ if (firstKnowledgePiece === undefined || firstKnowledgeIndex === undefined) {
  return ''; // <- Note: Np knowledge present, return empty string
  }
  try {
@@ -5515,7 +5576,7 @@
  title: 'Knowledge Search',
  modelRequirements: {
  modelVariant: 'EMBEDDING',
- modelName:
+ modelName: firstKnowledgeIndex.modelName,
  },
  content: task.content,
  parameters,
@@ -5523,7 +5584,7 @@
  const taskEmbeddingResult = await llmTools.callEmbeddingModel(taskEmbeddingPrompt);
  const knowledgePiecesWithRelevance = preparedPipeline.knowledgePieces.map((knowledgePiece) => {
  const { index } = knowledgePiece;
- const knowledgePieceIndex = index.find((i) => i.modelName ===
+ const knowledgePieceIndex = index.find((i) => i.modelName === firstKnowledgeIndex.modelName);
  // <- TODO: Do not use just first knowledge piece and first index to determine embedding model
  if (knowledgePieceIndex === undefined) {
  return {
@@ -5544,8 +5605,8 @@
  task,
  taskEmbeddingPrompt,
  taskEmbeddingResult,
-
-
+ firstKnowledgePiece,
+ firstKnowledgeIndex,
  knowledgePiecesWithRelevance,
  knowledgePiecesSorted,
  knowledgePiecesLimited,
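Note: In the three hunks above, `getKnowledgeForTask` now takes the embedding model from the first knowledge piece's first index entry and scores every piece against the task embedding. A rough sketch of that ranking step; cosine similarity, the `content` and `position` field names, and the shape of `llmTools.callEmbeddingModel`'s result are simplified assumptions for illustration, not the library's exact internals.

    // Rough sketch of the ranking in `getKnowledgeForTask` (the real code also sorts,
    // limits, and renders the pieces). Cosine similarity is an assumed stand-in for the
    // relevance measure used internally.
    function cosineSimilarity(a, b) {
        let dot = 0, normA = 0, normB = 0;
        for (let i = 0; i < a.length; i++) {
            dot += a[i] * b[i];
            normA += a[i] * a[i];
            normB += b[i] * b[i];
        }
        return dot / (Math.sqrt(normA) * Math.sqrt(normB) || 1);
    }

    async function rankKnowledgePieces(llmTools, knowledgePieces, taskContent) {
        const firstKnowledgeIndex = knowledgePieces[0]?.index[0];
        if (firstKnowledgeIndex === undefined) {
            return []; // <- No knowledge present
        }
        const taskEmbeddingResult = await llmTools.callEmbeddingModel({
            title: 'Knowledge Search',
            modelRequirements: { modelVariant: 'EMBEDDING', modelName: firstKnowledgeIndex.modelName },
            content: taskContent,
            parameters: {},
        });
        return knowledgePieces
            .map((knowledgePiece) => {
                const pieceIndex = knowledgePiece.index.find((i) => i.modelName === firstKnowledgeIndex.modelName);
                const relevance = pieceIndex
                    ? cosineSimilarity(taskEmbeddingResult.content, pieceIndex.position) // <- field names are assumptions
                    : 0;
                return { knowledgePiece, relevance };
            })
            .sort((a, b) => b.relevance - a.relevance);
    }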
@@ -5614,7 +5675,7 @@
  * @private internal utility of `createPipelineExecutor`
  */
  async function executeTask(options) {
- const { currentTask, preparedPipeline, parametersToPass, tools, onProgress, $executionReport, pipelineIdentification, maxExecutionAttempts, maxParallelCount, csvSettings, isVerbose, rootDirname, cacheDirname, intermediateFilesStrategy, isAutoInstalled,
+ const { currentTask, preparedPipeline, parametersToPass, tools, onProgress, $executionReport, pipelineIdentification, maxExecutionAttempts, maxParallelCount, csvSettings, isVerbose, rootDirname, cacheDirname, intermediateFilesStrategy, isAutoInstalled, isNotPreparedWarningSuppressed, } = options;
  const priority = preparedPipeline.tasks.length - preparedPipeline.tasks.indexOf(currentTask);
  // Note: Check consistency of used and dependent parameters which was also done in `validatePipeline`, but it’s good to doublecheck
  const usedParameterNames = extractParameterNamesFromTask(currentTask);
@@ -5702,7 +5763,7 @@
  cacheDirname,
  intermediateFilesStrategy,
  isAutoInstalled,
-
+ isNotPreparedWarningSuppressed,
  });
  await onProgress({
  outputParameters: {
@@ -5797,7 +5858,7 @@
  }
  return exportJson({
  name: `executionReport`,
- message: `
+ message: `Unsuccessful PipelineExecutorResult (with missing parameter {${parameter.name}}) PipelineExecutorResult`,
  order: [],
  value: {
  isSuccessful: false,
@@ -5834,7 +5895,7 @@
  return exportJson({
  name: 'pipelineExecutorResult',
  message: spaceTrim.spaceTrim((block) => `
-
+ Unsuccessful PipelineExecutorResult (with extra parameter {${parameter.name}}) PipelineExecutorResult

  ${block(pipelineIdentification)}
  `),
@@ -5975,7 +6036,7 @@
  }
  return exportJson({
  name: 'pipelineExecutorResult',
- message: `
+ message: `Unsuccessful PipelineExecutorResult (with misc errors) PipelineExecutorResult`,
  order: [],
  value: {
  isSuccessful: false,
@@ -6026,7 +6087,7 @@
  * @public exported from `@promptbook/core`
  */
  function createPipelineExecutor(options) {
- const { pipeline, tools, maxExecutionAttempts = DEFAULT_MAX_EXECUTION_ATTEMPTS, maxParallelCount = DEFAULT_MAX_PARALLEL_COUNT, csvSettings = DEFAULT_CSV_SETTINGS, isVerbose = DEFAULT_IS_VERBOSE,
+ const { pipeline, tools, maxExecutionAttempts = DEFAULT_MAX_EXECUTION_ATTEMPTS, maxParallelCount = DEFAULT_MAX_PARALLEL_COUNT, csvSettings = DEFAULT_CSV_SETTINGS, isVerbose = DEFAULT_IS_VERBOSE, isNotPreparedWarningSuppressed = false, cacheDirname = DEFAULT_SCRAPE_CACHE_DIRNAME, intermediateFilesStrategy = DEFAULT_INTERMEDIATE_FILES_STRATEGY, isAutoInstalled = DEFAULT_IS_AUTO_INSTALLED, rootDirname = null, } = options;
  validatePipeline(pipeline);
  const pipelineIdentification = (() => {
  // Note: This is a 😐 implementation of [🚞]
@@ -6043,7 +6104,7 @@
  if (isPipelinePrepared(pipeline)) {
  preparedPipeline = pipeline;
  }
- else if (
+ else if (isNotPreparedWarningSuppressed !== true) {
  console.warn(spaceTrim.spaceTrim((block) => `
  Pipeline is not prepared

@@ -6076,7 +6137,7 @@
  maxParallelCount,
  csvSettings,
  isVerbose,
-
+ isNotPreparedWarningSuppressed,
  rootDirname,
  cacheDirname,
  intermediateFilesStrategy,
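Note: The hunks above add an `isNotPreparedWarningSuppressed` option to `createPipelineExecutor` (default `false`, per the destructuring earlier) and thread it through to the task execution, so the "Pipeline is not prepared" console warning can be silenced. A hedged usage sketch; the pipeline, tools, and input parameters are created elsewhere and only passed through here, and how the returned value is consumed is left to the caller.

    // Usage sketch for the new option; `createPipelineExecutor` is exported from
    // `@promptbook/core` as stated in the docblock above.
    import { createPipelineExecutor } from '@promptbook/core';

    function createQuietExecutor(pipeline, tools) {
        return createPipelineExecutor({
            pipeline, // <- possibly unprepared PipelineJson, loaded elsewhere
            tools,    // <- ExecutionTools, prepared elsewhere
            isNotPreparedWarningSuppressed: true, // <- skips the "Pipeline is not prepared" warning
        });
    }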
@@ -6085,7 +6146,7 @@
  assertsError(error);
  return exportJson({
  name: 'pipelineExecutorResult',
- message: `
+ message: `Unsuccessful PipelineExecutorResult, last catch`,
  order: [],
  value: {
  isSuccessful: false,
@@ -6123,7 +6184,7 @@
  className: 'MarkdownScraper',
  mimeTypes: ['text/markdown', 'text/plain'],
  documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/@@',
-
+ isAvailableInBrowser: true,
  // <- Note: [🌏] This is the only scraper which makes sense to be available in the browser, for scraping non-markdown sources in the browser use a remote server
  requiredExecutables: [],
  }); /* <- Note: [🤛] */
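Note: Scraper metadata now declares `isAvailableInBrowser` (true only for `MarkdownScraper` here; the document scrapers below are marked false). A sketch of how a consumer might filter scrapers by environment; the flag itself comes from this diff, but the way the register is listed is an assumption and may differ from the real API.

    // Pick only the scrapers usable in the current environment. Only the
    // `isAvailableInBrowser` flag is taken from this diff; everything else is illustrative.
    const isRunningInBrowser = typeof window !== 'undefined';

    function getUsableScrapers(scrapersMetadata) {
        return scrapersMetadata.filter(
            (metadata) => !isRunningInBrowser || metadata.isAvailableInBrowser,
        );
    }

    // e.g. getUsableScrapers(allScrapersMetadata) where `allScrapersMetadata` is however
    // the application enumerates the registered scraper metadata (assumed, not shown in this diff)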
@@ -6133,7 +6194,7 @@
  * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available known scrapers
  *
  * @public exported from `@promptbook/core`
- * @public exported from `@promptbook/
+ * @public exported from `@promptbook/wizard`
  * @public exported from `@promptbook/cli`
  */
  $scrapersMetadataRegister.register(markdownScraperMetadata);
@@ -6232,7 +6293,7 @@
  }
  // ---
  if (!llmTools.callEmbeddingModel) {
- // TODO: [🟥] Detect browser / node and make it
+ // TODO: [🟥] Detect browser / node and make it colorful
  console.error('No callEmbeddingModel function provided');
  }
  else {
@@ -6258,7 +6319,7 @@
  if (!(error instanceof PipelineExecutionError)) {
  throw error;
  }
- // TODO: [🟥] Detect browser / node and make it
+ // TODO: [🟥] Detect browser / node and make it colorful
  console.error(error, "<- Note: This error is not critical to prepare the pipeline, just knowledge pieces won't have embeddings");
  }
  return {
@@ -6289,7 +6350,7 @@
  className: 'DocumentScraper',
  mimeTypes: ['application/vnd.openxmlformats-officedocument.wordprocessingml.document'],
  documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/@@',
-
+ isAvailableInBrowser: false,
  // <- Note: [🌏] Only `MarkdownScraper` makes sense to be available in the browser, for scraping non-markdown sources in the browser use a remote server
  requiredExecutables: ['Pandoc'],
  }); /* <- Note: [🤛] */
@@ -6299,7 +6360,7 @@
  * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available known scrapers
  *
  * @public exported from `@promptbook/core`
- * @public exported from `@promptbook/
+ * @public exported from `@promptbook/wizard`
  * @public exported from `@promptbook/cli`
  */
  $scrapersMetadataRegister.register(documentScraperMetadata);
@@ -6425,7 +6486,7 @@
  className: 'LegacyDocumentScraper',
  mimeTypes: ['application/msword', 'text/rtf'],
  documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/@@',
-
+ isAvailableInBrowser: false,
  // <- Note: [🌏] Only `MarkdownScraper` makes sense to be available in the browser, for scraping non-markdown sources in the browser use a remote server
  requiredExecutables: [
  'Pandoc',
@@ -6439,7 +6500,7 @@
  * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available known scrapers
  *
  * @public exported from `@promptbook/core`
- * @public exported from `@promptbook/
+ * @public exported from `@promptbook/wizard`
  * @public exported from `@promptbook/cli`
  */
  $scrapersMetadataRegister.register(legacyDocumentScraperMetadata);
@@ -6600,7 +6661,7 @@
  * Warning: This is not useful for the end user, it is just a side effect of the mechanism that handles all available known scrapers
  *
  * @public exported from `@promptbook/legacy-documents`
- * @public exported from `@promptbook/
+ * @public exported from `@promptbook/wizard`
  * @public exported from `@promptbook/cli`
  */
  const _LegacyDocumentScraperRegistration = $scrapersRegister.register(createLegacyDocumentScraper);