@promptbook/markitdown 0.89.0 → 0.92.0-10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27) hide show
  1. package/README.md +4 -0
  2. package/esm/index.es.js +173 -30
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/core.index.d.ts +6 -0
  5. package/esm/typings/src/_packages/deepseek.index.d.ts +2 -0
  6. package/esm/typings/src/_packages/google.index.d.ts +2 -0
  7. package/esm/typings/src/_packages/utils.index.d.ts +2 -0
  8. package/esm/typings/src/cli/common/$provideLlmToolsForCli.d.ts +1 -1
  9. package/esm/typings/src/conversion/archive/loadArchive.d.ts +2 -2
  10. package/esm/typings/src/execution/CommonToolsOptions.d.ts +4 -0
  11. package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +12 -0
  12. package/esm/typings/src/execution/createPipelineExecutor/getReservedParametersForTask.d.ts +5 -0
  13. package/esm/typings/src/formats/csv/utils/csvParse.d.ts +12 -0
  14. package/esm/typings/src/formats/json/utils/jsonParse.d.ts +11 -0
  15. package/esm/typings/src/llm-providers/_common/filterModels.d.ts +15 -0
  16. package/esm/typings/src/llm-providers/_common/register/LlmToolsMetadata.d.ts +43 -0
  17. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +4 -0
  18. package/esm/typings/src/llm-providers/deepseek/deepseek-models.d.ts +23 -0
  19. package/esm/typings/src/llm-providers/google/google-models.d.ts +23 -0
  20. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +4 -0
  21. package/esm/typings/src/personas/preparePersona.d.ts +1 -1
  22. package/esm/typings/src/pipeline/PipelineJson/PersonaJson.d.ts +4 -2
  23. package/esm/typings/src/remote-server/openapi-types.d.ts +348 -6
  24. package/esm/typings/src/remote-server/openapi.d.ts +397 -3
  25. package/package.json +2 -2
  26. package/umd/index.umd.js +173 -30
  27. package/umd/index.umd.js.map +1 -1
package/README.md CHANGED
@@ -23,6 +23,10 @@
23
23
 
24
24
 
25
25
 
26
+ <blockquote style="color: #ff8811">
27
+ <b>⚠ Warning:</b> This is a pre-release version of the library. It is not yet ready for production use. Please look at <a href="https://www.npmjs.com/package/@promptbook/core?activeTab=versions">latest stable release</a>.
28
+ </blockquote>
29
+
26
30
  ## 📦 Package `@promptbook/markitdown`
27
31
 
28
32
  - Promptbooks are [divided into several](#-packages) packages, all are published from [single monorepo](https://github.com/webgptorg/promptbook).
package/esm/index.es.js CHANGED
@@ -26,7 +26,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
26
26
  * @generated
27
27
  * @see https://github.com/webgptorg/promptbook
28
28
  */
29
- const PROMPTBOOK_ENGINE_VERSION = '0.89.0';
29
+ const PROMPTBOOK_ENGINE_VERSION = '0.92.0-10';
30
30
  /**
31
31
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
32
32
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -847,7 +847,7 @@ async function getScraperIntermediateSource(source, options) {
847
847
  * Note: [🟢] Code in this file should never be never released in packages that could be imported into browser environment
848
848
  */
849
849
 
850
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> 
{knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just 
title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the 
model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModelNames}` List of available model names separated by comma (,)\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n\\`\\`\\`json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. 
It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 
words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
850
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> 
{knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just 
title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModels",description:"List of available model names together with their descriptions as JSON",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelsRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n```json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n```\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting 
models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n```json\n{availableModels}\n```\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelsRequirements",format:"JSON",dependentParameterNames:["availableModels","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModels}` List of available model names together with their descriptions as JSON\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelsRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n\\`\\`\\`json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are 
the available models:\n\n\\`\\`\\`json\n{availableModels}\n\\`\\`\\`\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelsRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- 
Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
851
851
 
852
852
  /**
853
853
  * Checks if value is valid email
@@ -1890,7 +1890,7 @@ function extractParameterNames(template) {
1890
1890
  */
1891
1891
  function unpreparePipeline(pipeline) {
1892
1892
  let { personas, knowledgeSources, tasks } = pipeline;
1893
- personas = personas.map((persona) => ({ ...persona, modelRequirements: undefined, preparationIds: undefined }));
1893
+ personas = personas.map((persona) => ({ ...persona, modelsRequirements: undefined, preparationIds: undefined }));
1894
1894
  knowledgeSources = knowledgeSources.map((knowledgeSource) => ({ ...knowledgeSource, preparationIds: undefined }));
1895
1895
  tasks = tasks.map((task) => {
1896
1896
  let { dependentParameterNames } = task;
@@ -2100,7 +2100,7 @@ function isPipelinePrepared(pipeline) {
2100
2100
  if (pipeline.title === undefined || pipeline.title === '' || pipeline.title === DEFAULT_BOOK_TITLE) {
2101
2101
  return false;
2102
2102
  }
2103
- if (!pipeline.personas.every((persona) => persona.modelRequirements !== undefined)) {
2103
+ if (!pipeline.personas.every((persona) => persona.modelsRequirements !== undefined)) {
2104
2104
  return false;
2105
2105
  }
2106
2106
  if (!pipeline.knowledgeSources.every((knowledgeSource) => knowledgeSource.preparationIds !== undefined)) {
@@ -2124,6 +2124,45 @@ function isPipelinePrepared(pipeline) {
2124
2124
  * - [♨] Are tasks prepared
2125
2125
  */
2126
2126
 
2127
+ /**
2128
+ * Converts a JavaScript Object Notation (JSON) string into an object.
2129
+ *
2130
+ * Note: This is wrapper around `JSON.parse()` with better error and type handling
2131
+ *
2132
+ * @public exported from `@promptbook/utils`
2133
+ */
2134
+ function jsonParse(value) {
2135
+ if (value === undefined) {
2136
+ throw new Error(`Can not parse JSON from undefined value.`);
2137
+ }
2138
+ else if (typeof value !== 'string') {
2139
+ console.error('Can not parse JSON from non-string value.', { text: value });
2140
+ throw new Error(spaceTrim(`
2141
+ Can not parse JSON from non-string value.
2142
+
2143
+ The value type: ${typeof value}
2144
+ See more in console.
2145
+ `));
2146
+ }
2147
+ try {
2148
+ return JSON.parse(value);
2149
+ }
2150
+ catch (error) {
2151
+ if (!(error instanceof Error)) {
2152
+ throw error;
2153
+ }
2154
+ throw new Error(spaceTrim((block) => `
2155
+ ${block(error.message)}
2156
+
2157
+ The JSON text:
2158
+ ${block(value)}
2159
+ `));
2160
+ }
2161
+ }
2162
+ /**
2163
+ * TODO: !!!! Use in Promptbook.studio
2164
+ */
2165
+
2127
2166
  /**
2128
2167
  * Recursively converts JSON strings to JSON objects
2129
2168
 
@@ -2142,7 +2181,7 @@ function jsonStringsToJsons(object) {
2142
2181
  const newObject = { ...object };
2143
2182
  for (const [key, value] of Object.entries(object)) {
2144
2183
  if (typeof value === 'string' && isValidJsonString(value)) {
2145
- newObject[key] = JSON.parse(value);
2184
+ newObject[key] = jsonParse(value);
2146
2185
  }
2147
2186
  else {
2148
2187
  newObject[key] = jsonStringsToJsons(value);
@@ -2974,27 +3013,48 @@ async function preparePersona(personaDescription, tools, options) {
2974
3013
  pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book'),
2975
3014
  tools,
2976
3015
  });
2977
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
2978
3016
  const _llms = arrayableToArray(tools.llm);
2979
3017
  const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
2980
- const availableModels = await llmTools.listModels();
2981
- const availableModelNames = availableModels
3018
+ const availableModels = (await llmTools.listModels())
2982
3019
  .filter(({ modelVariant }) => modelVariant === 'CHAT')
2983
- .map(({ modelName }) => modelName)
2984
- .join(',');
2985
- const result = await preparePersonaExecutor({ availableModelNames, personaDescription }).asPromise();
3020
+ .map(({ modelName, modelDescription }) => ({
3021
+ modelName,
3022
+ modelDescription,
3023
+ // <- Note: `modelTitle` and `modelVariant` is not relevant for this task
3024
+ }));
3025
+ const result = await preparePersonaExecutor({
3026
+ availableModels /* <- Note: Passing as JSON */,
3027
+ personaDescription,
3028
+ }).asPromise();
2986
3029
  const { outputParameters } = result;
2987
- const { modelRequirements: modelRequirementsRaw } = outputParameters;
2988
- const modelRequirements = JSON.parse(modelRequirementsRaw);
3030
+ const { modelsRequirements: modelsRequirementsJson } = outputParameters;
3031
+ let modelsRequirementsUnchecked = jsonParse(modelsRequirementsJson);
2989
3032
  if (isVerbose) {
2990
- console.info(`PERSONA ${personaDescription}`, modelRequirements);
3033
+ console.info(`PERSONA ${personaDescription}`, modelsRequirementsUnchecked);
2991
3034
  }
2992
- const { modelName, systemMessage, temperature } = modelRequirements;
2993
- return {
3035
+ if (!Array.isArray(modelsRequirementsUnchecked)) {
3036
+ // <- TODO: Book should have syntax and system to enforce shape of JSON
3037
+ modelsRequirementsUnchecked = [modelsRequirementsUnchecked];
3038
+ /*
3039
+ throw new UnexpectedError(
3040
+ spaceTrim(
3041
+ (block) => `
3042
+ Invalid \`modelsRequirements\`:
3043
+
3044
+ \`\`\`json
3045
+ ${block(JSON.stringify(modelsRequirementsUnchecked, null, 4))}
3046
+ \`\`\`
3047
+ `,
3048
+ ),
3049
+ );
3050
+ */
3051
+ }
3052
+ const modelsRequirements = modelsRequirementsUnchecked.map((modelRequirements) => ({
2994
3053
  modelVariant: 'CHAT',
2995
- modelName,
2996
- systemMessage,
2997
- temperature,
3054
+ ...modelRequirements,
3055
+ }));
3056
+ return {
3057
+ modelsRequirements,
2998
3058
  };
2999
3059
  }
3000
3060
  /**
@@ -3432,7 +3492,7 @@ async function makeKnowledgeSourceHandler(knowledgeSource, tools, options) {
3432
3492
  > },
3433
3493
  */
3434
3494
  async asJson() {
3435
- return JSON.parse(await tools.fs.readFile(filename, 'utf-8'));
3495
+ return jsonParse(await tools.fs.readFile(filename, 'utf-8'));
3436
3496
  },
3437
3497
  async asText() {
3438
3498
  return await tools.fs.readFile(filename, 'utf-8');
@@ -3690,14 +3750,14 @@ async function preparePipeline(pipeline, tools, options) {
3690
3750
  // TODO: [🖌][🧠] Implement some `mapAsync` function
3691
3751
  const preparedPersonas = new Array(personas.length);
3692
3752
  await forEachAsync(personas, { maxParallelCount /* <- TODO: [🪂] When there are subtasks, this maximul limit can be broken */ }, async (persona, index) => {
3693
- const modelRequirements = await preparePersona(persona.description, { ...tools, llm: llmToolsWithUsage }, {
3753
+ const { modelsRequirements } = await preparePersona(persona.description, { ...tools, llm: llmToolsWithUsage }, {
3694
3754
  rootDirname,
3695
3755
  maxParallelCount /* <- TODO: [🪂] */,
3696
3756
  isVerbose,
3697
3757
  });
3698
3758
  const preparedPersona = {
3699
3759
  ...persona,
3700
- modelRequirements,
3760
+ modelsRequirements,
3701
3761
  preparationIds: [/* TODO: [🧊] -> */ currentPreparation.id],
3702
3762
  // <- TODO: [🍙] Make some standard order of json properties
3703
3763
  };
@@ -4036,6 +4096,24 @@ function isValidCsvString(value) {
4036
4096
  }
4037
4097
  }
4038
4098
 
4099
/**
 * Converts a CSV string into an object
 *
 * Note: This is wrapper around `papaparse.parse()` with better autohealing
 *
 * @private - for now until `@promptbook/csv` is released
 */
function csvParse(value /* <- TODO: string_csv */, settings, schema /* <- TODO: Make CSV Schemas */) {
    const mergedSettings = { ...settings, ...MANDATORY_CSV_SETTINGS };

    // Note: Autoheal invalid '\n' characters - when the data contains carriage
    //       returns that the configured `newline` does not account for,
    //       normalize CRLF and lone CR to plain LF before parsing
    const containsCarriageReturn = value.includes('\r');
    const newlineCoversCarriageReturn =
        mergedSettings.newline !== undefined && mergedSettings.newline !== '' && mergedSettings.newline.includes('\r');
    if (mergedSettings.newline && !newlineCoversCarriageReturn && containsCarriageReturn) {
        console.warn('CSV string contains carriage return characters, but in the CSV settings the `newline` setting does not include them. Autohealing the CSV string.');
        value = value.replace(/\r\n/g, '\n').replace(/\r/g, '\n');
    }

    return parse(value, mergedSettings);
}
4116
+
4039
4117
  /**
4040
4118
  * Definition for CSV spreadsheet
4041
4119
  *
@@ -4058,8 +4136,7 @@ const CsvFormatDefinition = {
4058
4136
  {
4059
4137
  subvalueName: 'ROW',
4060
4138
  async mapValues(value, outputParameterName, settings, mapCallback) {
4061
- // TODO: [👨🏾‍🤝‍👨🏼] DRY csv parsing
4062
- const csv = parse(value, { ...settings, ...MANDATORY_CSV_SETTINGS });
4139
+ const csv = csvParse(value, settings);
4063
4140
  if (csv.errors.length !== 0) {
4064
4141
  throw new CsvFormatError(spaceTrim((block) => `
4065
4142
  CSV parsing error
@@ -4089,8 +4166,7 @@ const CsvFormatDefinition = {
4089
4166
  {
4090
4167
  subvalueName: 'CELL',
4091
4168
  async mapValues(value, outputParameterName, settings, mapCallback) {
4092
- // TODO: [👨🏾‍🤝‍👨🏼] DRY csv parsing
4093
- const csv = parse(value, { ...settings, ...MANDATORY_CSV_SETTINGS });
4169
+ const csv = csvParse(value, settings);
4094
4170
  if (csv.errors.length !== 0) {
4095
4171
  throw new CsvFormatError(spaceTrim((block) => `
4096
4172
  CSV parsing error
@@ -5119,13 +5195,79 @@ async function getExamplesForTask(task) {
5119
5195
/**
 * Selects the knowledge pieces most relevant to the given task and renders them
 * as a markdown bullet list
 *
 * Here is the place where RAG (retrieval-augmented generation) happens
 *
 * @private internal utility of `createPipelineExecutor`
 */
async function getKnowledgeForTask(options) {
    const { tools, preparedPipeline, task } = options;
    const firstKnowlegePiece = preparedPipeline.knowledgePieces[0];
    const firstKnowlegeIndex = firstKnowlegePiece === null || firstKnowlegePiece === void 0 ? void 0 : firstKnowlegePiece.index[0];
    // <- TODO: Do not use just first knowledge piece and first index to determine embedding model, use also keyword search
    if (firstKnowlegePiece === undefined || firstKnowlegeIndex === undefined) {
        return 'No knowledge pieces found';
    }
    // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
    const _llms = arrayableToArray(tools.llm);
    const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
    // Embed the task content with the same model that indexed the knowledge pieces,
    // so the vectors are comparable
    const taskEmbeddingPrompt = {
        title: 'Knowledge Search',
        modelRequirements: {
            modelVariant: 'EMBEDDING',
            modelName: firstKnowlegeIndex.modelName,
        },
        content: task.content,
        parameters: {
            /* !!!!!!!! */
        },
    };
    const taskEmbeddingResult = await llmTools.callEmbeddingModel(taskEmbeddingPrompt);
    // Note: `computeCosineSimilarity` returns a distance-like score - LOWER means MORE relevant
    const knowledgePiecesWithRelevance = preparedPipeline.knowledgePieces.map((knowledgePiece) => {
        const { index } = knowledgePiece;
        const knowledgePieceIndex = index.find((i) => i.modelName === firstKnowlegeIndex.modelName);
        // <- TODO: Do not use just first knowledge piece and first index to determine embedding model
        if (knowledgePieceIndex === undefined) {
            // Note: Pieces without a matching embedding index cannot be scored -
            //       rank them LAST (max distance), not first
            return {
                content: knowledgePiece.content,
                relevance: Number.POSITIVE_INFINITY,
            };
        }
        const relevance = computeCosineSimilarity(knowledgePieceIndex.position, taskEmbeddingResult.content);
        return {
            content: knowledgePiece.content,
            relevance,
        };
    });
    // Ascending sort: smallest distance (= most relevant) first, then keep only the top 5
    const knowledgePiecesSorted = [...knowledgePiecesWithRelevance].sort((a, b) => a.relevance - b.relevance);
    const knowledgePiecesLimited = knowledgePiecesSorted.slice(0, 5);
    return knowledgePiecesLimited.map(({ content }) => `- ${content}`).join('\n');
    // <- TODO: [🧠] Some smart aggregation of knowledge pieces, single-line vs multi-line vs mixed
}
5256
// TODO: !!!!!! Annotate + to new file
/**
 * Computes the cosine DISTANCE between two embedding vectors
 *
 * Note: Despite the name, this returns `1 - cosineSimilarity`, so LOWER values mean
 * the vectors are MORE similar (0 = same direction, 1 = orthogonal, 2 = opposite).
 *
 * @param embeddingVector1 - first embedding vector
 * @param embeddingVector2 - second embedding vector, must have the same length
 * @returns cosine distance in range [0, 2]; `2` when either vector has zero magnitude
 * @throws {TypeError} when the vectors differ in length
 */
function computeCosineSimilarity(embeddingVector1, embeddingVector2) {
    if (embeddingVector1.length !== embeddingVector2.length) {
        throw new TypeError('Embedding vectors must have the same length');
    }
    const dotProduct = embeddingVector1.reduce((sum, value, index) => sum + value * embeddingVector2[index], 0);
    const magnitude1 = Math.sqrt(embeddingVector1.reduce((sum, value) => sum + value * value, 0));
    const magnitude2 = Math.sqrt(embeddingVector2.reduce((sum, value) => sum + value * value, 0));
    // Guard against division by zero: a zero-magnitude vector has no direction,
    // so report the maximum distance instead of propagating NaN into callers' sorts
    if (magnitude1 === 0 || magnitude2 === 0) {
        return 2;
    }
    return 1 - dotProduct / (magnitude1 * magnitude2);
}
/**
 * TODO: !!!! Verify if this is working
 * TODO: [♨] Implement Better - use keyword search
 * TODO: [♨] Examples of values
 */
5129
5271
 
5130
5272
  /**
5131
5273
  * @@@
@@ -5133,9 +5275,9 @@ async function getKnowledgeForTask(options) {
5133
5275
  * @private internal utility of `createPipelineExecutor`
5134
5276
  */
5135
5277
  async function getReservedParametersForTask(options) {
5136
- const { preparedPipeline, task, pipelineIdentification } = options;
5278
+ const { tools, preparedPipeline, task, pipelineIdentification } = options;
5137
5279
  const context = await getContextForTask(); // <- [🏍]
5138
- const knowledge = await getKnowledgeForTask({ preparedPipeline, task });
5280
+ const knowledge = await getKnowledgeForTask({ tools, preparedPipeline, task });
5139
5281
  const examples = await getExamplesForTask();
5140
5282
  const currentDate = new Date().toISOString(); // <- TODO: [🧠][💩] Better
5141
5283
  const modelName = RESERVED_PARAMETER_MISSING_VALUE;
@@ -5197,6 +5339,7 @@ async function executeTask(options) {
5197
5339
  }
5198
5340
  const definedParameters = Object.freeze({
5199
5341
  ...(await getReservedParametersForTask({
5342
+ tools,
5200
5343
  preparedPipeline,
5201
5344
  task: currentTask,
5202
5345
  pipelineIdentification,