@promptbook/documents 0.89.0 → 0.92.0-11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. package/README.md +4 -0
  2. package/esm/index.es.js +173 -30
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/core.index.d.ts +6 -0
  5. package/esm/typings/src/_packages/deepseek.index.d.ts +2 -0
  6. package/esm/typings/src/_packages/google.index.d.ts +2 -0
  7. package/esm/typings/src/_packages/utils.index.d.ts +2 -0
  8. package/esm/typings/src/cli/common/$provideLlmToolsForCli.d.ts +1 -1
  9. package/esm/typings/src/conversion/archive/loadArchive.d.ts +2 -2
  10. package/esm/typings/src/execution/CommonToolsOptions.d.ts +4 -0
  11. package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +12 -0
  12. package/esm/typings/src/execution/createPipelineExecutor/getReservedParametersForTask.d.ts +5 -0
  13. package/esm/typings/src/formats/csv/utils/csvParse.d.ts +12 -0
  14. package/esm/typings/src/formats/json/utils/jsonParse.d.ts +11 -0
  15. package/esm/typings/src/llm-providers/_common/filterModels.d.ts +15 -0
  16. package/esm/typings/src/llm-providers/_common/register/LlmToolsMetadata.d.ts +43 -0
  17. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +4 -0
  18. package/esm/typings/src/llm-providers/deepseek/deepseek-models.d.ts +23 -0
  19. package/esm/typings/src/llm-providers/google/google-models.d.ts +23 -0
  20. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +4 -0
  21. package/esm/typings/src/personas/preparePersona.d.ts +1 -1
  22. package/esm/typings/src/pipeline/PipelineJson/PersonaJson.d.ts +4 -2
  23. package/esm/typings/src/remote-server/openapi-types.d.ts +348 -6
  24. package/esm/typings/src/remote-server/openapi.d.ts +397 -3
  25. package/package.json +2 -2
  26. package/umd/index.umd.js +173 -30
  27. package/umd/index.umd.js.map +1 -1
package/README.md CHANGED
@@ -23,6 +23,10 @@
 
 
 
+ <blockquote style="color: #ff8811">
+ <b>⚠ Warning:</b> This is a pre-release version of the library. It is not yet ready for production use. Please look at <a href="https://www.npmjs.com/package/@promptbook/core?activeTab=versions">latest stable release</a>.
+ </blockquote>
+
  ## 📦 Package `@promptbook/documents`
 
  - Promptbooks are [divided into several](#-packages) packages, all are published from [single monorepo](https://github.com/webgptorg/promptbook).
package/esm/index.es.js CHANGED
@@ -28,7 +28,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
- const PROMPTBOOK_ENGINE_VERSION = '0.89.0';
+ const PROMPTBOOK_ENGINE_VERSION = '0.92.0-11';
  /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -1039,7 +1039,7 @@ async function getScraperIntermediateSource(source, options) {
  * Note: [🟢] Code in this file should never be never released in packages that could be imported into browser environment
  */
 
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModelNames}` List of available model names separated by comma (,)\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n\\`\\`\\`json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModels",description:"List of available model names together with their descriptions as JSON",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelsRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n```json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n```\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n```json\n{availableModels}\n```\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. 
By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelsRequirements",format:"JSON",dependentParameterNames:["availableModels","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModels}` List of available model names together with their descriptions as JSON\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelsRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n\\`\\`\\`json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n\\`\\`\\`json\n{availableModels}\n\\`\\`\\`\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelsRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
 
  /**
  * Checks if value is valid email
@@ -2082,7 +2082,7 @@ function extractParameterNames(template) {
  */
  function unpreparePipeline(pipeline) {
  let { personas, knowledgeSources, tasks } = pipeline;
- personas = personas.map((persona) => ({ ...persona, modelRequirements: undefined, preparationIds: undefined }));
+ personas = personas.map((persona) => ({ ...persona, modelsRequirements: undefined, preparationIds: undefined }));
  knowledgeSources = knowledgeSources.map((knowledgeSource) => ({ ...knowledgeSource, preparationIds: undefined }));
  tasks = tasks.map((task) => {
  let { dependentParameterNames } = task;
@@ -2274,7 +2274,7 @@ function isPipelinePrepared(pipeline) {
  if (pipeline.title === undefined || pipeline.title === '' || pipeline.title === DEFAULT_BOOK_TITLE) {
  return false;
  }
- if (!pipeline.personas.every((persona) => persona.modelRequirements !== undefined)) {
+ if (!pipeline.personas.every((persona) => persona.modelsRequirements !== undefined)) {
  return false;
  }
  if (!pipeline.knowledgeSources.every((knowledgeSource) => knowledgeSource.preparationIds !== undefined)) {
@@ -2298,6 +2298,45 @@ function isPipelinePrepared(pipeline) {
  * - [♨] Are tasks prepared
  */
 
+ /**
+ * Converts a JavaScript Object Notation (JSON) string into an object.
+ *
+ * Note: This is wrapper around `JSON.parse()` with better error and type handling
+ *
+ * @public exported from `@promptbook/utils`
+ */
+ function jsonParse(value) {
+ if (value === undefined) {
+ throw new Error(`Can not parse JSON from undefined value.`);
+ }
+ else if (typeof value !== 'string') {
+ console.error('Can not parse JSON from non-string value.', { text: value });
+ throw new Error(spaceTrim$1(`
+ Can not parse JSON from non-string value.
+
+ The value type: ${typeof value}
+ See more in console.
+ `));
+ }
+ try {
+ return JSON.parse(value);
+ }
+ catch (error) {
+ if (!(error instanceof Error)) {
+ throw error;
+ }
+ throw new Error(spaceTrim$1((block) => `
+ ${block(error.message)}
+
+ The JSON text:
+ ${block(value)}
+ `));
+ }
+ }
+ /**
+ * TODO: !!!! Use in Promptbook.studio
+ */
+
  /**
  * Recursively converts JSON strings to JSON objects
 
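A quick sketch of how the newly added `jsonParse` behaves compared to bare `JSON.parse()`; the import path follows the `@public exported from @promptbook/utils` annotation above, and the sample strings are made up for illustration:

```js
import { jsonParse } from '@promptbook/utils';

// Well-formed JSON parses exactly as with JSON.parse()
const requirements = jsonParse('{"modelName":"gpt-4o","temperature":0.7}');
console.info(requirements.modelName); // -> 'gpt-4o'

// Malformed JSON still throws, but the error message also echoes the offending text,
// which makes failures inside pipeline outputs easier to trace
try {
    jsonParse('{"modelName": gpt-4o}');
} catch (error) {
    console.error(error.message); // includes "The JSON text:" followed by the original string
}
```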
@@ -2316,7 +2355,7 @@ function jsonStringsToJsons(object) {
  const newObject = { ...object };
  for (const [key, value] of Object.entries(object)) {
  if (typeof value === 'string' && isValidJsonString(value)) {
- newObject[key] = JSON.parse(value);
+ newObject[key] = jsonParse(value);
  }
  else {
  newObject[key] = jsonStringsToJsons(value);
@@ -3148,27 +3187,48 @@ async function preparePersona(personaDescription, tools, options) {
  pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book'),
  tools,
  });
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
  const _llms = arrayableToArray(tools.llm);
  const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
- const availableModels = await llmTools.listModels();
- const availableModelNames = availableModels
+ const availableModels = (await llmTools.listModels())
  .filter(({ modelVariant }) => modelVariant === 'CHAT')
- .map(({ modelName }) => modelName)
- .join(',');
- const result = await preparePersonaExecutor({ availableModelNames, personaDescription }).asPromise();
+ .map(({ modelName, modelDescription }) => ({
+ modelName,
+ modelDescription,
+ // <- Note: `modelTitle` and `modelVariant` is not relevant for this task
+ }));
+ const result = await preparePersonaExecutor({
+ availableModels /* <- Note: Passing as JSON */,
+ personaDescription,
+ }).asPromise();
  const { outputParameters } = result;
- const { modelRequirements: modelRequirementsRaw } = outputParameters;
- const modelRequirements = JSON.parse(modelRequirementsRaw);
+ const { modelsRequirements: modelsRequirementsJson } = outputParameters;
+ let modelsRequirementsUnchecked = jsonParse(modelsRequirementsJson);
  if (isVerbose) {
- console.info(`PERSONA ${personaDescription}`, modelRequirements);
+ console.info(`PERSONA ${personaDescription}`, modelsRequirementsUnchecked);
  }
- const { modelName, systemMessage, temperature } = modelRequirements;
- return {
+ if (!Array.isArray(modelsRequirementsUnchecked)) {
+ // <- TODO: Book should have syntax and system to enforce shape of JSON
+ modelsRequirementsUnchecked = [modelsRequirementsUnchecked];
+ /*
+ throw new UnexpectedError(
+ spaceTrim(
+ (block) => `
+ Invalid \`modelsRequirements\`:
+
+ \`\`\`json
+ ${block(JSON.stringify(modelsRequirementsUnchecked, null, 4))}
+ \`\`\`
+ `,
+ ),
+ );
+ */
+ }
+ const modelsRequirements = modelsRequirementsUnchecked.map((modelRequirements) => ({
  modelVariant: 'CHAT',
- modelName,
- systemMessage,
- temperature,
+ ...modelRequirements,
+ }));
+ return {
+ modelsRequirements,
  };
  }
  /**
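For orientation, roughly the data shapes flowing through the reworked `preparePersona` above. The model names, system messages, and temperatures are the illustrative values from the `prepare-persona.book` example prompt; the `modelDescription` texts are hypothetical:

```js
// Input parameter `{availableModels}` is now passed as JSON (previously `{availableModelNames}`,
// a comma-separated string of names):
const availableModels = [
    { modelName: 'gpt-4o', modelDescription: 'OpenAI flagship chat model' /* <- hypothetical */ },
    { modelName: 'claude-3-5-sonnet', modelDescription: 'Anthropic chat model' /* <- hypothetical */ },
];

// Output parameter `{modelsRequirements}` is expected to be a JSON array sorted best-fitting
// first; `preparePersona` spreads each item and pins `modelVariant: 'CHAT'`:
const modelsRequirements = [
    { modelVariant: 'CHAT', modelName: 'gpt-4o', systemMessage: 'You are experienced AI engineer and helpfull assistant.', temperature: 0.7 },
    { modelVariant: 'CHAT', modelName: 'claude-3-5-sonnet', systemMessage: 'You are a friendly and knowledgeable chatbot.', temperature: 0.5 },
];
```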
@@ -3596,7 +3656,7 @@ async function makeKnowledgeSourceHandler(knowledgeSource, tools, options) {
  > },
  */
  async asJson() {
- return JSON.parse(await tools.fs.readFile(filename, 'utf-8'));
+ return jsonParse(await tools.fs.readFile(filename, 'utf-8'));
  },
  async asText() {
  return await tools.fs.readFile(filename, 'utf-8');
@@ -3854,14 +3914,14 @@ async function preparePipeline(pipeline, tools, options) {
  // TODO: [🖌][🧠] Implement some `mapAsync` function
  const preparedPersonas = new Array(personas.length);
  await forEachAsync(personas, { maxParallelCount /* <- TODO: [🪂] When there are subtasks, this maximul limit can be broken */ }, async (persona, index) => {
- const modelRequirements = await preparePersona(persona.description, { ...tools, llm: llmToolsWithUsage }, {
+ const { modelsRequirements } = await preparePersona(persona.description, { ...tools, llm: llmToolsWithUsage }, {
  rootDirname,
  maxParallelCount /* <- TODO: [🪂] */,
  isVerbose,
  });
  const preparedPersona = {
  ...persona,
- modelRequirements,
+ modelsRequirements,
  preparationIds: [/* TODO: [🧊] -> */ currentPreparation.id],
  // <- TODO: [🍙] Make some standard order of json properties
  };
@@ -4200,6 +4260,24 @@ function isValidCsvString(value) {
  }
  }
 
+ /**
+ * Converts a CSV string into an object
+ *
+ * Note: This is wrapper around `papaparse.parse()` with better autohealing
+ *
+ * @private - for now until `@promptbook/csv` is released
+ */
+ function csvParse(value /* <- TODO: string_csv */, settings, schema /* <- TODO: Make CSV Schemas */) {
+ settings = { ...settings, ...MANDATORY_CSV_SETTINGS };
+ // Note: Autoheal invalid '\n' characters
+ if (settings.newline && !settings.newline.includes('\r') && value.includes('\r')) {
+ console.warn('CSV string contains carriage return characters, but in the CSV settings the `newline` setting does not include them. Autohealing the CSV string.');
+ value = value.replace(/\r\n/g, '\n').replace(/\r/g, '\n');
+ }
+ const csv = parse(value, settings);
+ return csv;
+ }
+
  /**
  * Definition for CSV spreadsheet
  *
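A minimal sketch of what the autohealing in the new `csvParse` wrapper does before delegating to papaparse; the sample CSV and settings below are made up for illustration, and the merged-in `MANDATORY_CSV_SETTINGS` are omitted:

```js
import { parse } from 'papaparse';

// Hypothetical case: the settings ask for '\n' newlines, but the data arrives with '\r\n'
const settings = { newline: '\n', header: true };
const value = 'name,score\r\nAlice,1\r\nBob,2';

// csvParse logs a warning and normalizes '\r\n' and stray '\r' to '\n' before parsing,
// so the rows come out the same as for a clean LF-only input:
const healed = value.replace(/\r\n/g, '\n').replace(/\r/g, '\n');
console.info(parse(healed, settings).data);
// -> [ { name: 'Alice', score: '1' }, { name: 'Bob', score: '2' } ]
```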
@@ -4222,8 +4300,7 @@ const CsvFormatDefinition = {
  {
  subvalueName: 'ROW',
  async mapValues(value, outputParameterName, settings, mapCallback) {
- // TODO: [👨🏾‍🤝‍👨🏼] DRY csv parsing
- const csv = parse(value, { ...settings, ...MANDATORY_CSV_SETTINGS });
+ const csv = csvParse(value, settings);
  if (csv.errors.length !== 0) {
  throw new CsvFormatError(spaceTrim$1((block) => `
  CSV parsing error
@@ -4253,8 +4330,7 @@ const CsvFormatDefinition = {
  {
  subvalueName: 'CELL',
  async mapValues(value, outputParameterName, settings, mapCallback) {
- // TODO: [👨🏾‍🤝‍👨🏼] DRY csv parsing
- const csv = parse(value, { ...settings, ...MANDATORY_CSV_SETTINGS });
+ const csv = csvParse(value, settings);
  if (csv.errors.length !== 0) {
  throw new CsvFormatError(spaceTrim$1((block) => `
  CSV parsing error
@@ -5283,13 +5359,79 @@ async function getExamplesForTask(task) {
  /**
  * @@@
  *
+ * Here is the place where RAG (retrieval-augmented generation) happens
+ *
  * @private internal utility of `createPipelineExecutor`
  */
  async function getKnowledgeForTask(options) {
- const { preparedPipeline, task } = options;
- return preparedPipeline.knowledgePieces.map(({ content }) => `- ${content}`).join('\n');
+ const { tools, preparedPipeline, task } = options;
+ const firstKnowlegePiece = preparedPipeline.knowledgePieces[0];
+ const firstKnowlegeIndex = firstKnowlegePiece === null || firstKnowlegePiece === void 0 ? void 0 : firstKnowlegePiece.index[0];
+ // <- TODO: Do not use just first knowledge piece and first index to determine embedding model, use also keyword search
+ if (firstKnowlegePiece === undefined || firstKnowlegeIndex === undefined) {
+ return 'No knowledge pieces found';
+ }
+ // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
+ const _llms = arrayableToArray(tools.llm);
+ const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+ const taskEmbeddingPrompt = {
+ title: 'Knowledge Search',
+ modelRequirements: {
+ modelVariant: 'EMBEDDING',
+ modelName: firstKnowlegeIndex.modelName,
+ },
+ content: task.content,
+ parameters: {
+ /* !!!!!!!! */
+ },
+ };
+ const taskEmbeddingResult = await llmTools.callEmbeddingModel(taskEmbeddingPrompt);
+ const knowledgePiecesWithRelevance = preparedPipeline.knowledgePieces.map((knowledgePiece) => {
+ const { index } = knowledgePiece;
+ const knowledgePieceIndex = index.find((i) => i.modelName === firstKnowlegeIndex.modelName);
+ // <- TODO: Do not use just first knowledge piece and first index to determine embedding model
+ if (knowledgePieceIndex === undefined) {
+ return {
+ content: knowledgePiece.content,
+ relevance: 0,
+ };
+ }
+ const relevance = computeCosineSimilarity(knowledgePieceIndex.position, taskEmbeddingResult.content);
+ return {
+ content: knowledgePiece.content,
+ relevance,
+ };
+ });
+ const knowledgePiecesSorted = knowledgePiecesWithRelevance.sort((a, b) => a.relevance - b.relevance);
+ const knowledgePiecesLimited = knowledgePiecesSorted.slice(0, 5);
+ console.log('!!! Embedding', {
+ task,
+ taskEmbeddingPrompt,
+ taskEmbeddingResult,
+ firstKnowlegePiece,
+ firstKnowlegeIndex,
+ knowledgePiecesWithRelevance,
+ knowledgePiecesSorted,
+ knowledgePiecesLimited,
+ });
+ return knowledgePiecesLimited.map(({ content }) => `- ${content}`).join('\n');
  // <- TODO: [🧠] Some smart aggregation of knowledge pieces, single-line vs multi-line vs mixed
  }
+ // TODO: !!!!!! Annotate + to new file
+ function computeCosineSimilarity(embeddingVector1, embeddingVector2) {
+ if (embeddingVector1.length !== embeddingVector2.length) {
+ throw new TypeError('Embedding vectors must have the same length');
+ }
+ const dotProduct = embeddingVector1.reduce((sum, value, index) => sum + value * embeddingVector2[index], 0);
+ const magnitude1 = Math.sqrt(embeddingVector1.reduce((sum, value) => sum + value * value, 0));
+ const magnitude2 = Math.sqrt(embeddingVector2.reduce((sum, value) => sum + value * value, 0));
+ return 1 - dotProduct / (magnitude1 * magnitude2);
+ }
+ /**
+ * TODO: !!!! Verify if this is working
+ * TODO: [♨] Implement Better - use keyword search
+ * TODO: [♨] Examples of values
+ */
 
  /**
  * @@@
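For reference, a self-contained illustration of the scoring used by `computeCosineSimilarity` above: the function returns `1 - cos(θ)` (a cosine distance), which is why `knowledgePiecesWithRelevance` is sorted in ascending order before the top 5 pieces are kept. The 3-dimensional vectors below are made up; real embedding vectors have hundreds of dimensions.

```js
// Same formula as the helper added above: 1 - (a · b) / (|a| · |b|)
function computeCosineSimilarity(a, b) {
    const dotProduct = a.reduce((sum, value, index) => sum + value * b[index], 0);
    const magnitude1 = Math.sqrt(a.reduce((sum, value) => sum + value * value, 0));
    const magnitude2 = Math.sqrt(b.reduce((sum, value) => sum + value * value, 0));
    return 1 - dotProduct / (magnitude1 * magnitude2);
}

const taskEmbedding = [1, 0, 0];
console.info(computeCosineSimilarity(taskEmbedding, [1, 0, 0]));     // 0     <- same direction, ranked first
console.info(computeCosineSimilarity(taskEmbedding, [0.7, 0.7, 0])); // ~0.29
console.info(computeCosineSimilarity(taskEmbedding, [0, 1, 0]));     // 1     <- orthogonal, ranked last
```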
@@ -5297,9 +5439,9 @@ async function getKnowledgeForTask(options) {
  * @private internal utility of `createPipelineExecutor`
  */
  async function getReservedParametersForTask(options) {
- const { preparedPipeline, task, pipelineIdentification } = options;
+ const { tools, preparedPipeline, task, pipelineIdentification } = options;
  const context = await getContextForTask(); // <- [🏍]
- const knowledge = await getKnowledgeForTask({ preparedPipeline, task });
+ const knowledge = await getKnowledgeForTask({ tools, preparedPipeline, task });
  const examples = await getExamplesForTask();
  const currentDate = new Date().toISOString(); // <- TODO: [🧠][💩] Better
  const modelName = RESERVED_PARAMETER_MISSING_VALUE;
@@ -5361,6 +5503,7 @@ async function executeTask(options) {
  }
  const definedParameters = Object.freeze({
  ...(await getReservedParametersForTask({
+ tools,
  preparedPipeline,
  task: currentTask,
  pipelineIdentification,