@promptbook/pdf 0.89.0 → 0.92.0-10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. package/README.md +4 -0
  2. package/esm/index.es.js +173 -30
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/core.index.d.ts +6 -0
  5. package/esm/typings/src/_packages/deepseek.index.d.ts +2 -0
  6. package/esm/typings/src/_packages/google.index.d.ts +2 -0
  7. package/esm/typings/src/_packages/utils.index.d.ts +2 -0
  8. package/esm/typings/src/cli/common/$provideLlmToolsForCli.d.ts +1 -1
  9. package/esm/typings/src/conversion/archive/loadArchive.d.ts +2 -2
  10. package/esm/typings/src/execution/CommonToolsOptions.d.ts +4 -0
  11. package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +12 -0
  12. package/esm/typings/src/execution/createPipelineExecutor/getReservedParametersForTask.d.ts +5 -0
  13. package/esm/typings/src/formats/csv/utils/csvParse.d.ts +12 -0
  14. package/esm/typings/src/formats/json/utils/jsonParse.d.ts +11 -0
  15. package/esm/typings/src/llm-providers/_common/filterModels.d.ts +15 -0
  16. package/esm/typings/src/llm-providers/_common/register/LlmToolsMetadata.d.ts +43 -0
  17. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +4 -0
  18. package/esm/typings/src/llm-providers/deepseek/deepseek-models.d.ts +23 -0
  19. package/esm/typings/src/llm-providers/google/google-models.d.ts +23 -0
  20. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +4 -0
  21. package/esm/typings/src/personas/preparePersona.d.ts +1 -1
  22. package/esm/typings/src/pipeline/PipelineJson/PersonaJson.d.ts +4 -2
  23. package/esm/typings/src/remote-server/openapi-types.d.ts +348 -6
  24. package/esm/typings/src/remote-server/openapi.d.ts +397 -3
  25. package/package.json +2 -2
  26. package/umd/index.umd.js +173 -30
  27. package/umd/index.umd.js.map +1 -1
package/umd/index.umd.js CHANGED
@@ -25,7 +25,7 @@
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
- const PROMPTBOOK_ENGINE_VERSION = '0.89.0';
+ const PROMPTBOOK_ENGINE_VERSION = '0.92.0-10';
  /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -859,7 +859,7 @@
  * Note: [🟢] Code in this file should never be never released in packages that could be imported into browser environment
  */
 
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModelNames}` List of available model names separated by comma (,)\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n\\`\\`\\`json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModels",description:"List of available model names together with their descriptions as JSON",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelsRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n```json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n```\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n```json\n{availableModels}\n```\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. 
By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelsRequirements",format:"JSON",dependentParameterNames:["availableModels","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModels}` List of available model names together with their descriptions as JSON\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelsRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n\\`\\`\\`json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n\\`\\`\\`json\n{availableModels}\n\\`\\`\\`\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelsRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
 
  /**
  * Checks if value is valid email
@@ -1902,7 +1902,7 @@
  */
  function unpreparePipeline(pipeline) {
  let { personas, knowledgeSources, tasks } = pipeline;
- personas = personas.map((persona) => ({ ...persona, modelRequirements: undefined, preparationIds: undefined }));
+ personas = personas.map((persona) => ({ ...persona, modelsRequirements: undefined, preparationIds: undefined }));
  knowledgeSources = knowledgeSources.map((knowledgeSource) => ({ ...knowledgeSource, preparationIds: undefined }));
  tasks = tasks.map((task) => {
  let { dependentParameterNames } = task;
@@ -2112,7 +2112,7 @@
  if (pipeline.title === undefined || pipeline.title === '' || pipeline.title === DEFAULT_BOOK_TITLE) {
  return false;
  }
- if (!pipeline.personas.every((persona) => persona.modelRequirements !== undefined)) {
+ if (!pipeline.personas.every((persona) => persona.modelsRequirements !== undefined)) {
  return false;
  }
  if (!pipeline.knowledgeSources.every((knowledgeSource) => knowledgeSource.preparationIds !== undefined)) {
@@ -2136,6 +2136,45 @@
  * - [♨] Are tasks prepared
  */
 
+ /**
+ * Converts a JavaScript Object Notation (JSON) string into an object.
+ *
+ * Note: This is wrapper around `JSON.parse()` with better error and type handling
+ *
+ * @public exported from `@promptbook/utils`
+ */
+ function jsonParse(value) {
+ if (value === undefined) {
+ throw new Error(`Can not parse JSON from undefined value.`);
+ }
+ else if (typeof value !== 'string') {
+ console.error('Can not parse JSON from non-string value.', { text: value });
+ throw new Error(spaceTrim__default["default"](`
+ Can not parse JSON from non-string value.
+
+ The value type: ${typeof value}
+ See more in console.
+ `));
+ }
+ try {
+ return JSON.parse(value);
+ }
+ catch (error) {
+ if (!(error instanceof Error)) {
+ throw error;
+ }
+ throw new Error(spaceTrim__default["default"]((block) => `
+ ${block(error.message)}
+
+ The JSON text:
+ ${block(value)}
+ `));
+ }
+ }
+ /**
+ * TODO: !!!! Use in Promptbook.studio
+ */
+
  /**
  * Recursively converts JSON strings to JSON objects
 
@@ -2154,7 +2193,7 @@
  const newObject = { ...object };
  for (const [key, value] of Object.entries(object)) {
  if (typeof value === 'string' && isValidJsonString(value)) {
- newObject[key] = JSON.parse(value);
+ newObject[key] = jsonParse(value);
  }
  else {
  newObject[key] = jsonStringsToJsons(value);
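
Note on the new helper: `jsonParse()` replaces bare `JSON.parse()` here and elsewhere in this release. Per the typings added in `src/formats/json/utils/jsonParse.d.ts` it is exported from `@promptbook/utils`. A minimal usage sketch (assuming that export), showing the extra context it attaches to parse failures:

```js
import { jsonParse } from '@promptbook/utils'; // <- assumed export, matching the added typings

const text = '{ "modelName": }'; // <- deliberately invalid JSON

try {
    jsonParse(text);
} catch (error) {
    // The re-thrown error contains both the original parser message and the
    // offending JSON text ("The JSON text: ..."), instead of just the bare
    // SyntaxError that JSON.parse(text) would produce.
    console.error(error.message);
}

// Non-string input fails fast with a descriptive error:
// jsonParse(undefined) -> Error: Can not parse JSON from undefined value.
```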
@@ -2986,27 +3025,48 @@
  pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book'),
  tools,
  });
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
  const _llms = arrayableToArray(tools.llm);
  const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
- const availableModels = await llmTools.listModels();
- const availableModelNames = availableModels
+ const availableModels = (await llmTools.listModels())
  .filter(({ modelVariant }) => modelVariant === 'CHAT')
- .map(({ modelName }) => modelName)
- .join(',');
- const result = await preparePersonaExecutor({ availableModelNames, personaDescription }).asPromise();
+ .map(({ modelName, modelDescription }) => ({
+ modelName,
+ modelDescription,
+ // <- Note: `modelTitle` and `modelVariant` is not relevant for this task
+ }));
+ const result = await preparePersonaExecutor({
+ availableModels /* <- Note: Passing as JSON */,
+ personaDescription,
+ }).asPromise();
  const { outputParameters } = result;
- const { modelRequirements: modelRequirementsRaw } = outputParameters;
- const modelRequirements = JSON.parse(modelRequirementsRaw);
+ const { modelsRequirements: modelsRequirementsJson } = outputParameters;
+ let modelsRequirementsUnchecked = jsonParse(modelsRequirementsJson);
  if (isVerbose) {
- console.info(`PERSONA ${personaDescription}`, modelRequirements);
+ console.info(`PERSONA ${personaDescription}`, modelsRequirementsUnchecked);
  }
- const { modelName, systemMessage, temperature } = modelRequirements;
- return {
+ if (!Array.isArray(modelsRequirementsUnchecked)) {
+ // <- TODO: Book should have syntax and system to enforce shape of JSON
+ modelsRequirementsUnchecked = [modelsRequirementsUnchecked];
+ /*
+ throw new UnexpectedError(
+ spaceTrim(
+ (block) => `
+ Invalid \`modelsRequirements\`:
+
+ \`\`\`json
+ ${block(JSON.stringify(modelsRequirementsUnchecked, null, 4))}
+ \`\`\`
+ `,
+ ),
+ );
+ */
+ }
+ const modelsRequirements = modelsRequirementsUnchecked.map((modelRequirements) => ({
  modelVariant: 'CHAT',
- modelName,
- systemMessage,
- temperature,
+ ...modelRequirements,
+ }));
+ return {
+ modelsRequirements,
  };
  }
  /**
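
The persona preparation above now requests a JSON array of candidate models (`modelsRequirements`) from the `prepare-persona.book` pipeline instead of a single `modelRequirements` object, and each entry is spread over a `modelVariant: 'CHAT'` default. A sketch of the expected result shape, using the example values from the updated book prompt:

```js
// Output of preparePersonaExecutor, parsed from {modelsRequirements}
// (best-fitting models first, per the book's instructions; values taken from its example):
const modelsRequirementsUnchecked = [
    {
        modelName: 'gpt-4o',
        systemMessage: 'You are experienced AI engineer and helpfull assistant.',
        temperature: 0.7,
    },
    {
        modelName: 'claude-3-5-sonnet',
        systemMessage: 'You are a friendly and knowledgeable chatbot.',
        temperature: 0.5,
    },
];

// preparePersona then completes each entry into full model requirements:
const modelsRequirements = modelsRequirementsUnchecked.map((modelRequirements) => ({
    modelVariant: 'CHAT',
    ...modelRequirements,
}));
```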
@@ -3444,7 +3504,7 @@
  > },
  */
  async asJson() {
- return JSON.parse(await tools.fs.readFile(filename, 'utf-8'));
+ return jsonParse(await tools.fs.readFile(filename, 'utf-8'));
  },
  async asText() {
  return await tools.fs.readFile(filename, 'utf-8');
@@ -3702,14 +3762,14 @@
  // TODO: [🖌][🧠] Implement some `mapAsync` function
  const preparedPersonas = new Array(personas.length);
  await forEachAsync(personas, { maxParallelCount /* <- TODO: [🪂] When there are subtasks, this maximul limit can be broken */ }, async (persona, index) => {
- const modelRequirements = await preparePersona(persona.description, { ...tools, llm: llmToolsWithUsage }, {
+ const { modelsRequirements } = await preparePersona(persona.description, { ...tools, llm: llmToolsWithUsage }, {
  rootDirname,
  maxParallelCount /* <- TODO: [🪂] */,
  isVerbose,
  });
  const preparedPersona = {
  ...persona,
- modelRequirements,
+ modelsRequirements,
  preparationIds: [/* TODO: [🧊] -> */ currentPreparation.id],
  // <- TODO: [🍙] Make some standard order of json properties
  };
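
Downstream, each prepared persona in the pipeline JSON now stores the whole `modelsRequirements` array (see the `PersonaJson.d.ts` change in the file list) rather than a single `modelRequirements` object. A hypothetical prepared entry, with illustrative values only:

```js
// Hypothetical prepared persona entry (field values are illustrative, not taken from the package):
const preparedPersona = {
    name: 'copywriter',
    description: 'Experienced copywriter who writes concise marketing texts',
    modelsRequirements: [
        { modelVariant: 'CHAT', modelName: 'gpt-4o', systemMessage: 'You are an experienced copywriter...', temperature: 0.7 },
        { modelVariant: 'CHAT', modelName: 'claude-3-5-sonnet', systemMessage: 'You are an experienced copywriter...', temperature: 0.5 },
    ],
    preparationIds: [1], // <- currentPreparation.id
};
```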
@@ -4048,6 +4108,24 @@
  }
  }
 
+ /**
+ * Converts a CSV string into an object
+ *
+ * Note: This is wrapper around `papaparse.parse()` with better autohealing
+ *
+ * @private - for now until `@promptbook/csv` is released
+ */
+ function csvParse(value /* <- TODO: string_csv */, settings, schema /* <- TODO: Make CSV Schemas */) {
+ settings = { ...settings, ...MANDATORY_CSV_SETTINGS };
+ // Note: Autoheal invalid '\n' characters
+ if (settings.newline && !settings.newline.includes('\r') && value.includes('\r')) {
+ console.warn('CSV string contains carriage return characters, but in the CSV settings the `newline` setting does not include them. Autohealing the CSV string.');
+ value = value.replace(/\r\n/g, '\n').replace(/\r/g, '\n');
+ }
+ const csv = papaparse.parse(value, settings);
+ return csv;
+ }
+
  /**
  * Definition for CSV spreadsheet
  *
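
The new private `csvParse()` helper wraps `papaparse.parse()` and, when the configured `newline` does not account for `\r` but the input contains carriage returns, normalizes the line endings before parsing (with a warning). A rough sketch of that autohealing path; the exact parse output still depends on the bundle-internal `MANDATORY_CSV_SETTINGS`:

```js
// CSV produced with Windows line endings, while the format settings expect plain '\n':
const value = 'name,age\r\nAlice,30\r\nBob,25';
const settings = { newline: '\n' };

// csvParse() logs a warning, rewrites '\r\n' and stray '\r' to '\n',
// and only then hands the string to papaparse.parse(value, settings),
// so rows do not end up with trailing '\r' characters in their last field.
const csv = csvParse(value, settings);
console.log(csv.errors); // <- expected to be empty after autohealing
```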
@@ -4070,8 +4148,7 @@
  {
  subvalueName: 'ROW',
  async mapValues(value, outputParameterName, settings, mapCallback) {
- // TODO: [👨🏾‍🤝‍👨🏼] DRY csv parsing
- const csv = papaparse.parse(value, { ...settings, ...MANDATORY_CSV_SETTINGS });
+ const csv = csvParse(value, settings);
  if (csv.errors.length !== 0) {
  throw new CsvFormatError(spaceTrim__default["default"]((block) => `
  CSV parsing error
@@ -4101,8 +4178,7 @@
  {
  subvalueName: 'CELL',
  async mapValues(value, outputParameterName, settings, mapCallback) {
- // TODO: [👨🏾‍🤝‍👨🏼] DRY csv parsing
- const csv = papaparse.parse(value, { ...settings, ...MANDATORY_CSV_SETTINGS });
+ const csv = csvParse(value, settings);
  if (csv.errors.length !== 0) {
  throw new CsvFormatError(spaceTrim__default["default"]((block) => `
  CSV parsing error
@@ -5131,13 +5207,79 @@
  /**
  * @@@
  *
+ * Here is the place where RAG (retrieval-augmented generation) happens
+ *
  * @private internal utility of `createPipelineExecutor`
  */
  async function getKnowledgeForTask(options) {
- const { preparedPipeline, task } = options;
- return preparedPipeline.knowledgePieces.map(({ content }) => `- ${content}`).join('\n');
+ const { tools, preparedPipeline, task } = options;
+ const firstKnowlegePiece = preparedPipeline.knowledgePieces[0];
+ const firstKnowlegeIndex = firstKnowlegePiece === null || firstKnowlegePiece === void 0 ? void 0 : firstKnowlegePiece.index[0];
+ // <- TODO: Do not use just first knowledge piece and first index to determine embedding model, use also keyword search
+ if (firstKnowlegePiece === undefined || firstKnowlegeIndex === undefined) {
+ return 'No knowledge pieces found';
+ }
+ // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
+ const _llms = arrayableToArray(tools.llm);
+ const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+ const taskEmbeddingPrompt = {
+ title: 'Knowledge Search',
+ modelRequirements: {
+ modelVariant: 'EMBEDDING',
+ modelName: firstKnowlegeIndex.modelName,
+ },
+ content: task.content,
+ parameters: {
+ /* !!!!!!!! */
+ },
+ };
+ const taskEmbeddingResult = await llmTools.callEmbeddingModel(taskEmbeddingPrompt);
+ const knowledgePiecesWithRelevance = preparedPipeline.knowledgePieces.map((knowledgePiece) => {
+ const { index } = knowledgePiece;
+ const knowledgePieceIndex = index.find((i) => i.modelName === firstKnowlegeIndex.modelName);
+ // <- TODO: Do not use just first knowledge piece and first index to determine embedding model
+ if (knowledgePieceIndex === undefined) {
+ return {
+ content: knowledgePiece.content,
+ relevance: 0,
+ };
+ }
+ const relevance = computeCosineSimilarity(knowledgePieceIndex.position, taskEmbeddingResult.content);
+ return {
+ content: knowledgePiece.content,
+ relevance,
+ };
+ });
+ const knowledgePiecesSorted = knowledgePiecesWithRelevance.sort((a, b) => a.relevance - b.relevance);
+ const knowledgePiecesLimited = knowledgePiecesSorted.slice(0, 5);
+ console.log('!!! Embedding', {
+ task,
+ taskEmbeddingPrompt,
+ taskEmbeddingResult,
+ firstKnowlegePiece,
+ firstKnowlegeIndex,
+ knowledgePiecesWithRelevance,
+ knowledgePiecesSorted,
+ knowledgePiecesLimited,
+ });
+ return knowledgePiecesLimited.map(({ content }) => `- ${content}`).join('\n');
  // <- TODO: [🧠] Some smart aggregation of knowledge pieces, single-line vs multi-line vs mixed
  }
+ // TODO: !!!!!! Annotate + to new file
+ function computeCosineSimilarity(embeddingVector1, embeddingVector2) {
+ if (embeddingVector1.length !== embeddingVector2.length) {
+ throw new TypeError('Embedding vectors must have the same length');
+ }
+ const dotProduct = embeddingVector1.reduce((sum, value, index) => sum + value * embeddingVector2[index], 0);
+ const magnitude1 = Math.sqrt(embeddingVector1.reduce((sum, value) => sum + value * value, 0));
+ const magnitude2 = Math.sqrt(embeddingVector2.reduce((sum, value) => sum + value * value, 0));
+ return 1 - dotProduct / (magnitude1 * magnitude2);
+ }
+ /**
+ * TODO: !!!! Verify if this is working
+ * TODO: [♨] Implement Better - use keyword search
+ * TODO: [♨] Examples of values
+ */
 
  /**
  * @@@
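
A note on the ranking above: despite its name, `computeCosineSimilarity` returns `1 - cos(θ)`, i.e. the cosine distance between the two embedding vectors, so the ascending sort (`a.relevance - b.relevance`) ranks the most similar pieces first and `slice(0, 5)` keeps the five closest. (Pieces indexed with a different embedding model fall back to `relevance: 0`, which this ordering also treats as maximally relevant.) A standalone check of the metric:

```js
// Same math as the bundled helper, reproduced standalone for illustration:
function cosineDistance(a, b) {
    const dot = a.reduce((sum, value, index) => sum + value * b[index], 0);
    const magnitudeA = Math.sqrt(a.reduce((sum, value) => sum + value * value, 0));
    const magnitudeB = Math.sqrt(b.reduce((sum, value) => sum + value * value, 0));
    return 1 - dot / (magnitudeA * magnitudeB);
}

console.log(cosineDistance([1, 0], [1, 0]));  // 0 <- identical direction, ranked most relevant
console.log(cosineDistance([1, 0], [0, 1]));  // 1 <- orthogonal
console.log(cosineDistance([1, 0], [-1, 0])); // 2 <- opposite direction, ranked least relevant
```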
@@ -5145,9 +5287,9 @@
  * @private internal utility of `createPipelineExecutor`
  */
  async function getReservedParametersForTask(options) {
- const { preparedPipeline, task, pipelineIdentification } = options;
+ const { tools, preparedPipeline, task, pipelineIdentification } = options;
  const context = await getContextForTask(); // <- [🏍]
- const knowledge = await getKnowledgeForTask({ preparedPipeline, task });
+ const knowledge = await getKnowledgeForTask({ tools, preparedPipeline, task });
  const examples = await getExamplesForTask();
  const currentDate = new Date().toISOString(); // <- TODO: [🧠][💩] Better
  const modelName = RESERVED_PARAMETER_MISSING_VALUE;
@@ -5209,6 +5351,7 @@
  }
  const definedParameters = Object.freeze({
  ...(await getReservedParametersForTask({
+ tools,
  preparedPipeline,
  task: currentTask,
  pipelineIdentification,