@promptbook/documents 0.89.0 → 0.92.0-10
This diff compares the contents of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the package versions as they appear in the public registry.
- package/README.md +4 -0
- package/esm/index.es.js +173 -30
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/core.index.d.ts +6 -0
- package/esm/typings/src/_packages/deepseek.index.d.ts +2 -0
- package/esm/typings/src/_packages/google.index.d.ts +2 -0
- package/esm/typings/src/_packages/utils.index.d.ts +2 -0
- package/esm/typings/src/cli/common/$provideLlmToolsForCli.d.ts +1 -1
- package/esm/typings/src/conversion/archive/loadArchive.d.ts +2 -2
- package/esm/typings/src/execution/CommonToolsOptions.d.ts +4 -0
- package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +12 -0
- package/esm/typings/src/execution/createPipelineExecutor/getReservedParametersForTask.d.ts +5 -0
- package/esm/typings/src/formats/csv/utils/csvParse.d.ts +12 -0
- package/esm/typings/src/formats/json/utils/jsonParse.d.ts +11 -0
- package/esm/typings/src/llm-providers/_common/filterModels.d.ts +15 -0
- package/esm/typings/src/llm-providers/_common/register/LlmToolsMetadata.d.ts +43 -0
- package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +4 -0
- package/esm/typings/src/llm-providers/deepseek/deepseek-models.d.ts +23 -0
- package/esm/typings/src/llm-providers/google/google-models.d.ts +23 -0
- package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +4 -0
- package/esm/typings/src/personas/preparePersona.d.ts +1 -1
- package/esm/typings/src/pipeline/PipelineJson/PersonaJson.d.ts +4 -2
- package/esm/typings/src/remote-server/openapi-types.d.ts +348 -6
- package/esm/typings/src/remote-server/openapi.d.ts +397 -3
- package/package.json +2 -2
- package/umd/index.umd.js +173 -30
- package/umd/index.umd.js.map +1 -1
package/umd/index.umd.js
CHANGED
@@ -26,7 +26,7 @@
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.
+const PROMPTBOOK_ENGINE_VERSION = '0.92.0-10';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -1037,7 +1037,7 @@
  * Note: [🟢] Code in this file should never be never released in packages that could be imported into browser environment
  */
 
-
var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModelNames}` List of available model names separated by comma (,)\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n\\`\\`\\`json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
+
var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModels",description:"List of available model names together with their descriptions as JSON",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelsRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n```json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n```\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n```json\n{availableModels}\n```\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. 
By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelsRequirements",format:"JSON",dependentParameterNames:["availableModels","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModels}` List of available model names together with their descriptions as JSON\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelsRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n\\`\\`\\`json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n\\`\\`\\`json\n{availableModels}\n\\`\\`\\`\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelsRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
 
 /**
  * Checks if value is valid email
@@ -2080,7 +2080,7 @@
  */
 function unpreparePipeline(pipeline) {
     let { personas, knowledgeSources, tasks } = pipeline;
-    personas = personas.map((persona) => ({ ...persona,
+    personas = personas.map((persona) => ({ ...persona, modelsRequirements: undefined, preparationIds: undefined }));
     knowledgeSources = knowledgeSources.map((knowledgeSource) => ({ ...knowledgeSource, preparationIds: undefined }));
     tasks = tasks.map((task) => {
         let { dependentParameterNames } = task;
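For orientation, a minimal sketch of what the updated `unpreparePipeline` now strips from each persona. The field names follow the hunk above; the sample persona object itself is hypothetical:

```js
// Hypothetical persona object as it appears in a prepared pipeline
const preparedPersona = {
    name: 'assistant',
    description: 'Helpful AI engineer',
    modelsRequirements: [{ modelVariant: 'CHAT', modelName: 'gpt-4o' }],
    preparationIds: [1],
};

// After unpreparing, the preparation-specific fields are cleared, mirroring
// `{ ...persona, modelsRequirements: undefined, preparationIds: undefined }` from the diff
const unpreparedPersona = { ...preparedPersona, modelsRequirements: undefined, preparationIds: undefined };
```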
@@ -2272,7 +2272,7 @@
     if (pipeline.title === undefined || pipeline.title === '' || pipeline.title === DEFAULT_BOOK_TITLE) {
         return false;
     }
-    if (!pipeline.personas.every((persona) => persona.
+    if (!pipeline.personas.every((persona) => persona.modelsRequirements !== undefined)) {
         return false;
     }
     if (!pipeline.knowledgeSources.every((knowledgeSource) => knowledgeSource.preparationIds !== undefined)) {
@@ -2296,6 +2296,45 @@
  * - [♨] Are tasks prepared
  */
 
+/**
+ * Converts a JavaScript Object Notation (JSON) string into an object.
+ *
+ * Note: This is wrapper around `JSON.parse()` with better error and type handling
+ *
+ * @public exported from `@promptbook/utils`
+ */
+function jsonParse(value) {
+    if (value === undefined) {
+        throw new Error(`Can not parse JSON from undefined value.`);
+    }
+    else if (typeof value !== 'string') {
+        console.error('Can not parse JSON from non-string value.', { text: value });
+        throw new Error(spaceTrim__default["default"](`
+            Can not parse JSON from non-string value.
+
+            The value type: ${typeof value}
+            See more in console.
+        `));
+    }
+    try {
+        return JSON.parse(value);
+    }
+    catch (error) {
+        if (!(error instanceof Error)) {
+            throw error;
+        }
+        throw new Error(spaceTrim__default["default"]((block) => `
+            ${block(error.message)}
+
+            The JSON text:
+            ${block(value)}
+        `));
+    }
+}
+/**
+ * TODO: !!!! Use in Promptbook.studio
+ */
+
 /**
  * Recursively converts JSON strings to JSON objects
 
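A brief usage sketch of the new `jsonParse` wrapper. The typings diff marks it as exported from `@promptbook/utils`, so the import path is assumed from that; the sample inputs are illustrative only:

```js
import { jsonParse } from '@promptbook/utils';

// Valid JSON parses exactly as with JSON.parse
const requirements = jsonParse('{"modelName":"gpt-4o","temperature":0.7}');
console.log(requirements.modelName); // -> 'gpt-4o'

// Invalid input rethrows an Error whose message includes the offending text,
// instead of surfacing the bare SyntaxError from JSON.parse
try {
    jsonParse('{not json}');
} catch (error) {
    console.error(error.message); // includes "The JSON text:" followed by the value
}
```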
@@ -2314,7 +2353,7 @@
     const newObject = { ...object };
     for (const [key, value] of Object.entries(object)) {
         if (typeof value === 'string' && isValidJsonString(value)) {
-            newObject[key] =
+            newObject[key] = jsonParse(value);
         }
         else {
             newObject[key] = jsonStringsToJsons(value);
@@ -3146,27 +3185,48 @@
         pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book'),
         tools,
     });
-    // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
     const _llms = arrayableToArray(tools.llm);
     const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
-    const availableModels = await llmTools.listModels()
-    const availableModelNames = availableModels
+    const availableModels = (await llmTools.listModels())
         .filter(({ modelVariant }) => modelVariant === 'CHAT')
-        .map(({ modelName }) =>
-
-
+        .map(({ modelName, modelDescription }) => ({
+            modelName,
+            modelDescription,
+            // <- Note: `modelTitle` and `modelVariant` is not relevant for this task
+        }));
+    const result = await preparePersonaExecutor({
+        availableModels /* <- Note: Passing as JSON */,
+        personaDescription,
+    }).asPromise();
     const { outputParameters } = result;
-    const {
-
+    const { modelsRequirements: modelsRequirementsJson } = outputParameters;
+    let modelsRequirementsUnchecked = jsonParse(modelsRequirementsJson);
     if (isVerbose) {
-        console.info(`PERSONA ${personaDescription}`,
+        console.info(`PERSONA ${personaDescription}`, modelsRequirementsUnchecked);
     }
-
-
+    if (!Array.isArray(modelsRequirementsUnchecked)) {
+        // <- TODO: Book should have syntax and system to enforce shape of JSON
+        modelsRequirementsUnchecked = [modelsRequirementsUnchecked];
+        /*
+        throw new UnexpectedError(
+            spaceTrim(
+                (block) => `
+                    Invalid \`modelsRequirements\`:
+
+                    \`\`\`json
+                    ${block(JSON.stringify(modelsRequirementsUnchecked, null, 4))}
+                    \`\`\`
+                `,
+            ),
+        );
+        */
+    }
+    const modelsRequirements = modelsRequirementsUnchecked.map((modelRequirements) => ({
         modelVariant: 'CHAT',
-
-
-
+        ...modelRequirements,
+    }));
+    return {
+        modelsRequirements,
     };
 }
 /**
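To illustrate the shape change in persona preparation, here is a hedged sketch of the data flowing through the updated `preparePersona`. The model names and field values are examples only; the field names follow the hunk above and the updated `prepare-persona.book` pipeline:

```js
// Input: available models are now passed as JSON objects (name + description),
// not as a comma-separated list of model names
const availableModels = [
    { modelName: 'gpt-4o', modelDescription: 'General-purpose chat model' },
    { modelName: 'claude-3-5-sonnet', modelDescription: 'Chat model suited for friendly assistants' },
];

// Output: `modelsRequirements` is now an array (the old `modelRequirements` was a single object);
// each item is normalized with `modelVariant: 'CHAT'` before being returned
const modelsRequirements = [
    { modelVariant: 'CHAT', modelName: 'gpt-4o', systemMessage: 'You are a helpful assistant.', temperature: 0.7 },
    { modelVariant: 'CHAT', modelName: 'claude-3-5-sonnet', systemMessage: 'You are a friendly chatbot.', temperature: 0.5 },
];
```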
@@ -3594,7 +3654,7 @@
 > },
 */
 async asJson() {
-    return
+    return jsonParse(await tools.fs.readFile(filename, 'utf-8'));
 },
 async asText() {
     return await tools.fs.readFile(filename, 'utf-8');
@@ -3852,14 +3912,14 @@
     // TODO: [🖌][🧠] Implement some `mapAsync` function
     const preparedPersonas = new Array(personas.length);
     await forEachAsync(personas, { maxParallelCount /* <- TODO: [🪂] When there are subtasks, this maximul limit can be broken */ }, async (persona, index) => {
-        const
+        const { modelsRequirements } = await preparePersona(persona.description, { ...tools, llm: llmToolsWithUsage }, {
             rootDirname,
             maxParallelCount /* <- TODO: [🪂] */,
             isVerbose,
         });
         const preparedPersona = {
             ...persona,
-
+            modelsRequirements,
             preparationIds: [/* TODO: [🧊] -> */ currentPreparation.id],
             // <- TODO: [🍙] Make some standard order of json properties
         };
@@ -4198,6 +4258,24 @@
     }
 }
 
+/**
+ * Converts a CSV string into an object
+ *
+ * Note: This is wrapper around `papaparse.parse()` with better autohealing
+ *
+ * @private - for now until `@promptbook/csv` is released
+ */
+function csvParse(value /* <- TODO: string_csv */, settings, schema /* <- TODO: Make CSV Schemas */) {
+    settings = { ...settings, ...MANDATORY_CSV_SETTINGS };
+    // Note: Autoheal invalid '\n' characters
+    if (settings.newline && !settings.newline.includes('\r') && value.includes('\r')) {
+        console.warn('CSV string contains carriage return characters, but in the CSV settings the `newline` setting does not include them. Autohealing the CSV string.');
+        value = value.replace(/\r\n/g, '\n').replace(/\r/g, '\n');
+    }
+    const csv = papaparse.parse(value, settings);
+    return csv;
+}
+
 /**
  * Definition for CSV spreadsheet
  *
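A small usage sketch of the new private `csvParse` helper, assuming `papaparse` and `MANDATORY_CSV_SETTINGS` are in scope as in the bundle and that header parsing is enabled by the effective settings; the sample CSV is illustrative:

```js
// CSV with Windows line endings but a `newline` setting of '\n':
// csvParse warns and normalizes '\r\n' / '\r' to '\n' before calling papaparse.parse
const value = 'name,age\r\nAlice,30\r\nBob,25';
const settings = { header: true, newline: '\n' };

const csv = csvParse(value, settings);
console.log(csv.errors); // -> [] when the autohealed string parses cleanly
console.log(csv.data);   // -> e.g. [{ name: 'Alice', age: '30' }, { name: 'Bob', age: '25' }]
```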
@@ -4220,8 +4298,7 @@
         {
             subvalueName: 'ROW',
             async mapValues(value, outputParameterName, settings, mapCallback) {
-
-                const csv = papaparse.parse(value, { ...settings, ...MANDATORY_CSV_SETTINGS });
+                const csv = csvParse(value, settings);
                 if (csv.errors.length !== 0) {
                     throw new CsvFormatError(spaceTrim__default["default"]((block) => `
                         CSV parsing error
@@ -4251,8 +4328,7 @@
         {
             subvalueName: 'CELL',
             async mapValues(value, outputParameterName, settings, mapCallback) {
-
-                const csv = papaparse.parse(value, { ...settings, ...MANDATORY_CSV_SETTINGS });
+                const csv = csvParse(value, settings);
                 if (csv.errors.length !== 0) {
                     throw new CsvFormatError(spaceTrim__default["default"]((block) => `
                         CSV parsing error
@@ -5281,13 +5357,79 @@
 /**
  * @@@
  *
+ * Here is the place where RAG (retrieval-augmented generation) happens
+ *
  * @private internal utility of `createPipelineExecutor`
  */
 async function getKnowledgeForTask(options) {
-    const { preparedPipeline, task } = options;
-
+    const { tools, preparedPipeline, task } = options;
+    const firstKnowlegePiece = preparedPipeline.knowledgePieces[0];
+    const firstKnowlegeIndex = firstKnowlegePiece === null || firstKnowlegePiece === void 0 ? void 0 : firstKnowlegePiece.index[0];
+    // <- TODO: Do not use just first knowledge piece and first index to determine embedding model, use also keyword search
+    if (firstKnowlegePiece === undefined || firstKnowlegeIndex === undefined) {
+        return 'No knowledge pieces found';
+    }
+    // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
+    const _llms = arrayableToArray(tools.llm);
+    const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+    const taskEmbeddingPrompt = {
+        title: 'Knowledge Search',
+        modelRequirements: {
+            modelVariant: 'EMBEDDING',
+            modelName: firstKnowlegeIndex.modelName,
+        },
+        content: task.content,
+        parameters: {
+            /* !!!!!!!! */
+        },
+    };
+    const taskEmbeddingResult = await llmTools.callEmbeddingModel(taskEmbeddingPrompt);
+    const knowledgePiecesWithRelevance = preparedPipeline.knowledgePieces.map((knowledgePiece) => {
+        const { index } = knowledgePiece;
+        const knowledgePieceIndex = index.find((i) => i.modelName === firstKnowlegeIndex.modelName);
+        // <- TODO: Do not use just first knowledge piece and first index to determine embedding model
+        if (knowledgePieceIndex === undefined) {
+            return {
+                content: knowledgePiece.content,
+                relevance: 0,
+            };
+        }
+        const relevance = computeCosineSimilarity(knowledgePieceIndex.position, taskEmbeddingResult.content);
+        return {
+            content: knowledgePiece.content,
+            relevance,
+        };
+    });
+    const knowledgePiecesSorted = knowledgePiecesWithRelevance.sort((a, b) => a.relevance - b.relevance);
+    const knowledgePiecesLimited = knowledgePiecesSorted.slice(0, 5);
+    console.log('!!! Embedding', {
+        task,
+        taskEmbeddingPrompt,
+        taskEmbeddingResult,
+        firstKnowlegePiece,
+        firstKnowlegeIndex,
+        knowledgePiecesWithRelevance,
+        knowledgePiecesSorted,
+        knowledgePiecesLimited,
+    });
+    return knowledgePiecesLimited.map(({ content }) => `- ${content}`).join('\n');
     // <- TODO: [🧠] Some smart aggregation of knowledge pieces, single-line vs multi-line vs mixed
 }
+// TODO: !!!!!! Annotate + to new file
+function computeCosineSimilarity(embeddingVector1, embeddingVector2) {
+    if (embeddingVector1.length !== embeddingVector2.length) {
+        throw new TypeError('Embedding vectors must have the same length');
+    }
+    const dotProduct = embeddingVector1.reduce((sum, value, index) => sum + value * embeddingVector2[index], 0);
+    const magnitude1 = Math.sqrt(embeddingVector1.reduce((sum, value) => sum + value * value, 0));
+    const magnitude2 = Math.sqrt(embeddingVector2.reduce((sum, value) => sum + value * value, 0));
+    return 1 - dotProduct / (magnitude1 * magnitude2);
+}
+/**
+ * TODO: !!!! Verify if this is working
+ * TODO: [♨] Implement Better - use keyword search
+ * TODO: [♨] Examples of values
+ */
 
 /**
  * @@@
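A worked example of the relevance math in the new knowledge retrieval, using two toy embedding vectors (real embeddings have hundreds of dimensions). Note that, despite its name, `computeCosineSimilarity` as written returns the cosine distance (1 − similarity), which is why the ascending sort in `getKnowledgeForTask` puts the closest knowledge pieces first:

```js
const a = [1, 0];
const b = [0.6, 0.8];

// dot = 1*0.6 + 0*0.8 = 0.6; |a| = 1; |b| = sqrt(0.36 + 0.64) = 1
// cosine similarity = 0.6, so the function returns 1 - 0.6 = 0.4
console.log(computeCosineSimilarity(a, b)); // -> 0.4 (smaller means more relevant)

// Identical direction -> distance 0; orthogonal vectors -> distance 1
console.log(computeCosineSimilarity([1, 0], [2, 0])); // -> 0
console.log(computeCosineSimilarity([1, 0], [0, 1])); // -> 1
```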
@@ -5295,9 +5437,9 @@
  * @private internal utility of `createPipelineExecutor`
  */
 async function getReservedParametersForTask(options) {
-    const { preparedPipeline, task, pipelineIdentification } = options;
+    const { tools, preparedPipeline, task, pipelineIdentification } = options;
     const context = await getContextForTask(); // <- [🏍]
-    const knowledge = await getKnowledgeForTask({ preparedPipeline, task });
+    const knowledge = await getKnowledgeForTask({ tools, preparedPipeline, task });
     const examples = await getExamplesForTask();
     const currentDate = new Date().toISOString(); // <- TODO: [🧠][💩] Better
     const modelName = RESERVED_PARAMETER_MISSING_VALUE;
@@ -5359,6 +5501,7 @@
         }
         const definedParameters = Object.freeze({
             ...(await getReservedParametersForTask({
+                tools,
                 preparedPipeline,
                 task: currentTask,
                 pipelineIdentification,