@promptbook/node 0.89.0 → 0.92.0-11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27) hide show
  1. package/README.md +4 -0
  2. package/esm/index.es.js +175 -32
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/core.index.d.ts +6 -0
  5. package/esm/typings/src/_packages/deepseek.index.d.ts +2 -0
  6. package/esm/typings/src/_packages/google.index.d.ts +2 -0
  7. package/esm/typings/src/_packages/utils.index.d.ts +2 -0
  8. package/esm/typings/src/cli/common/$provideLlmToolsForCli.d.ts +1 -1
  9. package/esm/typings/src/conversion/archive/loadArchive.d.ts +2 -2
  10. package/esm/typings/src/execution/CommonToolsOptions.d.ts +4 -0
  11. package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +12 -0
  12. package/esm/typings/src/execution/createPipelineExecutor/getReservedParametersForTask.d.ts +5 -0
  13. package/esm/typings/src/formats/csv/utils/csvParse.d.ts +12 -0
  14. package/esm/typings/src/formats/json/utils/jsonParse.d.ts +11 -0
  15. package/esm/typings/src/llm-providers/_common/filterModels.d.ts +15 -0
  16. package/esm/typings/src/llm-providers/_common/register/LlmToolsMetadata.d.ts +43 -0
  17. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +4 -0
  18. package/esm/typings/src/llm-providers/deepseek/deepseek-models.d.ts +23 -0
  19. package/esm/typings/src/llm-providers/google/google-models.d.ts +23 -0
  20. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +4 -0
  21. package/esm/typings/src/personas/preparePersona.d.ts +1 -1
  22. package/esm/typings/src/pipeline/PipelineJson/PersonaJson.d.ts +4 -2
  23. package/esm/typings/src/remote-server/openapi-types.d.ts +348 -6
  24. package/esm/typings/src/remote-server/openapi.d.ts +397 -3
  25. package/package.json +2 -2
  26. package/umd/index.umd.js +175 -32
  27. package/umd/index.umd.js.map +1 -1
package/README.md CHANGED
@@ -23,6 +23,10 @@
23
23
 
24
24
 
25
25
 
26
+ <blockquote style="color: #ff8811">
27
+ <b>⚠ Warning:</b> This is a pre-release version of the library. It is not yet ready for production use. Please look at <a href="https://www.npmjs.com/package/@promptbook/core?activeTab=versions">latest stable release</a>.
28
+ </blockquote>
29
+
26
30
  ## 📦 Package `@promptbook/node`
27
31
 
28
32
  - Promptbooks are [divided into several](#-packages) packages, all are published from [single monorepo](https://github.com/webgptorg/promptbook).
package/esm/index.es.js CHANGED
@@ -30,7 +30,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
30
30
  * @generated
31
31
  * @see https://github.com/webgptorg/promptbook
32
32
  */
33
- const PROMPTBOOK_ENGINE_VERSION = '0.89.0';
33
+ const PROMPTBOOK_ENGINE_VERSION = '0.92.0-11';
34
34
  /**
35
35
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
36
36
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -299,6 +299,45 @@ class UnexpectedError extends Error {
299
299
  }
300
300
  }
301
301
 
302
+ /**
303
+ * Converts a JavaScript Object Notation (JSON) string into an object.
304
+ *
305
+ * Note: This is wrapper around `JSON.parse()` with better error and type handling
306
+ *
307
+ * @public exported from `@promptbook/utils`
308
+ */
309
+ function jsonParse(value) {
310
+ if (value === undefined) {
311
+ throw new Error(`Can not parse JSON from undefined value.`);
312
+ }
313
+ else if (typeof value !== 'string') {
314
+ console.error('Can not parse JSON from non-string value.', { text: value });
315
+ throw new Error(spaceTrim(`
316
+ Can not parse JSON from non-string value.
317
+
318
+ The value type: ${typeof value}
319
+ See more in console.
320
+ `));
321
+ }
322
+ try {
323
+ return JSON.parse(value);
324
+ }
325
+ catch (error) {
326
+ if (!(error instanceof Error)) {
327
+ throw error;
328
+ }
329
+ throw new Error(spaceTrim((block) => `
330
+ ${block(error.message)}
331
+
332
+ The JSON text:
333
+ ${block(value)}
334
+ `));
335
+ }
336
+ }
337
+ /**
338
+ * TODO: !!!! Use in Promptbook.studio
339
+ */
340
+
302
341
  /**
303
342
  * Orders JSON object by keys
304
343
  *
@@ -1088,7 +1127,7 @@ async function loadArchive(filePath, fs) {
1088
1127
  if (!indexFile) {
1089
1128
  throw new UnexpectedError(`Archive does not contain 'index.book.json' file`);
1090
1129
  }
1091
- const collectionJson = JSON.parse(await indexFile.async('text'));
1130
+ const collectionJson = jsonParse(await indexFile.async('text'));
1092
1131
  for (const pipeline of collectionJson) {
1093
1132
  validatePipeline(pipeline);
1094
1133
  }
@@ -1098,7 +1137,7 @@ async function loadArchive(filePath, fs) {
1098
1137
  * Note: [🟢] Code in this file should never be never released in packages that could be imported into browser environment
1099
1138
  */
1100
1139
 
1101
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> 
{knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just 
title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the 
model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModelNames}` List of available model names separated by comma (,)\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n\\`\\`\\`json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. 
It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 
words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
1140
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> 
{knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just 
title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModels",description:"List of available model names together with their descriptions as JSON",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelsRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n```json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n```\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting 
models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n```json\n{availableModels}\n```\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelsRequirements",format:"JSON",dependentParameterNames:["availableModels","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModels}` List of available model names together with their descriptions as JSON\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelsRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n\\`\\`\\`json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are 
the available models:\n\n\\`\\`\\`json\n{availableModels}\n\\`\\`\\`\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelsRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- 
Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
1102
1141
 
1103
1142
  /**
1104
1143
  * Checks if value is valid email
@@ -1449,7 +1488,7 @@ function extractParameterNames(template) {
1449
1488
  */
1450
1489
  function unpreparePipeline(pipeline) {
1451
1490
  let { personas, knowledgeSources, tasks } = pipeline;
1452
- personas = personas.map((persona) => ({ ...persona, modelRequirements: undefined, preparationIds: undefined }));
1491
+ personas = personas.map((persona) => ({ ...persona, modelsRequirements: undefined, preparationIds: undefined }));
1453
1492
  knowledgeSources = knowledgeSources.map((knowledgeSource) => ({ ...knowledgeSource, preparationIds: undefined }));
1454
1493
  tasks = tasks.map((task) => {
1455
1494
  let { dependentParameterNames } = task;
@@ -1625,7 +1664,7 @@ function isPipelinePrepared(pipeline) {
1625
1664
  if (pipeline.title === undefined || pipeline.title === '' || pipeline.title === DEFAULT_BOOK_TITLE) {
1626
1665
  return false;
1627
1666
  }
1628
- if (!pipeline.personas.every((persona) => persona.modelRequirements !== undefined)) {
1667
+ if (!pipeline.personas.every((persona) => persona.modelsRequirements !== undefined)) {
1629
1668
  return false;
1630
1669
  }
1631
1670
  if (!pipeline.knowledgeSources.every((knowledgeSource) => knowledgeSource.preparationIds !== undefined)) {
@@ -1682,7 +1721,7 @@ function jsonStringsToJsons(object) {
1682
1721
  const newObject = { ...object };
1683
1722
  for (const [key, value] of Object.entries(object)) {
1684
1723
  if (typeof value === 'string' && isValidJsonString(value)) {
1685
- newObject[key] = JSON.parse(value);
1724
+ newObject[key] = jsonParse(value);
1686
1725
  }
1687
1726
  else {
1688
1727
  newObject[key] = jsonStringsToJsons(value);
@@ -2504,6 +2543,24 @@ function isValidCsvString(value) {
2504
2543
  }
2505
2544
  }
2506
2545
 
2546
/**
 * Converts a CSV string into an object
 *
 * Note: This is wrapper around `papaparse.parse()` with better autohealing
 *
 * @private - for now until `@promptbook/csv` is released
 */
function csvParse(value /* <- TODO: string_csv */, settings, schema /* <- TODO: Make CSV Schemas */) {
    const effectiveSettings = { ...settings, ...MANDATORY_CSV_SETTINGS };
    // Note: Autoheal invalid '\n' characters - when the configured newline
    //       does not cover '\r' but the input contains it, normalize to '\n'
    const { newline } = effectiveSettings;
    const needsAutoheal = Boolean(newline) && !newline.includes('\r') && value.includes('\r');
    if (needsAutoheal) {
        console.warn('CSV string contains carriage return characters, but in the CSV settings the `newline` setting does not include them. Autohealing the CSV string.');
        value = value.replace(/\r\n/g, '\n').replace(/\r/g, '\n');
    }
    return parse(value, effectiveSettings);
}
2563
+
2507
2564
  /**
2508
2565
  * Definition for CSV spreadsheet
2509
2566
  *
@@ -2526,8 +2583,7 @@ const CsvFormatDefinition = {
2526
2583
  {
2527
2584
  subvalueName: 'ROW',
2528
2585
  async mapValues(value, outputParameterName, settings, mapCallback) {
2529
- // TODO: [👨🏾‍🤝‍👨🏼] DRY csv parsing
2530
- const csv = parse(value, { ...settings, ...MANDATORY_CSV_SETTINGS });
2586
+ const csv = csvParse(value, settings);
2531
2587
  if (csv.errors.length !== 0) {
2532
2588
  throw new CsvFormatError(spaceTrim((block) => `
2533
2589
  CSV parsing error
@@ -2557,8 +2613,7 @@ const CsvFormatDefinition = {
2557
2613
  {
2558
2614
  subvalueName: 'CELL',
2559
2615
  async mapValues(value, outputParameterName, settings, mapCallback) {
2560
- // TODO: [👨🏾‍🤝‍👨🏼] DRY csv parsing
2561
- const csv = parse(value, { ...settings, ...MANDATORY_CSV_SETTINGS });
2616
+ const csv = csvParse(value, settings);
2562
2617
  if (csv.errors.length !== 0) {
2563
2618
  throw new CsvFormatError(spaceTrim((block) => `
2564
2619
  CSV parsing error
@@ -4076,13 +4131,79 @@ async function getExamplesForTask(task) {
4076
4131
/**
 * Gathers the knowledge pieces most relevant to the given task
 *
 * Here is the place where RAG (retrieval-augmented generation) happens:
 * the task content is embedded and compared against the pre-computed
 * knowledge-piece embeddings via cosine distance.
 *
 * @private internal utility of `createPipelineExecutor`
 */
async function getKnowledgeForTask(options) {
    const { tools, preparedPipeline, task } = options;
    const firstKnowlegePiece = preparedPipeline.knowledgePieces[0];
    const firstKnowlegeIndex = firstKnowlegePiece === null || firstKnowlegePiece === void 0 ? void 0 : firstKnowlegePiece.index[0];
    // <- TODO: Do not use just first knowledge piece and first index to determine embedding model, use also keyword search
    if (firstKnowlegePiece === undefined || firstKnowlegeIndex === undefined) {
        return 'No knowledge pieces found';
    }
    // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
    const _llms = arrayableToArray(tools.llm);
    const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
    // Embed the task content with the same model the knowledge index was built with
    const taskEmbeddingPrompt = {
        title: 'Knowledge Search',
        modelRequirements: {
            modelVariant: 'EMBEDDING',
            modelName: firstKnowlegeIndex.modelName,
        },
        content: task.content,
        parameters: {},
        // <- TODO: Pass the resolved task parameters here
    };
    const taskEmbeddingResult = await llmTools.callEmbeddingModel(taskEmbeddingPrompt);
    const knowledgePiecesWithRelevance = preparedPipeline.knowledgePieces.map((knowledgePiece) => {
        const { index } = knowledgePiece;
        const knowledgePieceIndex = index.find((i) => i.modelName === firstKnowlegeIndex.modelName);
        // <- TODO: Do not use just first knowledge piece and first index to determine embedding model
        if (knowledgePieceIndex === undefined) {
            // Note: `relevance` is a cosine DISTANCE (lower = more relevant), so pieces
            //       without a matching index must rank LAST, not first - use Infinity, not 0
            return {
                content: knowledgePiece.content,
                relevance: Number.POSITIVE_INFINITY,
            };
        }
        const relevance = computeCosineSimilarity(knowledgePieceIndex.position, taskEmbeddingResult.content);
        return {
            content: knowledgePiece.content,
            relevance,
        };
    });
    // Note: Ascending sort because `relevance` is a distance (smaller = closer match)
    const knowledgePiecesSorted = knowledgePiecesWithRelevance.sort((a, b) => a.relevance - b.relevance);
    const knowledgePiecesLimited = knowledgePiecesSorted.slice(0, 5);
    // <- TODO: [🧠] Make the piece limit configurable
    return knowledgePiecesLimited.map(({ content }) => `- ${content}`).join('\n');
    // <- TODO: [🧠] Some smart aggregation of knowledge pieces, single-line vs multi-line vs mixed
}
4192
/**
 * Computes the cosine DISTANCE (1 - cosine similarity) between two embedding vectors
 *
 * Note: Despite the name, the returned value is a distance, not a similarity:
 *       0 means identical direction, 1 means orthogonal, 2 means opposite.
 *       Callers sort ascending on this value to rank by relevance.
 *
 * @param embeddingVector1 - first embedding vector
 * @param embeddingVector2 - second embedding vector, must have the same length
 * @returns cosine distance in the range [0, 2]
 * @throws {TypeError} when the vectors differ in length
 * @private internal utility of `createPipelineExecutor`
 */
function computeCosineSimilarity(embeddingVector1, embeddingVector2) {
    if (embeddingVector1.length !== embeddingVector2.length) {
        throw new TypeError('Embedding vectors must have the same length');
    }
    const dotProduct = embeddingVector1.reduce((sum, value, index) => sum + value * embeddingVector2[index], 0);
    const magnitude1 = Math.sqrt(embeddingVector1.reduce((sum, value) => sum + value * value, 0));
    const magnitude2 = Math.sqrt(embeddingVector2.reduce((sum, value) => sum + value * value, 0));
    // Guard against division by zero for zero-magnitude vectors: the original
    // expression would yield NaN, which breaks sort comparators downstream.
    // Treat a zero vector as having no directional similarity (distance 1).
    if (magnitude1 === 0 || magnitude2 === 0) {
        return 1;
    }
    return 1 - dotProduct / (magnitude1 * magnitude2);
}
4202
+ /**
4203
+ * TODO: !!!! Verify if this is working
4204
+ * TODO: [♨] Implement Better - use keyword search
4205
+ * TODO: [♨] Examples of values
4206
+ */
4086
4207
 
4087
4208
  /**
4088
4209
  * @@@
@@ -4090,9 +4211,9 @@ async function getKnowledgeForTask(options) {
4090
4211
  * @private internal utility of `createPipelineExecutor`
4091
4212
  */
4092
4213
  async function getReservedParametersForTask(options) {
4093
- const { preparedPipeline, task, pipelineIdentification } = options;
4214
+ const { tools, preparedPipeline, task, pipelineIdentification } = options;
4094
4215
  const context = await getContextForTask(); // <- [🏍]
4095
- const knowledge = await getKnowledgeForTask({ preparedPipeline, task });
4216
+ const knowledge = await getKnowledgeForTask({ tools, preparedPipeline, task });
4096
4217
  const examples = await getExamplesForTask();
4097
4218
  const currentDate = new Date().toISOString(); // <- TODO: [🧠][💩] Better
4098
4219
  const modelName = RESERVED_PARAMETER_MISSING_VALUE;
@@ -4154,6 +4275,7 @@ async function executeTask(options) {
4154
4275
  }
4155
4276
  const definedParameters = Object.freeze({
4156
4277
  ...(await getReservedParametersForTask({
4278
+ tools,
4157
4279
  preparedPipeline,
4158
4280
  task: currentTask,
4159
4281
  pipelineIdentification,
@@ -4710,27 +4832,48 @@ async function preparePersona(personaDescription, tools, options) {
4710
4832
  pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book'),
4711
4833
  tools,
4712
4834
  });
4713
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
4714
4835
  const _llms = arrayableToArray(tools.llm);
4715
4836
  const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
4716
- const availableModels = await llmTools.listModels();
4717
- const availableModelNames = availableModels
4837
+ const availableModels = (await llmTools.listModels())
4718
4838
  .filter(({ modelVariant }) => modelVariant === 'CHAT')
4719
- .map(({ modelName }) => modelName)
4720
- .join(',');
4721
- const result = await preparePersonaExecutor({ availableModelNames, personaDescription }).asPromise();
4839
+ .map(({ modelName, modelDescription }) => ({
4840
+ modelName,
4841
+ modelDescription,
4842
+ // <- Note: `modelTitle` and `modelVariant` is not relevant for this task
4843
+ }));
4844
+ const result = await preparePersonaExecutor({
4845
+ availableModels /* <- Note: Passing as JSON */,
4846
+ personaDescription,
4847
+ }).asPromise();
4722
4848
  const { outputParameters } = result;
4723
- const { modelRequirements: modelRequirementsRaw } = outputParameters;
4724
- const modelRequirements = JSON.parse(modelRequirementsRaw);
4849
+ const { modelsRequirements: modelsRequirementsJson } = outputParameters;
4850
+ let modelsRequirementsUnchecked = jsonParse(modelsRequirementsJson);
4725
4851
  if (isVerbose) {
4726
- console.info(`PERSONA ${personaDescription}`, modelRequirements);
4852
+ console.info(`PERSONA ${personaDescription}`, modelsRequirementsUnchecked);
4727
4853
  }
4728
- const { modelName, systemMessage, temperature } = modelRequirements;
4729
- return {
4854
+ if (!Array.isArray(modelsRequirementsUnchecked)) {
4855
+ // <- TODO: Book should have syntax and system to enforce shape of JSON
4856
+ modelsRequirementsUnchecked = [modelsRequirementsUnchecked];
4857
+ /*
4858
+ throw new UnexpectedError(
4859
+ spaceTrim(
4860
+ (block) => `
4861
+ Invalid \`modelsRequirements\`:
4862
+
4863
+ \`\`\`json
4864
+ ${block(JSON.stringify(modelsRequirementsUnchecked, null, 4))}
4865
+ \`\`\`
4866
+ `,
4867
+ ),
4868
+ );
4869
+ */
4870
+ }
4871
+ const modelsRequirements = modelsRequirementsUnchecked.map((modelRequirements) => ({
4730
4872
  modelVariant: 'CHAT',
4731
- modelName,
4732
- systemMessage,
4733
- temperature,
4873
+ ...modelRequirements,
4874
+ }));
4875
+ return {
4876
+ modelsRequirements,
4734
4877
  };
4735
4878
  }
4736
4879
  /**
@@ -5293,7 +5436,7 @@ async function makeKnowledgeSourceHandler(knowledgeSource, tools, options) {
5293
5436
  > },
5294
5437
  */
5295
5438
  async asJson() {
5296
- return JSON.parse(await tools.fs.readFile(filename, 'utf-8'));
5439
+ return jsonParse(await tools.fs.readFile(filename, 'utf-8'));
5297
5440
  },
5298
5441
  async asText() {
5299
5442
  return await tools.fs.readFile(filename, 'utf-8');
@@ -5551,14 +5694,14 @@ async function preparePipeline(pipeline, tools, options) {
5551
5694
  // TODO: [🖌][🧠] Implement some `mapAsync` function
5552
5695
  const preparedPersonas = new Array(personas.length);
5553
5696
  await forEachAsync(personas, { maxParallelCount /* <- TODO: [🪂] When there are subtasks, this maximul limit can be broken */ }, async (persona, index) => {
5554
- const modelRequirements = await preparePersona(persona.description, { ...tools, llm: llmToolsWithUsage }, {
5697
+ const { modelsRequirements } = await preparePersona(persona.description, { ...tools, llm: llmToolsWithUsage }, {
5555
5698
  rootDirname,
5556
5699
  maxParallelCount /* <- TODO: [🪂] */,
5557
5700
  isVerbose,
5558
5701
  });
5559
5702
  const preparedPersona = {
5560
5703
  ...persona,
5561
- modelRequirements,
5704
+ modelsRequirements,
5562
5705
  preparationIds: [/* TODO: [🧊] -> */ currentPreparation.id],
5563
5706
  // <- TODO: [🍙] Make some standard order of json properties
5564
5707
  };
@@ -10813,7 +10956,7 @@ class FileCacheStorage {
10813
10956
  return null;
10814
10957
  }
10815
10958
  const fileContent = await readFile(filename, 'utf-8');
10816
- const value = JSON.parse(fileContent);
10959
+ const value = jsonParse(fileContent);
10817
10960
  // TODO: [🌗]
10818
10961
  return value;
10819
10962
  }