@promptbook/node 0.89.0 → 0.92.0-11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27) hide show
  1. package/README.md +4 -0
  2. package/esm/index.es.js +175 -32
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/core.index.d.ts +6 -0
  5. package/esm/typings/src/_packages/deepseek.index.d.ts +2 -0
  6. package/esm/typings/src/_packages/google.index.d.ts +2 -0
  7. package/esm/typings/src/_packages/utils.index.d.ts +2 -0
  8. package/esm/typings/src/cli/common/$provideLlmToolsForCli.d.ts +1 -1
  9. package/esm/typings/src/conversion/archive/loadArchive.d.ts +2 -2
  10. package/esm/typings/src/execution/CommonToolsOptions.d.ts +4 -0
  11. package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +12 -0
  12. package/esm/typings/src/execution/createPipelineExecutor/getReservedParametersForTask.d.ts +5 -0
  13. package/esm/typings/src/formats/csv/utils/csvParse.d.ts +12 -0
  14. package/esm/typings/src/formats/json/utils/jsonParse.d.ts +11 -0
  15. package/esm/typings/src/llm-providers/_common/filterModels.d.ts +15 -0
  16. package/esm/typings/src/llm-providers/_common/register/LlmToolsMetadata.d.ts +43 -0
  17. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +4 -0
  18. package/esm/typings/src/llm-providers/deepseek/deepseek-models.d.ts +23 -0
  19. package/esm/typings/src/llm-providers/google/google-models.d.ts +23 -0
  20. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +4 -0
  21. package/esm/typings/src/personas/preparePersona.d.ts +1 -1
  22. package/esm/typings/src/pipeline/PipelineJson/PersonaJson.d.ts +4 -2
  23. package/esm/typings/src/remote-server/openapi-types.d.ts +348 -6
  24. package/esm/typings/src/remote-server/openapi.d.ts +397 -3
  25. package/package.json +2 -2
  26. package/umd/index.umd.js +175 -32
  27. package/umd/index.umd.js.map +1 -1
package/umd/index.umd.js CHANGED
@@ -46,7 +46,7 @@
46
46
  * @generated
47
47
  * @see https://github.com/webgptorg/promptbook
48
48
  */
49
- const PROMPTBOOK_ENGINE_VERSION = '0.89.0';
49
+ const PROMPTBOOK_ENGINE_VERSION = '0.92.0-11';
50
50
  /**
51
51
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
52
52
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -315,6 +315,45 @@
315
315
  }
316
316
  }
317
317
 
318
+ /**
319
+ * Converts a JavaScript Object Notation (JSON) string into an object.
320
+ *
321
+ * Note: This is wrapper around `JSON.parse()` with better error and type handling
322
+ *
323
+ * @public exported from `@promptbook/utils`
324
+ */
325
+ function jsonParse(value) {
326
+ if (value === undefined) {
327
+ throw new Error(`Can not parse JSON from undefined value.`);
328
+ }
329
+ else if (typeof value !== 'string') {
330
+ console.error('Can not parse JSON from non-string value.', { text: value });
331
+ throw new Error(spaceTrim__default["default"](`
332
+ Can not parse JSON from non-string value.
333
+
334
+ The value type: ${typeof value}
335
+ See more in console.
336
+ `));
337
+ }
338
+ try {
339
+ return JSON.parse(value);
340
+ }
341
+ catch (error) {
342
+ if (!(error instanceof Error)) {
343
+ throw error;
344
+ }
345
+ throw new Error(spaceTrim__default["default"]((block) => `
346
+ ${block(error.message)}
347
+
348
+ The JSON text:
349
+ ${block(value)}
350
+ `));
351
+ }
352
+ }
353
+ /**
354
+ * TODO: !!!! Use in Promptbook.studio
355
+ */
356
+
318
357
  /**
319
358
  * Orders JSON object by keys
320
359
  *
@@ -1104,7 +1143,7 @@
1104
1143
  if (!indexFile) {
1105
1144
  throw new UnexpectedError(`Archive does not contain 'index.book.json' file`);
1106
1145
  }
1107
- const collectionJson = JSON.parse(await indexFile.async('text'));
1146
+ const collectionJson = jsonParse(await indexFile.async('text'));
1108
1147
  for (const pipeline of collectionJson) {
1109
1148
  validatePipeline(pipeline);
1110
1149
  }
@@ -1114,7 +1153,7 @@
1114
1153
  * Note: [🟢] Code in this file should never be never released in packages that could be imported into browser environment
1115
1154
  */
1116
1155
 
1117
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> 
{knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just 
title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the 
model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModelNames}` List of available model names separated by comma (,)\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n\\`\\`\\`json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. 
It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 
words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
1156
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> 
{knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just 
title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModels",description:"List of available model names together with their descriptions as JSON",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelsRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n```json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n```\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting 
models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n```json\n{availableModels}\n```\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelsRequirements",format:"JSON",dependentParameterNames:["availableModels","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModels}` List of available model names together with their descriptions as JSON\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelsRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n\\`\\`\\`json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are 
the available models:\n\n\\`\\`\\`json\n{availableModels}\n\\`\\`\\`\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelsRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- 
Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
1118
1157
 
1119
1158
  /**
1120
1159
  * Checks if value is valid email
@@ -1465,7 +1504,7 @@
1465
1504
  */
1466
1505
  function unpreparePipeline(pipeline) {
1467
1506
  let { personas, knowledgeSources, tasks } = pipeline;
1468
- personas = personas.map((persona) => ({ ...persona, modelRequirements: undefined, preparationIds: undefined }));
1507
+ personas = personas.map((persona) => ({ ...persona, modelsRequirements: undefined, preparationIds: undefined }));
1469
1508
  knowledgeSources = knowledgeSources.map((knowledgeSource) => ({ ...knowledgeSource, preparationIds: undefined }));
1470
1509
  tasks = tasks.map((task) => {
1471
1510
  let { dependentParameterNames } = task;
@@ -1641,7 +1680,7 @@
1641
1680
  if (pipeline.title === undefined || pipeline.title === '' || pipeline.title === DEFAULT_BOOK_TITLE) {
1642
1681
  return false;
1643
1682
  }
1644
- if (!pipeline.personas.every((persona) => persona.modelRequirements !== undefined)) {
1683
+ if (!pipeline.personas.every((persona) => persona.modelsRequirements !== undefined)) {
1645
1684
  return false;
1646
1685
  }
1647
1686
  if (!pipeline.knowledgeSources.every((knowledgeSource) => knowledgeSource.preparationIds !== undefined)) {
@@ -1698,7 +1737,7 @@
1698
1737
  const newObject = { ...object };
1699
1738
  for (const [key, value] of Object.entries(object)) {
1700
1739
  if (typeof value === 'string' && isValidJsonString(value)) {
1701
- newObject[key] = JSON.parse(value);
1740
+ newObject[key] = jsonParse(value);
1702
1741
  }
1703
1742
  else {
1704
1743
  newObject[key] = jsonStringsToJsons(value);
@@ -2520,6 +2559,24 @@
2520
2559
  }
2521
2560
  }
2522
2561
 
2562
+ /**
2563
+ * Converts a CSV string into an object
2564
+ *
2565
+ * Note: This is wrapper around `papaparse.parse()` with better autohealing
2566
+ *
2567
+ * @private - for now until `@promptbook/csv` is released
2568
+ */
2569
+ function csvParse(value /* <- TODO: string_csv */, settings, schema /* <- TODO: Make CSV Schemas */) {
2570
+ settings = { ...settings, ...MANDATORY_CSV_SETTINGS };
2571
+ // Note: Autoheal invalid '\n' characters
2572
+ if (settings.newline && !settings.newline.includes('\r') && value.includes('\r')) {
2573
+ console.warn('CSV string contains carriage return characters, but in the CSV settings the `newline` setting does not include them. Autohealing the CSV string.');
2574
+ value = value.replace(/\r\n/g, '\n').replace(/\r/g, '\n');
2575
+ }
2576
+ const csv = papaparse.parse(value, settings);
2577
+ return csv;
2578
+ }
2579
+
2523
2580
  /**
2524
2581
  * Definition for CSV spreadsheet
2525
2582
  *
@@ -2542,8 +2599,7 @@
2542
2599
  {
2543
2600
  subvalueName: 'ROW',
2544
2601
  async mapValues(value, outputParameterName, settings, mapCallback) {
2545
- // TODO: [👨🏾‍🤝‍👨🏼] DRY csv parsing
2546
- const csv = papaparse.parse(value, { ...settings, ...MANDATORY_CSV_SETTINGS });
2602
+ const csv = csvParse(value, settings);
2547
2603
  if (csv.errors.length !== 0) {
2548
2604
  throw new CsvFormatError(spaceTrim__default["default"]((block) => `
2549
2605
  CSV parsing error
@@ -2573,8 +2629,7 @@
2573
2629
  {
2574
2630
  subvalueName: 'CELL',
2575
2631
  async mapValues(value, outputParameterName, settings, mapCallback) {
2576
- // TODO: [👨🏾‍🤝‍👨🏼] DRY csv parsing
2577
- const csv = papaparse.parse(value, { ...settings, ...MANDATORY_CSV_SETTINGS });
2632
+ const csv = csvParse(value, settings);
2578
2633
  if (csv.errors.length !== 0) {
2579
2634
  throw new CsvFormatError(spaceTrim__default["default"]((block) => `
2580
2635
  CSV parsing error
@@ -4092,13 +4147,79 @@
4092
4147
/**
 * Gathers the knowledge pieces most relevant to the given task
 *
 * Here is the place where RAG (retrieval-augmented generation) happens:
 * the task content is embedded with the same model as the pipeline's
 * knowledge pieces and ranked by cosine similarity.
 *
 * @private internal utility of `createPipelineExecutor`
 */
async function getKnowledgeForTask(options) {
    const { tools, preparedPipeline, task } = options;
    const firstKnowlegePiece = preparedPipeline.knowledgePieces[0];
    const firstKnowlegeIndex = firstKnowlegePiece === null || firstKnowlegePiece === void 0 ? void 0 : firstKnowlegePiece.index[0];
    // <- TODO: Do not use just first knowledge piece and first index to determine embedding model, use also keyword search
    if (firstKnowlegePiece === undefined || firstKnowlegeIndex === undefined) {
        return 'No knowledge pieces found';
    }
    // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
    const _llms = arrayableToArray(tools.llm);
    const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
    const taskEmbeddingPrompt = {
        title: 'Knowledge Search',
        modelRequirements: {
            modelVariant: 'EMBEDDING',
            modelName: firstKnowlegeIndex.modelName,
        },
        content: task.content,
        parameters: {},
    };
    const taskEmbeddingResult = await llmTools.callEmbeddingModel(taskEmbeddingPrompt);
    const knowledgePiecesWithRelevance = preparedPipeline.knowledgePieces.map((knowledgePiece) => {
        const { index } = knowledgePiece;
        const knowledgePieceIndex = index.find((i) => i.modelName === firstKnowlegeIndex.modelName);
        // <- TODO: Do not use just first knowledge piece and first index to determine embedding model
        if (knowledgePieceIndex === undefined) {
            // Note: Pieces without an embedding for the chosen model are treated as not relevant at all
            return {
                content: knowledgePiece.content,
                relevance: 0,
            };
        }
        const relevance = computeCosineSimilarity(knowledgePieceIndex.position, taskEmbeddingResult.content);
        return {
            content: knowledgePiece.content,
            relevance,
        };
    });
    // Note: Higher relevance means more similar, so sort descending and keep only the top pieces
    const knowledgePiecesSorted = [...knowledgePiecesWithRelevance].sort((a, b) => b.relevance - a.relevance);
    const knowledgePiecesLimited = knowledgePiecesSorted.slice(0, 5);
    return knowledgePiecesLimited.map(({ content }) => `- ${content}`).join('\n');
    // <- TODO: [🧠] Some smart aggregation of knowledge pieces, single-line vs multi-line vs mixed
}
/**
 * Computes the cosine similarity of two embedding vectors
 *
 * Returns a value in [-1, 1]: 1 means same direction (most similar),
 * 0 means orthogonal, -1 means opposite. Degenerate (zero-magnitude)
 * vectors yield 0 instead of NaN.
 *
 * @throws {TypeError} when the vectors have different lengths
 * @private internal utility of `getKnowledgeForTask`
 */
function computeCosineSimilarity(embeddingVector1, embeddingVector2) {
    if (embeddingVector1.length !== embeddingVector2.length) {
        throw new TypeError('Embedding vectors must have the same length');
    }
    const dotProduct = embeddingVector1.reduce((sum, value, index) => sum + value * embeddingVector2[index], 0);
    const magnitude1 = Math.sqrt(embeddingVector1.reduce((sum, value) => sum + value * value, 0));
    const magnitude2 = Math.sqrt(embeddingVector2.reduce((sum, value) => sum + value * value, 0));
    if (magnitude1 === 0 || magnitude2 === 0) {
        // Note: Avoid division by zero which would propagate NaN into the relevance ranking
        return 0;
    }
    return dotProduct / (magnitude1 * magnitude2);
}
/**
 * TODO: [♨] Implement Better - use keyword search
 * TODO: [♨] Examples of values
 */
4102
4223
 
4103
4224
  /**
4104
4225
  * @@@
@@ -4106,9 +4227,9 @@
4106
4227
  * @private internal utility of `createPipelineExecutor`
4107
4228
  */
4108
4229
  async function getReservedParametersForTask(options) {
4109
- const { preparedPipeline, task, pipelineIdentification } = options;
4230
+ const { tools, preparedPipeline, task, pipelineIdentification } = options;
4110
4231
  const context = await getContextForTask(); // <- [🏍]
4111
- const knowledge = await getKnowledgeForTask({ preparedPipeline, task });
4232
+ const knowledge = await getKnowledgeForTask({ tools, preparedPipeline, task });
4112
4233
  const examples = await getExamplesForTask();
4113
4234
  const currentDate = new Date().toISOString(); // <- TODO: [🧠][💩] Better
4114
4235
  const modelName = RESERVED_PARAMETER_MISSING_VALUE;
@@ -4170,6 +4291,7 @@
4170
4291
  }
4171
4292
  const definedParameters = Object.freeze({
4172
4293
  ...(await getReservedParametersForTask({
4294
+ tools,
4173
4295
  preparedPipeline,
4174
4296
  task: currentTask,
4175
4297
  pipelineIdentification,
@@ -4726,27 +4848,48 @@
4726
4848
  pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book'),
4727
4849
  tools,
4728
4850
  });
4729
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
4730
4851
  const _llms = arrayableToArray(tools.llm);
4731
4852
  const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
4732
- const availableModels = await llmTools.listModels();
4733
- const availableModelNames = availableModels
4853
+ const availableModels = (await llmTools.listModels())
4734
4854
  .filter(({ modelVariant }) => modelVariant === 'CHAT')
4735
- .map(({ modelName }) => modelName)
4736
- .join(',');
4737
- const result = await preparePersonaExecutor({ availableModelNames, personaDescription }).asPromise();
4855
+ .map(({ modelName, modelDescription }) => ({
4856
+ modelName,
4857
+ modelDescription,
4858
+ // <- Note: `modelTitle` and `modelVariant` is not relevant for this task
4859
+ }));
4860
+ const result = await preparePersonaExecutor({
4861
+ availableModels /* <- Note: Passing as JSON */,
4862
+ personaDescription,
4863
+ }).asPromise();
4738
4864
  const { outputParameters } = result;
4739
- const { modelRequirements: modelRequirementsRaw } = outputParameters;
4740
- const modelRequirements = JSON.parse(modelRequirementsRaw);
4865
+ const { modelsRequirements: modelsRequirementsJson } = outputParameters;
4866
+ let modelsRequirementsUnchecked = jsonParse(modelsRequirementsJson);
4741
4867
  if (isVerbose) {
4742
- console.info(`PERSONA ${personaDescription}`, modelRequirements);
4868
+ console.info(`PERSONA ${personaDescription}`, modelsRequirementsUnchecked);
4743
4869
  }
4744
- const { modelName, systemMessage, temperature } = modelRequirements;
4745
- return {
4870
+ if (!Array.isArray(modelsRequirementsUnchecked)) {
4871
+ // <- TODO: Book should have syntax and system to enforce shape of JSON
4872
+ modelsRequirementsUnchecked = [modelsRequirementsUnchecked];
4873
+ /*
4874
+ throw new UnexpectedError(
4875
+ spaceTrim(
4876
+ (block) => `
4877
+ Invalid \`modelsRequirements\`:
4878
+
4879
+ \`\`\`json
4880
+ ${block(JSON.stringify(modelsRequirementsUnchecked, null, 4))}
4881
+ \`\`\`
4882
+ `,
4883
+ ),
4884
+ );
4885
+ */
4886
+ }
4887
+ const modelsRequirements = modelsRequirementsUnchecked.map((modelRequirements) => ({
4746
4888
  modelVariant: 'CHAT',
4747
- modelName,
4748
- systemMessage,
4749
- temperature,
4889
+ ...modelRequirements,
4890
+ }));
4891
+ return {
4892
+ modelsRequirements,
4750
4893
  };
4751
4894
  }
4752
4895
  /**
@@ -5309,7 +5452,7 @@
5309
5452
  > },
5310
5453
  */
5311
5454
  async asJson() {
5312
- return JSON.parse(await tools.fs.readFile(filename, 'utf-8'));
5455
+ return jsonParse(await tools.fs.readFile(filename, 'utf-8'));
5313
5456
  },
5314
5457
  async asText() {
5315
5458
  return await tools.fs.readFile(filename, 'utf-8');
@@ -5567,14 +5710,14 @@
5567
5710
  // TODO: [🖌][🧠] Implement some `mapAsync` function
5568
5711
  const preparedPersonas = new Array(personas.length);
5569
5712
  await forEachAsync(personas, { maxParallelCount /* <- TODO: [🪂] When there are subtasks, this maximul limit can be broken */ }, async (persona, index) => {
5570
- const modelRequirements = await preparePersona(persona.description, { ...tools, llm: llmToolsWithUsage }, {
5713
+ const { modelsRequirements } = await preparePersona(persona.description, { ...tools, llm: llmToolsWithUsage }, {
5571
5714
  rootDirname,
5572
5715
  maxParallelCount /* <- TODO: [🪂] */,
5573
5716
  isVerbose,
5574
5717
  });
5575
5718
  const preparedPersona = {
5576
5719
  ...persona,
5577
- modelRequirements,
5720
+ modelsRequirements,
5578
5721
  preparationIds: [/* TODO: [🧊] -> */ currentPreparation.id],
5579
5722
  // <- TODO: [🍙] Make some standard order of json properties
5580
5723
  };
@@ -10829,7 +10972,7 @@
10829
10972
  return null;
10830
10973
  }
10831
10974
  const fileContent = await promises.readFile(filename, 'utf-8');
10832
- const value = JSON.parse(fileContent);
10975
+ const value = jsonParse(fileContent);
10833
10976
  // TODO: [🌗]
10834
10977
  return value;
10835
10978
  }