@promptbook/node 0.61.0-22 → 0.61.0-24

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. package/esm/index.es.js +186 -61
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/types.index.d.ts +2 -2
  4. package/esm/typings/src/config.d.ts +8 -4
  5. package/esm/typings/src/conversion/pipelineStringToJsonSync.d.ts +1 -0
  6. package/esm/typings/src/conversion/utils/stringifyPipelineJson.d.ts +1 -0
  7. package/esm/typings/src/execution/PipelineExecutor.d.ts +32 -24
  8. package/esm/typings/src/execution/PromptResultUsage.d.ts +3 -0
  9. package/esm/typings/src/prepare/isPipelinePrepared.d.ts +4 -0
  10. package/esm/typings/src/prepare/preparePipeline.d.ts +0 -1
  11. package/esm/typings/src/prepare/prepareTemplates.d.ts +31 -0
  12. package/esm/typings/src/prepare/unpreparePipeline.d.ts +2 -0
  13. package/esm/typings/src/types/PipelineJson/KnowledgePieceJson.d.ts +3 -0
  14. package/esm/typings/src/types/PipelineJson/KnowledgeSourceJson.d.ts +3 -0
  15. package/esm/typings/src/types/PipelineJson/LlmTemplateJson.d.ts +1 -0
  16. package/esm/typings/src/types/PipelineJson/PersonaJson.d.ts +3 -0
  17. package/esm/typings/src/types/PipelineJson/PipelineJson.d.ts +1 -0
  18. package/esm/typings/src/types/PipelineJson/PreparationJson.d.ts +1 -0
  19. package/esm/typings/src/types/PipelineJson/PromptDialogJson.d.ts +4 -3
  20. package/esm/typings/src/types/PipelineJson/PromptTemplateJsonCommon.d.ts +10 -0
  21. package/esm/typings/src/types/PipelineJson/PromptTemplateParameterJson.d.ts +1 -0
  22. package/esm/typings/src/types/PipelineJson/ScriptJson.d.ts +5 -2
  23. package/esm/typings/src/types/PipelineJson/SimpleTemplateJson.d.ts +5 -2
  24. package/package.json +2 -2
  25. package/umd/index.umd.js +186 -61
  26. package/umd/index.umd.js.map +1 -1
  27. package/umd/typings/src/_packages/types.index.d.ts +2 -2
  28. package/umd/typings/src/config.d.ts +8 -4
  29. package/umd/typings/src/conversion/pipelineStringToJsonSync.d.ts +1 -0
  30. package/umd/typings/src/conversion/utils/stringifyPipelineJson.d.ts +1 -0
  31. package/umd/typings/src/execution/PipelineExecutor.d.ts +32 -24
  32. package/umd/typings/src/execution/PromptResultUsage.d.ts +3 -0
  33. package/umd/typings/src/prepare/isPipelinePrepared.d.ts +4 -0
  34. package/umd/typings/src/prepare/preparePipeline.d.ts +0 -1
  35. package/umd/typings/src/prepare/prepareTemplates.d.ts +31 -0
  36. package/umd/typings/src/prepare/unpreparePipeline.d.ts +2 -0
  37. package/umd/typings/src/types/PipelineJson/KnowledgePieceJson.d.ts +3 -0
  38. package/umd/typings/src/types/PipelineJson/KnowledgeSourceJson.d.ts +3 -0
  39. package/umd/typings/src/types/PipelineJson/LlmTemplateJson.d.ts +1 -0
  40. package/umd/typings/src/types/PipelineJson/PersonaJson.d.ts +3 -0
  41. package/umd/typings/src/types/PipelineJson/PipelineJson.d.ts +1 -0
  42. package/umd/typings/src/types/PipelineJson/PreparationJson.d.ts +1 -0
  43. package/umd/typings/src/types/PipelineJson/PromptDialogJson.d.ts +4 -3
  44. package/umd/typings/src/types/PipelineJson/PromptTemplateJsonCommon.d.ts +10 -0
  45. package/umd/typings/src/types/PipelineJson/PromptTemplateParameterJson.d.ts +1 -0
  46. package/umd/typings/src/types/PipelineJson/ScriptJson.d.ts +5 -2
  47. package/umd/typings/src/types/PipelineJson/SimpleTemplateJson.d.ts +5 -2
package/umd/index.umd.js CHANGED
@@ -191,15 +191,26 @@
191
191
  * The name of the builded pipeline collection made by CLI `ptbk make` and for lookup in `createCollectionFromDirectory`
192
192
  */
193
193
  var PIPELINE_COLLECTION_BASE_FILENAME = "index";
194
+ /**
195
+ * Nonce which is used for replacing things in strings
196
+ */
197
+ var REPLACING_NONCE = 'u$k42k%!V2zo34w7Fu#@QUHYPW';
194
198
  /**
195
199
  * The names of the parameters that are reserved for special purposes
196
200
  */
197
201
  var RESERVED_PARAMETER_NAMES = deepFreeze([
198
202
  'context',
203
+ 'knowledge',
204
+ 'samples',
205
+ 'modelName',
199
206
  'currentDate',
200
207
  // <- TODO: Add more like 'date', 'modelName',...
201
208
  // <- TODO: Add [emoji] + instructions ACRY when adding new reserved parameter
202
209
  ]);
210
+ /**
211
+ * @@@
212
+ */
213
+ var RESERVED_PARAMETER_MISSING_VALUE = 'MISSING-' + REPLACING_NONCE;
203
214
  /*
204
215
  TODO: !!! Just testing false-negative detection of [🟡][🟢][🔵][⚪] leak
205
216
  */
@@ -643,7 +654,7 @@
643
654
  });
644
655
  }
645
656
 
646
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-21",parameters:[{name:"content",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledge",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {content}",dependentParameterNames:["content"],resultingParameterName:"knowledge"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-21",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0-21",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by 
comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {content}",dependentParameterNames:["content"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-21",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0-21",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> 
{content}",expectations:{words:{min:1,max:8}},dependentParameterNames:["content"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-21",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0-21",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. 
The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-21",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
657
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-23",parameters:[{name:"content",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {content}",dependentParameterNames:["content"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-23",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0-23",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by 
comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {content}",dependentParameterNames:["content"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-23",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0-23",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> 
{content}",expectations:{words:{min:1,max:8}},dependentParameterNames:["content"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-23",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0-23",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. 
The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-23",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
647
658
 
648
659
  /**
649
660
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
@@ -911,7 +922,7 @@
911
922
  throw new PipelineLogicError("Parameter {".concat(template.resultingParameterName, "} is defined multiple times"));
912
923
  }
913
924
  if (RESERVED_PARAMETER_NAMES.includes(template.resultingParameterName)) {
914
- throw new PipelineLogicError("Parameter name {".concat(template.resultingParameterName, "} is reserved, please use fifferent name"));
925
+ throw new PipelineLogicError("Parameter name {".concat(template.resultingParameterName, "} is reserved, please use different name"));
915
926
  }
916
927
  definedParameters.add(template.resultingParameterName);
917
928
  if (template.blockType === 'PROMPT_TEMPLATE' && template.modelRequirements.modelVariant === undefined) {
@@ -1081,14 +1092,17 @@
1081
1092
  * Unprepare just strips the preparation data of the pipeline
1082
1093
  */
1083
1094
  function unpreparePipeline(pipeline) {
1084
- var personas = pipeline.personas, knowledgeSources = pipeline.knowledgeSources;
1095
+ var personas = pipeline.personas, knowledgeSources = pipeline.knowledgeSources, promptTemplates = pipeline.promptTemplates;
1085
1096
  personas = personas.map(function (persona) { return (__assign(__assign({}, persona), { modelRequirements: undefined, preparationIds: undefined })); });
1086
1097
  knowledgeSources = knowledgeSources.map(function (knowledgeSource) { return (__assign(__assign({}, knowledgeSource), { preparationIds: undefined })); });
1087
- return __assign(__assign({}, pipeline), { knowledgeSources: knowledgeSources, knowledgePieces: [], personas: personas, preparations: [] });
1098
+ promptTemplates = promptTemplates.map(function (promptTemplate) { return (__assign(__assign({}, promptTemplate), { preparedContent: undefined })); });
1099
+ return __assign(__assign({}, pipeline), { promptTemplates: promptTemplates, knowledgeSources: knowledgeSources, knowledgePieces: [], personas: personas, preparations: [] });
1088
1100
  }
1089
1101
  /**
1090
1102
  * TODO: [🔼] !!! Export via `@promptbook/core`
1103
+ * TODO: [🧿] Maybe do same process with same granularity and subfinctions as `preparePipeline`
1091
1104
  * TODO: Write tests for `preparePipeline`
1105
+ * TODO: [🍙] Make some standart order of json properties
1092
1106
  */
1093
1107
 
1094
1108
  /**
@@ -1990,22 +2004,27 @@
1990
2004
  // Note: Ignoring `pipeline.preparations` @@@
1991
2005
  // Note: Ignoring `pipeline.knowledgePieces` @@@
1992
2006
  if (!pipeline.personas.every(function (persona) { return persona.modelRequirements !== undefined; })) {
1993
- console.log('!!!!', 'Not all personas have modelRequirements');
1994
2007
  return false;
1995
2008
  }
1996
2009
  if (!pipeline.knowledgeSources.every(function (knowledgeSource) { return knowledgeSource.preparationIds !== undefined; })) {
1997
- console.log('!!!!', 'Not all knowledgeSources have preparationIds');
1998
2010
  return false;
1999
2011
  }
2000
- // TODO: !!!!! Is context in each template
2001
- // TODO: !!!!! Are samples prepared
2002
- // TODO: !!!!! Are templates prepared
2012
+ /*
2013
+ TODO: [🧠][🍫] `promptTemplates` can not be determined if they are fully prepared SO ignoring them
2014
+ > if (!pipeline.promptTemplates.every(({ preparedContent }) => preparedContent === undefined)) {
2015
+ > return false;
2016
+ > }
2017
+ */
2003
2018
  return true;
2004
2019
  }
2005
2020
  /**
2006
2021
  * TODO: [🐠] Maybe base this on `makeValidator`
2007
2022
  * TODO: [🔼] Export via core or utils
2008
2023
  * TODO: [🧊] Pipeline can be partially prepared, this should return true ONLY if fully prepared
2024
+ * TODO: [🧿] Maybe do same process with same granularity and subfinctions as `preparePipeline`
2025
+ * - Is context in each template
2026
+ * - Are samples prepared
2027
+ * - Are templates prepared
2009
2028
  */
2010
2029
 
2011
2030
  /**
@@ -2067,6 +2086,22 @@
2067
2086
  * @throws {PipelineExecutionError} if parameter is not defined, not closed, or not opened
2068
2087
  */
2069
2088
  function replaceParameters(template, parameters) {
2089
+ var e_1, _a;
2090
+ try {
2091
+ for (var _b = __values(Object.entries(parameters)), _c = _b.next(); !_c.done; _c = _b.next()) {
2092
+ var _d = __read(_c.value, 2), parameterName = _d[0], parameterValue = _d[1];
2093
+ if (parameterValue === RESERVED_PARAMETER_MISSING_VALUE) {
2094
+ throw new UnexpectedError("Parameter {".concat(parameterName, "} has missing value"));
2095
+ }
2096
+ }
2097
+ }
2098
+ catch (e_1_1) { e_1 = { error: e_1_1 }; }
2099
+ finally {
2100
+ try {
2101
+ if (_c && !_c.done && (_a = _b.return)) _a.call(_b);
2102
+ }
2103
+ finally { if (e_1) throw e_1.error; }
2104
+ }
2070
2105
  var replacedTemplate = template;
2071
2106
  var match;
2072
2107
  var loopLimit = LOOP_LIMIT;
@@ -2192,7 +2227,7 @@
2192
2227
  /**
2193
2228
  * The version of the Promptbook library
2194
2229
  */
2195
- var PROMPTBOOK_VERSION = '0.61.0-21';
2230
+ var PROMPTBOOK_VERSION = '0.61.0-23';
2196
2231
  // TODO: !!!! List here all the versions and annotate + put into script
2197
2232
 
2198
2233
  /**
@@ -2322,14 +2357,25 @@
2322
2357
  pipeline = rawPipeline;
2323
2358
  }
2324
2359
  else {
2325
- console.warn(spaceTrim.spaceTrim("\n Pipeline ".concat(rawPipeline.pipelineUrl || rawPipeline.sourceFile || rawPipeline.title, " is not prepared\n\n It will be prepared ad-hoc before the first execution\n But it is recommended to prepare the pipeline during collection preparation\n\n @see more at https://ptbk.io/prepare-pipeline\n ")));
2360
+ // TODO: !!!! This should be maybe warning in report
2361
+ console.warn(spaceTrim.spaceTrim("\n Pipeline ".concat(rawPipeline.pipelineUrl || rawPipeline.sourceFile || rawPipeline.title, " is not prepared\n\n ").concat(rawPipeline.sourceFile, "\n\n It will be prepared ad-hoc before the first execution\n But it is recommended to prepare the pipeline during collection preparation\n\n @see more at https://ptbk.io/prepare-pipeline\n ")));
2326
2362
  }
2327
2363
  var pipelineExecutor = function (inputParameters, onProgress) { return __awaiter(_this, void 0, void 0, function () {
2364
+ // TODO: !!!!! Extract to separate functions and files - ALL FUNCTIONS BELOW
2328
2365
  function getContextForTemplate(// <- TODO: [🧠][🥜]
2329
2366
  template) {
2330
2367
  return __awaiter(this, void 0, void 0, function () {
2331
2368
  return __generator(this, function (_a) {
2332
- // TODO: !!!!!! Implement Better - use real index and keyword search
2369
+ TODO_USE(template);
2370
+ return [2 /*return*/, RESERVED_PARAMETER_MISSING_VALUE /* <- TODO: !!!! Implement */];
2371
+ });
2372
+ });
2373
+ }
2374
+ function getKnowledgeForTemplate(// <- TODO: [🧠][🥜]
2375
+ template) {
2376
+ return __awaiter(this, void 0, void 0, function () {
2377
+ return __generator(this, function (_a) {
2378
+ // TODO: !!!! Implement Better - use real index and keyword search
2333
2379
  TODO_USE(template);
2334
2380
  return [2 /*return*/, pipeline.knowledgePieces.map(function (_a) {
2335
2381
  var content = _a.content;
@@ -2338,19 +2384,39 @@
2338
2384
  });
2339
2385
  });
2340
2386
  }
2387
+ function getSamplesForTemplate(// <- TODO: [🧠][🥜]
2388
+ template) {
2389
+ return __awaiter(this, void 0, void 0, function () {
2390
+ return __generator(this, function (_a) {
2391
+ // TODO: !!!! Implement Better - use real index and keyword search
2392
+ TODO_USE(template);
2393
+ return [2 /*return*/, RESERVED_PARAMETER_MISSING_VALUE /* <- TODO: !!!! Implement */];
2394
+ });
2395
+ });
2396
+ }
2341
2397
  function getReservedParametersForTemplate(template) {
2342
2398
  return __awaiter(this, void 0, void 0, function () {
2343
- var context, currentDate, reservedParameters, RESERVED_PARAMETER_NAMES_1, RESERVED_PARAMETER_NAMES_1_1, parameterName;
2399
+ var context, knowledge, samples, currentDate, modelName, reservedParameters, RESERVED_PARAMETER_NAMES_1, RESERVED_PARAMETER_NAMES_1_1, parameterName;
2344
2400
  var e_3, _a;
2345
2401
  return __generator(this, function (_b) {
2346
2402
  switch (_b.label) {
2347
2403
  case 0: return [4 /*yield*/, getContextForTemplate(template)];
2348
2404
  case 1:
2349
2405
  context = _b.sent();
2406
+ return [4 /*yield*/, getKnowledgeForTemplate(template)];
2407
+ case 2:
2408
+ knowledge = _b.sent();
2409
+ return [4 /*yield*/, getSamplesForTemplate(template)];
2410
+ case 3:
2411
+ samples = _b.sent();
2350
2412
  currentDate = new Date().toISOString();
2413
+ modelName = RESERVED_PARAMETER_MISSING_VALUE;
2351
2414
  reservedParameters = {
2352
2415
  context: context,
2416
+ knowledge: knowledge,
2417
+ samples: samples,
2353
2418
  currentDate: currentDate,
2419
+ modelName: modelName,
2354
2420
  };
2355
2421
  try {
2356
2422
  // Note: Doublecheck that ALL reserved parameters are defined:
@@ -2375,7 +2441,7 @@
2375
2441
  }
2376
2442
  function executeSingleTemplate(currentTemplate) {
2377
2443
  return __awaiter(this, void 0, void 0, function () {
2378
- var name, title, priority, usedParameterNames, dependentParameterNames, definedParameters, _a, _b, _c, definedParameterNames, parameters, _d, _e, parameterName, prompt, chatResult, completionResult, embeddingResult, result, resultString, expectError, scriptPipelineExecutionErrors, maxAttempts, jokerParameterNames, attempt, isJokerAttempt, jokerParameterName, _f, _g, _h, _j, scriptTools, error_2, e_4_1, _k, _l, functionName, postprocessingError, _m, _o, scriptTools, error_3, e_5_1, e_6_1, error_4;
2444
+ var name, title, priority, usedParameterNames, dependentParameterNames, definedParameters, _a, _b, _c, definedParameterNames, parameters, _d, _e, parameterName, prompt, chatResult, completionResult, embeddingResult, result, resultString, expectError, scriptPipelineExecutionErrors, maxAttempts, jokerParameterNames, preparedContent, attempt, isJokerAttempt, jokerParameterName, _f, _g, _h, _j, scriptTools, error_2, e_4_1, _k, _l, functionName, postprocessingError, _m, _o, scriptTools, error_3, e_5_1, e_6_1, error_4;
2379
2445
  var e_7, _p, e_4, _q, e_6, _r, e_5, _s, _t;
2380
2446
  var _this = this;
2381
2447
  return __generator(this, function (_u) {
@@ -2445,6 +2511,9 @@
2445
2511
  expectError = null;
2446
2512
  maxAttempts = currentTemplate.blockType === 'PROMPT_DIALOG' ? Infinity : maxExecutionAttempts;
2447
2513
  jokerParameterNames = currentTemplate.jokerParameterNames || [];
2514
+ preparedContent = (currentTemplate.preparedContent || '{content}')
2515
+ .split('{content}')
2516
+ .join(currentTemplate.content);
2448
2517
  attempt = -jokerParameterNames.length;
2449
2518
  _u.label = 4;
2450
2519
  case 4:
@@ -2479,7 +2548,7 @@
2479
2548
  }
2480
2549
  return [3 /*break*/, 29];
2481
2550
  case 6:
2482
- resultString = replaceParameters(currentTemplate.content, parameters);
2551
+ resultString = replaceParameters(preparedContent, parameters);
2483
2552
  return [3 /*break*/, 30];
2484
2553
  case 7:
2485
2554
  prompt = {
@@ -2488,7 +2557,7 @@
2488
2557
  ? pipeline.pipelineUrl
2489
2558
  : 'anonymous' /* <- TODO: [🧠] How to deal with anonymous pipelines, do here some auto-url like SHA-256 based ad-hoc identifier? */, "#").concat(currentTemplate.name),
2490
2559
  parameters: parameters,
2491
- content: currentTemplate.content,
2560
+ content: preparedContent,
2492
2561
  modelRequirements: currentTemplate.modelRequirements,
2493
2562
  expectations: __assign(__assign({}, (pipeline.personas.find(function (_a) {
2494
2563
  var name = _a.name;
@@ -2610,7 +2679,7 @@
2610
2679
  _u.trys.push([19, 21, , 22]);
2611
2680
  return [4 /*yield*/, scriptTools.execute(deepFreeze({
2612
2681
  scriptLanguage: currentTemplate.contentLanguage,
2613
- script: currentTemplate.content,
2682
+ script: preparedContent,
2614
2683
  parameters: parameters,
2615
2684
  }))];
2616
2685
  case 20:
@@ -2659,7 +2728,7 @@
2659
2728
  return [4 /*yield*/, tools.userInterface.promptDialog(deepFreeze({
2660
2729
  promptTitle: currentTemplate.title,
2661
2730
  promptMessage: replaceParameters(currentTemplate.description || '', parameters),
2662
- defaultValue: replaceParameters(currentTemplate.content, parameters),
2731
+ defaultValue: replaceParameters(preparedContent, parameters),
2663
2732
  // TODO: [🧠] !! Figure out how to define placeholder in .ptbk.md file
2664
2733
  placeholder: undefined,
2665
2734
  priority: priority,
@@ -2837,7 +2906,7 @@
2837
2906
  var parameter = _c.value;
2838
2907
  if (parametersToPass[parameter.name] === undefined) {
2839
2908
  // [4]
2840
- errors.push(new PipelineExecutionError("Parameter {".concat(parameter.name, "} should be an output parameter, but it was not be resolved")));
2909
+ warnings.push(new PipelineExecutionError("Parameter {".concat(parameter.name, "} should be an output parameter, but it was not generated during pipeline execution")));
2841
2910
  continue;
2842
2911
  }
2843
2912
  outputParameters[parameter.name] = parametersToPass[parameter.name] || '';
@@ -2852,7 +2921,7 @@
2852
2921
  }
2853
2922
  return outputParameters;
2854
2923
  }
2855
- var executionReport, _a, _b, parameter, errors, _loop_1, _c, _d, parameterName, state_1, parametersToPass, resovedParameterNames_1, unresovedTemplates_1, resolving_1, loopLimit, _loop_2, error_1, usage_1, outputParameters_1, usage, outputParameters;
2924
+ var errors, warnings, executionReport, _a, _b, parameter, _loop_1, _c, _d, parameterName, state_1, parametersToPass, resovedParameterNames_1, unresovedTemplates_1, resolving_1, loopLimit, _loop_2, error_1, usage_1, outputParameters_1, usage, outputParameters;
2856
2925
  var e_1, _e, e_2, _f;
2857
2926
  return __generator(this, function (_g) {
2858
2927
  switch (_g.label) {
@@ -2867,6 +2936,8 @@
2867
2936
  pipeline = _g.sent();
2868
2937
  _g.label = 2;
2869
2938
  case 2:
2939
+ errors = [];
2940
+ warnings = [];
2870
2941
  executionReport = {
2871
2942
  pipelineUrl: pipeline.pipelineUrl,
2872
2943
  title: pipeline.title,
@@ -2885,10 +2956,10 @@
2885
2956
  if (inputParameters[parameter.name] === undefined) {
2886
2957
  return [2 /*return*/, deepFreezeWithSameType({
2887
2958
  isSuccessful: false,
2888
- errors: [
2889
- new PipelineExecutionError("Parameter {".concat(parameter.name, "} is required as an input parameter")),
2890
- // <- TODO: !!!!! Test this error
2891
- ],
2959
+ errors: __spreadArray([
2960
+ new PipelineExecutionError("Parameter {".concat(parameter.name, "} is required as an input parameter"))
2961
+ ], __read(errors), false),
2962
+ warnings: [],
2892
2963
  executionReport: executionReport,
2893
2964
  outputParameters: {},
2894
2965
  usage: ZERO_USAGE,
@@ -2903,22 +2974,21 @@
2903
2974
  }
2904
2975
  finally { if (e_1) throw e_1.error; }
2905
2976
  }
2906
- errors = [];
2907
2977
  _loop_1 = function (parameterName) {
2908
2978
  var parameter = pipeline.parameters.find(function (_a) {
2909
2979
  var name = _a.name;
2910
2980
  return name === parameterName;
2911
2981
  });
2912
2982
  if (parameter === undefined) {
2913
- errors.push(new PipelineExecutionError("Extra parameter {".concat(parameterName, "} is passed as input parameter")));
2983
+ warnings.push(new PipelineExecutionError("Extra parameter {".concat(parameterName, "} is being passed which is not part of the pipeline.")));
2914
2984
  }
2915
2985
  else if (parameter.isInput === false) {
2916
2986
  return { value: deepFreezeWithSameType({
2917
2987
  isSuccessful: false,
2918
- errors: [
2919
- new PipelineExecutionError("Parameter {".concat(parameter.name, "} is passed as input parameter but is not input")),
2920
- // <- TODO: !!!!! Test this error
2921
- ],
2988
+ errors: __spreadArray([
2989
+ new PipelineExecutionError("Parameter {".concat(parameter.name, "} is passed as input parameter but it is not input"))
2990
+ ], __read(errors), false),
2991
+ warnings: warnings,
2922
2992
  executionReport: executionReport,
2923
2993
  outputParameters: {},
2924
2994
  usage: ZERO_USAGE,
@@ -3027,6 +3097,7 @@
3027
3097
  return [2 /*return*/, deepFreezeWithSameType({
3028
3098
  isSuccessful: false,
3029
3099
  errors: __spreadArray([error_1], __read(errors), false),
3100
+ warnings: warnings,
3030
3101
  usage: usage_1,
3031
3102
  executionReport: executionReport,
3032
3103
  outputParameters: outputParameters_1,
@@ -3040,6 +3111,7 @@
3040
3111
  return [2 /*return*/, deepFreezeWithSameType({
3041
3112
  isSuccessful: true,
3042
3113
  errors: errors,
3114
+ warnings: warnings,
3043
3115
  usage: usage,
3044
3116
  executionReport: executionReport,
3045
3117
  outputParameters: outputParameters,
@@ -3066,7 +3138,7 @@
3066
3138
  */
3067
3139
  function prepareKnowledgeFromMarkdown(content /* <- TODO: [🖖] (?maybe not) Always the file */, options) {
3068
3140
  return __awaiter(this, void 0, void 0, function () {
3069
- var llmTools, _a, maxParallelCount, _b, isVerbose, collection, prepareKnowledgeFromMarkdownExecutor, _c, prepareTitleExecutor, _d, prepareKeywordsExecutor, _e, result, outputParameters, knowledgeRaw, knowledgeTextPieces, knowledge;
3141
+ var llmTools, _a, maxParallelCount, _b, isVerbose, collection, prepareKnowledgeFromMarkdownExecutor, _c, prepareTitleExecutor, _d, prepareKeywordsExecutor, _e, result, outputParameters, knowledgePiecesRaw, knowledgeTextPieces, knowledge;
3070
3142
  var _f, _g, _h;
3071
3143
  var _this = this;
3072
3144
  return __generator(this, function (_j) {
@@ -3107,8 +3179,8 @@
3107
3179
  result = _j.sent();
3108
3180
  assertsExecutionSuccessful(result);
3109
3181
  outputParameters = result.outputParameters;
3110
- knowledgeRaw = outputParameters.knowledge;
3111
- knowledgeTextPieces = (knowledgeRaw || '').split('\n---\n');
3182
+ knowledgePiecesRaw = outputParameters.knowledgePieces;
3183
+ knowledgeTextPieces = (knowledgePiecesRaw || '').split('\n---\n');
3112
3184
  if (isVerbose) {
3113
3185
  console.info('knowledgeTextPieces:', knowledgeTextPieces);
3114
3186
  }
@@ -3322,6 +3394,53 @@
3322
3394
  * TODO: [🏢] !! Check validity of `temperature` in pipeline
3323
3395
  */
3324
3396
 
3397
+ /**
3398
+ * @@@
3399
+ */
3400
+ function prepareTemplates(pipeline, options) {
3401
+ return __awaiter(this, void 0, void 0, function () {
3402
+ var _a, maxParallelCount, promptTemplates, parameters, knowledgePiecesCount, promptTemplatesPrepared;
3403
+ var _this = this;
3404
+ return __generator(this, function (_b) {
3405
+ switch (_b.label) {
3406
+ case 0:
3407
+ _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? MAX_PARALLEL_COUNT : _a;
3408
+ promptTemplates = pipeline.promptTemplates, parameters = pipeline.parameters, knowledgePiecesCount = pipeline.knowledgePiecesCount;
3409
+ // TODO: !!!! Apply samples to each template (if missing and is for the template defined)
3410
+ TODO_USE(parameters);
3411
+ promptTemplatesPrepared = new Array(promptTemplates.length);
3412
+ return [4 /*yield*/, forEachAsync(promptTemplates, { maxParallelCount: maxParallelCount /* <- TODO: [🪂] When there are subtasks, this maximul limit can be broken */ }, function (template, index) { return __awaiter(_this, void 0, void 0, function () {
3413
+ var preparedContent, preparedTemplate;
3414
+ return __generator(this, function (_a) {
3415
+ preparedContent = undefined;
3416
+ if (knowledgePiecesCount > 0) {
3417
+ preparedContent = spaceTrim.spaceTrim("\n {content}\n\n ## Knowledge\n\n {knowledge}\n ");
3418
+ // <- TODO: [🧠][🧻] Cutomize shape/language/formatting of the addition to the prompt
3419
+ }
3420
+ preparedTemplate = __assign(__assign({}, template), { preparedContent: preparedContent });
3421
+ promptTemplatesPrepared[index] = preparedTemplate;
3422
+ return [2 /*return*/];
3423
+ });
3424
+ }); })];
3425
+ case 1:
3426
+ _b.sent();
3427
+ return [2 /*return*/, { promptTemplatesPrepared: promptTemplatesPrepared }];
3428
+ }
3429
+ });
3430
+ });
3431
+ }
3432
+ /**
3433
+ * TODO: [🧠] Add context to each template (if missing)
3434
+ * TODO: [🧠] What is better name `prepareTemplate` or `prepareTemplateAndParameters`
3435
+ * TODO: !!!!! Index the samples and maybe templates
3436
+ * TODO: [🔼] !!! Export via `@promptbook/core`
3437
+ * TODO: Write tests for `preparePipeline`
3438
+ * TODO: [🏏] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
3439
+ * TODO: [🧊] In future one preparation can take data from previous preparation and save tokens and time
3440
+ * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
3441
+ * TODO: [🧠][🥜]
3442
+ */
3443
+
3325
3444
  /**
3326
3445
  * Prepare pipeline from string (markdown) format to JSON format
3327
3446
  *
@@ -3330,18 +3449,18 @@
3330
3449
  */
3331
3450
  function preparePipeline(pipeline, options) {
3332
3451
  return __awaiter(this, void 0, void 0, function () {
3333
- var _a, maxParallelCount,
3452
+ var _a, maxParallelCount, parameters, promptTemplates,
3334
3453
  /*
3335
3454
  <- TODO: [🧠][0] `promptbookVersion` */
3336
3455
  knowledgeSources /*
3337
3456
  <- TODO: [🧊] `knowledgePieces` */, personas /*
3338
- <- TODO: [🧊] `preparations` */, currentPreparation, preparations, preparedPersonas, knowledgeSourcesPrepared, partialknowledgePiecesPrepared, knowledgePiecesPrepared;
3457
+ <- TODO: [🧊] `preparations` */, currentPreparation, preparations, preparedPersonas, knowledgeSourcesPrepared, partialknowledgePiecesPrepared, knowledgePiecesPrepared, promptTemplatesPrepared /* TODO: parameters: parametersPrepared*/;
3339
3458
  var _this = this;
3340
3459
  return __generator(this, function (_b) {
3341
3460
  switch (_b.label) {
3342
3461
  case 0:
3343
3462
  _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? MAX_PARALLEL_COUNT : _a;
3344
- knowledgeSources = pipeline.knowledgeSources, personas = pipeline.personas;
3463
+ parameters = pipeline.parameters, promptTemplates = pipeline.promptTemplates, knowledgeSources = pipeline.knowledgeSources, personas = pipeline.personas;
3345
3464
  currentPreparation = {
3346
3465
  id: 1,
3347
3466
  // TODO: [🍥]> date: $currentDate(),
@@ -3374,16 +3493,20 @@
3374
3493
  case 2:
3375
3494
  partialknowledgePiecesPrepared = _b.sent();
3376
3495
  knowledgePiecesPrepared = partialknowledgePiecesPrepared.map(function (piece) { return (__assign(__assign({}, piece), { preparationIds: [/* TODO: [🧊] -> */ currentPreparation.id] })); });
3377
- // ----- /Knowledge preparation -----
3378
- // TODO: !!!!! Add context to each template (if missing)
3379
- // TODO: !!!!! Apply samples to each template (if missing)
3380
- return [2 /*return*/, __assign(__assign({}, pipeline), { knowledgeSources: knowledgeSourcesPrepared, knowledgePieces: knowledgePiecesPrepared, personas: preparedPersonas, preparations: preparations })];
3496
+ return [4 /*yield*/, prepareTemplates({
3497
+ parameters: parameters,
3498
+ promptTemplates: promptTemplates,
3499
+ knowledgePiecesCount: knowledgePiecesPrepared.length,
3500
+ }, options)];
3501
+ case 3:
3502
+ promptTemplatesPrepared = (_b.sent()).promptTemplatesPrepared;
3503
+ // ----- /Templates preparation -----
3504
+ return [2 /*return*/, __assign(__assign({}, pipeline), { promptTemplates: promptTemplatesPrepared, knowledgeSources: knowledgeSourcesPrepared, knowledgePieces: knowledgePiecesPrepared, personas: preparedPersonas, preparations: preparations })];
3381
3505
  }
3382
3506
  });
3383
3507
  });
3384
3508
  }
3385
3509
  /**
3386
- * TODO: !!!!! Index the samples and maybe templates
3387
3510
  * TODO: [🔼] !!! Export via `@promptbook/core`
3388
3511
  * TODO: Write tests for `preparePipeline`
3389
3512
  * TODO: [🏏] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
@@ -5316,6 +5439,7 @@
5316
5439
  * TODO: [🥞] Not optimal parsing because `splitMarkdownIntoSections` is executed twice with same string, once through `flattenMarkdown` and second directly here
5317
5440
  * TODO: [♈] Probbably move expectations from templates to parameters
5318
5441
  * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
5442
+ * TODO: [🍙] Make some standart order of json properties
5319
5443
  */
5320
5444
 
5321
5445
  /**
@@ -5899,7 +6023,7 @@
5899
6023
  */
5900
6024
  AnthropicClaudeExecutionTools.prototype.callChatModel = function (prompt) {
5901
6025
  return __awaiter(this, void 0, void 0, function () {
5902
- var content, parameters, modelRequirements, rawRequest, start, complete, rawResponse, resultContent, usage;
6026
+ var content, parameters, modelRequirements, modelName, rawRequest, start, complete, rawResponse, resultContent, usage;
5903
6027
  return __generator(this, function (_a) {
5904
6028
  switch (_a.label) {
5905
6029
  case 0:
@@ -5911,6 +6035,7 @@
5911
6035
  if (modelRequirements.modelVariant !== 'CHAT') {
5912
6036
  throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
5913
6037
  }
6038
+ modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
5914
6039
  rawRequest = {
5915
6040
  model: modelRequirements.modelName || this.getDefaultChatModel().modelName,
5916
6041
  max_tokens: modelRequirements.maxTokens || 4096,
@@ -5922,7 +6047,7 @@
5922
6047
  messages: [
5923
6048
  {
5924
6049
  role: 'user',
5925
- content: replaceParameters(content, parameters),
6050
+ content: replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName })),
5926
6051
  },
5927
6052
  ],
5928
6053
  // TODO: Is here some equivalent of user identification?> user: this.options.user,
@@ -5983,9 +6108,9 @@
5983
6108
  throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
5984
6109
  }
5985
6110
 
5986
- const model = modelRequirements.modelName || this.getDefaultChatModel().modelName;
6111
+ const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
5987
6112
  const modelSettings = {
5988
- model: rawResponse.model || model,
6113
+ model: modelName,
5989
6114
  max_tokens: modelRequirements.maxTokens || 2000, // <- Note: 2000 is for lagacy reasons
5990
6115
  // <- TODO: [🌾] Make some global max cap for maxTokens
5991
6116
  // <- TODO: Use here `systemMessage`, `temperature` and `seed`
@@ -5993,7 +6118,7 @@
5993
6118
 
5994
6119
  const rawRequest: xxxx.Completions.CompletionCreateParamsNonStreaming = {
5995
6120
  ...modelSettings,
5996
- prompt: replaceParameters(content, parameters),
6121
+ prompt: replaceParameters(content, { ...parameters, modelName }),
5997
6122
  user: this.options.user,
5998
6123
  };
5999
6124
  const start: string_date_iso8601 = getCurrentIsoDate();
@@ -6500,7 +6625,7 @@
6500
6625
  */
6501
6626
  OpenAiExecutionTools.prototype.callChatModel = function (prompt) {
6502
6627
  return __awaiter(this, void 0, void 0, function () {
6503
- var content, parameters, modelRequirements, expectFormat, model, modelSettings, rawRequest, start, complete, rawResponse, resultContent, usage;
6628
+ var content, parameters, modelRequirements, expectFormat, modelName, modelSettings, rawRequest, start, complete, rawResponse, resultContent, usage;
6504
6629
  return __generator(this, function (_a) {
6505
6630
  switch (_a.label) {
6506
6631
  case 0:
@@ -6512,9 +6637,9 @@
6512
6637
  if (modelRequirements.modelVariant !== 'CHAT') {
6513
6638
  throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
6514
6639
  }
6515
- model = modelRequirements.modelName || this.getDefaultChatModel().modelName;
6640
+ modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
6516
6641
  modelSettings = {
6517
- model: model,
6642
+ model: modelName,
6518
6643
  max_tokens: modelRequirements.maxTokens,
6519
6644
  // <- TODO: [🌾] Make some global max cap for maxTokens
6520
6645
  temperature: modelRequirements.temperature,
@@ -6536,7 +6661,7 @@
6536
6661
  ])), false), [
6537
6662
  {
6538
6663
  role: 'user',
6539
- content: replaceParameters(content, parameters),
6664
+ content: replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName })),
6540
6665
  },
6541
6666
  ], false), user: this.options.user });
6542
6667
  start = getCurrentIsoDate();
@@ -6565,7 +6690,7 @@
6565
6690
  }
6566
6691
  return [2 /*return*/, {
6567
6692
  content: resultContent,
6568
- modelName: rawResponse.model || model,
6693
+ modelName: rawResponse.model || modelName,
6569
6694
  timing: {
6570
6695
  start: start,
6571
6696
  complete: complete,
@@ -6583,7 +6708,7 @@
6583
6708
  */
6584
6709
  OpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
6585
6710
  return __awaiter(this, void 0, void 0, function () {
6586
- var content, parameters, modelRequirements, model, modelSettings, rawRequest, start, complete, rawResponse, resultContent, usage;
6711
+ var content, parameters, modelRequirements, modelName, modelSettings, rawRequest, start, complete, rawResponse, resultContent, usage;
6587
6712
  return __generator(this, function (_a) {
6588
6713
  switch (_a.label) {
6589
6714
  case 0:
@@ -6595,16 +6720,16 @@
6595
6720
  if (modelRequirements.modelVariant !== 'COMPLETION') {
6596
6721
  throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
6597
6722
  }
6598
- model = modelRequirements.modelName || this.getDefaultCompletionModel().modelName;
6723
+ modelName = modelRequirements.modelName || this.getDefaultCompletionModel().modelName;
6599
6724
  modelSettings = {
6600
- model: model,
6725
+ model: modelName,
6601
6726
  max_tokens: modelRequirements.maxTokens || 2000,
6602
6727
  // <- TODO: [🌾] Make some global max cap for maxTokens
6603
6728
  temperature: modelRequirements.temperature,
6604
6729
  // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
6605
6730
  // <- Note: [🧆]
6606
6731
  };
6607
- rawRequest = __assign(__assign({}, modelSettings), { prompt: replaceParameters(content, parameters), user: this.options.user });
6732
+ rawRequest = __assign(__assign({}, modelSettings), { prompt: replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName })), user: this.options.user });
6608
6733
  start = getCurrentIsoDate();
6609
6734
  if (this.options.isVerbose) {
6610
6735
  console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
@@ -6628,7 +6753,7 @@
6628
6753
  usage = computeOpenaiUsage(content, resultContent || '', rawResponse);
6629
6754
  return [2 /*return*/, {
6630
6755
  content: resultContent,
6631
- modelName: rawResponse.model || model,
6756
+ modelName: rawResponse.model || modelName,
6632
6757
  timing: {
6633
6758
  start: start,
6634
6759
  complete: complete,
@@ -6646,7 +6771,7 @@
6646
6771
  */
6647
6772
  OpenAiExecutionTools.prototype.callEmbeddingModel = function (prompt) {
6648
6773
  return __awaiter(this, void 0, void 0, function () {
6649
- var content, parameters, modelRequirements, model, rawRequest, start, complete, rawResponse, resultContent, usage;
6774
+ var content, parameters, modelRequirements, modelName, rawRequest, start, complete, rawResponse, resultContent, usage;
6650
6775
  return __generator(this, function (_a) {
6651
6776
  switch (_a.label) {
6652
6777
  case 0:
@@ -6658,10 +6783,10 @@
6658
6783
  if (modelRequirements.modelVariant !== 'EMBEDDING') {
6659
6784
  throw new PipelineExecutionError('Use embed only for EMBEDDING variant');
6660
6785
  }
6661
- model = modelRequirements.modelName || this.getDefaultEmbeddingModel().modelName;
6786
+ modelName = modelRequirements.modelName || this.getDefaultEmbeddingModel().modelName;
6662
6787
  rawRequest = {
6663
- input: replaceParameters(content, parameters),
6664
- model: model,
6788
+ input: replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName })),
6789
+ model: modelName,
6665
6790
  // TODO: !!!! Test model 3 and dimensions
6666
6791
  };
6667
6792
  start = getCurrentIsoDate();
@@ -6683,7 +6808,7 @@
6683
6808
  usage = computeOpenaiUsage(content, '', rawResponse);
6684
6809
  return [2 /*return*/, {
6685
6810
  content: resultContent,
6686
- modelName: rawResponse.model || model,
6811
+ modelName: rawResponse.model || modelName,
6687
6812
  timing: {
6688
6813
  start: start,
6689
6814
  complete: complete,