@promptbook/node 0.61.0-23 → 0.61.0-24

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. package/esm/index.es.js +115 -51
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/conversion/pipelineStringToJsonSync.d.ts +1 -0
  4. package/esm/typings/src/conversion/utils/stringifyPipelineJson.d.ts +1 -0
  5. package/esm/typings/src/execution/PromptResultUsage.d.ts +3 -0
  6. package/esm/typings/src/prepare/preparePipeline.d.ts +0 -1
  7. package/esm/typings/src/prepare/prepareTemplates.d.ts +31 -0
  8. package/esm/typings/src/prepare/unpreparePipeline.d.ts +1 -0
  9. package/esm/typings/src/types/PipelineJson/KnowledgePieceJson.d.ts +3 -0
  10. package/esm/typings/src/types/PipelineJson/KnowledgeSourceJson.d.ts +3 -0
  11. package/esm/typings/src/types/PipelineJson/LlmTemplateJson.d.ts +1 -0
  12. package/esm/typings/src/types/PipelineJson/PersonaJson.d.ts +3 -0
  13. package/esm/typings/src/types/PipelineJson/PipelineJson.d.ts +1 -0
  14. package/esm/typings/src/types/PipelineJson/PreparationJson.d.ts +1 -0
  15. package/esm/typings/src/types/PipelineJson/PromptDialogJson.d.ts +4 -3
  16. package/esm/typings/src/types/PipelineJson/PromptTemplateJsonCommon.d.ts +10 -0
  17. package/esm/typings/src/types/PipelineJson/PromptTemplateParameterJson.d.ts +1 -0
  18. package/esm/typings/src/types/PipelineJson/ScriptJson.d.ts +5 -2
  19. package/esm/typings/src/types/PipelineJson/SimpleTemplateJson.d.ts +5 -2
  20. package/package.json +2 -2
  21. package/umd/index.umd.js +115 -51
  22. package/umd/index.umd.js.map +1 -1
  23. package/umd/typings/src/conversion/pipelineStringToJsonSync.d.ts +1 -0
  24. package/umd/typings/src/conversion/utils/stringifyPipelineJson.d.ts +1 -0
  25. package/umd/typings/src/execution/PromptResultUsage.d.ts +3 -0
  26. package/umd/typings/src/prepare/preparePipeline.d.ts +0 -1
  27. package/umd/typings/src/prepare/prepareTemplates.d.ts +31 -0
  28. package/umd/typings/src/prepare/unpreparePipeline.d.ts +1 -0
  29. package/umd/typings/src/types/PipelineJson/KnowledgePieceJson.d.ts +3 -0
  30. package/umd/typings/src/types/PipelineJson/KnowledgeSourceJson.d.ts +3 -0
  31. package/umd/typings/src/types/PipelineJson/LlmTemplateJson.d.ts +1 -0
  32. package/umd/typings/src/types/PipelineJson/PersonaJson.d.ts +3 -0
  33. package/umd/typings/src/types/PipelineJson/PipelineJson.d.ts +1 -0
  34. package/umd/typings/src/types/PipelineJson/PreparationJson.d.ts +1 -0
  35. package/umd/typings/src/types/PipelineJson/PromptDialogJson.d.ts +4 -3
  36. package/umd/typings/src/types/PipelineJson/PromptTemplateJsonCommon.d.ts +10 -0
  37. package/umd/typings/src/types/PipelineJson/PromptTemplateParameterJson.d.ts +1 -0
  38. package/umd/typings/src/types/PipelineJson/ScriptJson.d.ts +5 -2
  39. package/umd/typings/src/types/PipelineJson/SimpleTemplateJson.d.ts +5 -2
package/esm/index.es.js CHANGED
@@ -649,7 +649,7 @@ function forEachAsync(array, options, callbackfunction) {
  });
  }
 
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-22",parameters:[{name:"content",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {content}",dependentParameterNames:["content"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-22",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0-22",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {content}",dependentParameterNames:["content"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-22",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0-22",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- 
Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {content}",expectations:{words:{min:1,max:8}},dependentParameterNames:["content"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-22",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0-22",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-22",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-23",parameters:[{name:"content",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {content}",dependentParameterNames:["content"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-23",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0-23",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {content}",dependentParameterNames:["content"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-23",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0-23",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- 
Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {content}",expectations:{words:{min:1,max:8}},dependentParameterNames:["content"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-23",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0-23",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-23",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
 
  /**
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
@@ -1087,15 +1087,17 @@ var ReferenceError$1 = /** @class */ (function (_super) {
  * Unprepare just strips the preparation data of the pipeline
  */
  function unpreparePipeline(pipeline) {
- var personas = pipeline.personas, knowledgeSources = pipeline.knowledgeSources;
+ var personas = pipeline.personas, knowledgeSources = pipeline.knowledgeSources, promptTemplates = pipeline.promptTemplates;
  personas = personas.map(function (persona) { return (__assign(__assign({}, persona), { modelRequirements: undefined, preparationIds: undefined })); });
  knowledgeSources = knowledgeSources.map(function (knowledgeSource) { return (__assign(__assign({}, knowledgeSource), { preparationIds: undefined })); });
- return __assign(__assign({}, pipeline), { knowledgeSources: knowledgeSources, knowledgePieces: [], personas: personas, preparations: [] });
+ promptTemplates = promptTemplates.map(function (promptTemplate) { return (__assign(__assign({}, promptTemplate), { preparedContent: undefined })); });
+ return __assign(__assign({}, pipeline), { promptTemplates: promptTemplates, knowledgeSources: knowledgeSources, knowledgePieces: [], personas: personas, preparations: [] });
  }
  /**
  * TODO: [🔼] !!! Export via `@promptbook/core`
  * TODO: [🧿] Maybe do same process with same granularity and subfinctions as `preparePipeline`
  * TODO: Write tests for `preparePipeline`
+ * TODO: [🍙] Make some standart order of json properties
  */
 
  /**
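This hunk extends `unpreparePipeline` to also strip the new per-template `preparedContent` field. A minimal TypeScript sketch of the resulting behavior; the type shapes below are simplified assumptions, not the package's exact `PipelineJson` typings:

```typescript
// Shapes are simplified assumptions -- the real PipelineJson typings carry more fields.
type PromptTemplate = { content: string; preparedContent?: string };
type PipelineLike = {
    promptTemplates: PromptTemplate[];
    knowledgeSources: { preparationIds?: number[] }[];
    knowledgePieces: unknown[];
    personas: { modelRequirements?: unknown; preparationIds?: number[] }[];
    preparations: unknown[];
};

// Unprepare strips every artifact that preparePipeline produced,
// now including the per-template preparedContent added in this release.
function unpreparePipeline(pipeline: PipelineLike): PipelineLike {
    return {
        ...pipeline,
        promptTemplates: pipeline.promptTemplates.map((template) => ({ ...template, preparedContent: undefined })),
        knowledgeSources: pipeline.knowledgeSources.map((source) => ({ ...source, preparationIds: undefined })),
        knowledgePieces: [],
        personas: pipeline.personas.map((persona) => ({ ...persona, modelRequirements: undefined, preparationIds: undefined })),
        preparations: [],
    };
}
```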
@@ -2002,6 +2004,12 @@ function isPipelinePrepared(pipeline) {
  if (!pipeline.knowledgeSources.every(function (knowledgeSource) { return knowledgeSource.preparationIds !== undefined; })) {
  return false;
  }
+ /*
+ TODO: [🧠][🍫] `promptTemplates` can not be determined if they are fully prepared SO ignoring them
+ > if (!pipeline.promptTemplates.every(({ preparedContent }) => preparedContent === undefined)) {
+ > return false;
+ > }
+ */
  return true;
  }
  /**
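The added comment documents why `isPipelinePrepared` deliberately skips `promptTemplates`: a template with no knowledge to inject legitimately keeps `preparedContent === undefined` even after preparation, so the field cannot distinguish prepared from unprepared. A hedged sketch of the check as it stands after this hunk (surrounding types assumed):

```typescript
type PreparableLike = { knowledgeSources: { preparationIds?: number[] }[] };

function isPipelinePrepared(pipeline: PreparableLike): boolean {
    if (!pipeline.knowledgeSources.every((source) => source.preparationIds !== undefined)) {
        return false;
    }
    // promptTemplates are deliberately not checked: a template with no knowledge
    // to inject has preparedContent === undefined even after preparation.
    return true;
}
```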
@@ -2214,7 +2222,7 @@ function union() {
  /**
  * The version of the Promptbook library
  */
- var PROMPTBOOK_VERSION = '0.61.0-22';
+ var PROMPTBOOK_VERSION = '0.61.0-23';
  // TODO: !!!! List here all the versions and annotate + put into script
 
  /**
@@ -2344,7 +2352,8 @@ function createPipelineExecutor(options) {
  pipeline = rawPipeline;
  }
  else {
- console.warn(spaceTrim$1("\n Pipeline ".concat(rawPipeline.pipelineUrl || rawPipeline.sourceFile || rawPipeline.title, " is not prepared\n\n It will be prepared ad-hoc before the first execution\n But it is recommended to prepare the pipeline during collection preparation\n\n @see more at https://ptbk.io/prepare-pipeline\n ")));
+ // TODO: !!!! This should be maybe warning in report
+ console.warn(spaceTrim$1("\n Pipeline ".concat(rawPipeline.pipelineUrl || rawPipeline.sourceFile || rawPipeline.title, " is not prepared\n\n ").concat(rawPipeline.sourceFile, "\n\n It will be prepared ad-hoc before the first execution\n But it is recommended to prepare the pipeline during collection preparation\n\n @see more at https://ptbk.io/prepare-pipeline\n ")));
  }
  var pipelineExecutor = function (inputParameters, onProgress) { return __awaiter(_this, void 0, void 0, function () {
  // TODO: !!!!! Extract to separate functions and files - ALL FUNCTIONS BELOW
@@ -2353,7 +2362,7 @@ function createPipelineExecutor(options) {
  return __awaiter(this, void 0, void 0, function () {
  return __generator(this, function (_a) {
  TODO_USE(template);
- return [2 /*return*/, ''];
+ return [2 /*return*/, RESERVED_PARAMETER_MISSING_VALUE /* <- TODO: !!!! Implement */];
  });
  });
  }
@@ -2376,7 +2385,7 @@ function createPipelineExecutor(options) {
  return __generator(this, function (_a) {
  // TODO: !!!! Implement Better - use real index and keyword search
  TODO_USE(template);
- return [2 /*return*/, ''];
+ return [2 /*return*/, RESERVED_PARAMETER_MISSING_VALUE /* <- TODO: !!!! Implement */];
  });
  });
  }
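Both resolver stubs above now return `RESERVED_PARAMETER_MISSING_VALUE` instead of `''`, so an unimplemented reserved parameter such as `{knowledge}` shows up visibly in generated prompts rather than silently vanishing. A minimal sketch of the pattern; the sentinel's actual string value is not shown in the diff, so the one below is a placeholder assumption:

```typescript
// Placeholder value -- the diff only shows the constant being returned, not its definition.
const RESERVED_PARAMETER_MISSING_VALUE = 'RESERVED_PARAMETER_MISSING_VALUE';

async function getKnowledgeForTemplate(template: unknown): Promise<string> {
    // TODO: Implement real index and keyword search (per the TODO in the diff)
    void template;
    return RESERVED_PARAMETER_MISSING_VALUE; // <- visible sentinel instead of ''
}
```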
@@ -2427,7 +2436,7 @@ function createPipelineExecutor(options) {
  }
  function executeSingleTemplate(currentTemplate) {
  return __awaiter(this, void 0, void 0, function () {
- var name, title, priority, usedParameterNames, dependentParameterNames, definedParameters, _a, _b, _c, definedParameterNames, parameters, _d, _e, parameterName, prompt, chatResult, completionResult, embeddingResult, result, resultString, expectError, scriptPipelineExecutionErrors, maxAttempts, jokerParameterNames, attempt, isJokerAttempt, jokerParameterName, _f, _g, _h, _j, scriptTools, error_2, e_4_1, _k, _l, functionName, postprocessingError, _m, _o, scriptTools, error_3, e_5_1, e_6_1, error_4;
+ var name, title, priority, usedParameterNames, dependentParameterNames, definedParameters, _a, _b, _c, definedParameterNames, parameters, _d, _e, parameterName, prompt, chatResult, completionResult, embeddingResult, result, resultString, expectError, scriptPipelineExecutionErrors, maxAttempts, jokerParameterNames, preparedContent, attempt, isJokerAttempt, jokerParameterName, _f, _g, _h, _j, scriptTools, error_2, e_4_1, _k, _l, functionName, postprocessingError, _m, _o, scriptTools, error_3, e_5_1, e_6_1, error_4;
  var e_7, _p, e_4, _q, e_6, _r, e_5, _s, _t;
  var _this = this;
  return __generator(this, function (_u) {
@@ -2497,6 +2506,9 @@ function createPipelineExecutor(options) {
  expectError = null;
  maxAttempts = currentTemplate.blockType === 'PROMPT_DIALOG' ? Infinity : maxExecutionAttempts;
  jokerParameterNames = currentTemplate.jokerParameterNames || [];
+ preparedContent = (currentTemplate.preparedContent || '{content}')
+ .split('{content}')
+ .join(currentTemplate.content);
  attempt = -jokerParameterNames.length;
  _u.label = 4;
  case 4:
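The three added lines carry the release's main behavior change: just before execution, the template's `preparedContent` (a wrapper containing a literal `{content}` placeholder) is expanded with the raw template content. `.split().join()` replaces every occurrence without regex-escaping concerns. A standalone sketch:

```typescript
function expandPreparedContent(template: { content: string; preparedContent?: string }): string {
    // With no preparation, '{content}' acts as the identity wrapper.
    return (template.preparedContent || '{content}').split('{content}').join(template.content);
}

// Example: a template that preparation wrapped with a knowledge section
const expanded = expandPreparedContent({
    content: 'Summarize the document.',
    preparedContent: '{content}\n\n## Knowledge\n\n{knowledge}',
});
// -> 'Summarize the document.\n\n## Knowledge\n\n{knowledge}'
// ({knowledge} is itself resolved later by replaceParameters)
```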
@@ -2531,7 +2543,7 @@ function createPipelineExecutor(options) {
  }
  return [3 /*break*/, 29];
  case 6:
- resultString = replaceParameters(currentTemplate.content, parameters);
+ resultString = replaceParameters(preparedContent, parameters);
  return [3 /*break*/, 30];
  case 7:
  prompt = {
@@ -2540,7 +2552,7 @@ function createPipelineExecutor(options) {
  ? pipeline.pipelineUrl
  : 'anonymous' /* <- TODO: [🧠] How to deal with anonymous pipelines, do here some auto-url like SHA-256 based ad-hoc identifier? */, "#").concat(currentTemplate.name),
  parameters: parameters,
- content: currentTemplate.content,
+ content: preparedContent,
  modelRequirements: currentTemplate.modelRequirements,
  expectations: __assign(__assign({}, (pipeline.personas.find(function (_a) {
  var name = _a.name;
@@ -2662,7 +2674,7 @@ function createPipelineExecutor(options) {
  _u.trys.push([19, 21, , 22]);
  return [4 /*yield*/, scriptTools.execute(deepFreeze({
  scriptLanguage: currentTemplate.contentLanguage,
- script: currentTemplate.content,
+ script: preparedContent,
  parameters: parameters,
  }))];
  case 20:
@@ -2711,7 +2723,7 @@ function createPipelineExecutor(options) {
  return [4 /*yield*/, tools.userInterface.promptDialog(deepFreeze({
  promptTitle: currentTemplate.title,
  promptMessage: replaceParameters(currentTemplate.description || '', parameters),
- defaultValue: replaceParameters(currentTemplate.content, parameters),
+ defaultValue: replaceParameters(preparedContent, parameters),
  // TODO: [🧠] !! Figure out how to define placeholder in .ptbk.md file
  placeholder: undefined,
  priority: priority,
@@ -2889,7 +2901,7 @@ function createPipelineExecutor(options) {
  var parameter = _c.value;
  if (parametersToPass[parameter.name] === undefined) {
  // [4]
- warnings.push(new PipelineExecutionError("Parameter {".concat(parameter.name, "} should be an output parameter, but it was not be resolved")));
+ warnings.push(new PipelineExecutionError("Parameter {".concat(parameter.name, "} should be an output parameter, but it was not generated during pipeline execution")));
  continue;
  }
  outputParameters[parameter.name] = parametersToPass[parameter.name] || '';
@@ -2904,7 +2916,7 @@ function createPipelineExecutor(options) {
  }
  return outputParameters;
  }
- var executionReport, _a, _b, parameter, errors, warnings, _loop_1, _c, _d, parameterName, state_1, parametersToPass, resovedParameterNames_1, unresovedTemplates_1, resolving_1, loopLimit, _loop_2, error_1, usage_1, outputParameters_1, usage, outputParameters;
+ var errors, warnings, executionReport, _a, _b, parameter, _loop_1, _c, _d, parameterName, state_1, parametersToPass, resovedParameterNames_1, unresovedTemplates_1, resolving_1, loopLimit, _loop_2, error_1, usage_1, outputParameters_1, usage, outputParameters;
  var e_1, _e, e_2, _f;
  return __generator(this, function (_g) {
  switch (_g.label) {
@@ -2919,6 +2931,8 @@ function createPipelineExecutor(options) {
  pipeline = _g.sent();
  _g.label = 2;
  case 2:
+ errors = [];
+ warnings = [];
  executionReport = {
  pipelineUrl: pipeline.pipelineUrl,
  title: pipeline.title,
@@ -2937,9 +2951,9 @@ function createPipelineExecutor(options) {
  if (inputParameters[parameter.name] === undefined) {
  return [2 /*return*/, deepFreezeWithSameType({
  isSuccessful: false,
- errors: [
- new PipelineExecutionError("Parameter {".concat(parameter.name, "} is required as an input parameter")),
- ],
+ errors: __spreadArray([
+ new PipelineExecutionError("Parameter {".concat(parameter.name, "} is required as an input parameter"))
+ ], __read(errors), false),
  warnings: [],
  executionReport: executionReport,
  outputParameters: {},
@@ -2955,8 +2969,6 @@ function createPipelineExecutor(options) {
  }
  finally { if (e_1) throw e_1.error; }
  }
- errors = [];
- warnings = [];
  _loop_1 = function (parameterName) {
  var parameter = pipeline.parameters.find(function (_a) {
  var name = _a.name;
@@ -2968,9 +2980,9 @@ function createPipelineExecutor(options) {
  else if (parameter.isInput === false) {
  return { value: deepFreezeWithSameType({
  isSuccessful: false,
- errors: [
- new PipelineExecutionError("Parameter {".concat(parameter.name, "} is passed as input parameter but it is not input")),
- ],
+ errors: __spreadArray([
+ new PipelineExecutionError("Parameter {".concat(parameter.name, "} is passed as input parameter but it is not input"))
+ ], __read(errors), false),
  warnings: warnings,
  executionReport: executionReport,
  outputParameters: {},
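The three hunks above form one refactor: `errors` and `warnings` are now initialized before input validation runs, and each early-failure return prepends its own error to whatever has already accumulated, which is what the transpiled `__spreadArray([...], __read(errors), false)` expresses. In rough source form (names and types simplified):

```typescript
class PipelineExecutionError extends Error {}

function validateInputs(
    parameters: { name: string; isInput: boolean }[],
    inputParameters: Record<string, string>,
    errors: Error[], // now created before validation, so earlier errors survive
): { isSuccessful: boolean; errors: Error[] } {
    for (const parameter of parameters.filter(({ isInput }) => isInput)) {
        if (inputParameters[parameter.name] === undefined) {
            return {
                isSuccessful: false,
                // fresh error first, then anything collected before validation
                errors: [new PipelineExecutionError(`Parameter {${parameter.name}} is required as an input parameter`), ...errors],
            };
        }
    }
    return { isSuccessful: true, errors };
}
```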
@@ -3377,6 +3389,53 @@ function preparePersona(personaDescription, options) {
  * TODO: [🏢] !! Check validity of `temperature` in pipeline
  */
 
+ /**
+ * @@@
+ */
+ function prepareTemplates(pipeline, options) {
+ return __awaiter(this, void 0, void 0, function () {
+ var _a, maxParallelCount, promptTemplates, parameters, knowledgePiecesCount, promptTemplatesPrepared;
+ var _this = this;
+ return __generator(this, function (_b) {
+ switch (_b.label) {
+ case 0:
+ _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? MAX_PARALLEL_COUNT : _a;
+ promptTemplates = pipeline.promptTemplates, parameters = pipeline.parameters, knowledgePiecesCount = pipeline.knowledgePiecesCount;
+ // TODO: !!!! Apply samples to each template (if missing and is for the template defined)
+ TODO_USE(parameters);
+ promptTemplatesPrepared = new Array(promptTemplates.length);
+ return [4 /*yield*/, forEachAsync(promptTemplates, { maxParallelCount: maxParallelCount /* <- TODO: [🪂] When there are subtasks, this maximul limit can be broken */ }, function (template, index) { return __awaiter(_this, void 0, void 0, function () {
+ var preparedContent, preparedTemplate;
+ return __generator(this, function (_a) {
+ preparedContent = undefined;
+ if (knowledgePiecesCount > 0) {
+ preparedContent = spaceTrim$1("\n {content}\n\n ## Knowledge\n\n {knowledge}\n ");
+ // <- TODO: [🧠][🧻] Cutomize shape/language/formatting of the addition to the prompt
+ }
+ preparedTemplate = __assign(__assign({}, template), { preparedContent: preparedContent });
+ promptTemplatesPrepared[index] = preparedTemplate;
+ return [2 /*return*/];
+ });
+ }); })];
+ case 1:
+ _b.sent();
+ return [2 /*return*/, { promptTemplatesPrepared: promptTemplatesPrepared }];
+ }
+ });
+ });
+ }
+ /**
+ * TODO: [🧠] Add context to each template (if missing)
+ * TODO: [🧠] What is better name `prepareTemplate` or `prepareTemplateAndParameters`
+ * TODO: !!!!! Index the samples and maybe templates
+ * TODO: [🔼] !!! Export via `@promptbook/core`
+ * TODO: Write tests for `preparePipeline`
+ * TODO: [🏏] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
+ * TODO: [🧊] In future one preparation can take data from previous preparation and save tokens and time
+ * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
+ * TODO: [🧠][🥜]
+ */
+
  /**
  * Prepare pipeline from string (markdown) format to JSON format
  *
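The new `prepareTemplates` pass, added above in transpiled form, gives each template a `preparedContent` wrapper whenever the pipeline carries knowledge pieces. A TypeScript approximation; the real function walks templates with `forEachAsync` under `maxParallelCount`, which this sketch elides:

```typescript
type TemplateLike = { content: string; preparedContent?: string };

async function prepareTemplates(pipeline: {
    promptTemplates: TemplateLike[];
    knowledgePiecesCount: number;
}): Promise<{ promptTemplatesPrepared: TemplateLike[] }> {
    const { promptTemplates, knowledgePiecesCount } = pipeline;
    const promptTemplatesPrepared = promptTemplates.map((template) => {
        let preparedContent: string | undefined = undefined;
        if (knowledgePiecesCount > 0) {
            // '{content}' and '{knowledge}' stay as placeholders until execution time
            preparedContent = '{content}\n\n## Knowledge\n\n{knowledge}';
        }
        return { ...template, preparedContent };
    });
    return { promptTemplatesPrepared };
}
```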
@@ -3385,18 +3444,18 @@ function preparePersona(personaDescription, options) {
  */
  function preparePipeline(pipeline, options) {
  return __awaiter(this, void 0, void 0, function () {
- var _a, maxParallelCount,
+ var _a, maxParallelCount, parameters, promptTemplates,
  /*
  <- TODO: [🧠][0] `promptbookVersion` */
  knowledgeSources /*
  <- TODO: [🧊] `knowledgePieces` */, personas /*
- <- TODO: [🧊] `preparations` */, currentPreparation, preparations, preparedPersonas, knowledgeSourcesPrepared, partialknowledgePiecesPrepared, knowledgePiecesPrepared;
+ <- TODO: [🧊] `preparations` */, currentPreparation, preparations, preparedPersonas, knowledgeSourcesPrepared, partialknowledgePiecesPrepared, knowledgePiecesPrepared, promptTemplatesPrepared /* TODO: parameters: parametersPrepared*/;
  var _this = this;
  return __generator(this, function (_b) {
  switch (_b.label) {
  case 0:
  _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? MAX_PARALLEL_COUNT : _a;
- knowledgeSources = pipeline.knowledgeSources, personas = pipeline.personas;
+ parameters = pipeline.parameters, promptTemplates = pipeline.promptTemplates, knowledgeSources = pipeline.knowledgeSources, personas = pipeline.personas;
  currentPreparation = {
  id: 1,
  // TODO: [🍥]> date: $currentDate(),
@@ -3429,17 +3488,20 @@ function preparePipeline(pipeline, options) {
  case 2:
  partialknowledgePiecesPrepared = _b.sent();
  knowledgePiecesPrepared = partialknowledgePiecesPrepared.map(function (piece) { return (__assign(__assign({}, piece), { preparationIds: [/* TODO: [🧊] -> */ currentPreparation.id] })); });
- // ----- /Knowledge preparation -----
- // TODO: !!!!! Add context to each template (if missing)
- // TODO: !!!!! Add knowledge to each template (if missing and is in pipeline defined)
- // TODO: !!!!! Apply samples to each template (if missing and is for the template defined)
- return [2 /*return*/, __assign(__assign({}, pipeline), { knowledgeSources: knowledgeSourcesPrepared, knowledgePieces: knowledgePiecesPrepared, personas: preparedPersonas, preparations: preparations })];
+ return [4 /*yield*/, prepareTemplates({
+ parameters: parameters,
+ promptTemplates: promptTemplates,
+ knowledgePiecesCount: knowledgePiecesPrepared.length,
+ }, options)];
+ case 3:
+ promptTemplatesPrepared = (_b.sent()).promptTemplatesPrepared;
+ // ----- /Templates preparation -----
+ return [2 /*return*/, __assign(__assign({}, pipeline), { promptTemplates: promptTemplatesPrepared, knowledgeSources: knowledgeSourcesPrepared, knowledgePieces: knowledgePiecesPrepared, personas: preparedPersonas, preparations: preparations })];
  }
  });
  });
  }
  /**
- * TODO: !!!!! Index the samples and maybe templates
  * TODO: [🔼] !!! Export via `@promptbook/core`
  * TODO: Write tests for `preparePipeline`
  * TODO: [🏏] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
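With these hunks applied, `preparePipeline` chains a third phase after personas and knowledge. A condensed sketch of the resulting control flow; `preparePersonasPhase` and `prepareKnowledgePhase` are stand-in names for surrounding code the diff does not show in full:

```typescript
// Stand-in phase helpers -- assumptions, not the package's actual exports.
declare function preparePersonasPhase(pipeline: any, options: any): Promise<any[]>;
declare function prepareKnowledgePhase(pipeline: any, options: any): Promise<any[]>;
declare function prepareTemplates(subPipeline: any, options: any): Promise<{ promptTemplatesPrepared: any[] }>;

async function preparePipelineSketch(pipeline: any, options: any) {
    const preparedPersonas = await preparePersonasPhase(pipeline, options);
    const knowledgePiecesPrepared = await prepareKnowledgePhase(pipeline, options);
    const { promptTemplatesPrepared } = await prepareTemplates(
        {
            parameters: pipeline.parameters,
            promptTemplates: pipeline.promptTemplates,
            knowledgePiecesCount: knowledgePiecesPrepared.length, // <- decides whether '{knowledge}' is injected
        },
        options,
    );
    return {
        ...pipeline,
        promptTemplates: promptTemplatesPrepared,
        knowledgePieces: knowledgePiecesPrepared,
        personas: preparedPersonas,
    };
}
```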
@@ -5372,6 +5434,7 @@ function pipelineStringToJsonSync(pipelineString) {
  * TODO: [🥞] Not optimal parsing because `splitMarkdownIntoSections` is executed twice with same string, once through `flattenMarkdown` and second directly here
  * TODO: [♈] Probbably move expectations from templates to parameters
  * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
+ * TODO: [🍙] Make some standart order of json properties
  */
 
  /**
@@ -5955,7 +6018,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
  */
  AnthropicClaudeExecutionTools.prototype.callChatModel = function (prompt) {
  return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, rawRequest, start, complete, rawResponse, resultContent, usage;
+ var content, parameters, modelRequirements, modelName, rawRequest, start, complete, rawResponse, resultContent, usage;
  return __generator(this, function (_a) {
  switch (_a.label) {
  case 0:
@@ -5967,6 +6030,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
  if (modelRequirements.modelVariant !== 'CHAT') {
  throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
  }
+ modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
  rawRequest = {
  model: modelRequirements.modelName || this.getDefaultChatModel().modelName,
  max_tokens: modelRequirements.maxTokens || 4096,
@@ -5978,7 +6042,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
  messages: [
  {
  role: 'user',
- content: replaceParameters(content, parameters),
+ content: replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName })),
  },
  ],
  // TODO: Is here some equivalent of user identification?> user: this.options.user,
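The same `modelName` threading repeats across all provider hunks below (Anthropic chat and completion, OpenAI chat, completion, and embedding): the resolved model name is merged into the parameter map before `replaceParameters` runs, so a prompt can reference `{modelName}` and receive the model that will actually serve it. A sketch with a minimal stand-in for `replaceParameters`:

```typescript
// Minimal stand-in for the library's replaceParameters ('{name}' placeholders)
function replaceParameters(content: string, parameters: Record<string, string>): string {
    return content.replace(/\{(\w+)\}/g, (match, name) => parameters[name] ?? match);
}

const modelName = 'gpt-4-turbo'; // modelRequirements.modelName || getDefaultChatModel().modelName
const message = replaceParameters('You are running on {modelName}. Task: {task}', {
    task: 'summarize the document',
    modelName, // <- merged in alongside the user-supplied parameters
});
// -> 'You are running on gpt-4-turbo. Task: summarize the document'
```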
@@ -6039,9 +6103,9 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
  throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
  }
 
- const model = modelRequirements.modelName || this.getDefaultChatModel().modelName;
+ const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
  const modelSettings = {
- model: rawResponse.model || model,
+ model: modelName,
  max_tokens: modelRequirements.maxTokens || 2000, // <- Note: 2000 is for lagacy reasons
  // <- TODO: [🌾] Make some global max cap for maxTokens
  // <- TODO: Use here `systemMessage`, `temperature` and `seed`
@@ -6049,7 +6113,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
 
  const rawRequest: xxxx.Completions.CompletionCreateParamsNonStreaming = {
  ...modelSettings,
- prompt: replaceParameters(content, parameters),
+ prompt: replaceParameters(content, { ...parameters, modelName }),
  user: this.options.user,
  };
  const start: string_date_iso8601 = getCurrentIsoDate();
@@ -6556,7 +6620,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
  */
  OpenAiExecutionTools.prototype.callChatModel = function (prompt) {
  return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, expectFormat, model, modelSettings, rawRequest, start, complete, rawResponse, resultContent, usage;
+ var content, parameters, modelRequirements, expectFormat, modelName, modelSettings, rawRequest, start, complete, rawResponse, resultContent, usage;
  return __generator(this, function (_a) {
  switch (_a.label) {
  case 0:
@@ -6568,9 +6632,9 @@ var OpenAiExecutionTools = /** @class */ (function () {
  if (modelRequirements.modelVariant !== 'CHAT') {
  throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
  }
- model = modelRequirements.modelName || this.getDefaultChatModel().modelName;
+ modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
  modelSettings = {
- model: model,
+ model: modelName,
  max_tokens: modelRequirements.maxTokens,
  // <- TODO: [🌾] Make some global max cap for maxTokens
  temperature: modelRequirements.temperature,
@@ -6592,7 +6656,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
  ])), false), [
  {
  role: 'user',
- content: replaceParameters(content, parameters),
+ content: replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName })),
  },
  ], false), user: this.options.user });
  start = getCurrentIsoDate();
@@ -6621,7 +6685,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
  }
  return [2 /*return*/, {
  content: resultContent,
- modelName: rawResponse.model || model,
+ modelName: rawResponse.model || modelName,
  timing: {
  start: start,
  complete: complete,
@@ -6639,7 +6703,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
  */
  OpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
  return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, model, modelSettings, rawRequest, start, complete, rawResponse, resultContent, usage;
+ var content, parameters, modelRequirements, modelName, modelSettings, rawRequest, start, complete, rawResponse, resultContent, usage;
  return __generator(this, function (_a) {
  switch (_a.label) {
  case 0:
@@ -6651,16 +6715,16 @@ var OpenAiExecutionTools = /** @class */ (function () {
  if (modelRequirements.modelVariant !== 'COMPLETION') {
  throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
  }
- model = modelRequirements.modelName || this.getDefaultCompletionModel().modelName;
+ modelName = modelRequirements.modelName || this.getDefaultCompletionModel().modelName;
  modelSettings = {
- model: model,
+ model: modelName,
  max_tokens: modelRequirements.maxTokens || 2000,
  // <- TODO: [🌾] Make some global max cap for maxTokens
  temperature: modelRequirements.temperature,
  // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
  // <- Note: [🧆]
  };
- rawRequest = __assign(__assign({}, modelSettings), { prompt: replaceParameters(content, parameters), user: this.options.user });
+ rawRequest = __assign(__assign({}, modelSettings), { prompt: replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName })), user: this.options.user });
  start = getCurrentIsoDate();
  if (this.options.isVerbose) {
  console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
@@ -6684,7 +6748,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
  usage = computeOpenaiUsage(content, resultContent || '', rawResponse);
  return [2 /*return*/, {
  content: resultContent,
- modelName: rawResponse.model || model,
+ modelName: rawResponse.model || modelName,
  timing: {
  start: start,
  complete: complete,
@@ -6702,7 +6766,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
  */
  OpenAiExecutionTools.prototype.callEmbeddingModel = function (prompt) {
  return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, model, rawRequest, start, complete, rawResponse, resultContent, usage;
+ var content, parameters, modelRequirements, modelName, rawRequest, start, complete, rawResponse, resultContent, usage;
  return __generator(this, function (_a) {
  switch (_a.label) {
  case 0:
@@ -6714,10 +6778,10 @@ var OpenAiExecutionTools = /** @class */ (function () {
  if (modelRequirements.modelVariant !== 'EMBEDDING') {
  throw new PipelineExecutionError('Use embed only for EMBEDDING variant');
  }
- model = modelRequirements.modelName || this.getDefaultEmbeddingModel().modelName;
+ modelName = modelRequirements.modelName || this.getDefaultEmbeddingModel().modelName;
  rawRequest = {
- input: replaceParameters(content, parameters),
- model: model,
+ input: replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName })),
+ model: modelName,
  // TODO: !!!! Test model 3 and dimensions
  };
  start = getCurrentIsoDate();
@@ -6739,7 +6803,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
  usage = computeOpenaiUsage(content, '', rawResponse);
  return [2 /*return*/, {
  content: resultContent,
- modelName: rawResponse.model || model,
+ modelName: rawResponse.model || modelName,
  timing: {
  start: start,
  complete: complete,