@promptbook/node 0.61.0-27 → 0.61.0-29

package/esm/index.es.js CHANGED
@@ -654,7 +654,7 @@ function forEachAsync(array, options, callbackfunction) {
  });
  }

- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-26",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-26",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0-26",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-26",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0-26",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, 
write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-26",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0-26",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-26",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-28",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-28",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0-28",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-28",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0-28",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, 
write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-28",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0-28",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-28",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];

  /**
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
@@ -2268,7 +2268,7 @@ function union() {
  /**
  * The version of the Promptbook library
  */
- var PROMPTBOOK_VERSION = '0.61.0-26';
+ var PROMPTBOOK_VERSION = '0.61.0-28';
  // TODO: !!!! List here all the versions and annotate + put into script

  /**
@@ -2389,17 +2389,17 @@ function checkExpectations(expectations, value) {
  */
  function createPipelineExecutor(options) {
  var _this = this;
- var rawPipeline = options.pipeline, tools = options.tools, _a = options.settings, settings = _a === void 0 ? {} : _a;
- var _b = settings.maxExecutionAttempts, maxExecutionAttempts = _b === void 0 ? MAX_EXECUTION_ATTEMPTS : _b, _c = settings.maxParallelCount, maxParallelCount = _c === void 0 ? MAX_PARALLEL_COUNT : _c, _d = settings.isVerbose, isVerbose = _d === void 0 ? false : _d;
- validatePipeline(rawPipeline);
+ var pipeline = options.pipeline, tools = options.tools, _a = options.settings, settings = _a === void 0 ? {} : _a;
+ var _b = settings.maxExecutionAttempts, maxExecutionAttempts = _b === void 0 ? MAX_EXECUTION_ATTEMPTS : _b, _c = settings.maxParallelCount, maxParallelCount = _c === void 0 ? MAX_PARALLEL_COUNT : _c, _d = settings.isVerbose, isVerbose = _d === void 0 ? false : _d, _e = settings.isNotPreparedWarningSupressed, isNotPreparedWarningSupressed = _e === void 0 ? false : _e;
+ validatePipeline(pipeline);
  var llmTools = joinLlmExecutionTools.apply(void 0, __spreadArray([], __read(arrayableToArray(tools.llm)), false));
- var pipeline;
- if (isPipelinePrepared(rawPipeline)) {
- pipeline = rawPipeline;
+ var preparedPipeline;
+ if (isPipelinePrepared(pipeline)) {
+ preparedPipeline = pipeline;
  }
- else {
- // TODO: !!!!! This should be maybe warning in report
- console.warn(spaceTrim$1("\n Pipeline ".concat(rawPipeline.pipelineUrl || rawPipeline.sourceFile || rawPipeline.title, " is not prepared\n\n ").concat(rawPipeline.sourceFile, "\n\n It will be prepared ad-hoc before the first execution\n But it is recommended to prepare the pipeline during collection preparation\n\n @see more at https://ptbk.io/prepare-pipeline\n ")));
+ else if (isNotPreparedWarningSupressed !== true) {
+ // TODO: !!!!! Test that this work as intended together with prepared pipeline
+ console.warn(spaceTrim$1("\n Pipeline ".concat(pipeline.pipelineUrl || pipeline.sourceFile || pipeline.title, " is not prepared\n\n ").concat(pipeline.sourceFile, "\n\n It will be prepared ad-hoc before the first execution and **returned as `preparedPipeline` in `PipelineExecutorResult`**\n But it is recommended to prepare the pipeline during collection preparation\n\n @see more at https://ptbk.io/prepare-pipeline\n ")));
  }
  var pipelineExecutor = function (inputParameters, onProgress) { return __awaiter(_this, void 0, void 0, function () {
  // TODO: !!! Extract to separate functions and files - ALL FUNCTIONS BELOW
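The new `settings.isNotPreparedWarningSupressed` flag (spelled with one "p", exactly as in the code above) turns the not-prepared console warning into an opt-out. A minimal sketch of a caller that opts out, assuming the public `createPipelineExecutor` options match the destructuring above; the `pipeline` and `llm` values are placeholders for ones you already have:

```ts
// Sketch, not package documentation: suppress the "pipeline is not prepared"
// warning when ad-hoc preparation is intentional.
const executor = createPipelineExecutor({
    pipeline, // <- a PipelineJson, possibly not yet prepared
    tools: { llm },
    settings: {
        isVerbose: false,
        // New in this release; defaults to false, i.e. the warning still fires
        isNotPreparedWarningSupressed: true,
    },
});
```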
@@ -2416,9 +2416,9 @@ function createPipelineExecutor(options) {
  template) {
  return __awaiter(this, void 0, void 0, function () {
  return __generator(this, function (_a) {
- // TODO: [♨] Implement Better - use real index and keyword search
+ // TODO: [♨] Implement Better - use real index and keyword search from `template` and {samples}
  TODO_USE(template);
- return [2 /*return*/, pipeline.knowledgePieces.map(function (_a) {
+ return [2 /*return*/, preparedPipeline.knowledgePieces.map(function (_a) {
  var content = _a.content;
  return "- ".concat(content);
  }).join('\n')];
@@ -2491,7 +2491,7 @@ function createPipelineExecutor(options) {
  case 0:
  name = "pipeline-executor-frame-".concat(currentTemplate.name);
  title = currentTemplate.title;
- priority = pipeline.promptTemplates.length - pipeline.promptTemplates.indexOf(currentTemplate);
+ priority = preparedPipeline.promptTemplates.length - preparedPipeline.promptTemplates.indexOf(currentTemplate);
  if (!onProgress /* <- [3] */) return [3 /*break*/, 2]; /* <- [3] */
  return [4 /*yield*/, onProgress({
  name: name,
@@ -2595,13 +2595,13 @@ function createPipelineExecutor(options) {
  case 7:
  prompt = {
  title: currentTemplate.title,
- pipelineUrl: "".concat(pipeline.pipelineUrl
- ? pipeline.pipelineUrl
+ pipelineUrl: "".concat(preparedPipeline.pipelineUrl
+ ? preparedPipeline.pipelineUrl
  : 'anonymous' /* <- TODO: [🧠] How to deal with anonymous pipelines, do here some auto-url like SHA-256 based ad-hoc identifier? */, "#").concat(currentTemplate.name),
  parameters: parameters,
  content: preparedContent,
  modelRequirements: currentTemplate.modelRequirements,
- expectations: __assign(__assign({}, (pipeline.personas.find(function (_a) {
+ expectations: __assign(__assign({}, (preparedPipeline.personas.find(function (_a) {
  var name = _a.name;
  return name === currentTemplate.personaName;
  }) || {})), currentTemplate.expectations),
@@ -2893,7 +2893,7 @@ function createPipelineExecutor(options) {
  ) {
  // TODO: [🧠] Maybe put other blockTypes into report
  executionReport.promptExecutions.push({
- prompt: __assign(__assign({ '!!! All information': null }, prompt), { '!!! Wanted information': null, title: currentTemplate.title /* <- Note: If title in pipeline contains emojis, pass it innto report */, content: prompt.content, modelRequirements: prompt.modelRequirements, expectations: prompt.expectations, expectFormat: prompt.expectFormat }),
+ prompt: __assign({}, prompt),
  result: result || undefined,
  error: expectError || undefined,
  });
@@ -2934,7 +2934,7 @@ function createPipelineExecutor(options) {
  var outputParameters = {};
  try {
  // Note: Filter ONLY output parameters
- for (var _b = __values(pipeline.parameters.filter(function (_a) {
+ for (var _b = __values(preparedPipeline.parameters.filter(function (_a) {
  var isOutput = _a.isOutput;
  return isOutput;
  })), _c = _b.next(); !_c.done; _c = _b.next()) {
@@ -2961,29 +2961,29 @@ function createPipelineExecutor(options) {
  return __generator(this, function (_g) {
  switch (_g.label) {
  case 0:
- if (!(pipeline === undefined)) return [3 /*break*/, 2];
- return [4 /*yield*/, preparePipeline(rawPipeline, {
+ if (!(preparedPipeline === undefined)) return [3 /*break*/, 2];
+ return [4 /*yield*/, preparePipeline(pipeline, {
  llmTools: llmTools,
  isVerbose: isVerbose,
  maxParallelCount: maxParallelCount,
  })];
  case 1:
- pipeline = _g.sent();
+ preparedPipeline = _g.sent();
  _g.label = 2;
  case 2:
  errors = [];
  warnings = [];
  executionReport = {
- pipelineUrl: pipeline.pipelineUrl,
- title: pipeline.title,
+ pipelineUrl: preparedPipeline.pipelineUrl,
+ title: preparedPipeline.title,
  promptbookUsedVersion: PROMPTBOOK_VERSION,
- promptbookRequestedVersion: pipeline.promptbookVersion,
- description: pipeline.description,
+ promptbookRequestedVersion: preparedPipeline.promptbookVersion,
+ description: preparedPipeline.description,
  promptExecutions: [],
  };
  try {
  // Note: Check that all input input parameters are defined
- for (_a = __values(pipeline.parameters.filter(function (_a) {
+ for (_a = __values(preparedPipeline.parameters.filter(function (_a) {
  var isInput = _a.isInput;
  return isInput;
  })), _b = _a.next(); !_b.done; _b = _a.next()) {
@@ -2998,6 +2998,7 @@ function createPipelineExecutor(options) {
  executionReport: executionReport,
  outputParameters: {},
  usage: ZERO_USAGE,
+ preparedPipeline: preparedPipeline,
  })];
  }
  }
@@ -3010,7 +3011,7 @@ function createPipelineExecutor(options) {
  finally { if (e_1) throw e_1.error; }
  }
  _loop_1 = function (parameterName) {
- var parameter = pipeline.parameters.find(function (_a) {
+ var parameter = preparedPipeline.parameters.find(function (_a) {
  var name = _a.name;
  return name === parameterName;
  });
@@ -3027,6 +3028,7 @@ function createPipelineExecutor(options) {
  executionReport: executionReport,
  outputParameters: {},
  usage: ZERO_USAGE,
+ preparedPipeline: preparedPipeline,
  }) };
  }
  };
@@ -3050,7 +3052,7 @@ function createPipelineExecutor(options) {
  _g.label = 3;
  case 3:
  _g.trys.push([3, 8, , 9]);
- resovedParameterNames_1 = pipeline.parameters
+ resovedParameterNames_1 = preparedPipeline.parameters
  .filter(function (_a) {
  var isInput = _a.isInput;
  return isInput;
@@ -3059,7 +3061,7 @@ function createPipelineExecutor(options) {
  var name = _a.name;
  return name;
  });
- unresovedTemplates_1 = __spreadArray([], __read(pipeline.promptTemplates), false);
+ unresovedTemplates_1 = __spreadArray([], __read(preparedPipeline.promptTemplates), false);
  resolving_1 = [];
  loopLimit = LOOP_LIMIT;
  _loop_2 = function () {
@@ -3136,6 +3138,7 @@ function createPipelineExecutor(options) {
  usage: usage_1,
  executionReport: executionReport,
  outputParameters: outputParameters_1,
+ preparedPipeline: preparedPipeline,
  })];
  case 9:
  usage = addUsage.apply(void 0, __spreadArray([], __read(executionReport.promptExecutions.map(function (_a) {
@@ -3150,6 +3153,7 @@ function createPipelineExecutor(options) {
  usage: usage,
  executionReport: executionReport,
  outputParameters: outputParameters,
+ preparedPipeline: preparedPipeline,
  })];
  }
  });
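Every `return` path above now includes `preparedPipeline`, so an executor result carries the pipeline exactly as it was run. A sketch of how a caller might reuse it; the field names come from this diff, but the surrounding result shape is assumed:

```ts
// Sketch: run once, then reuse the ad-hoc prepared pipeline so subsequent
// executors skip preparation (and the not-prepared warning) entirely.
const result = await pipelineExecutor(inputParameters);
const { outputParameters, preparedPipeline } = result;

const nextExecutor = createPipelineExecutor({
    pipeline: preparedPipeline, // <- already prepared
    tools: { llm },
});
```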
@@ -3157,8 +3161,6 @@ function createPipelineExecutor(options) {
  return pipelineExecutor;
  }
  /**
- * TODO: !!!!! return `preparedPipeline` from execution
- * TODO: !!!!! `isNotPreparedWarningSupressed`
  * TODO: Use isVerbose here (not only pass to `preparePipeline`)
  * TODO: [🪂] Use maxParallelCount here (not only pass to `preparePipeline`)
  * TODO: [♈] Probbably move expectations from templates to parameters
@@ -3218,6 +3220,7 @@ function prepareKnowledgeFromMarkdown(knowledgeContent /* <- TODO: [🖖] (?mayb
  outputParameters = result.outputParameters;
  knowledgePiecesRaw = outputParameters.knowledgePieces;
  knowledgeTextPieces = (knowledgePiecesRaw || '').split('\n---\n');
+ // <- TODO: !!!!! Smarter split and filter out empty pieces
  if (isVerbose) {
  console.info('knowledgeTextPieces:', knowledgeTextPieces);
  }
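The added TODO flags that the plain `split('\n---\n')` keeps empty and whitespace-only pieces. A minimal sketch of the smarter split it asks for; this is my illustration of the TODO, not code from the package:

```ts
// Tolerate longer dash runs and surrounding whitespace, then drop empty pieces.
const knowledgeTextPieces = (knowledgePiecesRaw || '')
    .split(/\n\s*-{3,}\s*\n/)
    .map((piece) => piece.trim())
    .filter((piece) => piece !== '');
```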
@@ -3443,7 +3446,7 @@ function prepareTemplates(pipeline, options) {
  case 0:
  _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? MAX_PARALLEL_COUNT : _a;
  promptTemplates = pipeline.promptTemplates, parameters = pipeline.parameters, knowledgePiecesCount = pipeline.knowledgePiecesCount;
- // TODO: !!!!!! Apply samples to each template (if missing and is for the template defined)
+ // TODO: !!!!! Apply samples to each template (if missing and is for the template defined)
  TODO_USE(parameters);
  promptTemplatesPrepared = new Array(promptTemplates.length);
  return [4 /*yield*/, forEachAsync(promptTemplates, { maxParallelCount: maxParallelCount /* <- TODO: [🪂] When there are subtasks, this maximul limit can be broken */ }, function (template, index) { return __awaiter(_this, void 0, void 0, function () {
@@ -3552,7 +3555,7 @@ function preparePipeline(pipeline, options) {
  * TODO: Write tests for `preparePipeline`
  * TODO: [🏏] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
  * TODO: [🧊] In future one preparation can take data from previous preparation and save tokens and time
- * TODO: [🎐] !!!!!! Use here countTotalUsage
+ * TODO: [🎐] !!!!! Use here countTotalUsage
  * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
  */

@@ -6058,7 +6061,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
  */
  AnthropicClaudeExecutionTools.prototype.callChatModel = function (prompt) {
  return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, modelName, rawRequest, start, complete, rawResponse, resultContent, usage;
+ var content, parameters, modelRequirements, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
  return __generator(this, function (_a) {
  switch (_a.label) {
  case 0:
@@ -6071,6 +6074,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
  throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
  }
  modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
+ rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
  rawRequest = {
  model: modelRequirements.modelName || this.getDefaultChatModel().modelName,
  max_tokens: modelRequirements.maxTokens || 4096,
@@ -6082,7 +6086,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
  messages: [
  {
  role: 'user',
- content: replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName })),
+ content: rawPromptContent,
  },
  ],
  // TODO: Is here some equivalent of user identification?> user: this.options.user,
@@ -6119,8 +6123,10 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
  complete: complete,
  },
  usage: usage,
+ rawPromptContent: rawPromptContent,
+ rawRequest: rawRequest,
  rawResponse: rawResponse,
- // <- [🤹‍♂️]
+ // <- [🗯]
  }];
  }
  });
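Chat results now expose `rawPromptContent` and `rawRequest` next to `rawResponse`, so the exact provider payload can be logged or asserted on. A sketch; only the three `raw*` fields are confirmed by this diff, the rest of the result shape is assumed:

```ts
// Sketch: inspect what was actually sent to Anthropic for a given prompt.
const chatResult = await anthropicTools.callChatModel(prompt);

console.info(chatResult.rawPromptContent); // prompt text after {parameter} substitution
console.info(chatResult.rawRequest); // full request body: model, max_tokens, messages, ...
console.info(chatResult.rawResponse); // unchanged from previous versions
```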
@@ -6153,7 +6159,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {

  const rawRequest: xxxx.Completions.CompletionCreateParamsNonStreaming = {
  ...modelSettings,
- prompt: replaceParameters(content, { ...parameters, modelName }),
+ prompt: rawPromptContent,
  user: this.options.user,
  };
  const start: string_date_iso8601 = getCurrentIsoDate();
@@ -6192,7 +6198,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
  },
  usage,
  rawResponse,
- // <- [🤹‍♂️]
+ // <- [🗯]
  };
  }
  */
@@ -6660,7 +6666,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
  */
  OpenAiExecutionTools.prototype.callChatModel = function (prompt) {
  return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, expectFormat, modelName, modelSettings, rawRequest, start, complete, rawResponse, resultContent, usage;
+ var content, parameters, modelRequirements, expectFormat, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
  return __generator(this, function (_a) {
  switch (_a.label) {
  case 0:
@@ -6686,6 +6692,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
  type: 'json_object',
  };
  }
+ rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
  rawRequest = __assign(__assign({}, modelSettings), { messages: __spreadArray(__spreadArray([], __read((modelRequirements.systemMessage === undefined
  ? []
  : [
6696
6703
  ])), false), [
6697
6704
  {
6698
6705
  role: 'user',
6699
- content: replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName })),
6706
+ content: rawPromptContent,
6700
6707
  },
6701
6708
  ], false), user: this.options.user });
6702
6709
  start = getCurrentIsoDate();
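Across all call sites, `replaceParameters(...)` is now computed once into `rawPromptContent` and reused for both the request and the returned result. The helper itself is outside this diff; the following sketch of `{parameterName}` substitution is an assumption about its behavior, not the package's implementation:

```ts
// Assumed behavior of replaceParameters: fill {name} placeholders from a record
// and fail loudly on parameters that were never provided.
function replaceParametersSketch(content: string, parameters: Record<string, string>): string {
    return content.replace(/\{(\w+)\}/g, (_match, name: string) => {
        if (parameters[name] === undefined) {
            throw new Error(`Parameter {${name}} is not defined`);
        }
        return parameters[name];
    });
}

// replaceParametersSketch('You are {modelName}', { modelName: 'gpt-4o' }) === 'You are gpt-4o'
```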
@@ -6731,8 +6738,10 @@ var OpenAiExecutionTools = /** @class */ (function () {
  complete: complete,
  },
  usage: usage,
+ rawPromptContent: rawPromptContent,
+ rawRequest: rawRequest,
  rawResponse: rawResponse,
- // <- [🤹‍♂️]
+ // <- [🗯]
  }];
  }
  });
@@ -6743,7 +6752,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
  */
  OpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
  return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, modelName, modelSettings, rawRequest, start, complete, rawResponse, resultContent, usage;
+ var content, parameters, modelRequirements, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
  return __generator(this, function (_a) {
  switch (_a.label) {
  case 0:
@@ -6764,7 +6773,8 @@ var OpenAiExecutionTools = /** @class */ (function () {
  // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
  // <- Note: [🧆]
  };
- rawRequest = __assign(__assign({}, modelSettings), { prompt: replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName })), user: this.options.user });
+ rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
+ rawRequest = __assign(__assign({}, modelSettings), { prompt: rawPromptContent, user: this.options.user });
  start = getCurrentIsoDate();
  if (this.options.isVerbose) {
  console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
@@ -6794,8 +6804,10 @@ var OpenAiExecutionTools = /** @class */ (function () {
  complete: complete,
  },
  usage: usage,
+ rawPromptContent: rawPromptContent,
+ rawRequest: rawRequest,
  rawResponse: rawResponse,
- // <- [🤹‍♂️]
+ // <- [🗯]
  }];
  }
  });
@@ -6806,7 +6818,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
  */
  OpenAiExecutionTools.prototype.callEmbeddingModel = function (prompt) {
  return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, modelName, rawRequest, start, complete, rawResponse, resultContent, usage;
+ var content, parameters, modelRequirements, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
  return __generator(this, function (_a) {
  switch (_a.label) {
  case 0:
@@ -6819,8 +6831,9 @@ var OpenAiExecutionTools = /** @class */ (function () {
  throw new PipelineExecutionError('Use embed only for EMBEDDING variant');
  }
  modelName = modelRequirements.modelName || this.getDefaultEmbeddingModel().modelName;
+ rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
  rawRequest = {
- input: replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName })),
+ input: rawPromptContent,
  model: modelName,
  };
  start = getCurrentIsoDate();
@@ -6848,8 +6861,10 @@ var OpenAiExecutionTools = /** @class */ (function () {
  complete: complete,
  },
  usage: usage,
+ rawPromptContent: rawPromptContent,
+ rawRequest: rawRequest,
  rawResponse: rawResponse,
- // <- [🤹‍♂️]
+ // <- [🗯]
  }];
  }
  });