@promptbook/node 0.61.0-28 → 0.61.0-29

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,4 +1,5 @@
1
1
  import type { Promisable } from 'type-fest';
2
+ import type { PipelineJson } from '../types/PipelineJson/PipelineJson';
2
3
  import { PipelineExecutionError } from '../errors/PipelineExecutionError';
3
4
  import type { TaskProgress } from '../types/TaskProgress';
4
5
  import type { ExecutionReportJson } from '../types/execution-report/ExecutionReportJson';
@@ -10,6 +11,8 @@ import type { PromptResultUsage } from './PromptResultUsage';
10
11
  *
11
12
  * It can be created with `createPipelineExecutor` function.
12
13
  *
14
+ * @@@ almost-JSON (what about errors)
15
+ *
13
16
  * @see https://github.com/webgptorg/promptbook#executor
14
17
  */
15
18
  export type PipelineExecutor = {
@@ -17,8 +20,16 @@ export type PipelineExecutor = {
17
20
  };
18
21
  /**
19
22
  * @@@
23
+ *
24
+ * @@@ almost-JSON (what about errors)
20
25
  */
21
26
  export type PipelineExecutorResult = {
27
+ /**
28
+ * Result parameters of the execution
29
+ *
30
+ * Note: If the execution was not successful, there are only some of the result parameters
31
+ */
32
+ readonly outputParameters: Parameters;
22
33
  /**
23
34
  * Whether the execution was successful, details are available in `executionReport`
24
35
  */
@@ -40,13 +51,14 @@ export type PipelineExecutorResult = {
40
51
  */
41
52
  readonly executionReport: ExecutionReportJson;
42
53
  /**
43
- * Result parameters of the execution
54
+ * The prepared pipeline that was used for the execution
44
55
  *
45
- * Note: If the execution was not successful, there are only some of the result parameters
56
+ * Note: If you called `createPipelineExecutor` with a fully prepared pipeline, this is the same object as that pipeline
57
+ * If you passed a pipeline that was not fully prepared, this is the same pipeline but fully prepared
46
58
  */
47
- readonly outputParameters: Parameters;
59
+ readonly preparedPipeline: PipelineJson;
48
60
  };
49
61
  /**
50
62
  * TODO: [🧠] Should this file be in /execution or /types folder?
51
- * TODO: [💷] `assertsExecutionSuccessful` should be the method of `PipelineExecutor` result
63
+ * TODO: [💷] `assertsExecutionSuccessful` should be the method of `PipelineExecutor` result - BUT maybe NOT?
52
64
  */
@@ -20,6 +20,16 @@ type CreatePipelineExecutorSettings = {
20
20
  * @default false
21
21
  */
22
22
  readonly isVerbose?: boolean;
23
+ /**
24
+ * If you pass a fully prepared pipeline, this does not matter
25
+ *
26
+ * Otherwise:
27
+ * If false or not set, warning is shown when pipeline is not prepared
28
+ * If true, warning is suppressed
29
+ *
30
+ * @default false
31
+ */
32
+ readonly isNotPreparedWarningSupressed?: boolean;
23
33
  };
24
34
  /**
25
35
  * Options for `createPipelineExecutor`
@@ -47,8 +57,6 @@ interface CreatePipelineExecutorOptions {
47
57
  export declare function createPipelineExecutor(options: CreatePipelineExecutorOptions): PipelineExecutor;
48
58
  export {};
49
59
  /**
50
- * TODO: !!!!! return `preparedPipeline` from execution
51
- * TODO: !!!!! `isNotPreparedWarningSupressed`
52
60
  * TODO: Use isVerbose here (not only pass to `preparePipeline`)
53
61
  * TODO: [🪂] Use maxParallelCount here (not only pass to `preparePipeline`)
54
62
  * TODO: [♈] Probably move expectations from templates to parameters
@@ -41,7 +41,7 @@ export type ExecutionReportJson = {
41
41
  /**
42
42
  * The prompt which was executed
43
43
  */
44
- readonly prompt: Omit<Prompt, 'pipelineUrl' | 'parameters'>;
44
+ readonly prompt: Omit<Prompt, 'pipelineUrl'>;
45
45
  /**
46
46
  * Result of the prompt execution (if not failed during LLM execution)
47
47
  */
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@promptbook/node",
3
- "version": "0.61.0-28",
3
+ "version": "0.61.0-29",
4
4
  "description": "Supercharge your use of large language models",
5
5
  "private": false,
6
6
  "sideEffects": false,
@@ -51,7 +51,7 @@
51
51
  }
52
52
  ],
53
53
  "peerDependencies": {
54
- "@promptbook/core": "0.61.0-28"
54
+ "@promptbook/core": "0.61.0-29"
55
55
  },
56
56
  "main": "./umd/index.umd.js",
57
57
  "module": "./esm/index.es.js",
package/umd/index.umd.js CHANGED
@@ -659,7 +659,7 @@
659
659
  });
660
660
  }
661
661
 
662
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-27",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-27",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0-27",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by 
comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-27",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0-27",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> 
{knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-27",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0-27",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. 
The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-27",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
662
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-28",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-28",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0-28",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by 
comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-28",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0-28",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> 
{knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-28",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0-28",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. 
The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-28",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
663
663
 
664
664
  /**
665
665
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
@@ -2273,7 +2273,7 @@
2273
2273
  /**
2274
2274
  * The version of the Promptbook library
2275
2275
  */
2276
- var PROMPTBOOK_VERSION = '0.61.0-27';
2276
+ var PROMPTBOOK_VERSION = '0.61.0-28';
2277
2277
  // TODO: !!!! List here all the versions and annotate + put into script
2278
2278
 
2279
2279
  /**
@@ -2394,17 +2394,17 @@
2394
2394
  */
2395
2395
  function createPipelineExecutor(options) {
2396
2396
  var _this = this;
2397
- var rawPipeline = options.pipeline, tools = options.tools, _a = options.settings, settings = _a === void 0 ? {} : _a;
2398
- var _b = settings.maxExecutionAttempts, maxExecutionAttempts = _b === void 0 ? MAX_EXECUTION_ATTEMPTS : _b, _c = settings.maxParallelCount, maxParallelCount = _c === void 0 ? MAX_PARALLEL_COUNT : _c, _d = settings.isVerbose, isVerbose = _d === void 0 ? false : _d;
2399
- validatePipeline(rawPipeline);
2397
+ var pipeline = options.pipeline, tools = options.tools, _a = options.settings, settings = _a === void 0 ? {} : _a;
2398
+ var _b = settings.maxExecutionAttempts, maxExecutionAttempts = _b === void 0 ? MAX_EXECUTION_ATTEMPTS : _b, _c = settings.maxParallelCount, maxParallelCount = _c === void 0 ? MAX_PARALLEL_COUNT : _c, _d = settings.isVerbose, isVerbose = _d === void 0 ? false : _d, _e = settings.isNotPreparedWarningSupressed, isNotPreparedWarningSupressed = _e === void 0 ? false : _e;
2399
+ validatePipeline(pipeline);
2400
2400
  var llmTools = joinLlmExecutionTools.apply(void 0, __spreadArray([], __read(arrayableToArray(tools.llm)), false));
2401
- var pipeline;
2402
- if (isPipelinePrepared(rawPipeline)) {
2403
- pipeline = rawPipeline;
2401
+ var preparedPipeline;
2402
+ if (isPipelinePrepared(pipeline)) {
2403
+ preparedPipeline = pipeline;
2404
2404
  }
2405
- else {
2406
- // TODO: !!!!! This should be maybe warning in report
2407
- console.warn(spaceTrim.spaceTrim("\n Pipeline ".concat(rawPipeline.pipelineUrl || rawPipeline.sourceFile || rawPipeline.title, " is not prepared\n\n ").concat(rawPipeline.sourceFile, "\n\n It will be prepared ad-hoc before the first execution\n But it is recommended to prepare the pipeline during collection preparation\n\n @see more at https://ptbk.io/prepare-pipeline\n ")));
2405
+ else if (isNotPreparedWarningSupressed !== true) {
2406
+ // TODO: !!!!! Test that this work as intended together with prepared pipeline
2407
+ console.warn(spaceTrim.spaceTrim("\n Pipeline ".concat(pipeline.pipelineUrl || pipeline.sourceFile || pipeline.title, " is not prepared\n\n ").concat(pipeline.sourceFile, "\n\n It will be prepared ad-hoc before the first execution and **returned as `preparedPipeline` in `PipelineExecutorResult`**\n But it is recommended to prepare the pipeline during collection preparation\n\n @see more at https://ptbk.io/prepare-pipeline\n ")));
2408
2408
  }
2409
2409
  var pipelineExecutor = function (inputParameters, onProgress) { return __awaiter(_this, void 0, void 0, function () {
2410
2410
  // TODO: !!! Extract to separate functions and files - ALL FUNCTIONS BELOW
@@ -2421,9 +2421,9 @@
2421
2421
  template) {
2422
2422
  return __awaiter(this, void 0, void 0, function () {
2423
2423
  return __generator(this, function (_a) {
2424
- // TODO: [♨] Implement Better - use real index and keyword search
2424
+ // TODO: [♨] Implement Better - use real index and keyword search from `template` and {samples}
2425
2425
  TODO_USE(template);
2426
- return [2 /*return*/, pipeline.knowledgePieces.map(function (_a) {
2426
+ return [2 /*return*/, preparedPipeline.knowledgePieces.map(function (_a) {
2427
2427
  var content = _a.content;
2428
2428
  return "- ".concat(content);
2429
2429
  }).join('\n')];
@@ -2496,7 +2496,7 @@
2496
2496
  case 0:
2497
2497
  name = "pipeline-executor-frame-".concat(currentTemplate.name);
2498
2498
  title = currentTemplate.title;
2499
- priority = pipeline.promptTemplates.length - pipeline.promptTemplates.indexOf(currentTemplate);
2499
+ priority = preparedPipeline.promptTemplates.length - preparedPipeline.promptTemplates.indexOf(currentTemplate);
2500
2500
  if (!onProgress /* <- [3] */) return [3 /*break*/, 2]; /* <- [3] */
2501
2501
  return [4 /*yield*/, onProgress({
2502
2502
  name: name,
@@ -2600,13 +2600,13 @@
2600
2600
  case 7:
2601
2601
  prompt = {
2602
2602
  title: currentTemplate.title,
2603
- pipelineUrl: "".concat(pipeline.pipelineUrl
2604
- ? pipeline.pipelineUrl
2603
+ pipelineUrl: "".concat(preparedPipeline.pipelineUrl
2604
+ ? preparedPipeline.pipelineUrl
2605
2605
  : 'anonymous' /* <- TODO: [🧠] How to deal with anonymous pipelines, do here some auto-url like SHA-256 based ad-hoc identifier? */, "#").concat(currentTemplate.name),
2606
2606
  parameters: parameters,
2607
2607
  content: preparedContent,
2608
2608
  modelRequirements: currentTemplate.modelRequirements,
2609
- expectations: __assign(__assign({}, (pipeline.personas.find(function (_a) {
2609
+ expectations: __assign(__assign({}, (preparedPipeline.personas.find(function (_a) {
2610
2610
  var name = _a.name;
2611
2611
  return name === currentTemplate.personaName;
2612
2612
  }) || {})), currentTemplate.expectations),
@@ -2898,7 +2898,7 @@
2898
2898
  ) {
2899
2899
  // TODO: [🧠] Maybe put other blockTypes into report
2900
2900
  executionReport.promptExecutions.push({
2901
- prompt: __assign(__assign({ '!!! All information': null }, prompt), { '!!! Wanted information': null, title: currentTemplate.title /* <- Note: If title in pipeline contains emojis, pass it innto report */, content: prompt.content, modelRequirements: prompt.modelRequirements, expectations: prompt.expectations, expectFormat: prompt.expectFormat }),
2901
+ prompt: __assign({}, prompt),
2902
2902
  result: result || undefined,
2903
2903
  error: expectError || undefined,
2904
2904
  });
@@ -2939,7 +2939,7 @@
2939
2939
  var outputParameters = {};
2940
2940
  try {
2941
2941
  // Note: Filter ONLY output parameters
2942
- for (var _b = __values(pipeline.parameters.filter(function (_a) {
2942
+ for (var _b = __values(preparedPipeline.parameters.filter(function (_a) {
2943
2943
  var isOutput = _a.isOutput;
2944
2944
  return isOutput;
2945
2945
  })), _c = _b.next(); !_c.done; _c = _b.next()) {
@@ -2966,29 +2966,29 @@
2966
2966
  return __generator(this, function (_g) {
2967
2967
  switch (_g.label) {
2968
2968
  case 0:
2969
- if (!(pipeline === undefined)) return [3 /*break*/, 2];
2970
- return [4 /*yield*/, preparePipeline(rawPipeline, {
2969
+ if (!(preparedPipeline === undefined)) return [3 /*break*/, 2];
2970
+ return [4 /*yield*/, preparePipeline(pipeline, {
2971
2971
  llmTools: llmTools,
2972
2972
  isVerbose: isVerbose,
2973
2973
  maxParallelCount: maxParallelCount,
2974
2974
  })];
2975
2975
  case 1:
2976
- pipeline = _g.sent();
2976
+ preparedPipeline = _g.sent();
2977
2977
  _g.label = 2;
2978
2978
  case 2:
2979
2979
  errors = [];
2980
2980
  warnings = [];
2981
2981
  executionReport = {
2982
- pipelineUrl: pipeline.pipelineUrl,
2983
- title: pipeline.title,
2982
+ pipelineUrl: preparedPipeline.pipelineUrl,
2983
+ title: preparedPipeline.title,
2984
2984
  promptbookUsedVersion: PROMPTBOOK_VERSION,
2985
- promptbookRequestedVersion: pipeline.promptbookVersion,
2986
- description: pipeline.description,
2985
+ promptbookRequestedVersion: preparedPipeline.promptbookVersion,
2986
+ description: preparedPipeline.description,
2987
2987
  promptExecutions: [],
2988
2988
  };
2989
2989
  try {
2990
2990
  // Note: Check that all input parameters are defined
2991
- for (_a = __values(pipeline.parameters.filter(function (_a) {
2991
+ for (_a = __values(preparedPipeline.parameters.filter(function (_a) {
2992
2992
  var isInput = _a.isInput;
2993
2993
  return isInput;
2994
2994
  })), _b = _a.next(); !_b.done; _b = _a.next()) {
@@ -3003,6 +3003,7 @@
3003
3003
  executionReport: executionReport,
3004
3004
  outputParameters: {},
3005
3005
  usage: ZERO_USAGE,
3006
+ preparedPipeline: preparedPipeline,
3006
3007
  })];
3007
3008
  }
3008
3009
  }
@@ -3015,7 +3016,7 @@
3015
3016
  finally { if (e_1) throw e_1.error; }
3016
3017
  }
3017
3018
  _loop_1 = function (parameterName) {
3018
- var parameter = pipeline.parameters.find(function (_a) {
3019
+ var parameter = preparedPipeline.parameters.find(function (_a) {
3019
3020
  var name = _a.name;
3020
3021
  return name === parameterName;
3021
3022
  });
@@ -3032,6 +3033,7 @@
3032
3033
  executionReport: executionReport,
3033
3034
  outputParameters: {},
3034
3035
  usage: ZERO_USAGE,
3036
+ preparedPipeline: preparedPipeline,
3035
3037
  }) };
3036
3038
  }
3037
3039
  };
@@ -3055,7 +3057,7 @@
3055
3057
  _g.label = 3;
3056
3058
  case 3:
3057
3059
  _g.trys.push([3, 8, , 9]);
3058
- resovedParameterNames_1 = pipeline.parameters
3060
+ resovedParameterNames_1 = preparedPipeline.parameters
3059
3061
  .filter(function (_a) {
3060
3062
  var isInput = _a.isInput;
3061
3063
  return isInput;
@@ -3064,7 +3066,7 @@
3064
3066
  var name = _a.name;
3065
3067
  return name;
3066
3068
  });
3067
- unresovedTemplates_1 = __spreadArray([], __read(pipeline.promptTemplates), false);
3069
+ unresovedTemplates_1 = __spreadArray([], __read(preparedPipeline.promptTemplates), false);
3068
3070
  resolving_1 = [];
3069
3071
  loopLimit = LOOP_LIMIT;
3070
3072
  _loop_2 = function () {
@@ -3141,6 +3143,7 @@
3141
3143
  usage: usage_1,
3142
3144
  executionReport: executionReport,
3143
3145
  outputParameters: outputParameters_1,
3146
+ preparedPipeline: preparedPipeline,
3144
3147
  })];
3145
3148
  case 9:
3146
3149
  usage = addUsage.apply(void 0, __spreadArray([], __read(executionReport.promptExecutions.map(function (_a) {
@@ -3155,6 +3158,7 @@
3155
3158
  usage: usage,
3156
3159
  executionReport: executionReport,
3157
3160
  outputParameters: outputParameters,
3161
+ preparedPipeline: preparedPipeline,
3158
3162
  })];
3159
3163
  }
3160
3164
  });
@@ -3162,8 +3166,6 @@
3162
3166
  return pipelineExecutor;
3163
3167
  }
3164
3168
  /**
3165
- * TODO: !!!!! return `preparedPipeline` from execution
3166
- * TODO: !!!!! `isNotPreparedWarningSupressed`
3167
3169
  * TODO: Use isVerbose here (not only pass to `preparePipeline`)
3168
3170
  * TODO: [🪂] Use maxParallelCount here (not only pass to `preparePipeline`)
3169
3171
  * TODO: [♈] Probably move expectations from templates to parameters
@@ -3223,6 +3225,7 @@
3223
3225
  outputParameters = result.outputParameters;
3224
3226
  knowledgePiecesRaw = outputParameters.knowledgePieces;
3225
3227
  knowledgeTextPieces = (knowledgePiecesRaw || '').split('\n---\n');
3228
+ // <- TODO: !!!!! Smarter split and filter out empty pieces
3226
3229
  if (isVerbose) {
3227
3230
  console.info('knowledgeTextPieces:', knowledgeTextPieces);
3228
3231
  }