@promptbook/cli 0.61.0-21 → 0.61.0-23

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -48,6 +48,7 @@ Then just use it:
48
48
 
49
49
  ```typescript
50
50
  import { createPipelineExecutor, assertsExecutionSuccessful } from '@promptbook/core';
51
+ import { createLlmToolsFromEnv } from '@promptbook/node';
51
52
  import { getPipelineCollection } from './promptbook-collection'; // <- Importing from pre-built library
52
53
  import { JavascriptExecutionTools } from '@promptbook/execute-javascript';
53
54
  import { OpenAiExecutionTools } from '@promptbook/openai';
@@ -59,10 +60,7 @@ const promptbook = await getPipelineCollection().getPipelineByUrl(
59
60
 
60
61
  // ▶ Prepare tools
61
62
  const tools = {
62
- llm: new OpenAiExecutionTools({
63
- isVerbose: true,
64
- apiKey: process.env.OPENAI_API_KEY,
65
- }),
63
+ llm: createLlmToolsFromEnv(),
66
64
  script: [new JavascriptExecutionTools()],
67
65
  };
68
66
 
package/esm/index.es.js CHANGED
@@ -150,7 +150,7 @@ new Function("\n try {\n if (typeof WorkerGlobalScope !== 'undefined'
150
150
  /**
151
151
  * The version of the Promptbook library
152
152
  */
153
- var PROMPTBOOK_VERSION = '0.61.0-20';
153
+ var PROMPTBOOK_VERSION = '0.61.0-22';
154
154
  // TODO: !!!! List here all the versions and annotate + put into script
155
155
 
156
156
  /**
@@ -279,19 +279,26 @@ var EXECUTIONS_CACHE_DIRNAME = '/.promptbook/executions-cache';
279
279
  * The name of the builded pipeline collection made by CLI `ptbk make` and for lookup in `createCollectionFromDirectory`
280
280
  */
281
281
  var PIPELINE_COLLECTION_BASE_FILENAME = "index";
282
+ /**
283
+ * Nonce which is used for replacing things in strings
284
+ */
285
+ var REPLACING_NONCE = 'u$k42k%!V2zo34w7Fu#@QUHYPW';
282
286
  /**
283
287
  * The names of the parameters that are reserved for special purposes
284
288
  */
285
289
  var RESERVED_PARAMETER_NAMES = deepFreeze([
286
290
  'context',
291
+ 'knowledge',
292
+ 'samples',
293
+ 'modelName',
287
294
  'currentDate',
288
295
  // <- TODO: Add more like 'date', 'modelName',...
289
296
  // <- TODO: Add [emoji] + instructions ACRY when adding new reserved parameter
290
297
  ]);
291
298
  /**
292
- * Nonce which is used for replacing things in strings
299
+ * @@@
293
300
  */
294
- var REPLACING_NONCE = 'u$k42k%!V2zo34w7Fu#@QUHYPW';
301
+ var RESERVED_PARAMETER_MISSING_VALUE = 'MISSING-' + REPLACING_NONCE;
295
302
  /*
296
303
  TODO: !!! Just testing false-negative detection of [🟡][🟢][🔵][⚪] leak
297
304
  */
@@ -735,7 +742,7 @@ function forEachAsync(array, options, callbackfunction) {
735
742
  });
736
743
  }
737
744
 
738
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-20",parameters:[{name:"content",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledge",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {content}",dependentParameterNames:["content"],resultingParameterName:"knowledge"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-20",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0-20",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by 
comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {content}",dependentParameterNames:["content"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-20",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0-20",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> 
{content}",expectations:{words:{min:1,max:8}},dependentParameterNames:["content"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-20",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0-20",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. 
The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-20",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
745
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-22",parameters:[{name:"content",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {content}",dependentParameterNames:["content"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-22",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0-22",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by 
comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {content}",dependentParameterNames:["content"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-22",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0-22",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> 
{content}",expectations:{words:{min:1,max:8}},dependentParameterNames:["content"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-22",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0-22",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. 
The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-22",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
739
746
 
740
747
  /**
741
748
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
@@ -1003,7 +1010,7 @@ function validatePipeline(pipeline) {
1003
1010
  throw new PipelineLogicError("Parameter {".concat(template.resultingParameterName, "} is defined multiple times"));
1004
1011
  }
1005
1012
  if (RESERVED_PARAMETER_NAMES.includes(template.resultingParameterName)) {
1006
- throw new PipelineLogicError("Parameter name {".concat(template.resultingParameterName, "} is reserved, please use fifferent name"));
1013
+ throw new PipelineLogicError("Parameter name {".concat(template.resultingParameterName, "} is reserved, please use different name"));
1007
1014
  }
1008
1015
  definedParameters.add(template.resultingParameterName);
1009
1016
  if (template.blockType === 'PROMPT_TEMPLATE' && template.modelRequirements.modelVariant === undefined) {
@@ -1180,6 +1187,7 @@ function unpreparePipeline(pipeline) {
1180
1187
  }
1181
1188
  /**
1182
1189
  * TODO: [🔼] !!! Export via `@promptbook/core`
1190
+ * TODO: [🧿] Maybe do same process with same granularity and subfunctions as `preparePipeline`
1183
1191
  * TODO: Write tests for `preparePipeline`
1184
1192
  */
1185
1193
 
@@ -1659,7 +1667,11 @@ function assertsExecutionSuccessful(executionResult) {
1659
1667
  throw errors[0];
1660
1668
  }
1661
1669
  else {
1662
- throw new PipelineExecutionError(spaceTrim$1(function (block) { return "\n Multiple errors occurred during promptnook execution\n\n ".concat(block(errors.map(function (error) { return '- ' + error.message; }).join('\n')), "\n "); }));
1670
+ throw new PipelineExecutionError(spaceTrim$1(function (block) { return "\n Multiple errors occurred during promptnook execution\n\n ".concat(block(errors
1671
+ .map(function (error, index) {
1672
+ return spaceTrim$1(function (block) { return "\n Error ".concat(index + 1, ":\n ").concat(block(error.stack || error.message), "\n "); });
1673
+ })
1674
+ .join('\n')), "\n "); }));
1663
1675
  }
1664
1676
  }
1665
1677
  /**
@@ -2078,22 +2090,21 @@ function isPipelinePrepared(pipeline) {
2078
2090
  // Note: Ignoring `pipeline.preparations` @@@
2079
2091
  // Note: Ignoring `pipeline.knowledgePieces` @@@
2080
2092
  if (!pipeline.personas.every(function (persona) { return persona.modelRequirements !== undefined; })) {
2081
- console.log('!!!!', 'Not all personas have modelRequirements');
2082
2093
  return false;
2083
2094
  }
2084
2095
  if (!pipeline.knowledgeSources.every(function (knowledgeSource) { return knowledgeSource.preparationIds !== undefined; })) {
2085
- console.log('!!!!', 'Not all knowledgeSources have preparationIds');
2086
2096
  return false;
2087
2097
  }
2088
- // TODO: !!!!! Is context in each template
2089
- // TODO: !!!!! Are samples prepared
2090
- // TODO: !!!!! Are templates prepared
2091
2098
  return true;
2092
2099
  }
2093
2100
  /**
2094
2101
  * TODO: [🐠] Maybe base this on `makeValidator`
2095
2102
  * TODO: [🔼] Export via core or utils
2096
2103
  * TODO: [🧊] Pipeline can be partially prepared, this should return true ONLY if fully prepared
2104
+ * TODO: [🧿] Maybe do same process with same granularity and subfunctions as `preparePipeline`
2105
+ * - Is context in each template
2106
+ * - Are samples prepared
2107
+ * - Are templates prepared
2097
2108
  */
2098
2109
 
2099
2110
  /**
@@ -2155,6 +2166,22 @@ var LimitReachedError = /** @class */ (function (_super) {
2155
2166
  * @throws {PipelineExecutionError} if parameter is not defined, not closed, or not opened
2156
2167
  */
2157
2168
  function replaceParameters(template, parameters) {
2169
+ var e_1, _a;
2170
+ try {
2171
+ for (var _b = __values(Object.entries(parameters)), _c = _b.next(); !_c.done; _c = _b.next()) {
2172
+ var _d = __read(_c.value, 2), parameterName = _d[0], parameterValue = _d[1];
2173
+ if (parameterValue === RESERVED_PARAMETER_MISSING_VALUE) {
2174
+ throw new UnexpectedError("Parameter {".concat(parameterName, "} has missing value"));
2175
+ }
2176
+ }
2177
+ }
2178
+ catch (e_1_1) { e_1 = { error: e_1_1 }; }
2179
+ finally {
2180
+ try {
2181
+ if (_c && !_c.done && (_a = _b.return)) _a.call(_b);
2182
+ }
2183
+ finally { if (e_1) throw e_1.error; }
2184
+ }
2158
2185
  var replacedTemplate = template;
2159
2186
  var match;
2160
2187
  var loopLimit = LOOP_LIMIT;
@@ -2407,11 +2434,21 @@ function createPipelineExecutor(options) {
2407
2434
  console.warn(spaceTrim$1("\n Pipeline ".concat(rawPipeline.pipelineUrl || rawPipeline.sourceFile || rawPipeline.title, " is not prepared\n\n It will be prepared ad-hoc before the first execution\n But it is recommended to prepare the pipeline during collection preparation\n\n @see more at https://ptbk.io/prepare-pipeline\n ")));
2408
2435
  }
2409
2436
  var pipelineExecutor = function (inputParameters, onProgress) { return __awaiter(_this, void 0, void 0, function () {
2437
+ // TODO: !!!!! Extract to separate functions and files - ALL FUNCTIONS BELOW
2410
2438
  function getContextForTemplate(// <- TODO: [🧠][🥜]
2411
2439
  template) {
2412
2440
  return __awaiter(this, void 0, void 0, function () {
2413
2441
  return __generator(this, function (_a) {
2414
- // TODO: !!!!!! Implement Better - use real index and keyword search
2442
+ TODO_USE(template);
2443
+ return [2 /*return*/, ''];
2444
+ });
2445
+ });
2446
+ }
2447
+ function getKnowledgeForTemplate(// <- TODO: [🧠][🥜]
2448
+ template) {
2449
+ return __awaiter(this, void 0, void 0, function () {
2450
+ return __generator(this, function (_a) {
2451
+ // TODO: !!!! Implement Better - use real index and keyword search
2415
2452
  TODO_USE(template);
2416
2453
  return [2 /*return*/, pipeline.knowledgePieces.map(function (_a) {
2417
2454
  var content = _a.content;
@@ -2420,19 +2457,39 @@ function createPipelineExecutor(options) {
2420
2457
  });
2421
2458
  });
2422
2459
  }
2460
+ function getSamplesForTemplate(// <- TODO: [🧠][🥜]
2461
+ template) {
2462
+ return __awaiter(this, void 0, void 0, function () {
2463
+ return __generator(this, function (_a) {
2464
+ // TODO: !!!! Implement Better - use real index and keyword search
2465
+ TODO_USE(template);
2466
+ return [2 /*return*/, ''];
2467
+ });
2468
+ });
2469
+ }
2423
2470
  function getReservedParametersForTemplate(template) {
2424
2471
  return __awaiter(this, void 0, void 0, function () {
2425
- var context, currentDate, reservedParameters, RESERVED_PARAMETER_NAMES_1, RESERVED_PARAMETER_NAMES_1_1, parameterName;
2472
+ var context, knowledge, samples, currentDate, modelName, reservedParameters, RESERVED_PARAMETER_NAMES_1, RESERVED_PARAMETER_NAMES_1_1, parameterName;
2426
2473
  var e_3, _a;
2427
2474
  return __generator(this, function (_b) {
2428
2475
  switch (_b.label) {
2429
2476
  case 0: return [4 /*yield*/, getContextForTemplate(template)];
2430
2477
  case 1:
2431
2478
  context = _b.sent();
2479
+ return [4 /*yield*/, getKnowledgeForTemplate(template)];
2480
+ case 2:
2481
+ knowledge = _b.sent();
2482
+ return [4 /*yield*/, getSamplesForTemplate(template)];
2483
+ case 3:
2484
+ samples = _b.sent();
2432
2485
  currentDate = new Date().toISOString();
2486
+ modelName = RESERVED_PARAMETER_MISSING_VALUE;
2433
2487
  reservedParameters = {
2434
2488
  context: context,
2489
+ knowledge: knowledge,
2490
+ samples: samples,
2435
2491
  currentDate: currentDate,
2492
+ modelName: modelName,
2436
2493
  };
2437
2494
  try {
2438
2495
  // Note: Doublecheck that ALL reserved parameters are defined:
@@ -2919,7 +2976,7 @@ function createPipelineExecutor(options) {
2919
2976
  var parameter = _c.value;
2920
2977
  if (parametersToPass[parameter.name] === undefined) {
2921
2978
  // [4]
2922
- errors.push(new PipelineExecutionError("Parameter {".concat(parameter.name, "} is required as an output parameter but not set in the pipeline")));
2979
+ warnings.push(new PipelineExecutionError("Parameter {".concat(parameter.name, "} should be an output parameter, but it was not be resolved")));
2923
2980
  continue;
2924
2981
  }
2925
2982
  outputParameters[parameter.name] = parametersToPass[parameter.name] || '';
@@ -2934,7 +2991,7 @@ function createPipelineExecutor(options) {
2934
2991
  }
2935
2992
  return outputParameters;
2936
2993
  }
2937
- var executionReport, _a, _b, parameter, errors, _loop_1, _c, _d, parameterName, state_1, parametersToPass, resovedParameters_1, unresovedTemplates_1, resolving_1, loopLimit, _loop_2, error_1, usage_1, outputParameters_1, usage, outputParameters;
2994
+ var executionReport, _a, _b, parameter, errors, warnings, _loop_1, _c, _d, parameterName, state_1, parametersToPass, resovedParameterNames_1, unresovedTemplates_1, resolving_1, loopLimit, _loop_2, error_1, usage_1, outputParameters_1, usage, outputParameters;
2938
2995
  var e_1, _e, e_2, _f;
2939
2996
  return __generator(this, function (_g) {
2940
2997
  switch (_g.label) {
@@ -2969,8 +3026,8 @@ function createPipelineExecutor(options) {
2969
3026
  isSuccessful: false,
2970
3027
  errors: [
2971
3028
  new PipelineExecutionError("Parameter {".concat(parameter.name, "} is required as an input parameter")),
2972
- // <- TODO: !!!!! Test this error
2973
3029
  ],
3030
+ warnings: [],
2974
3031
  executionReport: executionReport,
2975
3032
  outputParameters: {},
2976
3033
  usage: ZERO_USAGE,
@@ -2986,21 +3043,22 @@ function createPipelineExecutor(options) {
2986
3043
  finally { if (e_1) throw e_1.error; }
2987
3044
  }
2988
3045
  errors = [];
3046
+ warnings = [];
2989
3047
  _loop_1 = function (parameterName) {
2990
3048
  var parameter = pipeline.parameters.find(function (_a) {
2991
3049
  var name = _a.name;
2992
3050
  return name === parameterName;
2993
3051
  });
2994
3052
  if (parameter === undefined) {
2995
- errors.push(new PipelineExecutionError("Extra parameter {".concat(parameterName, "} is passed as input parameter")));
3053
+ warnings.push(new PipelineExecutionError("Extra parameter {".concat(parameterName, "} is being passed which is not part of the pipeline.")));
2996
3054
  }
2997
3055
  else if (parameter.isInput === false) {
2998
3056
  return { value: deepFreezeWithSameType({
2999
3057
  isSuccessful: false,
3000
3058
  errors: [
3001
- new PipelineExecutionError("Parameter {".concat(parameter.name, "} is passed as input parameter but is not input")),
3002
- // <- TODO: !!!!! Test this error
3059
+ new PipelineExecutionError("Parameter {".concat(parameter.name, "} is passed as input parameter but it is not input")),
3003
3060
  ],
3061
+ warnings: warnings,
3004
3062
  executionReport: executionReport,
3005
3063
  outputParameters: {},
3006
3064
  usage: ZERO_USAGE,
@@ -3027,7 +3085,7 @@ function createPipelineExecutor(options) {
3027
3085
  _g.label = 3;
3028
3086
  case 3:
3029
3087
  _g.trys.push([3, 8, , 9]);
3030
- resovedParameters_1 = pipeline.parameters
3088
+ resovedParameterNames_1 = pipeline.parameters
3031
3089
  .filter(function (_a) {
3032
3090
  var isInput = _a.isInput;
3033
3091
  return isInput;
@@ -3049,7 +3107,9 @@ function createPipelineExecutor(options) {
3049
3107
  throw new UnexpectedError('Loop limit reached during resolving parameters pipeline execution');
3050
3108
  }
3051
3109
  currentTemplate = unresovedTemplates_1.find(function (template) {
3052
- return template.dependentParameterNames.every(function (name) { return resovedParameters_1.includes(name); });
3110
+ return template.dependentParameterNames.every(function (name) {
3111
+ return __spreadArray(__spreadArray([], __read(resovedParameterNames_1), false), __read(RESERVED_PARAMETER_NAMES), false).includes(name);
3112
+ });
3053
3113
  });
3054
3114
  if (!(!currentTemplate && resolving_1.length === 0)) return [3 /*break*/, 1];
3055
3115
  throw new UnexpectedError(
@@ -3061,7 +3121,7 @@ function createPipelineExecutor(options) {
3061
3121
  .map(function (dependentParameterName) { return "{".concat(dependentParameterName, "}"); })
3062
3122
  .join(' and '));
3063
3123
  })
3064
- .join('\n')), "\n\n Resolved:\n ").concat(block(resovedParameters_1.map(function (name) { return "- Parameter {".concat(name, "}"); }).join('\n')), "\n\n Note: This should be catched in `validatePipeline`\n "); }));
3124
+ .join('\n')), "\n\n Resolved:\n ").concat(block(resovedParameterNames_1.map(function (name) { return "- Parameter {".concat(name, "}"); }).join('\n')), "\n\n Note: This should be catched in `validatePipeline`\n "); }));
3065
3125
  case 1:
3066
3126
  if (!!currentTemplate) return [3 /*break*/, 3];
3067
3127
  /* [5] */ return [4 /*yield*/, Promise.race(resolving_1)];
@@ -3072,7 +3132,7 @@ function createPipelineExecutor(options) {
3072
3132
  unresovedTemplates_1 = unresovedTemplates_1.filter(function (template) { return template !== currentTemplate; });
3073
3133
  work_1 = executeSingleTemplate(currentTemplate)
3074
3134
  .then(function () {
3075
- resovedParameters_1 = __spreadArray(__spreadArray([], __read(resovedParameters_1), false), [currentTemplate.resultingParameterName], false);
3135
+ resovedParameterNames_1 = __spreadArray(__spreadArray([], __read(resovedParameterNames_1), false), [currentTemplate.resultingParameterName], false);
3076
3136
  })
3077
3137
  .then(function () {
3078
3138
  resolving_1 = resolving_1.filter(function (w) { return w !== work_1; });
@@ -3107,6 +3167,7 @@ function createPipelineExecutor(options) {
3107
3167
  return [2 /*return*/, deepFreezeWithSameType({
3108
3168
  isSuccessful: false,
3109
3169
  errors: __spreadArray([error_1], __read(errors), false),
3170
+ warnings: warnings,
3110
3171
  usage: usage_1,
3111
3172
  executionReport: executionReport,
3112
3173
  outputParameters: outputParameters_1,
@@ -3120,6 +3181,7 @@ function createPipelineExecutor(options) {
3120
3181
  return [2 /*return*/, deepFreezeWithSameType({
3121
3182
  isSuccessful: true,
3122
3183
  errors: errors,
3184
+ warnings: warnings,
3123
3185
  usage: usage,
3124
3186
  executionReport: executionReport,
3125
3187
  outputParameters: outputParameters,
@@ -3146,7 +3208,7 @@ function createPipelineExecutor(options) {
3146
3208
  */
3147
3209
  function prepareKnowledgeFromMarkdown(content /* <- TODO: [🖖] (?maybe not) Always the file */, options) {
3148
3210
  return __awaiter(this, void 0, void 0, function () {
3149
- var llmTools, _a, maxParallelCount, _b, isVerbose, collection, prepareKnowledgeFromMarkdownExecutor, _c, prepareTitleExecutor, _d, prepareKeywordsExecutor, _e, result, outputParameters, knowledgeRaw, knowledgeTextPieces, knowledge;
3211
+ var llmTools, _a, maxParallelCount, _b, isVerbose, collection, prepareKnowledgeFromMarkdownExecutor, _c, prepareTitleExecutor, _d, prepareKeywordsExecutor, _e, result, outputParameters, knowledgePiecesRaw, knowledgeTextPieces, knowledge;
3150
3212
  var _f, _g, _h;
3151
3213
  var _this = this;
3152
3214
  return __generator(this, function (_j) {
@@ -3187,8 +3249,8 @@ function prepareKnowledgeFromMarkdown(content /* <- TODO: [🖖] (?maybe not) Al
3187
3249
  result = _j.sent();
3188
3250
  assertsExecutionSuccessful(result);
3189
3251
  outputParameters = result.outputParameters;
3190
- knowledgeRaw = outputParameters.knowledge;
3191
- knowledgeTextPieces = (knowledgeRaw || '').split('\n---\n');
3252
+ knowledgePiecesRaw = outputParameters.knowledgePieces;
3253
+ knowledgeTextPieces = (knowledgePiecesRaw || '').split('\n---\n');
3192
3254
  if (isVerbose) {
3193
3255
  console.info('knowledgeTextPieces:', knowledgeTextPieces);
3194
3256
  }
@@ -3456,7 +3518,8 @@ function preparePipeline(pipeline, options) {
3456
3518
  knowledgePiecesPrepared = partialknowledgePiecesPrepared.map(function (piece) { return (__assign(__assign({}, piece), { preparationIds: [/* TODO: [🧊] -> */ currentPreparation.id] })); });
3457
3519
  // ----- /Knowledge preparation -----
3458
3520
  // TODO: !!!!! Add context to each template (if missing)
3459
- // TODO: !!!!! Apply samples to each template (if missing)
3521
+ // TODO: !!!!! Add knowledge to each template (if missing and is in pipeline defined)
3522
+ // TODO: !!!!! Apply samples to each template (if missing and is for the template defined)
3460
3523
  return [2 /*return*/, __assign(__assign({}, pipeline), { knowledgeSources: knowledgeSourcesPrepared, knowledgePieces: knowledgePiecesPrepared, personas: preparedPersonas, preparations: preparations })];
3461
3524
  }
3462
3525
  });
@@ -7525,7 +7588,7 @@ function initializePrettifyCommand(program) {
7525
7588
  prettifyCommand.action(function (filesGlob, _a) {
7526
7589
  var ignore = _a.ignore;
7527
7590
  return __awaiter(_this, void 0, void 0, function () {
7528
- var filePaths, filePaths_1, filePaths_1_1, filePath, promptbookMarkdown, error_1, e_1_1;
7591
+ var filePaths, filePaths_1, filePaths_1_1, filePath, pipelineMarkdown, error_1, e_1_1;
7529
7592
  var e_1, _b;
7530
7593
  return __generator(this, function (_c) {
7531
7594
  switch (_c.label) {
@@ -7546,18 +7609,18 @@ function initializePrettifyCommand(program) {
7546
7609
  }
7547
7610
  return [4 /*yield*/, readFile(filePath, 'utf-8')];
7548
7611
  case 4:
7549
- promptbookMarkdown = (_c.sent());
7612
+ pipelineMarkdown = (_c.sent());
7550
7613
  _c.label = 5;
7551
7614
  case 5:
7552
7615
  _c.trys.push([5, 8, , 9]);
7553
- return [4 /*yield*/, prettifyPipelineString(promptbookMarkdown, {
7616
+ return [4 /*yield*/, prettifyPipelineString(pipelineMarkdown, {
7554
7617
  isGraphAdded: true,
7555
7618
  isPrettifyed: true,
7556
7619
  // <- [🕌]
7557
7620
  })];
7558
7621
  case 6:
7559
- promptbookMarkdown = _c.sent();
7560
- return [4 /*yield*/, writeFile(filePath, promptbookMarkdown)];
7622
+ pipelineMarkdown = _c.sent();
7623
+ return [4 /*yield*/, writeFile(filePath, pipelineMarkdown)];
7561
7624
  case 7:
7562
7625
  _c.sent();
7563
7626
  console.info(colors.green("Prettify ".concat(filePath)));