@promptbook/cli 0.61.0-22 → 0.61.0-23

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -48,6 +48,7 @@ Then just use it:
48
48
 
49
49
  ```typescript
50
50
  import { createPipelineExecutor, assertsExecutionSuccessful } from '@promptbook/core';
51
+ import { createLlmToolsFromEnv } from '@promptbook/node';
51
52
  import { getPipelineCollection } from './promptbook-collection'; // <- Importing from pre-built library
52
53
  import { JavascriptExecutionTools } from '@promptbook/execute-javascript';
53
54
  import { OpenAiExecutionTools } from '@promptbook/openai';
@@ -59,10 +60,7 @@ const promptbook = await getPipelineCollection().getPipelineByUrl(
59
60
 
60
61
  // ▶ Prepare tools
61
62
  const tools = {
62
- llm: new OpenAiExecutionTools({
63
- isVerbose: true,
64
- apiKey: process.env.OPENAI_API_KEY,
65
- }),
63
+ llm: createLlmToolsFromEnv(),
66
64
  script: [new JavascriptExecutionTools()],
67
65
  };
68
66
 
package/esm/index.es.js CHANGED
@@ -150,7 +150,7 @@ new Function("\n try {\n if (typeof WorkerGlobalScope !== 'undefined'
150
150
  /**
151
151
  * The version of the Promptbook library
152
152
  */
153
- var PROMPTBOOK_VERSION = '0.61.0-21';
153
+ var PROMPTBOOK_VERSION = '0.61.0-22';
154
154
  // TODO: !!!! List here all the versions and annotate + put into script
155
155
 
156
156
  /**
@@ -279,19 +279,26 @@ var EXECUTIONS_CACHE_DIRNAME = '/.promptbook/executions-cache';
279
279
  * The name of the builded pipeline collection made by CLI `ptbk make` and for lookup in `createCollectionFromDirectory`
280
280
  */
281
281
  var PIPELINE_COLLECTION_BASE_FILENAME = "index";
282
+ /**
283
+ * Nonce which is used for replacing things in strings
284
+ */
285
+ var REPLACING_NONCE = 'u$k42k%!V2zo34w7Fu#@QUHYPW';
282
286
  /**
283
287
  * The names of the parameters that are reserved for special purposes
284
288
  */
285
289
  var RESERVED_PARAMETER_NAMES = deepFreeze([
286
290
  'context',
291
+ 'knowledge',
292
+ 'samples',
293
+ 'modelName',
287
294
  'currentDate',
288
295
  // <- TODO: Add more like 'date', 'modelName',...
289
296
  // <- TODO: Add [emoji] + instructions ACRY when adding new reserved parameter
290
297
  ]);
291
298
  /**
292
- * Nonce which is used for replacing things in strings
299
+ * @@@
293
300
  */
294
- var REPLACING_NONCE = 'u$k42k%!V2zo34w7Fu#@QUHYPW';
301
+ var RESERVED_PARAMETER_MISSING_VALUE = 'MISSING-' + REPLACING_NONCE;
295
302
  /*
296
303
  TODO: !!! Just testing false-negative detection of [🟡][🟢][🔵][⚪] leak
297
304
  */
@@ -735,7 +742,7 @@ function forEachAsync(array, options, callbackfunction) {
735
742
  });
736
743
  }
737
744
 
738
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-21",parameters:[{name:"content",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledge",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {content}",dependentParameterNames:["content"],resultingParameterName:"knowledge"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-21",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0-21",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by 
comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {content}",dependentParameterNames:["content"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-21",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0-21",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> 
{content}",expectations:{words:{min:1,max:8}},dependentParameterNames:["content"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-21",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0-21",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. 
The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-21",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
745
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-22",parameters:[{name:"content",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {content}",dependentParameterNames:["content"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-22",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0-22",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by 
comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {content}",dependentParameterNames:["content"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-22",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0-22",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> 
{content}",expectations:{words:{min:1,max:8}},dependentParameterNames:["content"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-22",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0-22",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. 
The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-22",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
739
746
 
740
747
  /**
741
748
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
@@ -1003,7 +1010,7 @@ function validatePipeline(pipeline) {
1003
1010
  throw new PipelineLogicError("Parameter {".concat(template.resultingParameterName, "} is defined multiple times"));
1004
1011
  }
1005
1012
  if (RESERVED_PARAMETER_NAMES.includes(template.resultingParameterName)) {
1006
- throw new PipelineLogicError("Parameter name {".concat(template.resultingParameterName, "} is reserved, please use fifferent name"));
1013
+ throw new PipelineLogicError("Parameter name {".concat(template.resultingParameterName, "} is reserved, please use different name"));
1007
1014
  }
1008
1015
  definedParameters.add(template.resultingParameterName);
1009
1016
  if (template.blockType === 'PROMPT_TEMPLATE' && template.modelRequirements.modelVariant === undefined) {
@@ -1180,6 +1187,7 @@ function unpreparePipeline(pipeline) {
1180
1187
  }
1181
1188
  /**
1182
1189
  * TODO: [🔼] !!! Export via `@promptbook/core`
1190
+ * TODO: [🧿] Maybe do same process with same granularity and subfunctions as `preparePipeline`
1183
1191
  * TODO: Write tests for `preparePipeline`
1184
1192
  */
1185
1193
 
@@ -2082,22 +2090,21 @@ function isPipelinePrepared(pipeline) {
2082
2090
  // Note: Ignoring `pipeline.preparations` @@@
2083
2091
  // Note: Ignoring `pipeline.knowledgePieces` @@@
2084
2092
  if (!pipeline.personas.every(function (persona) { return persona.modelRequirements !== undefined; })) {
2085
- console.log('!!!!', 'Not all personas have modelRequirements');
2086
2093
  return false;
2087
2094
  }
2088
2095
  if (!pipeline.knowledgeSources.every(function (knowledgeSource) { return knowledgeSource.preparationIds !== undefined; })) {
2089
- console.log('!!!!', 'Not all knowledgeSources have preparationIds');
2090
2096
  return false;
2091
2097
  }
2092
- // TODO: !!!!! Is context in each template
2093
- // TODO: !!!!! Are samples prepared
2094
- // TODO: !!!!! Are templates prepared
2095
2098
  return true;
2096
2099
  }
2097
2100
  /**
2098
2101
  * TODO: [🐠] Maybe base this on `makeValidator`
2099
2102
  * TODO: [🔼] Export via core or utils
2100
2103
  * TODO: [🧊] Pipeline can be partially prepared, this should return true ONLY if fully prepared
2104
+ * TODO: [🧿] Maybe do same process with same granularity and subfunctions as `preparePipeline`
2105
+ * - Is context in each template
2106
+ * - Are samples prepared
2107
+ * - Are templates prepared
2101
2108
  */
2102
2109
 
2103
2110
  /**
@@ -2159,6 +2166,22 @@ var LimitReachedError = /** @class */ (function (_super) {
2159
2166
  * @throws {PipelineExecutionError} if parameter is not defined, not closed, or not opened
2160
2167
  */
2161
2168
  function replaceParameters(template, parameters) {
2169
+ var e_1, _a;
2170
+ try {
2171
+ for (var _b = __values(Object.entries(parameters)), _c = _b.next(); !_c.done; _c = _b.next()) {
2172
+ var _d = __read(_c.value, 2), parameterName = _d[0], parameterValue = _d[1];
2173
+ if (parameterValue === RESERVED_PARAMETER_MISSING_VALUE) {
2174
+ throw new UnexpectedError("Parameter {".concat(parameterName, "} has missing value"));
2175
+ }
2176
+ }
2177
+ }
2178
+ catch (e_1_1) { e_1 = { error: e_1_1 }; }
2179
+ finally {
2180
+ try {
2181
+ if (_c && !_c.done && (_a = _b.return)) _a.call(_b);
2182
+ }
2183
+ finally { if (e_1) throw e_1.error; }
2184
+ }
2162
2185
  var replacedTemplate = template;
2163
2186
  var match;
2164
2187
  var loopLimit = LOOP_LIMIT;
@@ -2411,11 +2434,21 @@ function createPipelineExecutor(options) {
2411
2434
  console.warn(spaceTrim$1("\n Pipeline ".concat(rawPipeline.pipelineUrl || rawPipeline.sourceFile || rawPipeline.title, " is not prepared\n\n It will be prepared ad-hoc before the first execution\n But it is recommended to prepare the pipeline during collection preparation\n\n @see more at https://ptbk.io/prepare-pipeline\n ")));
2412
2435
  }
2413
2436
  var pipelineExecutor = function (inputParameters, onProgress) { return __awaiter(_this, void 0, void 0, function () {
2437
+ // TODO: !!!!! Extract to separate functions and files - ALL FUNCTIONS BELOW
2414
2438
  function getContextForTemplate(// <- TODO: [🧠][🥜]
2415
2439
  template) {
2416
2440
  return __awaiter(this, void 0, void 0, function () {
2417
2441
  return __generator(this, function (_a) {
2418
- // TODO: !!!!!! Implement Better - use real index and keyword search
2442
+ TODO_USE(template);
2443
+ return [2 /*return*/, ''];
2444
+ });
2445
+ });
2446
+ }
2447
+ function getKnowledgeForTemplate(// <- TODO: [🧠][🥜]
2448
+ template) {
2449
+ return __awaiter(this, void 0, void 0, function () {
2450
+ return __generator(this, function (_a) {
2451
+ // TODO: !!!! Implement Better - use real index and keyword search
2419
2452
  TODO_USE(template);
2420
2453
  return [2 /*return*/, pipeline.knowledgePieces.map(function (_a) {
2421
2454
  var content = _a.content;
@@ -2424,19 +2457,39 @@ function createPipelineExecutor(options) {
2424
2457
  });
2425
2458
  });
2426
2459
  }
2460
+ function getSamplesForTemplate(// <- TODO: [🧠][🥜]
2461
+ template) {
2462
+ return __awaiter(this, void 0, void 0, function () {
2463
+ return __generator(this, function (_a) {
2464
+ // TODO: !!!! Implement Better - use real index and keyword search
2465
+ TODO_USE(template);
2466
+ return [2 /*return*/, ''];
2467
+ });
2468
+ });
2469
+ }
2427
2470
  function getReservedParametersForTemplate(template) {
2428
2471
  return __awaiter(this, void 0, void 0, function () {
2429
- var context, currentDate, reservedParameters, RESERVED_PARAMETER_NAMES_1, RESERVED_PARAMETER_NAMES_1_1, parameterName;
2472
+ var context, knowledge, samples, currentDate, modelName, reservedParameters, RESERVED_PARAMETER_NAMES_1, RESERVED_PARAMETER_NAMES_1_1, parameterName;
2430
2473
  var e_3, _a;
2431
2474
  return __generator(this, function (_b) {
2432
2475
  switch (_b.label) {
2433
2476
  case 0: return [4 /*yield*/, getContextForTemplate(template)];
2434
2477
  case 1:
2435
2478
  context = _b.sent();
2479
+ return [4 /*yield*/, getKnowledgeForTemplate(template)];
2480
+ case 2:
2481
+ knowledge = _b.sent();
2482
+ return [4 /*yield*/, getSamplesForTemplate(template)];
2483
+ case 3:
2484
+ samples = _b.sent();
2436
2485
  currentDate = new Date().toISOString();
2486
+ modelName = RESERVED_PARAMETER_MISSING_VALUE;
2437
2487
  reservedParameters = {
2438
2488
  context: context,
2489
+ knowledge: knowledge,
2490
+ samples: samples,
2439
2491
  currentDate: currentDate,
2492
+ modelName: modelName,
2440
2493
  };
2441
2494
  try {
2442
2495
  // Note: Doublecheck that ALL reserved parameters are defined:
@@ -2923,7 +2976,7 @@ function createPipelineExecutor(options) {
2923
2976
  var parameter = _c.value;
2924
2977
  if (parametersToPass[parameter.name] === undefined) {
2925
2978
  // [4]
2926
- errors.push(new PipelineExecutionError("Parameter {".concat(parameter.name, "} should be an output parameter, but it was not be resolved")));
2979
+ warnings.push(new PipelineExecutionError("Parameter {".concat(parameter.name, "} should be an output parameter, but it was not resolved")));
2927
2980
  continue;
2928
2981
  }
2929
2982
  outputParameters[parameter.name] = parametersToPass[parameter.name] || '';
@@ -2938,7 +2991,7 @@ function createPipelineExecutor(options) {
2938
2991
  }
2939
2992
  return outputParameters;
2940
2993
  }
2941
- var executionReport, _a, _b, parameter, errors, _loop_1, _c, _d, parameterName, state_1, parametersToPass, resovedParameterNames_1, unresovedTemplates_1, resolving_1, loopLimit, _loop_2, error_1, usage_1, outputParameters_1, usage, outputParameters;
2994
+ var executionReport, _a, _b, parameter, errors, warnings, _loop_1, _c, _d, parameterName, state_1, parametersToPass, resovedParameterNames_1, unresovedTemplates_1, resolving_1, loopLimit, _loop_2, error_1, usage_1, outputParameters_1, usage, outputParameters;
2942
2995
  var e_1, _e, e_2, _f;
2943
2996
  return __generator(this, function (_g) {
2944
2997
  switch (_g.label) {
@@ -2973,8 +3026,8 @@ function createPipelineExecutor(options) {
2973
3026
  isSuccessful: false,
2974
3027
  errors: [
2975
3028
  new PipelineExecutionError("Parameter {".concat(parameter.name, "} is required as an input parameter")),
2976
- // <- TODO: !!!!! Test this error
2977
3029
  ],
3030
+ warnings: [],
2978
3031
  executionReport: executionReport,
2979
3032
  outputParameters: {},
2980
3033
  usage: ZERO_USAGE,
@@ -2990,21 +3043,22 @@ function createPipelineExecutor(options) {
2990
3043
  finally { if (e_1) throw e_1.error; }
2991
3044
  }
2992
3045
  errors = [];
3046
+ warnings = [];
2993
3047
  _loop_1 = function (parameterName) {
2994
3048
  var parameter = pipeline.parameters.find(function (_a) {
2995
3049
  var name = _a.name;
2996
3050
  return name === parameterName;
2997
3051
  });
2998
3052
  if (parameter === undefined) {
2999
- errors.push(new PipelineExecutionError("Extra parameter {".concat(parameterName, "} is passed as input parameter")));
3053
+ warnings.push(new PipelineExecutionError("Extra parameter {".concat(parameterName, "} is being passed which is not part of the pipeline.")));
3000
3054
  }
3001
3055
  else if (parameter.isInput === false) {
3002
3056
  return { value: deepFreezeWithSameType({
3003
3057
  isSuccessful: false,
3004
3058
  errors: [
3005
- new PipelineExecutionError("Parameter {".concat(parameter.name, "} is passed as input parameter but is not input")),
3006
- // <- TODO: !!!!! Test this error
3059
+ new PipelineExecutionError("Parameter {".concat(parameter.name, "} is passed as an input parameter but it is not an input")),
3007
3060
  ],
3061
+ warnings: warnings,
3008
3062
  executionReport: executionReport,
3009
3063
  outputParameters: {},
3010
3064
  usage: ZERO_USAGE,
@@ -3113,6 +3167,7 @@ function createPipelineExecutor(options) {
3113
3167
  return [2 /*return*/, deepFreezeWithSameType({
3114
3168
  isSuccessful: false,
3115
3169
  errors: __spreadArray([error_1], __read(errors), false),
3170
+ warnings: warnings,
3116
3171
  usage: usage_1,
3117
3172
  executionReport: executionReport,
3118
3173
  outputParameters: outputParameters_1,
@@ -3126,6 +3181,7 @@ function createPipelineExecutor(options) {
3126
3181
  return [2 /*return*/, deepFreezeWithSameType({
3127
3182
  isSuccessful: true,
3128
3183
  errors: errors,
3184
+ warnings: warnings,
3129
3185
  usage: usage,
3130
3186
  executionReport: executionReport,
3131
3187
  outputParameters: outputParameters,
@@ -3152,7 +3208,7 @@ function createPipelineExecutor(options) {
3152
3208
  */
3153
3209
  function prepareKnowledgeFromMarkdown(content /* <- TODO: [🖖] (?maybe not) Always the file */, options) {
3154
3210
  return __awaiter(this, void 0, void 0, function () {
3155
- var llmTools, _a, maxParallelCount, _b, isVerbose, collection, prepareKnowledgeFromMarkdownExecutor, _c, prepareTitleExecutor, _d, prepareKeywordsExecutor, _e, result, outputParameters, knowledgeRaw, knowledgeTextPieces, knowledge;
3211
+ var llmTools, _a, maxParallelCount, _b, isVerbose, collection, prepareKnowledgeFromMarkdownExecutor, _c, prepareTitleExecutor, _d, prepareKeywordsExecutor, _e, result, outputParameters, knowledgePiecesRaw, knowledgeTextPieces, knowledge;
3156
3212
  var _f, _g, _h;
3157
3213
  var _this = this;
3158
3214
  return __generator(this, function (_j) {
@@ -3193,8 +3249,8 @@ function prepareKnowledgeFromMarkdown(content /* <- TODO: [🖖] (?maybe not) Al
3193
3249
  result = _j.sent();
3194
3250
  assertsExecutionSuccessful(result);
3195
3251
  outputParameters = result.outputParameters;
3196
- knowledgeRaw = outputParameters.knowledge;
3197
- knowledgeTextPieces = (knowledgeRaw || '').split('\n---\n');
3252
+ knowledgePiecesRaw = outputParameters.knowledgePieces;
3253
+ knowledgeTextPieces = (knowledgePiecesRaw || '').split('\n---\n');
3198
3254
  if (isVerbose) {
3199
3255
  console.info('knowledgeTextPieces:', knowledgeTextPieces);
3200
3256
  }
@@ -3462,7 +3518,8 @@ function preparePipeline(pipeline, options) {
3462
3518
  knowledgePiecesPrepared = partialknowledgePiecesPrepared.map(function (piece) { return (__assign(__assign({}, piece), { preparationIds: [/* TODO: [🧊] -> */ currentPreparation.id] })); });
3463
3519
  // ----- /Knowledge preparation -----
3464
3520
  // TODO: !!!!! Add context to each template (if missing)
3465
- // TODO: !!!!! Apply samples to each template (if missing)
3521
+ // TODO: !!!!! Add knowledge to each template (if missing and is in pipeline defined)
3522
+ // TODO: !!!!! Apply samples to each template (if missing and is for the template defined)
3466
3523
  return [2 /*return*/, __assign(__assign({}, pipeline), { knowledgeSources: knowledgeSourcesPrepared, knowledgePieces: knowledgePiecesPrepared, personas: preparedPersonas, preparations: preparations })];
3467
3524
  }
3468
3525
  });
@@ -7531,7 +7588,7 @@ function initializePrettifyCommand(program) {
7531
7588
  prettifyCommand.action(function (filesGlob, _a) {
7532
7589
  var ignore = _a.ignore;
7533
7590
  return __awaiter(_this, void 0, void 0, function () {
7534
- var filePaths, filePaths_1, filePaths_1_1, filePath, promptbookMarkdown, error_1, e_1_1;
7591
+ var filePaths, filePaths_1, filePaths_1_1, filePath, pipelineMarkdown, error_1, e_1_1;
7535
7592
  var e_1, _b;
7536
7593
  return __generator(this, function (_c) {
7537
7594
  switch (_c.label) {
@@ -7552,18 +7609,18 @@ function initializePrettifyCommand(program) {
7552
7609
  }
7553
7610
  return [4 /*yield*/, readFile(filePath, 'utf-8')];
7554
7611
  case 4:
7555
- promptbookMarkdown = (_c.sent());
7612
+ pipelineMarkdown = (_c.sent());
7556
7613
  _c.label = 5;
7557
7614
  case 5:
7558
7615
  _c.trys.push([5, 8, , 9]);
7559
- return [4 /*yield*/, prettifyPipelineString(promptbookMarkdown, {
7616
+ return [4 /*yield*/, prettifyPipelineString(pipelineMarkdown, {
7560
7617
  isGraphAdded: true,
7561
7618
  isPrettifyed: true,
7562
7619
  // <- [🕌]
7563
7620
  })];
7564
7621
  case 6:
7565
- promptbookMarkdown = _c.sent();
7566
- return [4 /*yield*/, writeFile(filePath, promptbookMarkdown)];
7622
+ pipelineMarkdown = _c.sent();
7623
+ return [4 /*yield*/, writeFile(filePath, pipelineMarkdown)];
7567
7624
  case 7:
7568
7625
  _c.sent();
7569
7626
  console.info(colors.green("Prettify ".concat(filePath)));