@promptbook/node 0.61.0-22 → 0.61.0-23

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/esm/index.es.js CHANGED
@@ -186,15 +186,26 @@ var MAX_EXECUTION_ATTEMPTS = 3;
  * The name of the builded pipeline collection made by CLI `ptbk make` and for lookup in `createCollectionFromDirectory`
  */
  var PIPELINE_COLLECTION_BASE_FILENAME = "index";
+ /**
+ * Nonce which is used for replacing things in strings
+ */
+ var REPLACING_NONCE = 'u$k42k%!V2zo34w7Fu#@QUHYPW';
  /**
  * The names of the parameters that are reserved for special purposes
  */
  var RESERVED_PARAMETER_NAMES = deepFreeze([
  'context',
+ 'knowledge',
+ 'samples',
+ 'modelName',
  'currentDate',
  // <- TODO: Add more like 'date', 'modelName',...
  // <- TODO: Add [emoji] + instructions ACRY when adding new reserved parameter
  ]);
+ /**
+ * @@@
+ */
+ var RESERVED_PARAMETER_MISSING_VALUE = 'MISSING-' + REPLACING_NONCE;
  /*
  TODO: !!! Just testing false-negative detection of [🟡][🟢][🔵][⚪] leak
  */
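
Note: the added `RESERVED_PARAMETER_MISSING_VALUE` pairs a `MISSING-` prefix with the new `REPLACING_NONCE`, giving an unguessable sentinel string for reserved parameters that have no real value yet. A minimal sketch of the idea in un-transpiled form (names taken from the diff above; the plain `Error` stands in for the package's own error class):

```js
// Sentinel built from the nonce, as in the diff above
const REPLACING_NONCE = 'u$k42k%!V2zo34w7Fu#@QUHYPW';
const RESERVED_PARAMETER_MISSING_VALUE = 'MISSING-' + REPLACING_NONCE;

// Any parameter still carrying the sentinel is treated as an internal error
// (this mirrors the guard added to `replaceParameters` later in this diff)
function assertNoMissingValues(parameters) {
    for (const [name, value] of Object.entries(parameters)) {
        if (value === RESERVED_PARAMETER_MISSING_VALUE) {
            throw new Error(`Parameter {${name}} has missing value`);
        }
    }
}
```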
@@ -638,7 +649,7 @@ function forEachAsync(array, options, callbackfunction) {
  });
  }

- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-21",parameters:[{name:"content",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledge",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {content}",dependentParameterNames:["content"],resultingParameterName:"knowledge"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-21",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0-21",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {content}",dependentParameterNames:["content"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-21",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0-21",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should 
be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {content}",expectations:{words:{min:1,max:8}},dependentParameterNames:["content"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-21",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0-21",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-21",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-22",parameters:[{name:"content",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {content}",dependentParameterNames:["content"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-22",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0-22",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {content}",dependentParameterNames:["content"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-22",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0-22",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- 
Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {content}",expectations:{words:{min:1,max:8}},dependentParameterNames:["content"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-22",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0-22",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-22",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];

  /**
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
@@ -906,7 +917,7 @@ function validatePipeline(pipeline) {
  throw new PipelineLogicError("Parameter {".concat(template.resultingParameterName, "} is defined multiple times"));
  }
  if (RESERVED_PARAMETER_NAMES.includes(template.resultingParameterName)) {
- throw new PipelineLogicError("Parameter name {".concat(template.resultingParameterName, "} is reserved, please use fifferent name"));
+ throw new PipelineLogicError("Parameter name {".concat(template.resultingParameterName, "} is reserved, please use different name"));
  }
  definedParameters.add(template.resultingParameterName);
  if (template.blockType === 'PROMPT_TEMPLATE' && template.modelRequirements.modelVariant === undefined) {
@@ -1083,6 +1094,7 @@ function unpreparePipeline(pipeline) {
  }
  /**
  * TODO: [🔼] !!! Export via `@promptbook/core`
+ * TODO: [🧿] Maybe do same process with same granularity and subfinctions as `preparePipeline`
  * TODO: Write tests for `preparePipeline`
  */

@@ -1985,22 +1997,21 @@ function isPipelinePrepared(pipeline) {
  // Note: Ignoring `pipeline.preparations` @@@
  // Note: Ignoring `pipeline.knowledgePieces` @@@
  if (!pipeline.personas.every(function (persona) { return persona.modelRequirements !== undefined; })) {
- console.log('!!!!', 'Not all personas have modelRequirements');
  return false;
  }
  if (!pipeline.knowledgeSources.every(function (knowledgeSource) { return knowledgeSource.preparationIds !== undefined; })) {
- console.log('!!!!', 'Not all knowledgeSources have preparationIds');
  return false;
  }
- // TODO: !!!!! Is context in each template
- // TODO: !!!!! Are samples prepared
- // TODO: !!!!! Are templates prepared
  return true;
  }
  /**
  * TODO: [🐠] Maybe base this on `makeValidator`
  * TODO: [🔼] Export via core or utils
  * TODO: [🧊] Pipeline can be partially prepared, this should return true ONLY if fully prepared
+ * TODO: [🧿] Maybe do same process with same granularity and subfinctions as `preparePipeline`
+ * - Is context in each template
+ * - Are samples prepared
+ * - Are templates prepared
  */

  /**
@@ -2062,6 +2073,22 @@ var LimitReachedError = /** @class */ (function (_super) {
  * @throws {PipelineExecutionError} if parameter is not defined, not closed, or not opened
  */
  function replaceParameters(template, parameters) {
+ var e_1, _a;
+ try {
+ for (var _b = __values(Object.entries(parameters)), _c = _b.next(); !_c.done; _c = _b.next()) {
+ var _d = __read(_c.value, 2), parameterName = _d[0], parameterValue = _d[1];
+ if (parameterValue === RESERVED_PARAMETER_MISSING_VALUE) {
+ throw new UnexpectedError("Parameter {".concat(parameterName, "} has missing value"));
+ }
+ }
+ }
+ catch (e_1_1) { e_1 = { error: e_1_1 }; }
+ finally {
+ try {
+ if (_c && !_c.done && (_a = _b.return)) _a.call(_b);
+ }
+ finally { if (e_1) throw e_1.error; }
+ }
  var replacedTemplate = template;
  var match;
  var loopLimit = LOOP_LIMIT;
@@ -2187,7 +2214,7 @@ function union() {
  /**
  * The version of the Promptbook library
  */
- var PROMPTBOOK_VERSION = '0.61.0-21';
+ var PROMPTBOOK_VERSION = '0.61.0-22';
  // TODO: !!!! List here all the versions and annotate + put into script

  /**
@@ -2320,11 +2347,21 @@ function createPipelineExecutor(options) {
  console.warn(spaceTrim$1("\n Pipeline ".concat(rawPipeline.pipelineUrl || rawPipeline.sourceFile || rawPipeline.title, " is not prepared\n\n It will be prepared ad-hoc before the first execution\n But it is recommended to prepare the pipeline during collection preparation\n\n @see more at https://ptbk.io/prepare-pipeline\n ")));
  }
  var pipelineExecutor = function (inputParameters, onProgress) { return __awaiter(_this, void 0, void 0, function () {
+ // TODO: !!!!! Extract to separate functions and files - ALL FUNCTIONS BELOW
  function getContextForTemplate(// <- TODO: [🧠][🥜]
  template) {
  return __awaiter(this, void 0, void 0, function () {
  return __generator(this, function (_a) {
- // TODO: !!!!!! Implement Better - use real index and keyword search
+ TODO_USE(template);
+ return [2 /*return*/, ''];
+ });
+ });
+ }
+ function getKnowledgeForTemplate(// <- TODO: [🧠][🥜]
+ template) {
+ return __awaiter(this, void 0, void 0, function () {
+ return __generator(this, function (_a) {
+ // TODO: !!!! Implement Better - use real index and keyword search
  TODO_USE(template);
  return [2 /*return*/, pipeline.knowledgePieces.map(function (_a) {
  var content = _a.content;
@@ -2333,19 +2370,39 @@ function createPipelineExecutor(options) {
  });
  });
  }
+ function getSamplesForTemplate(// <- TODO: [🧠][🥜]
+ template) {
+ return __awaiter(this, void 0, void 0, function () {
+ return __generator(this, function (_a) {
+ // TODO: !!!! Implement Better - use real index and keyword search
+ TODO_USE(template);
+ return [2 /*return*/, ''];
+ });
+ });
+ }
  function getReservedParametersForTemplate(template) {
  return __awaiter(this, void 0, void 0, function () {
- var context, currentDate, reservedParameters, RESERVED_PARAMETER_NAMES_1, RESERVED_PARAMETER_NAMES_1_1, parameterName;
+ var context, knowledge, samples, currentDate, modelName, reservedParameters, RESERVED_PARAMETER_NAMES_1, RESERVED_PARAMETER_NAMES_1_1, parameterName;
  var e_3, _a;
  return __generator(this, function (_b) {
  switch (_b.label) {
  case 0: return [4 /*yield*/, getContextForTemplate(template)];
  case 1:
  context = _b.sent();
+ return [4 /*yield*/, getKnowledgeForTemplate(template)];
+ case 2:
+ knowledge = _b.sent();
+ return [4 /*yield*/, getSamplesForTemplate(template)];
+ case 3:
+ samples = _b.sent();
  currentDate = new Date().toISOString();
+ modelName = RESERVED_PARAMETER_MISSING_VALUE;
  reservedParameters = {
  context: context,
+ knowledge: knowledge,
+ samples: samples,
  currentDate: currentDate,
+ modelName: modelName,
  };
  try {
  // Note: Doublecheck that ALL reserved parameters are defined:
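
Note: stripped of the TypeScript-to-ES5 helpers, the reserved-parameter resolution added in the hunk above boils down to the following sketch. It is a rough reconstruction from this diff, not the package source; `getContextForTemplate`, `getKnowledgeForTemplate` and `getSamplesForTemplate` are the executor-scoped helpers introduced above, and the doublecheck loop over `RESERVED_PARAMETER_NAMES` that follows is omitted here.

```js
// Rough un-transpiled equivalent of getReservedParametersForTemplate as added in this diff
async function getReservedParametersForTemplate(template) {
    const context = await getContextForTemplate(template);
    const knowledge = await getKnowledgeForTemplate(template);
    const samples = await getSamplesForTemplate(template);
    const currentDate = new Date().toISOString();
    // `modelName` is not resolved yet; it carries the missing-value sentinel for now
    const modelName = RESERVED_PARAMETER_MISSING_VALUE;

    return { context, knowledge, samples, currentDate, modelName };
}
```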
@@ -2832,7 +2889,7 @@ function createPipelineExecutor(options) {
  var parameter = _c.value;
  if (parametersToPass[parameter.name] === undefined) {
  // [4]
- errors.push(new PipelineExecutionError("Parameter {".concat(parameter.name, "} should be an output parameter, but it was not be resolved")));
+ warnings.push(new PipelineExecutionError("Parameter {".concat(parameter.name, "} should be an output parameter, but it was not be resolved")));
  continue;
  }
  outputParameters[parameter.name] = parametersToPass[parameter.name] || '';
@@ -2847,7 +2904,7 @@ function createPipelineExecutor(options) {
  }
  return outputParameters;
  }
- var executionReport, _a, _b, parameter, errors, _loop_1, _c, _d, parameterName, state_1, parametersToPass, resovedParameterNames_1, unresovedTemplates_1, resolving_1, loopLimit, _loop_2, error_1, usage_1, outputParameters_1, usage, outputParameters;
+ var executionReport, _a, _b, parameter, errors, warnings, _loop_1, _c, _d, parameterName, state_1, parametersToPass, resovedParameterNames_1, unresovedTemplates_1, resolving_1, loopLimit, _loop_2, error_1, usage_1, outputParameters_1, usage, outputParameters;
  var e_1, _e, e_2, _f;
  return __generator(this, function (_g) {
  switch (_g.label) {
@@ -2882,8 +2939,8 @@ function createPipelineExecutor(options) {
  isSuccessful: false,
  errors: [
  new PipelineExecutionError("Parameter {".concat(parameter.name, "} is required as an input parameter")),
- // <- TODO: !!!!! Test this error
  ],
+ warnings: [],
  executionReport: executionReport,
  outputParameters: {},
  usage: ZERO_USAGE,
@@ -2899,21 +2956,22 @@ function createPipelineExecutor(options) {
  finally { if (e_1) throw e_1.error; }
  }
  errors = [];
+ warnings = [];
  _loop_1 = function (parameterName) {
  var parameter = pipeline.parameters.find(function (_a) {
  var name = _a.name;
  return name === parameterName;
  });
  if (parameter === undefined) {
- errors.push(new PipelineExecutionError("Extra parameter {".concat(parameterName, "} is passed as input parameter")));
+ warnings.push(new PipelineExecutionError("Extra parameter {".concat(parameterName, "} is being passed which is not part of the pipeline.")));
  }
  else if (parameter.isInput === false) {
  return { value: deepFreezeWithSameType({
  isSuccessful: false,
  errors: [
- new PipelineExecutionError("Parameter {".concat(parameter.name, "} is passed as input parameter but is not input")),
- // <- TODO: !!!!! Test this error
+ new PipelineExecutionError("Parameter {".concat(parameter.name, "} is passed as input parameter but it is not input")),
  ],
+ warnings: warnings,
  executionReport: executionReport,
  outputParameters: {},
  usage: ZERO_USAGE,
@@ -3022,6 +3080,7 @@ function createPipelineExecutor(options) {
  return [2 /*return*/, deepFreezeWithSameType({
  isSuccessful: false,
  errors: __spreadArray([error_1], __read(errors), false),
+ warnings: warnings,
  usage: usage_1,
  executionReport: executionReport,
  outputParameters: outputParameters_1,
@@ -3035,6 +3094,7 @@ function createPipelineExecutor(options) {
  return [2 /*return*/, deepFreezeWithSameType({
  isSuccessful: true,
  errors: errors,
+ warnings: warnings,
  usage: usage,
  executionReport: executionReport,
  outputParameters: outputParameters,
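
Note: with this change the execution result gains a `warnings` array alongside `errors`, and non-fatal problems (an unresolved output parameter, an extra input parameter) are now reported there instead of failing the run. A hedged usage sketch, assuming a `pipelineExecutor` created by `createPipelineExecutor` and a hypothetical `content` input parameter:

```js
// Sketch only: inspects the result shape visible in this diff
// (isSuccessful, errors, warnings, outputParameters); run inside an async context
const result = await pipelineExecutor({ content: '# Hello world' /* <- hypothetical input */ });

for (const warning of result.warnings) {
    console.warn(warning.message); // non-fatal issues, the run is still usable
}
if (!result.isSuccessful) {
    throw result.errors[0]; // fatal issues still end up in `errors`
}
console.info(result.outputParameters);
```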
@@ -3061,7 +3121,7 @@ function createPipelineExecutor(options) {
  */
  function prepareKnowledgeFromMarkdown(content /* <- TODO: [🖖] (?maybe not) Always the file */, options) {
  return __awaiter(this, void 0, void 0, function () {
- var llmTools, _a, maxParallelCount, _b, isVerbose, collection, prepareKnowledgeFromMarkdownExecutor, _c, prepareTitleExecutor, _d, prepareKeywordsExecutor, _e, result, outputParameters, knowledgeRaw, knowledgeTextPieces, knowledge;
+ var llmTools, _a, maxParallelCount, _b, isVerbose, collection, prepareKnowledgeFromMarkdownExecutor, _c, prepareTitleExecutor, _d, prepareKeywordsExecutor, _e, result, outputParameters, knowledgePiecesRaw, knowledgeTextPieces, knowledge;
  var _f, _g, _h;
  var _this = this;
  return __generator(this, function (_j) {
@@ -3102,8 +3162,8 @@ function prepareKnowledgeFromMarkdown(content /* <- TODO: [🖖] (?maybe not) Al
  result = _j.sent();
  assertsExecutionSuccessful(result);
  outputParameters = result.outputParameters;
- knowledgeRaw = outputParameters.knowledge;
- knowledgeTextPieces = (knowledgeRaw || '').split('\n---\n');
+ knowledgePiecesRaw = outputParameters.knowledgePieces;
+ knowledgeTextPieces = (knowledgePiecesRaw || '').split('\n---\n');
  if (isVerbose) {
  console.info('knowledgeTextPieces:', knowledgeTextPieces);
  }
@@ -3371,7 +3431,8 @@ function preparePipeline(pipeline, options) {
  knowledgePiecesPrepared = partialknowledgePiecesPrepared.map(function (piece) { return (__assign(__assign({}, piece), { preparationIds: [/* TODO: [🧊] -> */ currentPreparation.id] })); });
  // ----- /Knowledge preparation -----
  // TODO: !!!!! Add context to each template (if missing)
- // TODO: !!!!! Apply samples to each template (if missing)
+ // TODO: !!!!! Add knowledge to each template (if missing and is in pipeline defined)
+ // TODO: !!!!! Apply samples to each template (if missing and is for the template defined)
  return [2 /*return*/, __assign(__assign({}, pipeline), { knowledgeSources: knowledgeSourcesPrepared, knowledgePieces: knowledgePiecesPrepared, personas: preparedPersonas, preparations: preparations })];
  }
  });