@promptbook/node 0.61.0-24 → 0.61.0-25

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -88,7 +88,7 @@ File `write-website-content.ptbk.md`:
  > - PROMPTBOOK VERSION 0.0.1
  > - INPUT  PARAM `{rawTitle}` Automatically suggested a site name or empty text
  > - INPUT  PARAM `{rawAssigment}` Automatically generated site entry from image recognition
- > - OUTPUT PARAM `{content}` Web content
+ > - OUTPUT PARAM `{websiteContent}` Web content
  > - OUTPUT PARAM `{keywords}` Keywords
  >
  > ## 👤 Specifying the assigment
@@ -247,7 +247,7 @@ File `write-website-content.ptbk.md`:
  > {contentBody}
  > ```
  >
- > `-> {content}`
+ > `-> {websiteContent}`
 
 
 
@@ -287,7 +287,7 @@ flowchart LR
  templateCombineTheBeginning--"{contentBeginning}"-->templateCombineTheContent
  templateWriteTheContent--"{contentBody}"-->templateCombineTheContent
 
- templateCombineTheContent--"{content}"-->output
+ templateCombineTheContent--"{websiteContent}"-->output
  output((Output)):::output
 
  classDef input color: grey;
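
The output parameter rename carries through to anything consuming the pipeline's results. A minimal consumer-side sketch (illustrative only; the input values are made up, and the executor is assumed to have been created with `createPipelineExecutor` as elsewhere in this package):

```ts
// Sketch, not part of the diff: reading the renamed output parameter after a run.
const result = await pipelineExecutor({
    rawTitle: 'My Portfolio',
    rawAssigment: 'A personal site for a travel photographer',
});
assertsExecutionSuccessful(result);
// The web copy used to arrive as `content`; it is now `websiteContent`,
// because `content` has become a reserved parameter name (see index.es.js below).
const { websiteContent, keywords } = result.outputParameters;
console.info(websiteContent, keywords);
```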
package/esm/index.es.js CHANGED
@@ -194,6 +194,7 @@ var REPLACING_NONCE = 'u$k42k%!V2zo34w7Fu#@QUHYPW';
  * The names of the parameters that are reserved for special purposes
  */
  var RESERVED_PARAMETER_NAMES = deepFreeze([
+ 'content',
  'context',
  'knowledge',
  'samples',
@@ -206,6 +207,10 @@ var RESERVED_PARAMETER_NAMES = deepFreeze([
  * @@@
  */
  var RESERVED_PARAMETER_MISSING_VALUE = 'MISSING-' + REPLACING_NONCE;
+ /**
+ * @@@
+ */
+ var RESERVED_PARAMETER_RESTRICTED = 'RESTRICTED-' + REPLACING_NONCE;
  /*
  TODO: !!! Just testing false-negative detection of [🟡][🟢][🔵][⚪] leak
  */
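
`content` now joins the reserved parameter names, and a second sentinel, `RESERVED_PARAMETER_RESTRICTED`, sits next to `RESERVED_PARAMETER_MISSING_VALUE`. A rough sketch of the idea; the constants mirror the diff, while `validateParameterName` is a hypothetical helper added here only for illustration:

```ts
// Hypothetical sketch of nonce-based sentinels; only the constants exist in the package.
const REPLACING_NONCE = 'u$k42k%!V2zo34w7Fu#@QUHYPW';
const RESERVED_PARAMETER_NAMES = ['content', 'context', 'knowledge', 'samples'] as const;
// (the package freezes more names; the list is truncated here)
const RESERVED_PARAMETER_MISSING_VALUE = 'MISSING-' + REPLACING_NONCE;
const RESERVED_PARAMETER_RESTRICTED = 'RESTRICTED-' + REPLACING_NONCE;

// A user-defined parameter must not shadow a reserved name:
function validateParameterName(name: string): void {
    if ((RESERVED_PARAMETER_NAMES as readonly string[]).includes(name)) {
        throw new Error(`Parameter {${name}} is reserved`);
    }
}
// Because the sentinels embed the nonce, no genuine parameter value can collide with them.
```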
@@ -649,7 +654,7 @@ function forEachAsync(array, options, callbackfunction) {
  });
  }
 
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-23",parameters:[{name:"content",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {content}",dependentParameterNames:["content"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-23",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0-23",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {content}",dependentParameterNames:["content"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-23",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0-23",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- 
Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {content}",expectations:{words:{min:1,max:8}},dependentParameterNames:["content"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-23",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0-23",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-23",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-24",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-24",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0-24",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-24",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0-24",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, 
write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-24",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0-24",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-24",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
 
  /**
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
@@ -1083,6 +1088,36 @@ var ReferenceError$1 = /** @class */ (function (_super) {
  return ReferenceError;
  }(Error));
 
+ /**
+ * Parses the template and returns the list of all parameter names
+ *
+ * @param template the template with parameters in {curly} braces
+ * @returns the list of parameter names
+ */
+ function extractParameters(template) {
+ var e_1, _a;
+ var matches = template.matchAll(/{\w+}/g);
+ var parameterNames = new Set();
+ try {
+ for (var matches_1 = __values(matches), matches_1_1 = matches_1.next(); !matches_1_1.done; matches_1_1 = matches_1.next()) {
+ var match = matches_1_1.value;
+ var parameterName = match[0].slice(1, -1);
+ parameterNames.add(parameterName);
+ }
+ }
+ catch (e_1_1) { e_1 = { error: e_1_1 }; }
+ finally {
+ try {
+ if (matches_1_1 && !matches_1_1.done && (_a = matches_1.return)) _a.call(matches_1);
+ }
+ finally { if (e_1) throw e_1.error; }
+ }
+ return parameterNames;
+ }
+ /**
+ * TODO: !!!!! Rename to extractParameterNames
+ */
+
  /**
  * Unprepare just strips the preparation data of the pipeline
  */
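
The transpiled `extractParameters` above collects every `{word}` placeholder of a template into a `Set`. The same behaviour in untranspiled form, as a sketch:

```ts
// Sketch of what the ES5-transpiled extractParameters above does.
function extractParameters(template: string): Set<string> {
    const parameterNames = new Set<string>();
    for (const match of template.matchAll(/{\w+}/g)) {
        // Strip the curly braces, e.g. "{knowledgeContent}" -> "knowledgeContent"
        parameterNames.add(match[0].slice(1, -1));
    }
    return parameterNames;
}

// Example:
extractParameters('Take information from this document:\n\n> {knowledgeContent}');
// -> Set { 'knowledgeContent' }
```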
@@ -1090,7 +1125,14 @@ function unpreparePipeline(pipeline) {
  var personas = pipeline.personas, knowledgeSources = pipeline.knowledgeSources, promptTemplates = pipeline.promptTemplates;
  personas = personas.map(function (persona) { return (__assign(__assign({}, persona), { modelRequirements: undefined, preparationIds: undefined })); });
  knowledgeSources = knowledgeSources.map(function (knowledgeSource) { return (__assign(__assign({}, knowledgeSource), { preparationIds: undefined })); });
- promptTemplates = promptTemplates.map(function (promptTemplate) { return (__assign(__assign({}, promptTemplate), { preparedContent: undefined })); });
+ promptTemplates = promptTemplates.map(function (promptTemplate) {
+ var dependentParameterNames = promptTemplate.dependentParameterNames;
+ var parameterNames = extractParameters(promptTemplate.preparedContent || '');
+ dependentParameterNames = dependentParameterNames.filter(function (dependentParameterName) { return !parameterNames.has(dependentParameterName); });
+ var promptTemplateUnprepared = __assign(__assign({}, promptTemplate), { dependentParameterNames: dependentParameterNames });
+ delete promptTemplateUnprepared.preparedContent;
+ return promptTemplateUnprepared;
+ });
  return __assign(__assign({}, pipeline), { promptTemplates: promptTemplates, knowledgeSources: knowledgeSources, knowledgePieces: [], personas: personas, preparations: [] });
  }
  /**
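
`unpreparePipeline` no longer just blanks `preparedContent`; per template it now also drops the dependent parameter names that only came from the prepared content (in practice the injected `knowledge`). A sketch of the per-template step in modern syntax, reusing the `extractParameters` sketch above; the `PromptTemplate` shape is reduced to the fields used here:

```ts
interface PromptTemplate {
    dependentParameterNames: string[];
    preparedContent?: string;
    [key: string]: unknown;
}

// Sketch of the new unprepare step for one template (same behaviour as the diff).
function unprepareTemplate(promptTemplate: PromptTemplate): PromptTemplate {
    const injectedNames = extractParameters(promptTemplate.preparedContent || '');
    const dependentParameterNames = promptTemplate.dependentParameterNames.filter(
        // Keep only dependencies that were NOT introduced by the prepared content
        (name) => !injectedNames.has(name),
    );
    const unprepared: PromptTemplate = { ...promptTemplate, dependentParameterNames };
    delete unprepared.preparedContent;
    return unprepared;
}
```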
@@ -1587,33 +1629,6 @@ function assertsExecutionSuccessful(executionResult) {
  * TODO: [🧠] Can this return type be better typed than void
  */
 
- /**
- * Parses the template and returns the list of all parameter names
- *
- * @param template the template with parameters in {curly} braces
- * @returns the list of parameter names
- */
- function extractParameters(template) {
- var e_1, _a;
- var matches = template.matchAll(/{\w+}/g);
- var parameterNames = new Set();
- try {
- for (var matches_1 = __values(matches), matches_1_1 = matches_1.next(); !matches_1_1.done; matches_1_1 = matches_1.next()) {
- var match = matches_1_1.value;
- var parameterName = match[0].slice(1, -1);
- parameterNames.add(parameterName);
- }
- }
- catch (e_1_1) { e_1 = { error: e_1_1 }; }
- finally {
- try {
- if (matches_1_1 && !matches_1_1.done && (_a = matches_1.return)) _a.call(matches_1);
- }
- finally { if (e_1) throw e_1.error; }
- }
- return parameterNames;
- }
-
  /**
  * Parses the given script and returns the list of all used variables that are not defined in the script
  *
@@ -1671,10 +1686,10 @@ function extractVariables(script) {
  */
  function extractParametersFromPromptTemplate(promptTemplate) {
  var e_1, _a, e_2, _b, e_3, _c;
- var title = promptTemplate.title, description = promptTemplate.description, blockType = promptTemplate.blockType, content = promptTemplate.content, jokerParameterNames = promptTemplate.jokerParameterNames;
+ var title = promptTemplate.title, description = promptTemplate.description, blockType = promptTemplate.blockType, content = promptTemplate.content, preparedContent = promptTemplate.preparedContent, jokerParameterNames = promptTemplate.jokerParameterNames;
  var parameterNames = new Set();
  try {
- for (var _d = __values(__spreadArray(__spreadArray(__spreadArray([], __read(extractParameters(title)), false), __read(extractParameters(description || '')), false), __read(extractParameters(content)), false)), _e = _d.next(); !_e.done; _e = _d.next()) {
+ for (var _d = __values(__spreadArray(__spreadArray(__spreadArray(__spreadArray([], __read(extractParameters(title)), false), __read(extractParameters(description || '')), false), __read(extractParameters(content)), false), __read(extractParameters(preparedContent || '')), false)), _e = _d.next(); !_e.done; _e = _d.next()) {
  var parameterName = _e.value;
  parameterNames.add(parameterName);
  }
@@ -1714,6 +1729,8 @@ function extractParametersFromPromptTemplate(promptTemplate) {
  }
  finally { if (e_3) throw e_3.error; }
  }
+ parameterNames.delete('content');
+ // <- Note {websiteContent} is used in `preparedContent`
  return parameterNames;
  }
  /**
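
Scanning `preparedContent` for parameters would now also surface `content`, because the prepared content wraps the template's own text as `{content}` before appending the knowledge section; the added `parameterNames.delete('content')` removes that false dependency again. In sketch form:

```ts
// Sketch: why 'content' must be dropped after scanning preparedContent.
const preparedContent = '{content}\n\n## Knowledge\n\n{knowledge}';
const names = extractParameters(preparedContent); // Set { 'content', 'knowledge' }
names.delete('content'); // {content} is the reserved slot for the template's own text, not a real dependency
console.log([...names]); // [ 'knowledge' ]
```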
@@ -1883,7 +1900,14 @@ var MultipleLlmExecutionTools = /** @class */ (function () {
  throw errors[0];
  }
  else if (errors.length > 1) {
- throw new PipelineExecutionError(spaceTrim(function (block) { return "\n All execution tools failed:\n\n ".concat(block(errors.map(function (error) { return "- ".concat(error.name || 'Error', ": ").concat(error.message); }).join('\n')), "\n\n "); }));
+ throw new PipelineExecutionError(
+ // TODO: Tell which execution tools failed like
+ // 1) OpenAI throw PipelineExecutionError: Parameter {knowledge} is not defined
+ // 2) AnthropicClaude throw PipelineExecutionError: Parameter {knowledge} is not defined
+ // 3) ...
+ spaceTrim(function (block) { return "\n All execution tools failed:\n\n ".concat(block(errors
+ .map(function (error, i) { return "".concat(i + 1, ") **").concat(error.name || 'Error', ":** ").concat(error.message); })
+ .join('\n')), "\n\n "); }));
  }
  else {
  throw new PipelineExecutionError(spaceTrim(function (block) { return "\n You have not provided any `LlmExecutionTools` that support model variant \"".concat(prompt.modelRequirements.modelVariant, "\n\n Available `LlmExecutionTools`:\n ").concat(block(_this.llmExecutionTools
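
When more than one of the configured `LlmExecutionTools` fails, the aggregated error message now numbers the failures instead of bulleting them. A standalone sketch of the new formatting, without the `spaceTrim` wrapper:

```ts
// Sketch of the numbered failure list built inside MultipleLlmExecutionTools.
function formatFailures(errors: Error[]): string {
    const lines = errors.map(
        (error, i) => `${i + 1}) **${error.name || 'Error'}:** ${error.message}`,
    );
    return `All execution tools failed:\n\n${lines.join('\n')}`;
}

// Example output:
// All execution tools failed:
//
// 1) **PipelineExecutionError:** Parameter {knowledge} is not defined
// 2) **PipelineExecutionError:** Parameter {knowledge} is not defined
```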
@@ -2088,6 +2112,10 @@ function replaceParameters(template, parameters) {
  if (parameterValue === RESERVED_PARAMETER_MISSING_VALUE) {
  throw new UnexpectedError("Parameter {".concat(parameterName, "} has missing value"));
  }
+ else if (parameterValue === RESERVED_PARAMETER_RESTRICTED) {
+ // TODO: [🍵]
+ throw new UnexpectedError("Parameter {".concat(parameterName, "} is restricted to use"));
+ }
  }
  }
  catch (e_1_1) { e_1 = { error: e_1_1 }; }
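
`replaceParameters` now distinguishes two reserved-parameter failure modes: a missing value and a restricted one ({content} is deliberately preset to the restricted sentinel by the executor, see below). A compact sketch of the check, using the sentinel constants from the diff; the package throws `UnexpectedError`, simplified here to `Error`:

```ts
// Simplified sketch of the sentinel checks inside replaceParameters.
function checkReservedValue(parameterName: string, parameterValue: string): void {
    if (parameterValue === RESERVED_PARAMETER_MISSING_VALUE) {
        throw new Error(`Parameter {${parameterName}} has missing value`);
    } else if (parameterValue === RESERVED_PARAMETER_RESTRICTED) {
        // {content} is preset to this sentinel, so a template cannot reference it directly
        throw new Error(`Parameter {${parameterName}} is restricted to use`);
    }
}
```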
@@ -2222,7 +2250,7 @@ function union() {
  /**
  * The version of the Promptbook library
  */
- var PROMPTBOOK_VERSION = '0.61.0-23';
+ var PROMPTBOOK_VERSION = '0.61.0-24';
  // TODO: !!!! List here all the versions and annotate + put into script
 
  /**
@@ -2356,7 +2384,7 @@ function createPipelineExecutor(options) {
  console.warn(spaceTrim$1("\n Pipeline ".concat(rawPipeline.pipelineUrl || rawPipeline.sourceFile || rawPipeline.title, " is not prepared\n\n ").concat(rawPipeline.sourceFile, "\n\n It will be prepared ad-hoc before the first execution\n But it is recommended to prepare the pipeline during collection preparation\n\n @see more at https://ptbk.io/prepare-pipeline\n ")));
  }
  var pipelineExecutor = function (inputParameters, onProgress) { return __awaiter(_this, void 0, void 0, function () {
- // TODO: !!!!! Extract to separate functions and files - ALL FUNCTIONS BELOW
+ // TODO: !!! Extract to separate functions and files - ALL FUNCTIONS BELOW
  function getContextForTemplate(// <- TODO: [🧠][🥜]
  template) {
  return __awaiter(this, void 0, void 0, function () {
@@ -2407,6 +2435,7 @@ function createPipelineExecutor(options) {
  currentDate = new Date().toISOString();
  modelName = RESERVED_PARAMETER_MISSING_VALUE;
  reservedParameters = {
+ content: RESERVED_PARAMETER_RESTRICTED,
  context: context,
  knowledge: knowledge,
  samples: samples,
@@ -3117,6 +3146,8 @@ function createPipelineExecutor(options) {
  return pipelineExecutor;
  }
  /**
+ * TODO: !!!! return `preparedPipeline` from execution
+ * TODO: !!!! `isNotPreparedWarningSupressed`
  * TODO: Use isVerbose here (not only pass to `preparePipeline`)
  * TODO: [🪂] Use maxParallelCount here (not only pass to `preparePipeline`)
  * TODO: [♈] Probbably move expectations from templates to parameters
@@ -3131,7 +3162,7 @@ function createPipelineExecutor(options) {
  /**
  * @@@
  */
- function prepareKnowledgeFromMarkdown(content /* <- TODO: [🖖] (?maybe not) Always the file */, options) {
+ function prepareKnowledgeFromMarkdown(knowledgeContent /* <- TODO: [🖖] (?maybe not) Always the file */, options) {
  return __awaiter(this, void 0, void 0, function () {
  var llmTools, _a, maxParallelCount, _b, isVerbose, collection, prepareKnowledgeFromMarkdownExecutor, _c, prepareTitleExecutor, _d, prepareKeywordsExecutor, _e, result, outputParameters, knowledgePiecesRaw, knowledgeTextPieces, knowledge;
  var _f, _g, _h;
@@ -3169,7 +3200,7 @@ function prepareKnowledgeFromMarkdown(content /* <- TODO: [🖖] (?maybe not) Al
  llm: llmTools,
  },
  _h)]);
- return [4 /*yield*/, prepareKnowledgeFromMarkdownExecutor({ content: content })];
+ return [4 /*yield*/, prepareKnowledgeFromMarkdownExecutor({ knowledgeContent: knowledgeContent })];
  case 4:
  result = _j.sent();
  assertsExecutionSuccessful(result);
@@ -3182,25 +3213,25 @@ function prepareKnowledgeFromMarkdown(content /* <- TODO: [🖖] (?maybe not) Al
  return [4 /*yield*/, Promise.all(
  // TODO: [🪂] !! Do not send all at once but in chunks
  knowledgeTextPieces.map(function (knowledgeTextPiece, i) { return __awaiter(_this, void 0, void 0, function () {
- var name, title, content, keywords, index, titleResult, _a, titleRaw, keywordsResult, _b, keywordsRaw, embeddingResult, error_1;
+ var name, title, knowledgePieceContent, keywords, index, titleResult, _a, titleRaw, keywordsResult, _b, keywordsRaw, embeddingResult, error_1;
  return __generator(this, function (_c) {
  switch (_c.label) {
  case 0:
  name = "piece-".concat(i);
  title = spaceTrim(knowledgeTextPiece.substring(0, 100));
- content = spaceTrim(knowledgeTextPiece);
+ knowledgePieceContent = spaceTrim(knowledgeTextPiece);
  keywords = [];
  index = [];
  _c.label = 1;
  case 1:
  _c.trys.push([1, 7, , 8]);
- return [4 /*yield*/, prepareTitleExecutor({ content: content })];
+ return [4 /*yield*/, prepareTitleExecutor({ knowledgePieceContent: knowledgePieceContent })];
  case 2:
  titleResult = _c.sent();
  _a = titleResult.outputParameters.title, titleRaw = _a === void 0 ? 'Untitled' : _a;
  title = spaceTrim(titleRaw) /* <- TODO: Maybe do in pipeline */;
  name = titleToName(title);
- return [4 /*yield*/, prepareKeywordsExecutor({ content: content })];
+ return [4 /*yield*/, prepareKeywordsExecutor({ knowledgePieceContent: knowledgePieceContent })];
  case 3:
  keywordsResult = _c.sent();
  _b = keywordsResult.outputParameters.keywords, keywordsRaw = _b === void 0 ? '' : _b;
@@ -3218,7 +3249,7 @@ function prepareKnowledgeFromMarkdown(content /* <- TODO: [🖖] (?maybe not) Al
  case 4: return [4 /*yield*/, llmTools.callEmbeddingModel({
  title: "Embedding for ".concat(title) /* <- Note: No impact on embedding result itself, just for logging */,
  parameters: {},
- content: content,
+ content: knowledgePieceContent,
  modelRequirements: {
  modelVariant: 'EMBEDDING',
  },
@@ -3239,7 +3270,7 @@ function prepareKnowledgeFromMarkdown(content /* <- TODO: [🖖] (?maybe not) Al
  case 8: return [2 /*return*/, {
  name: name,
  title: title,
- content: content,
+ content: knowledgePieceContent,
  keywords: keywords,
  index: index,
  // <- TODO: [☀] sources,
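
With `{content}` restricted, the built-in preparation pipelines rename their input parameters, and every call into their executors changes accordingly. Stripped of the generator plumbing, the call sites now look roughly like this (the wrapper function is invented for illustration):

```ts
// Sketch only: the three executors are the prepared pipelines from PipelineCollection above.
async function prepareOnePiece(knowledgeContent: string, knowledgePieceContent: string) {
    // Previously all three calls passed `{ content }`, which would now collide with the reserved {content}.
    const knowledgeResult = await prepareKnowledgeFromMarkdownExecutor({ knowledgeContent });
    const titleResult = await prepareTitleExecutor({ knowledgePieceContent });
    const keywordsResult = await prepareKeywordsExecutor({ knowledgePieceContent });
    return { knowledgeResult, titleResult, keywordsResult };
}
```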
@@ -3405,14 +3436,18 @@ function prepareTemplates(pipeline, options) {
  TODO_USE(parameters);
  promptTemplatesPrepared = new Array(promptTemplates.length);
  return [4 /*yield*/, forEachAsync(promptTemplates, { maxParallelCount: maxParallelCount /* <- TODO: [🪂] When there are subtasks, this maximul limit can be broken */ }, function (template, index) { return __awaiter(_this, void 0, void 0, function () {
- var preparedContent, preparedTemplate;
+ var dependentParameterNames, preparedContent, preparedTemplate;
  return __generator(this, function (_a) {
+ dependentParameterNames = template.dependentParameterNames;
  preparedContent = undefined;
- if (knowledgePiecesCount > 0) {
+ if (knowledgePiecesCount > 0 && !dependentParameterNames.includes('knowledge')) {
  preparedContent = spaceTrim$1("\n {content}\n\n ## Knowledge\n\n {knowledge}\n ");
  // <- TODO: [🧠][🧻] Cutomize shape/language/formatting of the addition to the prompt
+ dependentParameterNames = __spreadArray(__spreadArray([], __read(dependentParameterNames), false), [
+ 'knowledge',
+ ], false);
  }
- preparedTemplate = __assign(__assign({}, template), { preparedContent: preparedContent });
+ preparedTemplate = __assign(__assign({}, template), { dependentParameterNames: dependentParameterNames, preparedContent: preparedContent });
  promptTemplatesPrepared[index] = preparedTemplate;
  return [2 /*return*/];
  });
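
`prepareTemplates` now injects the knowledge section only when a template does not already depend on `{knowledge}`, and it records the new dependency so that later passes (and `unpreparePipeline` above) can account for it. The per-template logic, restated as a sketch using the reduced `PromptTemplate` shape from earlier:

```ts
// Sketch of the per-template preparation step (same logic as the transpiled code above).
function prepareTemplate(template: PromptTemplate, knowledgePiecesCount: number): PromptTemplate {
    let dependentParameterNames = template.dependentParameterNames;
    let preparedContent: string | undefined = undefined;
    if (knowledgePiecesCount > 0 && !dependentParameterNames.includes('knowledge')) {
        // {content} stands for the template's own text, {knowledge} for the retrieved pieces
        preparedContent = '{content}\n\n## Knowledge\n\n{knowledge}';
        dependentParameterNames = [...dependentParameterNames, 'knowledge'];
    }
    return { ...template, dependentParameterNames, preparedContent };
}
```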
@@ -4228,7 +4263,7 @@ var parameterCommandParser = {
  /**
  * Example usages of the PARAMETER command
  */
- examples: ['PARAMETER {title} Title of the book', 'OUTPUT PARAMETER {content} Content of the book'],
+ examples: ['PARAMETER {title} Title of the book', 'OUTPUT PARAMETER {websiteContent} Content of the book'],
  /**
  * Parses the PARAMETER command
  */