@promptbook/node 0.61.0-24 → 0.61.0-26

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (24) hide show
  1. package/README.md +3 -3
  2. package/esm/index.es.js +179 -135
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/utils.index.d.ts +4 -4
  5. package/esm/typings/src/config.d.ts +5 -1
  6. package/esm/typings/src/conversion/utils/{extractParametersFromPromptTemplate.d.ts → extractParameterNamesFromPromptTemplate.d.ts} +1 -1
  7. package/esm/typings/src/execution/createPipelineExecutor.d.ts +2 -0
  8. package/esm/typings/src/knowledge/prepare-knowledge/markdown/prepareKnowledgeFromMarkdown.d.ts +1 -1
  9. package/esm/typings/src/types/PipelineJson/PipelineJson.d.ts +1 -1
  10. package/esm/typings/src/utils/{extractParameters.d.ts → extractParameterNames.d.ts} +4 -1
  11. package/package.json +2 -2
  12. package/umd/index.umd.js +179 -135
  13. package/umd/index.umd.js.map +1 -1
  14. package/umd/typings/src/_packages/utils.index.d.ts +4 -4
  15. package/umd/typings/src/config.d.ts +5 -1
  16. package/umd/typings/src/conversion/utils/{extractParametersFromPromptTemplate.d.ts → extractParameterNamesFromPromptTemplate.d.ts} +1 -1
  17. package/umd/typings/src/execution/createPipelineExecutor.d.ts +2 -0
  18. package/umd/typings/src/knowledge/prepare-knowledge/markdown/prepareKnowledgeFromMarkdown.d.ts +1 -1
  19. package/umd/typings/src/types/PipelineJson/PipelineJson.d.ts +1 -1
  20. package/umd/typings/src/utils/{extractParameters.d.ts → extractParameterNames.d.ts} +4 -1
  21. /package/esm/typings/src/conversion/utils/{extractParametersFromPromptTemplate.test.d.ts → extractParameterNamesFromPromptTemplate.test.d.ts} +0 -0
  22. /package/esm/typings/src/utils/{extractParameters.test.d.ts → extractParameterNames.test.d.ts} +0 -0
  23. /package/umd/typings/src/conversion/utils/{extractParametersFromPromptTemplate.test.d.ts → extractParameterNamesFromPromptTemplate.test.d.ts} +0 -0
  24. /package/umd/typings/src/utils/{extractParameters.test.d.ts → extractParameterNames.test.d.ts} +0 -0
package/umd/index.umd.js CHANGED
@@ -199,6 +199,7 @@
199
199
  * The names of the parameters that are reserved for special purposes
200
200
  */
201
201
  var RESERVED_PARAMETER_NAMES = deepFreeze([
202
+ 'content',
202
203
  'context',
203
204
  'knowledge',
204
205
  'samples',
@@ -211,6 +212,10 @@
211
212
  * @@@
212
213
  */
213
214
  var RESERVED_PARAMETER_MISSING_VALUE = 'MISSING-' + REPLACING_NONCE;
215
+ /**
216
+ * @@@
217
+ */
218
+ var RESERVED_PARAMETER_RESTRICTED = 'RESTRICTED-' + REPLACING_NONCE;
214
219
  /*
215
220
  TODO: !!! Just testing false-negative detection of [🟡][🟢][🔵][⚪] leak
216
221
  */
@@ -654,7 +659,7 @@
654
659
  });
655
660
  }
656
661
 
657
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-23",parameters:[{name:"content",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {content}",dependentParameterNames:["content"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-23",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0-23",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by 
comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {content}",dependentParameterNames:["content"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-23",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0-23",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> 
{content}",expectations:{words:{min:1,max:8}},dependentParameterNames:["content"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-23",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0-23",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. 
The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-23",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
662
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-25",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-25",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0-25",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by 
comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-25",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0-25",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> 
{knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-25",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0-25",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. 
The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-25",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
658
663
 
659
664
  /**
660
665
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
@@ -857,52 +862,63 @@
857
862
  */
858
863
  function validatePipeline(pipeline) {
859
864
  // TODO: [🧠] Maybe test if promptbook is a promise and make specific error case for that
860
- var e_1, _a, e_2, _b, e_3, _c, e_4, _d, e_5, _e;
865
+ var e_1, _a, e_2, _b, e_3, _c;
866
+ var pipelineIdentification = (function () {
867
+ // Note: This is a 😐 implementation of [🚞]
868
+ var _ = [];
869
+ if (pipeline.sourceFile !== undefined) {
870
+ _.push("File: ".concat(pipeline.sourceFile));
871
+ }
872
+ if (pipeline.pipelineUrl !== undefined) {
873
+ _.push("Url: ".concat(pipeline.pipelineUrl));
874
+ }
875
+ return _.join('\n');
876
+ })();
861
877
  if (pipeline.pipelineUrl !== undefined && !isValidPipelineUrl(pipeline.pipelineUrl)) {
862
878
  // <- Note: [🚲]
863
- throw new PipelineLogicError("Invalid promptbook URL \"".concat(pipeline.pipelineUrl, "\""));
879
+ throw new PipelineLogicError(spaceTrim.spaceTrim(function (block) { return "\n Invalid promptbook URL \"".concat(pipeline.pipelineUrl, "\"\n\n ").concat(block(pipelineIdentification), "\n "); }));
864
880
  }
865
881
  if (!isValidPromptbookVersion(pipeline.promptbookVersion)) {
866
882
  // <- Note: [🚲]
867
- throw new PipelineLogicError("Invalid promptbook Version \"".concat(pipeline.pipelineUrl, "\""));
883
+ throw new PipelineLogicError(spaceTrim.spaceTrim(function (block) { return "\n Invalid Promptbook Version \"".concat(pipeline.promptbookVersion, "\"\n\n ").concat(block(pipelineIdentification), "\n "); }));
868
884
  }
869
885
  // TODO: [🧠] Maybe do here some propper JSON-schema / ZOD checking
870
886
  if (!Array.isArray(pipeline.parameters)) {
871
887
  // TODO: [🧠] what is the correct error tp throw - maybe PromptbookSchemaError
872
- throw new ParsingError(spaceTrim.spaceTrim("\n Promptbook is valid JSON but with wrong structure\n\n promptbook.parameters expected to be an array, but got ".concat(typeof pipeline.parameters, "\n ")));
888
+ throw new ParsingError(spaceTrim.spaceTrim(function (block) { return "\n Promptbook is valid JSON but with wrong structure\n\n `promptbook.parameters` expected to be an array, but got ".concat(typeof pipeline.parameters, "\n\n ").concat(block(pipelineIdentification), "\n "); }));
873
889
  }
874
890
  // TODO: [🧠] Maybe do here some propper JSON-schema / ZOD checking
875
891
  if (!Array.isArray(pipeline.promptTemplates)) {
876
892
  // TODO: [🧠] what is the correct error tp throw - maybe PromptbookSchemaError
877
- throw new ParsingError(spaceTrim.spaceTrim("\n Promptbook is valid JSON but with wrong structure\n\n promptbook.promptTemplates expected to be an array, but got ".concat(typeof pipeline.promptTemplates, "\n ")));
893
+ throw new ParsingError(spaceTrim.spaceTrim(function (block) { return "\n Promptbook is valid JSON but with wrong structure\n\n `promptbook.promptTemplates` expected to be an array, but got ".concat(typeof pipeline.promptTemplates, "\n\n ").concat(block(pipelineIdentification), "\n "); }));
878
894
  }
879
895
  var _loop_1 = function (parameter) {
880
896
  if (parameter.isInput && parameter.isOutput) {
881
- throw new PipelineLogicError("Parameter {".concat(parameter.name, "} can not be both input and output"));
897
+ throw new PipelineLogicError(spaceTrim.spaceTrim(function (block) { return "\n\n Parameter {".concat(parameter.name, "} can not be both input and output\n\n ").concat(block(pipelineIdentification), "\n "); }));
882
898
  }
883
899
  // Note: Testing that parameter is either intermediate or output BUT not created and unused
884
900
  if (!parameter.isInput &&
885
901
  !parameter.isOutput &&
886
902
  !pipeline.promptTemplates.some(function (template) { return template.dependentParameterNames.includes(parameter.name); })) {
887
- throw new PipelineLogicError(spaceTrim.spaceTrim("\n Parameter {".concat(parameter.name, "} is created but not used\n\n You can declare {").concat(parameter.name, "} as output parameter by adding in the header:\n - OUTPUT PARAMETER `{").concat(parameter.name, "}` ").concat(parameter.description || '', "\n\n ")));
903
+ throw new PipelineLogicError(spaceTrim.spaceTrim(function (block) { return "\n Parameter {".concat(parameter.name, "} is created but not used\n\n You can declare {").concat(parameter.name, "} as output parameter by adding in the header:\n - OUTPUT PARAMETER `{").concat(parameter.name, "}` ").concat(parameter.description || '', "\n\n ").concat(block(pipelineIdentification), "\n\n "); }));
888
904
  }
889
905
  // Note: Testing that parameter is either input or result of some template
890
906
  if (!parameter.isInput &&
891
907
  !pipeline.promptTemplates.some(function (template) { return template.resultingParameterName === parameter.name; })) {
892
- throw new PipelineLogicError(spaceTrim.spaceTrim("\n Parameter {".concat(parameter.name, "} is declared but not defined\n\n You can do one of these:\n - Remove declaration of {").concat(parameter.name, "}\n - Add prompt template that results in -> {").concat(parameter.name, "}\n\n ")));
908
+ throw new PipelineLogicError(spaceTrim.spaceTrim(function (block) { return "\n Parameter {".concat(parameter.name, "} is declared but not defined\n\n You can do one of these:\n - Remove declaration of {").concat(parameter.name, "}\n - Add prompt template that results in -> {").concat(parameter.name, "}\n\n ").concat(block(pipelineIdentification), "\n "); }));
893
909
  }
894
910
  };
895
911
  try {
896
912
  // Note: Check each parameter individually
897
- for (var _f = __values(pipeline.parameters), _g = _f.next(); !_g.done; _g = _f.next()) {
898
- var parameter = _g.value;
913
+ for (var _d = __values(pipeline.parameters), _e = _d.next(); !_e.done; _e = _d.next()) {
914
+ var parameter = _e.value;
899
915
  _loop_1(parameter);
900
916
  }
901
917
  }
902
918
  catch (e_1_1) { e_1 = { error: e_1_1 }; }
903
919
  finally {
904
920
  try {
905
- if (_g && !_g.done && (_a = _f.return)) _a.call(_f);
921
+ if (_e && !_e.done && (_a = _d.return)) _a.call(_d);
906
922
  }
907
923
  finally { if (e_1) throw e_1.error; }
908
924
  }
@@ -914,70 +930,80 @@
914
930
  var name = _a.name;
915
931
  return name;
916
932
  }));
917
- try {
918
- // Note: Checking each template individually
919
- for (var _h = __values(pipeline.promptTemplates), _j = _h.next(); !_j.done; _j = _h.next()) {
920
- var template = _j.value;
921
- if (definedParameters.has(template.resultingParameterName)) {
922
- throw new PipelineLogicError("Parameter {".concat(template.resultingParameterName, "} is defined multiple times"));
933
+ var _loop_2 = function (template) {
934
+ var e_4, _h, e_5, _j;
935
+ if (definedParameters.has(template.resultingParameterName)) {
936
+ throw new PipelineLogicError(spaceTrim.spaceTrim(function (block) { return "\n Parameter {".concat(template.resultingParameterName, "} is defined multiple times\n\n ").concat(block(pipelineIdentification), "\n "); }));
937
+ }
938
+ if (RESERVED_PARAMETER_NAMES.includes(template.resultingParameterName)) {
939
+ throw new PipelineLogicError(spaceTrim.spaceTrim(function (block) { return "\n Parameter name {".concat(template.resultingParameterName, "} is reserved, please use different name\n\n ").concat(block(pipelineIdentification), "\n "); }));
940
+ }
941
+ definedParameters.add(template.resultingParameterName);
942
+ if (template.blockType === 'PROMPT_TEMPLATE' && template.modelRequirements.modelVariant === undefined) {
943
+ throw new PipelineLogicError(spaceTrim.spaceTrim(function (block) { return "\n\n You must specify MODEL VARIANT in the prompt template \"".concat(template.title, "\"\n\n For example:\n - MODEL VARIANT Chat\n - MODEL NAME `gpt-4-1106-preview`").concat(/* <- TODO: Dynamic listing of command examples */ '', "\n\n ").concat(block(pipelineIdentification), "\n "); }));
944
+ }
945
+ if (template.jokerParameterNames && template.jokerParameterNames.length > 0) {
946
+ if (!template.expectFormat &&
947
+ !template.expectations /* <- TODO: Require at least 1 -> min <- expectation to use jokers */) {
948
+ throw new PipelineLogicError(spaceTrim.spaceTrim(function (block) { return "\n Joker parameters are used for {".concat(template.resultingParameterName, "} but no expectations are defined\n\n ").concat(block(pipelineIdentification), "\n "); }));
923
949
  }
924
- if (RESERVED_PARAMETER_NAMES.includes(template.resultingParameterName)) {
925
- throw new PipelineLogicError("Parameter name {".concat(template.resultingParameterName, "} is reserved, please use different name"));
950
+ var _loop_4 = function (joker) {
951
+ if (!template.dependentParameterNames.includes(joker)) {
952
+ throw new PipelineLogicError(spaceTrim.spaceTrim(function (block) { return "\n Parameter {".concat(joker, "} is used for {").concat(template.resultingParameterName, "} as joker but not in `dependentParameterNames`\n\n ").concat(block(pipelineIdentification), "\n "); }));
953
+ }
954
+ };
955
+ try {
956
+ for (var _k = (e_4 = void 0, __values(template.jokerParameterNames)), _l = _k.next(); !_l.done; _l = _k.next()) {
957
+ var joker = _l.value;
958
+ _loop_4(joker);
959
+ }
926
960
  }
927
- definedParameters.add(template.resultingParameterName);
928
- if (template.blockType === 'PROMPT_TEMPLATE' && template.modelRequirements.modelVariant === undefined) {
929
- throw new PipelineLogicError(spaceTrim.spaceTrim("\n\n You must specify MODEL VARIANT in the prompt template \"".concat(template.title, "\"\n\n For example:\n - MODEL VARIANT Chat\n - MODEL NAME `gpt-4-1106-preview`\n\n ")));
961
+ catch (e_4_1) { e_4 = { error: e_4_1 }; }
962
+ finally {
963
+ try {
964
+ if (_l && !_l.done && (_h = _k.return)) _h.call(_k);
965
+ }
966
+ finally { if (e_4) throw e_4.error; }
930
967
  }
931
- if (template.jokerParameterNames && template.jokerParameterNames.length > 0) {
932
- if (!template.expectFormat &&
933
- !template.expectations /* <- TODO: Require at least 1 -> min <- expectation to use jokers */) {
934
- throw new PipelineLogicError("Joker parameters are used for {".concat(template.resultingParameterName, "} but no expectations are defined"));
968
+ }
969
+ if (template.expectations) {
970
+ var _loop_5 = function (unit, min, max) {
971
+ if (min !== undefined && max !== undefined && min > max) {
972
+ throw new PipelineLogicError(spaceTrim.spaceTrim(function (block) { return "\n Min expectation (=".concat(min, ") of ").concat(unit, " is higher than max expectation (=").concat(max, ")\n\n ").concat(block(pipelineIdentification), "\n "); }));
935
973
  }
936
- try {
937
- for (var _k = (e_3 = void 0, __values(template.jokerParameterNames)), _l = _k.next(); !_l.done; _l = _k.next()) {
938
- var joker = _l.value;
939
- if (!template.dependentParameterNames.includes(joker)) {
940
- throw new PipelineLogicError("Parameter {".concat(joker, "} is used for {").concat(template.resultingParameterName, "} as joker but not in dependentParameterNames"));
941
- }
942
- }
974
+ if (min !== undefined && min < 0) {
975
+ throw new PipelineLogicError(spaceTrim.spaceTrim(function (block) { return "\n Min expectation of ".concat(unit, " must be zero or positive\n\n ").concat(block(pipelineIdentification), "\n "); }));
943
976
  }
944
- catch (e_3_1) { e_3 = { error: e_3_1 }; }
945
- finally {
946
- try {
947
- if (_l && !_l.done && (_c = _k.return)) _c.call(_k);
948
- }
949
- finally { if (e_3) throw e_3.error; }
977
+ if (max !== undefined && max <= 0) {
978
+ throw new PipelineLogicError(spaceTrim.spaceTrim(function (block) { return "\n Max expectation of ".concat(unit, " must be positive\n\n ").concat(block(pipelineIdentification), "\n "); }));
979
+ }
980
+ };
981
+ try {
982
+ for (var _m = (e_5 = void 0, __values(Object.entries(template.expectations))), _o = _m.next(); !_o.done; _o = _m.next()) {
983
+ var _p = __read(_o.value, 2), unit = _p[0], _q = _p[1], min = _q.min, max = _q.max;
984
+ _loop_5(unit, min, max);
950
985
  }
951
986
  }
952
- if (template.expectations) {
987
+ catch (e_5_1) { e_5 = { error: e_5_1 }; }
988
+ finally {
953
989
  try {
954
- for (var _m = (e_4 = void 0, __values(Object.entries(template.expectations))), _o = _m.next(); !_o.done; _o = _m.next()) {
955
- var _p = __read(_o.value, 2), unit = _p[0], _q = _p[1], min = _q.min, max = _q.max;
956
- if (min !== undefined && max !== undefined && min > max) {
957
- throw new PipelineLogicError("Min expectation (=".concat(min, ") of ").concat(unit, " is higher than max expectation (=").concat(max, ")"));
958
- }
959
- if (min !== undefined && min < 0) {
960
- throw new PipelineLogicError("Min expectation of ".concat(unit, " must be zero or positive"));
961
- }
962
- if (max !== undefined && max <= 0) {
963
- throw new PipelineLogicError("Max expectation of ".concat(unit, " must be positive"));
964
- }
965
- }
966
- }
967
- catch (e_4_1) { e_4 = { error: e_4_1 }; }
968
- finally {
969
- try {
970
- if (_o && !_o.done && (_d = _m.return)) _d.call(_m);
971
- }
972
- finally { if (e_4) throw e_4.error; }
990
+ if (_o && !_o.done && (_j = _m.return)) _j.call(_m);
973
991
  }
992
+ finally { if (e_5) throw e_5.error; }
974
993
  }
975
994
  }
995
+ };
996
+ try {
997
+ // Note: Checking each template individually
998
+ for (var _f = __values(pipeline.promptTemplates), _g = _f.next(); !_g.done; _g = _f.next()) {
999
+ var template = _g.value;
1000
+ _loop_2(template);
1001
+ }
976
1002
  }
977
1003
  catch (e_2_1) { e_2 = { error: e_2_1 }; }
978
1004
  finally {
979
1005
  try {
980
- if (_j && !_j.done && (_b = _h.return)) _b.call(_h);
1006
+ if (_g && !_g.done && (_b = _f.return)) _b.call(_f);
981
1007
  }
982
1008
  finally { if (e_2) throw e_2.error; }
983
1009
  }
@@ -998,20 +1024,20 @@
998
1024
  resovedParameters = __spreadArray(__spreadArray([], __read(resovedParameters), false), [reservedParameterName], false);
999
1025
  }
1000
1026
  }
1001
- catch (e_5_1) { e_5 = { error: e_5_1 }; }
1027
+ catch (e_3_1) { e_3 = { error: e_3_1 }; }
1002
1028
  finally {
1003
1029
  try {
1004
- if (RESERVED_PARAMETER_NAMES_1_1 && !RESERVED_PARAMETER_NAMES_1_1.done && (_e = RESERVED_PARAMETER_NAMES_1.return)) _e.call(RESERVED_PARAMETER_NAMES_1);
1030
+ if (RESERVED_PARAMETER_NAMES_1_1 && !RESERVED_PARAMETER_NAMES_1_1.done && (_c = RESERVED_PARAMETER_NAMES_1.return)) _c.call(RESERVED_PARAMETER_NAMES_1);
1005
1031
  }
1006
- finally { if (e_5) throw e_5.error; }
1032
+ finally { if (e_3) throw e_3.error; }
1007
1033
  }
1008
1034
  var unresovedTemplates = __spreadArray([], __read(pipeline.promptTemplates), false);
1009
1035
  // <- TODO: [🧠][🥜]
1010
1036
  var loopLimit = LOOP_LIMIT;
1011
- var _loop_2 = function () {
1037
+ var _loop_3 = function () {
1012
1038
  if (loopLimit-- < 0) {
1013
1039
  // Note: Really UnexpectedError not LimitReachedError - this should not happen and be caught below
1014
- throw new UnexpectedError('Loop limit reached during detection of circular dependencies in `validatePipeline`');
1040
+ throw new UnexpectedError(spaceTrim.spaceTrim(function (block) { return "\n Loop limit reached during detection of circular dependencies in `validatePipeline`\n\n ".concat(block(pipelineIdentification), "\n "); }));
1015
1041
  }
1016
1042
  var currentlyResovedTemplates = unresovedTemplates.filter(function (template) {
1017
1043
  return template.dependentParameterNames.every(function (name) { return resovedParameters.includes(name); });
@@ -1026,7 +1052,7 @@
1026
1052
  .map(function (dependentParameterName) { return "{".concat(dependentParameterName, "}"); })
1027
1053
  .join(' and '));
1028
1054
  })
1029
- .join('\n')), "\n\n Resolved:\n ").concat(block(resovedParameters.map(function (name) { return "- Parameter {".concat(name, "}"); }).join('\n')), "\n "); }));
1055
+ .join('\n')), "\n\n Resolved:\n ").concat(block(resovedParameters.map(function (name) { return "- Parameter {".concat(name, "}"); }).join('\n')), "\n\n ").concat(block(pipelineIdentification), "\n "); }));
1030
1056
  }
1031
1057
  resovedParameters = __spreadArray(__spreadArray([], __read(resovedParameters), false), __read(currentlyResovedTemplates.map(function (_a) {
1032
1058
  var resultingParameterName = _a.resultingParameterName;
@@ -1035,7 +1061,7 @@
1035
1061
  unresovedTemplates = unresovedTemplates.filter(function (template) { return !currentlyResovedTemplates.includes(template); });
1036
1062
  };
1037
1063
  while (unresovedTemplates.length > 0) {
1038
- _loop_2();
1064
+ _loop_3();
1039
1065
  }
1040
1066
  return pipeline;
1041
1067
  }
@@ -1088,6 +1114,36 @@
1088
1114
  return ReferenceError;
1089
1115
  }(Error));
1090
1116
 
1117
/**
 * Parses the template and returns the list of all parameter names
 *
 * @param template the template with parameters in {curly} braces
 * @returns the set of unique parameter names (without the braces)
 */
function extractParameterNames(template) {
    // Each match is a whole "{name}" token; slice(1, -1) strips the braces.
    // Funneling the mapped names through Set deduplicates repeated parameters.
    var bareNames = Array.from(template.matchAll(/{\w+}/g), function (bracedName) {
        return bracedName[0].slice(1, -1);
    });
    return new Set(bareNames);
}
1143
+ /**
1144
+ * Note: This function was renamed from `extractParameters` to `extractParameterNames`
1145
+ */
1146
+
1091
1147
  /**
1092
1148
  * Unprepare just strips the preparation data of the pipeline
1093
1149
  */
@@ -1095,7 +1151,14 @@
1095
1151
  var personas = pipeline.personas, knowledgeSources = pipeline.knowledgeSources, promptTemplates = pipeline.promptTemplates;
1096
1152
  personas = personas.map(function (persona) { return (__assign(__assign({}, persona), { modelRequirements: undefined, preparationIds: undefined })); });
1097
1153
  knowledgeSources = knowledgeSources.map(function (knowledgeSource) { return (__assign(__assign({}, knowledgeSource), { preparationIds: undefined })); });
1098
- promptTemplates = promptTemplates.map(function (promptTemplate) { return (__assign(__assign({}, promptTemplate), { preparedContent: undefined })); });
1154
+ promptTemplates = promptTemplates.map(function (promptTemplate) {
1155
+ var dependentParameterNames = promptTemplate.dependentParameterNames;
1156
+ var parameterNames = extractParameterNames(promptTemplate.preparedContent || '');
1157
+ dependentParameterNames = dependentParameterNames.filter(function (dependentParameterName) { return !parameterNames.has(dependentParameterName); });
1158
+ var promptTemplateUnprepared = __assign(__assign({}, promptTemplate), { dependentParameterNames: dependentParameterNames });
1159
+ delete promptTemplateUnprepared.preparedContent;
1160
+ return promptTemplateUnprepared;
1161
+ });
1099
1162
  return __assign(__assign({}, pipeline), { promptTemplates: promptTemplates, knowledgeSources: knowledgeSources, knowledgePieces: [], personas: personas, preparations: [] });
1100
1163
  }
1101
1164
  /**
@@ -1592,33 +1655,6 @@
1592
1655
  * TODO: [🧠] Can this return type be better typed than void
1593
1656
  */
1594
1657
 
1595
- /**
1596
- * Parses the template and returns the list of all parameter names
1597
- *
1598
- * @param template the template with parameters in {curly} braces
1599
- * @returns the list of parameter names
1600
- */
1601
- function extractParameters(template) {
1602
- var e_1, _a;
1603
- var matches = template.matchAll(/{\w+}/g);
1604
- var parameterNames = new Set();
1605
- try {
1606
- for (var matches_1 = __values(matches), matches_1_1 = matches_1.next(); !matches_1_1.done; matches_1_1 = matches_1.next()) {
1607
- var match = matches_1_1.value;
1608
- var parameterName = match[0].slice(1, -1);
1609
- parameterNames.add(parameterName);
1610
- }
1611
- }
1612
- catch (e_1_1) { e_1 = { error: e_1_1 }; }
1613
- finally {
1614
- try {
1615
- if (matches_1_1 && !matches_1_1.done && (_a = matches_1.return)) _a.call(matches_1);
1616
- }
1617
- finally { if (e_1) throw e_1.error; }
1618
- }
1619
- return parameterNames;
1620
- }
1621
-
1622
1658
  /**
1623
1659
  * Parses the given script and returns the list of all used variables that are not defined in the script
1624
1660
  *
@@ -1674,12 +1710,12 @@
1674
1710
  * @returns the set of parameter names
1675
1711
  * @throws {ParsingError} if the script is invalid
1676
1712
  */
1677
- function extractParametersFromPromptTemplate(promptTemplate) {
1713
+ function extractParameterNamesFromPromptTemplate(promptTemplate) {
1678
1714
  var e_1, _a, e_2, _b, e_3, _c;
1679
- var title = promptTemplate.title, description = promptTemplate.description, blockType = promptTemplate.blockType, content = promptTemplate.content, jokerParameterNames = promptTemplate.jokerParameterNames;
1715
+ var title = promptTemplate.title, description = promptTemplate.description, blockType = promptTemplate.blockType, content = promptTemplate.content, preparedContent = promptTemplate.preparedContent, jokerParameterNames = promptTemplate.jokerParameterNames;
1680
1716
  var parameterNames = new Set();
1681
1717
  try {
1682
- for (var _d = __values(__spreadArray(__spreadArray(__spreadArray([], __read(extractParameters(title)), false), __read(extractParameters(description || '')), false), __read(extractParameters(content)), false)), _e = _d.next(); !_e.done; _e = _d.next()) {
1718
+ for (var _d = __values(__spreadArray(__spreadArray(__spreadArray(__spreadArray([], __read(extractParameterNames(title)), false), __read(extractParameterNames(description || '')), false), __read(extractParameterNames(content)), false), __read(extractParameterNames(preparedContent || '')), false)), _e = _d.next(); !_e.done; _e = _d.next()) {
1683
1719
  var parameterName = _e.value;
1684
1720
  parameterNames.add(parameterName);
1685
1721
  }
@@ -1719,6 +1755,8 @@
1719
1755
  }
1720
1756
  finally { if (e_3) throw e_3.error; }
1721
1757
  }
1758
+ parameterNames.delete('content');
1759
+ // <- Note {websiteContent} is used in `preparedContent`
1722
1760
  return parameterNames;
1723
1761
  }
1724
1762
  /**
@@ -1888,7 +1926,14 @@
1888
1926
  throw errors[0];
1889
1927
  }
1890
1928
  else if (errors.length > 1) {
1891
- throw new PipelineExecutionError(spaceTrim__default["default"](function (block) { return "\n All execution tools failed:\n\n ".concat(block(errors.map(function (error) { return "- ".concat(error.name || 'Error', ": ").concat(error.message); }).join('\n')), "\n\n "); }));
1929
+ throw new PipelineExecutionError(
1930
+ // TODO: Tell which execution tools failed like
1931
+ // 1) OpenAI throw PipelineExecutionError: Parameter {knowledge} is not defined
1932
+ // 2) AnthropicClaude throw PipelineExecutionError: Parameter {knowledge} is not defined
1933
+ // 3) ...
1934
+ spaceTrim__default["default"](function (block) { return "\n All execution tools failed:\n\n ".concat(block(errors
1935
+ .map(function (error, i) { return "".concat(i + 1, ") **").concat(error.name || 'Error', ":** ").concat(error.message); })
1936
+ .join('\n')), "\n\n "); }));
1892
1937
  }
1893
1938
  else {
1894
1939
  throw new PipelineExecutionError(spaceTrim__default["default"](function (block) { return "\n You have not provided any `LlmExecutionTools` that support model variant \"".concat(prompt.modelRequirements.modelVariant, "\n\n Available `LlmExecutionTools`:\n ").concat(block(_this.llmExecutionTools
@@ -2093,6 +2138,10 @@
2093
2138
  if (parameterValue === RESERVED_PARAMETER_MISSING_VALUE) {
2094
2139
  throw new UnexpectedError("Parameter {".concat(parameterName, "} has missing value"));
2095
2140
  }
2141
+ else if (parameterValue === RESERVED_PARAMETER_RESTRICTED) {
2142
+ // TODO: [🍵]
2143
+ throw new UnexpectedError("Parameter {".concat(parameterName, "} is restricted to use"));
2144
+ }
2096
2145
  }
2097
2146
  }
2098
2147
  catch (e_1_1) { e_1 = { error: e_1_1 }; }
@@ -2227,7 +2276,7 @@
2227
2276
  /**
2228
2277
  * The version of the Promptbook library
2229
2278
  */
2230
- var PROMPTBOOK_VERSION = '0.61.0-23';
2279
+ var PROMPTBOOK_VERSION = '0.61.0-25';
2231
2280
  // TODO: !!!! List here all the versions and annotate + put into script
2232
2281
 
2233
2282
  /**
@@ -2361,7 +2410,7 @@
2361
2410
  console.warn(spaceTrim.spaceTrim("\n Pipeline ".concat(rawPipeline.pipelineUrl || rawPipeline.sourceFile || rawPipeline.title, " is not prepared\n\n ").concat(rawPipeline.sourceFile, "\n\n It will be prepared ad-hoc before the first execution\n But it is recommended to prepare the pipeline during collection preparation\n\n @see more at https://ptbk.io/prepare-pipeline\n ")));
2362
2411
  }
2363
2412
  var pipelineExecutor = function (inputParameters, onProgress) { return __awaiter(_this, void 0, void 0, function () {
2364
- // TODO: !!!!! Extract to separate functions and files - ALL FUNCTIONS BELOW
2413
+ // TODO: !!! Extract to separate functions and files - ALL FUNCTIONS BELOW
2365
2414
  function getContextForTemplate(// <- TODO: [🧠][🥜]
2366
2415
  template) {
2367
2416
  return __awaiter(this, void 0, void 0, function () {
@@ -2412,6 +2461,7 @@
2412
2461
  currentDate = new Date().toISOString();
2413
2462
  modelName = RESERVED_PARAMETER_MISSING_VALUE;
2414
2463
  reservedParameters = {
2464
+ content: RESERVED_PARAMETER_RESTRICTED,
2415
2465
  context: context,
2416
2466
  knowledge: knowledge,
2417
2467
  samples: samples,
@@ -2465,7 +2515,7 @@
2465
2515
  _u.sent();
2466
2516
  _u.label = 2;
2467
2517
  case 2:
2468
- usedParameterNames = extractParametersFromPromptTemplate(currentTemplate);
2518
+ usedParameterNames = extractParameterNamesFromPromptTemplate(currentTemplate);
2469
2519
  dependentParameterNames = new Set(currentTemplate.dependentParameterNames);
2470
2520
  if (union(difference(usedParameterNames, dependentParameterNames), difference(dependentParameterNames, usedParameterNames)).size !== 0) {
2471
2521
  throw new UnexpectedError(spaceTrim.spaceTrim("\n Dependent parameters are not consistent used parameters:\n\n Dependent parameters:\n ".concat(Array.from(dependentParameterNames).join(', '), "\n\n Used parameters:\n ").concat(Array.from(usedParameterNames).join(', '), "\n\n ")));
@@ -2851,14 +2901,7 @@
2851
2901
  ) {
2852
2902
  // TODO: [🧠] Maybe put other blockTypes into report
2853
2903
  executionReport.promptExecutions.push({
2854
- prompt: {
2855
- title: currentTemplate.title /* <- Note: If title in pipeline contains emojis, pass it innto report */,
2856
- content: prompt.content,
2857
- modelRequirements: prompt.modelRequirements,
2858
- expectations: prompt.expectations,
2859
- expectFormat: prompt.expectFormat,
2860
- // <- Note: Do want to pass ONLY wanted information to the report
2861
- },
2904
+ prompt: __assign(__assign({ '!!! All information': null }, prompt), { '!!! Wanted information': null, title: currentTemplate.title /* <- Note: If title in pipeline contains emojis, pass it into report */, content: prompt.content, modelRequirements: prompt.modelRequirements, expectations: prompt.expectations, expectFormat: prompt.expectFormat }),
2862
2905
  result: result || undefined,
2863
2906
  error: expectError || undefined,
2864
2907
  });
@@ -3122,6 +3165,8 @@
3122
3165
  return pipelineExecutor;
3123
3166
  }
3124
3167
  /**
3168
+ * TODO: !!!! return `preparedPipeline` from execution
3169
+ * TODO: !!!! `isNotPreparedWarningSupressed`
3125
3170
  * TODO: Use isVerbose here (not only pass to `preparePipeline`)
3126
3171
  * TODO: [🪂] Use maxParallelCount here (not only pass to `preparePipeline`)
3127
3172
  * TODO: [♈] Probably move expectations from templates to parameters
@@ -3136,7 +3181,7 @@
3136
3181
  /**
3137
3182
  * @@@
3138
3183
  */
3139
- function prepareKnowledgeFromMarkdown(content /* <- TODO: [🖖] (?maybe not) Always the file */, options) {
3184
+ function prepareKnowledgeFromMarkdown(knowledgeContent /* <- TODO: [🖖] (?maybe not) Always the file */, options) {
3140
3185
  return __awaiter(this, void 0, void 0, function () {
3141
3186
  var llmTools, _a, maxParallelCount, _b, isVerbose, collection, prepareKnowledgeFromMarkdownExecutor, _c, prepareTitleExecutor, _d, prepareKeywordsExecutor, _e, result, outputParameters, knowledgePiecesRaw, knowledgeTextPieces, knowledge;
3142
3187
  var _f, _g, _h;
@@ -3174,7 +3219,7 @@
3174
3219
  llm: llmTools,
3175
3220
  },
3176
3221
  _h)]);
3177
- return [4 /*yield*/, prepareKnowledgeFromMarkdownExecutor({ content: content })];
3222
+ return [4 /*yield*/, prepareKnowledgeFromMarkdownExecutor({ knowledgeContent: knowledgeContent })];
3178
3223
  case 4:
3179
3224
  result = _j.sent();
3180
3225
  assertsExecutionSuccessful(result);
@@ -3187,25 +3232,25 @@
3187
3232
  return [4 /*yield*/, Promise.all(
3188
3233
  // TODO: [🪂] !! Do not send all at once but in chunks
3189
3234
  knowledgeTextPieces.map(function (knowledgeTextPiece, i) { return __awaiter(_this, void 0, void 0, function () {
3190
- var name, title, content, keywords, index, titleResult, _a, titleRaw, keywordsResult, _b, keywordsRaw, embeddingResult, error_1;
3235
+ var name, title, knowledgePieceContent, keywords, index, titleResult, _a, titleRaw, keywordsResult, _b, keywordsRaw, embeddingResult, error_1;
3191
3236
  return __generator(this, function (_c) {
3192
3237
  switch (_c.label) {
3193
3238
  case 0:
3194
3239
  name = "piece-".concat(i);
3195
3240
  title = spaceTrim__default["default"](knowledgeTextPiece.substring(0, 100));
3196
- content = spaceTrim__default["default"](knowledgeTextPiece);
3241
+ knowledgePieceContent = spaceTrim__default["default"](knowledgeTextPiece);
3197
3242
  keywords = [];
3198
3243
  index = [];
3199
3244
  _c.label = 1;
3200
3245
  case 1:
3201
3246
  _c.trys.push([1, 7, , 8]);
3202
- return [4 /*yield*/, prepareTitleExecutor({ content: content })];
3247
+ return [4 /*yield*/, prepareTitleExecutor({ knowledgePieceContent: knowledgePieceContent })];
3203
3248
  case 2:
3204
3249
  titleResult = _c.sent();
3205
3250
  _a = titleResult.outputParameters.title, titleRaw = _a === void 0 ? 'Untitled' : _a;
3206
3251
  title = spaceTrim__default["default"](titleRaw) /* <- TODO: Maybe do in pipeline */;
3207
3252
  name = titleToName(title);
3208
- return [4 /*yield*/, prepareKeywordsExecutor({ content: content })];
3253
+ return [4 /*yield*/, prepareKeywordsExecutor({ knowledgePieceContent: knowledgePieceContent })];
3209
3254
  case 3:
3210
3255
  keywordsResult = _c.sent();
3211
3256
  _b = keywordsResult.outputParameters.keywords, keywordsRaw = _b === void 0 ? '' : _b;
@@ -3223,7 +3268,7 @@
3223
3268
  case 4: return [4 /*yield*/, llmTools.callEmbeddingModel({
3224
3269
  title: "Embedding for ".concat(title) /* <- Note: No impact on embedding result itself, just for logging */,
3225
3270
  parameters: {},
3226
- content: content,
3271
+ content: knowledgePieceContent,
3227
3272
  modelRequirements: {
3228
3273
  modelVariant: 'EMBEDDING',
3229
3274
  },
@@ -3244,7 +3289,7 @@
3244
3289
  case 8: return [2 /*return*/, {
3245
3290
  name: name,
3246
3291
  title: title,
3247
- content: content,
3292
+ content: knowledgePieceContent,
3248
3293
  keywords: keywords,
3249
3294
  index: index,
3250
3295
  // <- TODO: [☀] sources,
@@ -3410,14 +3455,18 @@
3410
3455
  TODO_USE(parameters);
3411
3456
  promptTemplatesPrepared = new Array(promptTemplates.length);
3412
3457
  return [4 /*yield*/, forEachAsync(promptTemplates, { maxParallelCount: maxParallelCount /* <- TODO: [🪂] When there are subtasks, this maximum limit can be broken */ }, function (template, index) { return __awaiter(_this, void 0, void 0, function () {
3413
- var preparedContent, preparedTemplate;
3458
+ var dependentParameterNames, preparedContent, preparedTemplate;
3414
3459
  return __generator(this, function (_a) {
3460
+ dependentParameterNames = template.dependentParameterNames;
3415
3461
  preparedContent = undefined;
3416
- if (knowledgePiecesCount > 0) {
3462
+ if (knowledgePiecesCount > 0 && !dependentParameterNames.includes('knowledge')) {
3417
3463
  preparedContent = spaceTrim.spaceTrim("\n {content}\n\n ## Knowledge\n\n {knowledge}\n ");
3418
3464
  // <- TODO: [🧠][🧻] Customize shape/language/formatting of the addition to the prompt
3465
+ dependentParameterNames = __spreadArray(__spreadArray([], __read(dependentParameterNames), false), [
3466
+ 'knowledge',
3467
+ ], false);
3419
3468
  }
3420
- preparedTemplate = __assign(__assign({}, template), { preparedContent: preparedContent });
3469
+ preparedTemplate = __assign(__assign({}, template), { dependentParameterNames: dependentParameterNames, preparedContent: preparedContent });
3421
3470
  promptTemplatesPrepared[index] = preparedTemplate;
3422
3471
  return [2 /*return*/];
3423
3472
  });
@@ -4233,7 +4282,7 @@
4233
4282
  /**
4234
4283
  * Example usages of the PARAMETER command
4235
4284
  */
4236
- examples: ['PARAMETER {title} Title of the book', 'OUTPUT PARAMETER {content} Content of the book'],
4285
+ examples: ['PARAMETER {title} Title of the book', 'OUTPUT PARAMETER {websiteContent} Content of the book'],
4237
4286
  /**
4238
4287
  * Parses the PARAMETER command
4239
4288
  */
@@ -4332,11 +4381,8 @@
4332
4381
  /**
4333
4382
  * Name of the command
4334
4383
  */
4335
- name: 'VERSION',
4336
- /*
4337
- Note: [📇] No need to put here "PROMPTBOOK" alias here
4338
- aliasNames: ['PROMPTBOOK_VERSION'],
4339
- */
4384
+ name: 'PROMPTBOOK_VERSION',
4385
+ aliasNames: ['PTBK_VERSION', 'PTBK_V', 'PTBKV'],
4340
4386
  /**
4341
4387
  * BOILERPLATE command can be used in:
4342
4388
  */
@@ -4352,7 +4398,7 @@
4352
4398
  /**
4353
4399
  * Example usages of the PROMPTBOOK_VERSION command
4354
4400
  */
4355
- examples: ["PROMPTBOOK VERSION ".concat(PROMPTBOOK_VERSION), "VERSION ".concat(PROMPTBOOK_VERSION)],
4401
+ examples: ["PROMPTBOOK VERSION ".concat(PROMPTBOOK_VERSION), "PTBKV ".concat(PROMPTBOOK_VERSION)],
4356
4402
  /**
4357
4403
  * Parses the PROMPTBOOK_VERSION command
4358
4404
  */
@@ -4386,8 +4432,9 @@
4386
4432
  * Name of the command
4387
4433
  */
4388
4434
  name: 'URL',
4435
+ aliasNames: ['PIPELINE_URL'],
4389
4436
  /*
4390
- Note: [🛵] No need for alias name because it is already preprocessed
4437
+ Note: [🛵] No need for this alias name because it is already preprocessed
4391
4438
  aliasNames: ['HTTPS'],
4392
4439
  */
4393
4440
  /**
@@ -4406,6 +4453,7 @@
4406
4453
  * Example usages of the URL command
4407
4454
  */
4408
4455
  examples: [
4456
+ 'PIPELINE URL https://promptbook.studio/library/write-cv.ptbk.md',
4409
4457
  'URL https://promptbook.studio/library/write-cv.ptbk.md',
4410
4458
  'https://promptbook.studio/library/write-cv.ptbk.md',
4411
4459
  ],
@@ -4644,10 +4692,6 @@
4644
4692
  .split(' ')
4645
4693
  .map(function (part) { return part.trim(); })
4646
4694
  .filter(function (item) { return item !== ''; })
4647
- // Note: [📇]:
4648
- .filter(function (item) { return !/^PTBK$/i.test(item); })
4649
- .filter(function (item) { return !/^PIPELINE$/i.test(item); })
4650
- .filter(function (item) { return !/^PROMPTBOOK$/i.test(item); })
4651
4695
  .map(removeMarkdownFormatting)
4652
4696
  .map(function (item) { return item.trim(); });
4653
4697
  if (items.length === 0 || items[0] === '') {
@@ -5362,7 +5406,7 @@
5362
5406
  if (templateModelRequirements.modelVariant === undefined) {
5363
5407
  templateModelRequirements.modelVariant = 'CHAT';
5364
5408
  }
5365
- templateJson.dependentParameterNames = Array.from(extractParametersFromPromptTemplate(templateJson));
5409
+ templateJson.dependentParameterNames = Array.from(extractParameterNamesFromPromptTemplate(templateJson));
5366
5410
  // TODO: [🍧][❔] Remove this condition - modelRequirements should be put here via BLOCK command not removed when PROMPT_TEMPLATE
5367
5411
  if (templateJson.blockType !== 'PROMPT_TEMPLATE') {
5368
5412
  delete templateJson.modelRequirements;