@promptbook/node 0.61.0-17 → 0.61.0-18

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/esm/index.es.js CHANGED
@@ -395,7 +395,7 @@ function forEachAsync(array, options, callbackfunction) {
  });
  }
 
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-16",parameters:[{name:"content",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledge",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {content}",dependentParameterNames:["content"],resultingParameterName:"knowledge"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-16",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0-16",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {content}",dependentParameterNames:["content"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-16",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0-16",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should 
be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {content}",expectations:{words:{min:1,max:8}},dependentParameterNames:["content"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-16",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0-16",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-16",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-17",parameters:[{name:"content",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledge",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {content}",dependentParameterNames:["content"],resultingParameterName:"knowledge"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-17",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0-17",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {content}",dependentParameterNames:["content"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-17",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0-17",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should 
be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {content}",expectations:{words:{min:1,max:8}},dependentParameterNames:["content"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-17",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0-17",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-17",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
 
  /**
  * Prettify the html code
@@ -1071,79 +1071,94 @@ var ReferenceError$1 = /** @class */ (function (_super) {
  }(Error));
 
  /**
- * Library of promptbooks that groups together promptbooks for an application.
- * This implementation is a very thin wrapper around the Array / Map of promptbooks.
+ * Unprepare just strips the preparation data of the pipeline
+ */
+ function unpreparePipeline(pipeline) {
+ var personas = pipeline.personas, knowledgeSources = pipeline.knowledgeSources;
+ personas = personas.map(function (persona) { return (__assign(__assign({}, persona), { modelRequirements: undefined, preparationIds: undefined })); });
+ knowledgeSources = knowledgeSources.map(function (knowledgeSource) { return (__assign(__assign({}, knowledgeSource), { preparationIds: undefined })); });
+ return __assign(__assign({}, pipeline), { knowledgeSources: knowledgeSources, knowledgePieces: [], personas: personas, preparations: [] });
+ }
+ /**
+ * TODO: [🔼] !!! Export via `@promptbook/core`
+ * TODO: Write tests for `preparePipeline`
+ */
+
+ /**
+ * Library of pipelines that groups together pipelines for an application.
+ * This implementation is a very thin wrapper around the Array / Map of pipelines.
  *
  * @private use `createCollectionFromJson` instead
- * @see https://github.com/webgptorg/promptbook#promptbook-collection
+ * @see https://github.com/webgptorg/pipeline#pipeline-collection
  */
  var SimplePipelineCollection = /** @class */ (function () {
  /**
- * Constructs a pipeline collection from promptbooks
+ * Constructs a pipeline collection from pipelines
  *
- * @param promptbooks @@@
+ * @param pipelines @@@
  *
  * @private Use instead `createCollectionFromJson`
- * Note: During the construction logic of all promptbooks are validated
+ * Note: During the construction logic of all pipelines are validated
  * Note: It is not recommended to use this constructor directly, use `createCollectionFromJson` *(or other variant)* instead
  */
  function SimplePipelineCollection() {
  var e_1, _a;
- var promptbooks = [];
+ var pipelines = [];
  for (var _i = 0; _i < arguments.length; _i++) {
- promptbooks[_i] = arguments[_i];
+ pipelines[_i] = arguments[_i];
  }
  this.collection = new Map();
  try {
- for (var promptbooks_1 = __values(promptbooks), promptbooks_1_1 = promptbooks_1.next(); !promptbooks_1_1.done; promptbooks_1_1 = promptbooks_1.next()) {
- var promptbook = promptbooks_1_1.value;
- if (promptbook.pipelineUrl === undefined) {
- throw new ReferenceError$1(spaceTrim$1("\n Promptbook with name \"".concat(promptbook.title, "\" does not have defined URL\n\n File:\n ").concat(promptbook.sourceFile || 'Unknown', "\n\n Note: Promptbooks without URLs are called anonymous promptbooks\n They can be used as standalone promptbooks, but they cannot be referenced by other promptbooks\n And also they cannot be used in the pipeline collection\n\n ")));
+ for (var pipelines_1 = __values(pipelines), pipelines_1_1 = pipelines_1.next(); !pipelines_1_1.done; pipelines_1_1 = pipelines_1.next()) {
+ var pipeline = pipelines_1_1.value;
+ if (pipeline.pipelineUrl === undefined) {
+ throw new ReferenceError$1(spaceTrim$1("\n Pipeline with name \"".concat(pipeline.title, "\" does not have defined URL\n\n File:\n ").concat(pipeline.sourceFile || 'Unknown', "\n\n Note: Pipelines without URLs are called anonymous pipelines\n They can be used as standalone pipelines, but they cannot be referenced by other pipelines\n And also they cannot be used in the pipeline collection\n\n ")));
  }
- validatePipeline(promptbook);
+ validatePipeline(pipeline);
  // Note: [🦄]
- if (this.collection.has(promptbook.pipelineUrl) &&
- pipelineJsonToString(promptbook) !== pipelineJsonToString(this.collection.get(promptbook.pipelineUrl))) {
- var existing = this.collection.get(promptbook.pipelineUrl);
- throw new ReferenceError$1(spaceTrim$1("\n Promptbook with URL \"".concat(promptbook.pipelineUrl, "\" is already in the collection\n\n Conflicting files:\n ").concat(existing.sourceFile || 'Unknown', "\n ").concat(promptbook.sourceFile || 'Unknown', "\n\n Note: Promptbooks with the same URL are not allowed\n Only exepction is when the promptbooks are identical\n\n ")));
+ if (this.collection.has(pipeline.pipelineUrl) &&
+ pipelineJsonToString(unpreparePipeline(pipeline)) !==
+ pipelineJsonToString(unpreparePipeline(this.collection.get(pipeline.pipelineUrl)))) {
+ var existing = this.collection.get(pipeline.pipelineUrl);
+ throw new ReferenceError$1(spaceTrim$1("\n Pipeline with URL \"".concat(pipeline.pipelineUrl, "\" is already in the collection\n\n Conflicting files:\n ").concat(existing.sourceFile || 'Unknown', "\n ").concat(pipeline.sourceFile || 'Unknown', "\n\n Note: Pipelines with the same URL are not allowed\n Only exepction is when the pipelines are identical\n\n ")));
  }
- this.collection.set(promptbook.pipelineUrl, promptbook);
+ this.collection.set(pipeline.pipelineUrl, pipeline);
  }
  }
  catch (e_1_1) { e_1 = { error: e_1_1 }; }
  finally {
  try {
- if (promptbooks_1_1 && !promptbooks_1_1.done && (_a = promptbooks_1.return)) _a.call(promptbooks_1);
+ if (pipelines_1_1 && !pipelines_1_1.done && (_a = pipelines_1.return)) _a.call(pipelines_1);
  }
  finally { if (e_1) throw e_1.error; }
  }
  }
  /**
- * Gets all promptbooks in the collection
+ * Gets all pipelines in the collection
  */
  SimplePipelineCollection.prototype.listPipelines = function () {
  return Array.from(this.collection.keys());
  };
  /**
- * Gets promptbook by its URL
+ * Gets pipeline by its URL
  *
  * Note: This is not a direct fetching from the URL, but a lookup in the collection
  */
  SimplePipelineCollection.prototype.getPipelineByUrl = function (url) {
  var _this = this;
- var promptbook = this.collection.get(url);
- if (!promptbook) {
+ var pipeline = this.collection.get(url);
+ if (!pipeline) {
  if (this.listPipelines().length === 0) {
- throw new NotFoundError(spaceTrim$1("\n Promptbook with url \"".concat(url, "\" not found\n\n No promptbooks available\n ")));
+ throw new NotFoundError(spaceTrim$1("\n Pipeline with url \"".concat(url, "\" not found\n\n No pipelines available\n ")));
  }
- throw new NotFoundError(spaceTrim$1(function (block) { return "\n Promptbook with url \"".concat(url, "\" not found\n\n Available promptbooks:\n ").concat(block(_this.listPipelines()
+ throw new NotFoundError(spaceTrim$1(function (block) { return "\n Pipeline with url \"".concat(url, "\" not found\n\n Available pipelines:\n ").concat(block(_this.listPipelines()
  .map(function (pipelineUrl) { return "- ".concat(pipelineUrl); })
  .join('\n')), "\n\n "); }));
  }
- return promptbook;
+ return pipeline;
  };
  /**
- * Checks whether given prompt was defined in any promptbook in the collection
+ * Checks whether given prompt was defined in any pipeline in the collection
  */
  SimplePipelineCollection.prototype.isResponsibleForPrompt = function (prompt) {
  return true;
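
The hunk above introduces `unpreparePipeline` and relaxes the duplicate-URL check in `SimplePipelineCollection`: two entries with the same `pipelineUrl` are now compared only after their preparation data is stripped, so a prepared and an unprepared copy of the same pipeline no longer collide. A minimal sketch of that idea (plain JavaScript with simplified object shapes and `JSON.stringify` standing in for `pipelineJsonToString`, not the library's exact API):

```js
// Sketch only: strip preparation-related data so two copies of a pipeline can be compared.
function unpreparePipeline(pipeline) {
    return {
        ...pipeline,
        personas: pipeline.personas.map((persona) => ({ ...persona, modelRequirements: undefined, preparationIds: undefined })),
        knowledgeSources: pipeline.knowledgeSources.map((source) => ({ ...source, preparationIds: undefined })),
        knowledgePieces: [],
        preparations: [],
    };
}

// Two copies of one pipeline, one of them carrying preparation data:
const unprepared = {
    pipelineUrl: 'https://example.com/write-article.ptbk.md', // hypothetical URL
    personas: [],
    knowledgeSources: [],
    knowledgePieces: [],
    preparations: [],
};
const prepared = { ...unprepared, preparations: [{ id: 1 }] };

// Previously the collection compared the pipelines as-is and threw a ReferenceError;
// now both sides are unprepared first, so the two copies count as identical:
console.log(
    JSON.stringify(unpreparePipeline(prepared)) === JSON.stringify(unpreparePipeline(unprepared)),
); // true
```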
@@ -1918,7 +1933,7 @@ var MultipleLlmExecutionTools = /** @class */ (function () {
  throw new PipelineExecutionError(spaceTrim(function (block) { return "\n All execution tools failed:\n\n ".concat(block(errors.map(function (error) { return "- ".concat(error.name || 'Error', ": ").concat(error.message); }).join('\n')), "\n\n "); }));
  }
  else {
- throw new PipelineExecutionError(spaceTrim(function (block) { return "\n No execution tools available for model variant \"".concat(prompt.modelRequirements.modelVariant, "\".\n\n tl;dr\n\n You have provided no LLM Execution Tools that support model variant \"").concat(prompt.modelRequirements.modelVariant, ":\n ").concat(block(_this.llmExecutionTools
+ throw new PipelineExecutionError(spaceTrim(function (block) { return "\n You have not provided any `LlmExecutionTools` that support model variant \"".concat(prompt.modelRequirements.modelVariant, "\n\n Available `LlmExecutionTools`:\n ").concat(block(_this.llmExecutionTools
  .map(function (tools) { return "- ".concat(tools.title, " ").concat(tools.description || ''); })
  .join('\n')), "\n\n "); }));
  }
@@ -1997,7 +2012,7 @@ function joinLlmExecutionTools() {
  llmExecutionTools[_i] = arguments[_i];
  }
  if (llmExecutionTools.length === 0) {
- var warningMessage = spaceTrim("\n You have provided no LLM Execution Tools.\n This means that you won't be able to execute any prompts that require large language models like GPT-4 or Anthropic's Claude.\n\n Technically, it's not an error, but it's probably not what you want because it does not make sense to use Promptbook without language models.\n ");
+ var warningMessage = spaceTrim("\n You have not provided any `LlmExecutionTools`\n This means that you won't be able to execute any prompts that require large language models like GPT-4 or Anthropic's Claude.\n\n Technically, it's not an error, but it's probably not what you want because it does not make sense to use Promptbook without language models.\n ");
  // TODO: [🟥] Detect browser / node and make it colorfull
  console.warn(warningMessage);
  /*
@@ -2160,7 +2175,7 @@ function replaceParameters(template, parameters) {
  /**
  * The version of the Promptbook library
  */
- var PROMPTBOOK_VERSION = '0.61.0-16';
+ var PROMPTBOOK_VERSION = '0.61.0-17';
  // TODO: !!!! List here all the versions and annotate + put into script
 
  /**
@@ -5384,11 +5399,11 @@ function createCollectionFromPromise(promptbookSourcesPromiseOrFactory) {
  }
 
  /**
- * Constructs Promptbook from given directory
+ * Constructs Pipeline from given directory
  *
  * Note: Works only in Node.js environment because it reads the file system
  *
- * @param path - path to the directory with promptbooks
+ * @param path - path to the directory with pipelines
  * @param options - Misc options for the collection
  * @returns PipelineCollection
  */
@@ -5418,20 +5433,31 @@ function createCollectionFromDirectory(path, options) {
  }
  _a = options || {}, _b = _a.isRecursive, isRecursive = _b === void 0 ? true : _b, _c = _a.isVerbose, isVerbose = _c === void 0 ? false : _c, _d = _a.isLazyLoaded, isLazyLoaded = _d === void 0 ? false : _d, _e = _a.isCrashedOnError, isCrashedOnError = _e === void 0 ? true : _e;
  collection = createCollectionFromPromise(function () { return __awaiter(_this, void 0, void 0, function () {
- var fileNames, promptbooks, _loop_1, fileNames_1, fileNames_1_1, fileName, e_1_1;
+ var fileNames, pipelines, _loop_1, fileNames_1, fileNames_1_1, fileName, e_1_1;
  var e_1, _a;
  return __generator(this, function (_b) {
  switch (_b.label) {
  case 0:
  if (isVerbose) {
- console.info("Creating pipeline collection from path ".concat(path.split('\\').join('/')));
+ console.info(colors.cyan("Creating pipeline collection from path ".concat(path.split('\\').join('/'))));
  }
  return [4 /*yield*/, listAllFiles(path, isRecursive)];
  case 1:
  fileNames = _b.sent();
- promptbooks = [];
+ // Note: First load all .ptbk.json and then .ptbk.md files
+ // .ptbk.json can be prepared so it is faster to load
+ fileNames.sort(function (a, b) {
+ if (a.endsWith('.ptbk.json') && b.endsWith('.ptbk.md')) {
+ return -1;
+ }
+ if (a.endsWith('.ptbk.md') && b.endsWith('.ptbk.json')) {
+ return 1;
+ }
+ return 0;
+ });
+ pipelines = [];
  _loop_1 = function (fileName) {
- var sourceFile, promptbook, pipelineString, _c, _d, error_1, wrappedErrorMessage;
+ var sourceFile, pipeline, pipelineString, _c, _d, error_1, wrappedErrorMessage;
  return __generator(this, function (_e) {
  switch (_e.label) {
  case 0:
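
The main behavioural change in this hunk is the new sort in `createCollectionFromDirectory`: file names are reordered so that `.ptbk.json` files, which may already be prepared and are cheaper to load, are processed before `.ptbk.md` sources. A small illustration of the comparator's effect on hypothetical file names:

```js
// The same comparator as in the diff, applied to made-up file names:
const fileNames = [
    'books/write-article.ptbk.md',
    'books/write-article.ptbk.json',
    'books/summarize.ptbk.md',
];
fileNames.sort((a, b) => {
    if (a.endsWith('.ptbk.json') && b.endsWith('.ptbk.md')) {
        return -1;
    }
    if (a.endsWith('.ptbk.md') && b.endsWith('.ptbk.json')) {
        return 1;
    }
    return 0;
});
console.log(fileNames);
// -> [ 'books/write-article.ptbk.json', 'books/write-article.ptbk.md', 'books/summarize.ptbk.md' ]
// i.e. every .ptbk.json file is processed before any .ptbk.md file
```

Because the `.ptbk.json` variant of a pipeline is registered first, a later `.ptbk.md` copy with the same URL only has to pass the identical-after-`unpreparePipeline` check in `SimplePipelineCollection`, which appears to be the point of the earlier hunk's change to that check.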
@@ -5439,53 +5465,52 @@ function createCollectionFromDirectory(path, options) {
  _e.label = 1;
  case 1:
  _e.trys.push([1, 8, , 9]);
- promptbook = null;
+ pipeline = null;
  if (!fileName.endsWith('.ptbk.md')) return [3 /*break*/, 4];
  return [4 /*yield*/, readFile(fileName, 'utf8')];
  case 2:
  pipelineString = (_e.sent());
  return [4 /*yield*/, pipelineStringToJson(pipelineString, options)];
  case 3:
- promptbook = _e.sent();
- promptbook = __assign(__assign({}, promptbook), { sourceFile: sourceFile });
+ pipeline = _e.sent();
+ pipeline = __assign(__assign({}, pipeline), { sourceFile: sourceFile });
  return [3 /*break*/, 7];
  case 4:
  if (!fileName.endsWith('.ptbk.json')) return [3 /*break*/, 6];
- if (isVerbose) {
- console.info("Loading ".concat(fileName.split('\\').join('/')));
- }
  _d = (_c = JSON).parse;
  return [4 /*yield*/, readFile(fileName, 'utf8')];
  case 5:
  // TODO: Handle non-valid JSON files
- promptbook = _d.apply(_c, [_e.sent()]);
+ pipeline = _d.apply(_c, [_e.sent()]);
  // TODO: [🌗]
- promptbook = __assign(__assign({}, promptbook), { sourceFile: sourceFile });
+ pipeline = __assign(__assign({}, pipeline), { sourceFile: sourceFile });
  return [3 /*break*/, 7];
  case 6:
  if (isVerbose) {
- console.info("Skipping file ".concat(fileName.split('\\').join('/')));
+ console.info(colors.gray("Skipping file ".concat(fileName.split('\\').join('/'))));
  }
  _e.label = 7;
  case 7:
  // ---
- if (promptbook !== null) {
- if (!promptbook.pipelineUrl) {
+ if (pipeline !== null) {
+ if (!pipeline.pipelineUrl) {
  if (isVerbose) {
- console.info("Not loading ".concat(fileName.split('\\').join('/'), " - missing URL"));
+ console.info(colors.red("Can not load pipeline from ".concat(fileName
+ .split('\\')
+ .join('/'), " because of missing URL")));
  }
  }
  else {
- if (isVerbose) {
- console.info("Loading ".concat(fileName.split('\\').join('/')));
- }
  if (!isCrashedOnError) {
- // Note: Validate promptbook to check if it is logically correct to not crash on invalid promptbooks
+ // Note: Validate pipeline to check if it is logically correct to not crash on invalid pipelines
  // But be handled in current try-catch block
- validatePipeline(promptbook);
+ validatePipeline(pipeline);
+ }
+ if (isVerbose) {
+ console.info(colors.green("Loading ".concat(fileName.split('\\').join('/'))));
  }
- // Note: [🦄] Promptbook with same url uniqueness will be checked automatically in SimplePipelineCollection
- promptbooks.push(promptbook);
+ // Note: [🦄] Pipeline with same url uniqueness will be checked automatically in SimplePipelineCollection
+ pipelines.push(pipeline);
  }
  }
  return [3 /*break*/, 9];
@@ -5531,7 +5556,7 @@ function createCollectionFromDirectory(path, options) {
  }
  finally { if (e_1) throw e_1.error; }
  return [7 /*endfinally*/];
- case 9: return [2 /*return*/, promptbooks];
+ case 9: return [2 /*return*/, pipelines];
  }
  });
  }); });
@@ -5605,8 +5630,8 @@ function listAllFiles(path, isRecursive) {
  });
  }
  /**
- * TODO: !!!! [🧠] Library precompilation and do not mix markdown and json promptbooks
- * Note: [🟢] This code should never be published outside of `@promptbook/node`
+ * TODO: !!!! [🧠] Library precompilation and do not mix markdown and json pipelines
+ * Note: [🟢] This code should never be published outside of `@pipeline/node`
  */
 
  /**
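
Taken together, the `createCollectionFromDirectory` changes in this release (colored verbose logging, `.ptbk.json`-first loading, validation before the green "Loading" message) leave the call signature shown in this diff unchanged. For orientation, a hypothetical usage sketch; the import path and the await-ability of the collection methods are assumptions, only the option names and defaults are taken from the diff:

```js
// Hypothetical usage sketch; option names and defaults come from the diff above,
// the import path and the awaits on collection methods are assumptions.
import { createCollectionFromDirectory } from '@promptbook/node';

const collection = await createCollectionFromDirectory('./promptbook-collection', {
    isRecursive: true, // walk subdirectories (default)
    isVerbose: true, // print the colored "Creating pipeline collection..." / "Loading..." messages
    isLazyLoaded: false, // parse and validate everything up front
    isCrashedOnError: true, // throw on invalid pipelines instead of skipping them
});

// URLs of every pipeline found in the directory:
console.log(await collection.listPipelines());

// Look up one pipeline by its URL (a lookup in the collection, not a network fetch):
const pipeline = await collection.getPipelineByUrl(
    'https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md',
);
```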