@promptbook/node 0.61.0-16 → 0.61.0-18

This diff compares the contents of publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
package/esm/index.es.js CHANGED
@@ -4,6 +4,8 @@ import { join } from 'path';
  import spaceTrim, { spaceTrim as spaceTrim$1 } from 'spacetrim';
  import { format } from 'prettier';
  import parserHtml from 'prettier/parser-html';
+ import Anthropic from '@anthropic-ai/sdk';
+ import OpenAI from 'openai';
 
  /*! *****************************************************************************
  Copyright (c) Microsoft Corporation.
@@ -393,7 +395,7 @@ function forEachAsync(array, options, callbackfunction) {
  });
  }
 
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-15",parameters:[{name:"content",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledge",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {content}",dependentParameterNames:["content"],resultingParameterName:"knowledge"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-15",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0-15",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {content}",dependentParameterNames:["content"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-15",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0-15",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should 
be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {content}",expectations:{words:{min:1,max:8}},dependentParameterNames:["content"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-15",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0-15",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-15",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-17",parameters:[{name:"content",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledge",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {content}",dependentParameterNames:["content"],resultingParameterName:"knowledge"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-17",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0-17",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {content}",dependentParameterNames:["content"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-17",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0-17",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should 
be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {content}",expectations:{words:{min:1,max:8}},dependentParameterNames:["content"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-17",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0-17",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-17",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
 
  /**
  * Prettify the html code
@@ -1002,11 +1004,11 @@ function validatePipeline(pipeline) {
  throw new PipelineLogicError(spaceTrim$1(function (block) { return "\n\n Can not resolve some parameters:\n Either you are using a parameter that is not defined, or there are some circular dependencies.\n\n Can not resolve:\n ".concat(block(unresovedTemplates
  .map(function (_a) {
  var resultingParameterName = _a.resultingParameterName, dependentParameterNames = _a.dependentParameterNames;
- return "- {".concat(resultingParameterName, "} depends on ").concat(dependentParameterNames
+ return "- Parameter {".concat(resultingParameterName, "} which depends on ").concat(dependentParameterNames
  .map(function (dependentParameterName) { return "{".concat(dependentParameterName, "}"); })
- .join(', '));
+ .join(' and '));
  })
- .join('\n')), "\n\n Resolved:\n ").concat(block(resovedParameters.map(function (name) { return "- {".concat(name, "}"); }).join('\n')), "\n "); }));
+ .join('\n')), "\n\n Resolved:\n ").concat(block(resovedParameters.map(function (name) { return "- Parameter {".concat(name, "}"); }).join('\n')), "\n "); }));
  }
  resovedParameters = __spreadArray(__spreadArray([], __read(resovedParameters), false), __read(currentlyResovedTemplates.map(function (_a) {
  var resultingParameterName = _a.resultingParameterName;
@@ -1069,79 +1071,94 @@ var ReferenceError$1 = /** @class */ (function (_super) {
  }(Error));
 
  /**
- * Library of promptbooks that groups together promptbooks for an application.
- * This implementation is a very thin wrapper around the Array / Map of promptbooks.
+ * Unprepare just strips the preparation data of the pipeline
+ */
+ function unpreparePipeline(pipeline) {
+ var personas = pipeline.personas, knowledgeSources = pipeline.knowledgeSources;
+ personas = personas.map(function (persona) { return (__assign(__assign({}, persona), { modelRequirements: undefined, preparationIds: undefined })); });
+ knowledgeSources = knowledgeSources.map(function (knowledgeSource) { return (__assign(__assign({}, knowledgeSource), { preparationIds: undefined })); });
+ return __assign(__assign({}, pipeline), { knowledgeSources: knowledgeSources, knowledgePieces: [], personas: personas, preparations: [] });
+ }
+ /**
+ * TODO: [🔼] !!! Export via `@promptbook/core`
+ * TODO: Write tests for `preparePipeline`
+ */
+
+ /**
+ * Library of pipelines that groups together pipelines for an application.
+ * This implementation is a very thin wrapper around the Array / Map of pipelines.
  *
  * @private use `createCollectionFromJson` instead
- * @see https://github.com/webgptorg/promptbook#promptbook-collection
+ * @see https://github.com/webgptorg/pipeline#pipeline-collection
  */
  var SimplePipelineCollection = /** @class */ (function () {
  /**
- * Constructs a pipeline collection from promptbooks
+ * Constructs a pipeline collection from pipelines
  *
- * @param promptbooks @@@
+ * @param pipelines @@@
  *
  * @private Use instead `createCollectionFromJson`
- * Note: During the construction logic of all promptbooks are validated
+ * Note: During the construction logic of all pipelines are validated
  * Note: It is not recommended to use this constructor directly, use `createCollectionFromJson` *(or other variant)* instead
  */
  function SimplePipelineCollection() {
  var e_1, _a;
- var promptbooks = [];
+ var pipelines = [];
  for (var _i = 0; _i < arguments.length; _i++) {
- promptbooks[_i] = arguments[_i];
+ pipelines[_i] = arguments[_i];
  }
  this.collection = new Map();
  try {
- for (var promptbooks_1 = __values(promptbooks), promptbooks_1_1 = promptbooks_1.next(); !promptbooks_1_1.done; promptbooks_1_1 = promptbooks_1.next()) {
- var promptbook = promptbooks_1_1.value;
- if (promptbook.pipelineUrl === undefined) {
- throw new ReferenceError$1(spaceTrim$1("\n Promptbook with name \"".concat(promptbook.title, "\" does not have defined URL\n\n File:\n ").concat(promptbook.sourceFile || 'Unknown', "\n\n Note: Promptbooks without URLs are called anonymous promptbooks\n They can be used as standalone promptbooks, but they cannot be referenced by other promptbooks\n And also they cannot be used in the pipeline collection\n\n ")));
+ for (var pipelines_1 = __values(pipelines), pipelines_1_1 = pipelines_1.next(); !pipelines_1_1.done; pipelines_1_1 = pipelines_1.next()) {
+ var pipeline = pipelines_1_1.value;
+ if (pipeline.pipelineUrl === undefined) {
+ throw new ReferenceError$1(spaceTrim$1("\n Pipeline with name \"".concat(pipeline.title, "\" does not have defined URL\n\n File:\n ").concat(pipeline.sourceFile || 'Unknown', "\n\n Note: Pipelines without URLs are called anonymous pipelines\n They can be used as standalone pipelines, but they cannot be referenced by other pipelines\n And also they cannot be used in the pipeline collection\n\n ")));
  }
- validatePipeline(promptbook);
+ validatePipeline(pipeline);
  // Note: [🦄]
- if (this.collection.has(promptbook.pipelineUrl) &&
- pipelineJsonToString(promptbook) !== pipelineJsonToString(this.collection.get(promptbook.pipelineUrl))) {
- var existing = this.collection.get(promptbook.pipelineUrl);
- throw new ReferenceError$1(spaceTrim$1("\n Promptbook with URL \"".concat(promptbook.pipelineUrl, "\" is already in the collection\n\n Conflicting files:\n ").concat(existing.sourceFile || 'Unknown', "\n ").concat(promptbook.sourceFile || 'Unknown', "\n\n Note: Promptbooks with the same URL are not allowed\n Only exepction is when the promptbooks are identical\n\n ")));
+ if (this.collection.has(pipeline.pipelineUrl) &&
+ pipelineJsonToString(unpreparePipeline(pipeline)) !==
+ pipelineJsonToString(unpreparePipeline(this.collection.get(pipeline.pipelineUrl)))) {
+ var existing = this.collection.get(pipeline.pipelineUrl);
+ throw new ReferenceError$1(spaceTrim$1("\n Pipeline with URL \"".concat(pipeline.pipelineUrl, "\" is already in the collection\n\n Conflicting files:\n ").concat(existing.sourceFile || 'Unknown', "\n ").concat(pipeline.sourceFile || 'Unknown', "\n\n Note: Pipelines with the same URL are not allowed\n Only exepction is when the pipelines are identical\n\n ")));
  }
- this.collection.set(promptbook.pipelineUrl, promptbook);
+ this.collection.set(pipeline.pipelineUrl, pipeline);
  }
  }
  catch (e_1_1) { e_1 = { error: e_1_1 }; }
  finally {
  try {
- if (promptbooks_1_1 && !promptbooks_1_1.done && (_a = promptbooks_1.return)) _a.call(promptbooks_1);
+ if (pipelines_1_1 && !pipelines_1_1.done && (_a = pipelines_1.return)) _a.call(pipelines_1);
  }
  finally { if (e_1) throw e_1.error; }
  }
  }
  /**
- * Gets all promptbooks in the collection
+ * Gets all pipelines in the collection
  */
  SimplePipelineCollection.prototype.listPipelines = function () {
  return Array.from(this.collection.keys());
  };
  /**
- * Gets promptbook by its URL
+ * Gets pipeline by its URL
  *
  * Note: This is not a direct fetching from the URL, but a lookup in the collection
  */
  SimplePipelineCollection.prototype.getPipelineByUrl = function (url) {
  var _this = this;
- var promptbook = this.collection.get(url);
- if (!promptbook) {
+ var pipeline = this.collection.get(url);
+ if (!pipeline) {
  if (this.listPipelines().length === 0) {
- throw new NotFoundError(spaceTrim$1("\n Promptbook with url \"".concat(url, "\" not found\n\n No promptbooks available\n ")));
+ throw new NotFoundError(spaceTrim$1("\n Pipeline with url \"".concat(url, "\" not found\n\n No pipelines available\n ")));
  }
- throw new NotFoundError(spaceTrim$1(function (block) { return "\n Promptbook with url \"".concat(url, "\" not found\n\n Available promptbooks:\n ").concat(block(_this.listPipelines()
+ throw new NotFoundError(spaceTrim$1(function (block) { return "\n Pipeline with url \"".concat(url, "\" not found\n\n Available pipelines:\n ").concat(block(_this.listPipelines()
  .map(function (pipelineUrl) { return "- ".concat(pipelineUrl); })
  .join('\n')), "\n\n "); }));
  }
- return promptbook;
+ return pipeline;
  };
  /**
- * Checks whether given prompt was defined in any promptbook in the collection
+ * Checks whether given prompt was defined in any pipeline in the collection
  */
  SimplePipelineCollection.prototype.isResponsibleForPrompt = function (prompt) {
  return true;
@@ -1916,7 +1933,7 @@ var MultipleLlmExecutionTools = /** @class */ (function () {
  throw new PipelineExecutionError(spaceTrim(function (block) { return "\n All execution tools failed:\n\n ".concat(block(errors.map(function (error) { return "- ".concat(error.name || 'Error', ": ").concat(error.message); }).join('\n')), "\n\n "); }));
  }
  else {
- throw new PipelineExecutionError(spaceTrim(function (block) { return "\n No execution tools available for model variant \"".concat(prompt.modelRequirements.modelVariant, "\".\n\n tl;dr\n\n You have provided no LLM Execution Tools that support model variant \"").concat(prompt.modelRequirements.modelVariant, ":\n ").concat(block(_this.llmExecutionTools
+ throw new PipelineExecutionError(spaceTrim(function (block) { return "\n You have not provided any `LlmExecutionTools` that support model variant \"".concat(prompt.modelRequirements.modelVariant, "\n\n Available `LlmExecutionTools`:\n ").concat(block(_this.llmExecutionTools
  .map(function (tools) { return "- ".concat(tools.title, " ").concat(tools.description || ''); })
  .join('\n')), "\n\n "); }));
  }
@@ -1995,7 +2012,7 @@ function joinLlmExecutionTools() {
  llmExecutionTools[_i] = arguments[_i];
  }
  if (llmExecutionTools.length === 0) {
- var warningMessage = spaceTrim("\n You have provided no LLM Execution Tools.\n This means that you won't be able to execute any prompts that require large language models like GPT-4 or Anthropic's Claude.\n\n Technically, it's not an error, but it's probably not what you want because it does not make sense to use Promptbook without language models.\n ");
+ var warningMessage = spaceTrim("\n You have not provided any `LlmExecutionTools`\n This means that you won't be able to execute any prompts that require large language models like GPT-4 or Anthropic's Claude.\n\n Technically, it's not an error, but it's probably not what you want because it does not make sense to use Promptbook without language models.\n ");
  // TODO: [🟥] Detect browser / node and make it colorfull
  console.warn(warningMessage);
  /*
@@ -2029,9 +2046,11 @@ function isPipelinePrepared(pipeline) {
  // Note: Ignoring `pipeline.preparations` @@@
  // Note: Ignoring `pipeline.knowledgePieces` @@@
  if (!pipeline.personas.every(function (persona) { return persona.modelRequirements !== undefined; })) {
+ console.log('!!!!', 'Not all personas have modelRequirements');
  return false;
  }
  if (!pipeline.knowledgeSources.every(function (knowledgeSource) { return knowledgeSource.preparationIds !== undefined; })) {
+ console.log('!!!!', 'Not all knowledgeSources have preparationIds');
  return false;
  }
  // TODO: !!!!! Is context in each template
@@ -2040,6 +2059,7 @@ function isPipelinePrepared(pipeline) {
  return true;
  }
  /**
+ * TODO: [🏠] Maybe base this on `makeValidator`
  * TODO: [🔼] Export via core or utils
  * TODO: [🧊] Pipeline can be partially prepared, this should return true ONLY if fully prepared
  */
@@ -2155,7 +2175,7 @@ function replaceParameters(template, parameters) {
  /**
  * The version of the Promptbook library
  */
- var PROMPTBOOK_VERSION = '0.61.0-15';
+ var PROMPTBOOK_VERSION = '0.61.0-17';
  // TODO: !!!! List here all the versions and annotate + put into script
 
  /**
@@ -5379,11 +5399,11 @@ function createCollectionFromPromise(promptbookSourcesPromiseOrFactory) {
  }
 
  /**
- * Constructs Promptbook from given directory
+ * Constructs Pipeline from given directory
  *
  * Note: Works only in Node.js environment because it reads the file system
  *
- * @param path - path to the directory with promptbooks
+ * @param path - path to the directory with pipelines
  * @param options - Misc options for the collection
  * @returns PipelineCollection
  */
@@ -5413,20 +5433,31 @@ function createCollectionFromDirectory(path, options) {
  }
  _a = options || {}, _b = _a.isRecursive, isRecursive = _b === void 0 ? true : _b, _c = _a.isVerbose, isVerbose = _c === void 0 ? false : _c, _d = _a.isLazyLoaded, isLazyLoaded = _d === void 0 ? false : _d, _e = _a.isCrashedOnError, isCrashedOnError = _e === void 0 ? true : _e;
  collection = createCollectionFromPromise(function () { return __awaiter(_this, void 0, void 0, function () {
- var fileNames, promptbooks, _loop_1, fileNames_1, fileNames_1_1, fileName, e_1_1;
+ var fileNames, pipelines, _loop_1, fileNames_1, fileNames_1_1, fileName, e_1_1;
  var e_1, _a;
  return __generator(this, function (_b) {
  switch (_b.label) {
  case 0:
  if (isVerbose) {
- console.info("Creating pipeline collection from path ".concat(path.split('\\').join('/')));
+ console.info(colors.cyan("Creating pipeline collection from path ".concat(path.split('\\').join('/'))));
  }
  return [4 /*yield*/, listAllFiles(path, isRecursive)];
  case 1:
  fileNames = _b.sent();
- promptbooks = [];
+ // Note: First load all .ptbk.json and then .ptbk.md files
+ // .ptbk.json can be prepared so it is faster to load
+ fileNames.sort(function (a, b) {
+ if (a.endsWith('.ptbk.json') && b.endsWith('.ptbk.md')) {
+ return -1;
+ }
+ if (a.endsWith('.ptbk.md') && b.endsWith('.ptbk.json')) {
+ return 1;
+ }
+ return 0;
+ });
+ pipelines = [];
  _loop_1 = function (fileName) {
- var sourceFile, promptbook, pipelineString, _c, _d, error_1, wrappedErrorMessage;
+ var sourceFile, pipeline, pipelineString, _c, _d, error_1, wrappedErrorMessage;
  return __generator(this, function (_e) {
  switch (_e.label) {
  case 0:
@@ -5434,53 +5465,52 @@ function createCollectionFromDirectory(path, options) {
  _e.label = 1;
  case 1:
  _e.trys.push([1, 8, , 9]);
- promptbook = null;
+ pipeline = null;
  if (!fileName.endsWith('.ptbk.md')) return [3 /*break*/, 4];
  return [4 /*yield*/, readFile(fileName, 'utf8')];
  case 2:
  pipelineString = (_e.sent());
  return [4 /*yield*/, pipelineStringToJson(pipelineString, options)];
  case 3:
- promptbook = _e.sent();
- promptbook = __assign(__assign({}, promptbook), { sourceFile: sourceFile });
+ pipeline = _e.sent();
+ pipeline = __assign(__assign({}, pipeline), { sourceFile: sourceFile });
  return [3 /*break*/, 7];
  case 4:
  if (!fileName.endsWith('.ptbk.json')) return [3 /*break*/, 6];
- if (isVerbose) {
- console.info("Loading ".concat(fileName.split('\\').join('/')));
- }
  _d = (_c = JSON).parse;
  return [4 /*yield*/, readFile(fileName, 'utf8')];
  case 5:
  // TODO: Handle non-valid JSON files
- promptbook = _d.apply(_c, [_e.sent()]);
+ pipeline = _d.apply(_c, [_e.sent()]);
  // TODO: [🌗]
- promptbook = __assign(__assign({}, promptbook), { sourceFile: sourceFile });
+ pipeline = __assign(__assign({}, pipeline), { sourceFile: sourceFile });
  return [3 /*break*/, 7];
  case 6:
  if (isVerbose) {
- console.info("Skipping file ".concat(fileName.split('\\').join('/')));
+ console.info(colors.gray("Skipping file ".concat(fileName.split('\\').join('/'))));
  }
  _e.label = 7;
  case 7:
  // ---
- if (promptbook !== null) {
- if (!promptbook.pipelineUrl) {
+ if (pipeline !== null) {
+ if (!pipeline.pipelineUrl) {
  if (isVerbose) {
- console.info("Not loading ".concat(fileName.split('\\').join('/'), " - missing URL"));
+ console.info(colors.red("Can not load pipeline from ".concat(fileName
+ .split('\\')
+ .join('/'), " because of missing URL")));
  }
  }
  else {
- if (isVerbose) {
- console.info("Loading ".concat(fileName.split('\\').join('/')));
- }
  if (!isCrashedOnError) {
- // Note: Validate promptbook to check if it is logically correct to not crash on invalid promptbooks
+ // Note: Validate pipeline to check if it is logically correct to not crash on invalid pipelines
  // But be handled in current try-catch block
- validatePipeline(promptbook);
+ validatePipeline(pipeline);
+ }
+ if (isVerbose) {
+ console.info(colors.green("Loading ".concat(fileName.split('\\').join('/'))));
  }
- // Note: [🦄] Promptbook with same url uniqueness will be checked automatically in SimplePipelineCollection
- promptbooks.push(promptbook);
+ // Note: [🦄] Pipeline with same url uniqueness will be checked automatically in SimplePipelineCollection
+ pipelines.push(pipeline);
  }
  }
  return [3 /*break*/, 9];
@@ -5526,7 +5556,7 @@ function createCollectionFromDirectory(path, options) {
  }
  finally { if (e_1) throw e_1.error; }
  return [7 /*endfinally*/];
- case 9: return [2 /*return*/, promptbooks];
+ case 9: return [2 /*return*/, pipelines];
  }
  });
  }); });
@@ -5600,9 +5630,1090 @@ function listAllFiles(path, isRecursive) {
5600
5630
  });
5601
5631
  }
5602
5632
  /**
5603
- * TODO: !!!! [๐Ÿง ] Library precompilation and do not mix markdown and json promptbooks
5633
+ * TODO: !!!! [๐Ÿง ] Library precompilation and do not mix markdown and json pipelines
5634
+ * Note: [๐ŸŸข] This code should never be published outside of `@pipeline/node`
5635
+ */
5636
+
5637
+ /**
5638
+ * This error type indicates that you try to use a feature that is not available in the current environment
5639
+ */
5640
+ var EnvironmentMismatchError = /** @class */ (function (_super) {
5641
+ __extends(EnvironmentMismatchError, _super);
5642
+ function EnvironmentMismatchError(message) {
5643
+ var _this = _super.call(this, message) || this;
5644
+ _this.name = 'EnvironmentMismatchError';
5645
+ Object.setPrototypeOf(_this, EnvironmentMismatchError.prototype);
5646
+ return _this;
5647
+ }
5648
+ return EnvironmentMismatchError;
5649
+ }(Error));
5650
+
5651
+ /**
5652
+ * Helper of usage compute
5653
+ *
5654
+ * @param content the content of prompt or response
5655
+ * @returns part of PromptResultUsageCounts
5656
+ *
5657
+ * @private internal util of LlmExecutionTools
5658
+ */
5659
+ function computeUsageCounts(content) {
5660
+ return {
5661
+ charactersCount: { value: countCharacters(content) },
5662
+ wordsCount: { value: countWords(content) },
5663
+ sentencesCount: { value: countSentences(content) },
5664
+ linesCount: { value: countLines(content) },
5665
+ paragraphsCount: { value: countParagraphs(content) },
5666
+ pagesCount: { value: countPages(content) },
5667
+ };
5668
+ }
5669
+
5670
+ /**
5671
+ * Make UncertainNumber
5672
+ *
5673
+ * @param value
5674
+ *
5675
+ * @private utility for initializating UncertainNumber
5676
+ */
5677
+ function uncertainNumber(value) {
5678
+ if (value === null || value === undefined || Number.isNaN(NaN)) {
5679
+ return { value: 0, isUncertain: true };
5680
+ }
5681
+ return { value: value };
5682
+ }
5683
+
5684
+ /**
5685
+ * Get current date in ISO 8601 format
5686
+ *
5687
+ * @private This is internal util of the promptbook
5688
+ */
5689
+ function getCurrentIsoDate() {
5690
+ return new Date().toISOString();
5691
+ }
5692
+
5693
+ /**
5694
+ * Function computeUsage will create price per one token based on the string value found on openai page
5695
+ *
5696
+ * @private within the repository, used only as internal helper for `OPENAI_MODELS`
5697
+ */
5698
+ function computeUsage(value) {
5699
+ var _a = __read(value.split(' / '), 2), price = _a[0], tokens = _a[1];
5700
+ return parseFloat(price.replace('$', '')) / parseFloat(tokens.replace('M tokens', '')) / 1000000;
5701
+ }
5702
+
5703
+ /**
5704
+ * List of available Anthropic Claude models with pricing
5705
+ *
5706
+ * Note: Done at 2024-05-25
5707
+ *
5708
+ * @see https://docs.anthropic.com/en/docs/models-overview
5709
+ */
5710
+ var ANTHROPIC_CLAUDE_MODELS = [
5711
+ {
5712
+ modelVariant: 'CHAT',
5713
+ modelTitle: 'Claude 3 Opus',
5714
+ modelName: 'claude-3-opus-20240229',
5715
+ pricing: {
5716
+ prompt: computeUsage("$15.00 / 1M tokens"),
5717
+ output: computeUsage("$75.00 / 1M tokens"),
5718
+ },
5719
+ },
5720
+ {
5721
+ modelVariant: 'CHAT',
5722
+ modelTitle: 'Claude 3 Sonnet',
5723
+ modelName: 'claude-3-sonnet-20240229',
5724
+ pricing: {
5725
+ prompt: computeUsage("$3.00 / 1M tokens"),
5726
+ output: computeUsage("$15.00 / 1M tokens"),
5727
+ },
5728
+ },
5729
+ {
5730
+ modelVariant: 'CHAT',
5731
+ modelTitle: 'Claude 3 Haiku',
5732
+ modelName: ' claude-3-haiku-20240307',
5733
+ pricing: {
5734
+ prompt: computeUsage("$0.25 / 1M tokens"),
5735
+ output: computeUsage("$1.25 / 1M tokens"),
5736
+ },
5737
+ },
5738
+ {
5739
+ modelVariant: 'CHAT',
5740
+ modelTitle: 'Claude 2.1',
5741
+ modelName: 'claude-2.1',
5742
+ pricing: {
5743
+ prompt: computeUsage("$8.00 / 1M tokens"),
5744
+ output: computeUsage("$24.00 / 1M tokens"),
5745
+ },
5746
+ },
5747
+ {
5748
+ modelVariant: 'CHAT',
5749
+ modelTitle: 'Claude 2',
5750
+ modelName: 'claude-2.0',
5751
+ pricing: {
5752
+ prompt: computeUsage("$8.00 / 1M tokens"),
5753
+ output: computeUsage("$24.00 / 1M tokens"),
5754
+ },
5755
+ },
5756
+ {
5757
+ modelVariant: 'CHAT',
5758
+ modelTitle: ' Claude Instant 1.2',
5759
+ modelName: 'claude-instant-1.2',
5760
+ pricing: {
5761
+ prompt: computeUsage("$0.80 / 1M tokens"),
5762
+ output: computeUsage("$2.40 / 1M tokens"),
5763
+ },
5764
+ },
5765
+ // TODO: !!! Claude 1 and 2 has also completion versions - ask Hoagy
5766
+ ];
5767
+ /**
5768
+ * Note: [๐Ÿค–] Add models of new variant
5769
+ * TODO: [๐Ÿง ] !!! Add embedding models OR Anthropic has only chat+completion models?
5770
+ * TODO: [๐Ÿง ] Some mechanism to propagate unsureness
5771
+ * TODO: [๐Ÿง ][๐Ÿ‘ฎโ€โ™€๏ธ] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...
5772
+ * TODO: [๐Ÿ•š] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing
5773
+ */
5774
+
5775
+ /**
5776
+ * Execution Tools for calling Anthropic Claude API.
5777
+ */
5778
+ var AnthropicClaudeExecutionTools = /** @class */ (function () {
5779
+ /**
5780
+ * Creates Anthropic Claude Execution Tools.
5781
+ *
5782
+ * @param options which are relevant are directly passed to the Anthropic Claude client
5783
+ */
5784
+ function AnthropicClaudeExecutionTools(options) {
5785
+ if (options === void 0) { options = {}; }
5786
+ this.options = options;
5787
+ // Note: Passing only Anthropic Claude relevant options to Anthropic constructor
5788
+ var anthropicOptions = __assign({}, options);
5789
+ delete anthropicOptions.isVerbose;
5790
+ this.client = new Anthropic(anthropicOptions);
5791
+ }
5792
+ Object.defineProperty(AnthropicClaudeExecutionTools.prototype, "title", {
5793
+ get: function () {
5794
+ return 'Anthropic Claude';
5795
+ },
5796
+ enumerable: false,
5797
+ configurable: true
5798
+ });
5799
+ Object.defineProperty(AnthropicClaudeExecutionTools.prototype, "description", {
5800
+ get: function () {
5801
+ return 'Use all models provided by Anthropic Claude';
5802
+ },
5803
+ enumerable: false,
5804
+ configurable: true
5805
+ });
5806
+ /**
5807
+ * Calls Anthropic Claude API to use a chat model.
5808
+ */
5809
+ AnthropicClaudeExecutionTools.prototype.callChatModel = function (prompt) {
5810
+ return __awaiter(this, void 0, void 0, function () {
5811
+ var content, parameters, modelRequirements, rawRequest, start, complete, rawResponse, resultContent, usage;
5812
+ return __generator(this, function (_a) {
5813
+ switch (_a.label) {
5814
+ case 0:
5815
+ if (this.options.isVerbose) {
5816
+ console.info('๐Ÿ’ฌ Anthropic Claude callChatModel call');
5817
+ }
5818
+ content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
5819
+ // TODO: [โ˜‚] Use here more modelRequirements
5820
+ if (modelRequirements.modelVariant !== 'CHAT') {
5821
+ throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
5822
+ }
5823
+ rawRequest = {
5824
+ model: modelRequirements.modelName || this.getDefaultChatModel().modelName,
5825
+ max_tokens: modelRequirements.maxTokens || 4096,
5826
+ // <- TODO: [๐ŸŒพ] Make some global max cap for maxTokens
5827
+ temperature: modelRequirements.temperature,
5828
+ system: modelRequirements.systemMessage,
5829
+ // <- TODO: [๐Ÿˆ] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
5830
+ // <- Note: [๐Ÿง†]
5831
+ messages: [
5832
+ {
5833
+ role: 'user',
5834
+ content: replaceParameters(content, parameters),
5835
+ },
5836
+ ],
5837
+ // TODO: Is here some equivalent of user identification?> user: this.options.user,
5838
+ };
5839
+ start = getCurrentIsoDate();
5840
+ if (this.options.isVerbose) {
5841
+ console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
5842
+ }
5843
+ return [4 /*yield*/, this.client.messages.create(rawRequest)];
5844
+ case 1:
5845
+ rawResponse = _a.sent();
5846
+ if (this.options.isVerbose) {
5847
+ console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
5848
+ }
5849
+ if (!rawResponse.content[0]) {
5850
+ throw new PipelineExecutionError('No content from Anthropic Claude');
5851
+ }
5852
+ if (rawResponse.content.length > 1) {
5853
+ throw new PipelineExecutionError('More than one content blocks from Anthropic Claude');
5854
+ }
5855
+ resultContent = rawResponse.content[0].text;
5856
+ // eslint-disable-next-line prefer-const
5857
+ complete = getCurrentIsoDate();
5858
+ usage = {
5859
+ price: { value: 0, isUncertain: true } /* <- TODO: [๐Ÿž] Compute usage */,
5860
+ input: __assign({ tokensCount: uncertainNumber(rawResponse.usage.input_tokens) }, computeUsageCounts(prompt.content)),
5861
+ output: __assign({ tokensCount: uncertainNumber(rawResponse.usage.output_tokens) }, computeUsageCounts(prompt.content)),
5862
+ };
5863
+ return [2 /*return*/, {
5864
+ content: resultContent,
5865
+ modelName: rawResponse.model,
5866
+ timing: {
5867
+ start: start,
5868
+ complete: complete,
5869
+ },
5870
+ usage: usage,
5871
+ rawResponse: rawResponse,
5872
+ // <- [๐Ÿคนโ€โ™‚๏ธ]
5873
+ }];
5874
+ }
5875
+ });
5876
+ });
5877
+ };
5878
+ /*
5879
+ TODO: [๐Ÿ‘]
5880
+ public async callCompletionModel(
5881
+ prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements'>,
5882
+ ): Promise<PromptCompletionResult> {
5883
+
5884
+ if (this.options.isVerbose) {
5885
+ console.info('๐Ÿ–‹ Anthropic Claude callCompletionModel call');
5886
+ }
5887
+
5888
+ const { content, parameters, modelRequirements } = prompt;
5889
+
5890
+ // TODO: [โ˜‚] Use here more modelRequirements
5891
+ if (modelRequirements.modelVariant !== 'COMPLETION') {
5892
+ throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
5893
+ }
5894
+
5895
+ const model = modelRequirements.modelName || this.getDefaultChatModel().modelName;
5896
+ const modelSettings = {
5897
+ model: rawResponse.model || model,
5898
+ max_tokens: modelRequirements.maxTokens || 2000, // <- Note: 2000 is for lagacy reasons
5899
+ // <- TODO: [๐ŸŒพ] Make some global max cap for maxTokens
5900
+ // <- TODO: Use here `systemMessage`, `temperature` and `seed`
5901
+ };
5902
+
5903
+ const rawRequest: xxxx.Completions.CompletionCreateParamsNonStreaming = {
5904
+ ...modelSettings,
5905
+ prompt: replaceParameters(content, parameters),
5906
+ user: this.options.user,
5907
+ };
5908
+ const start: string_date_iso8601 = getCurrentIsoDate();
5909
+ let complete: string_date_iso8601;
5910
+
5911
+ if (this.options.isVerbose) {
5912
+ console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
5913
+ }
5914
+ const rawResponse = await this.client.completions.create(rawRequest);
5915
+ if (this.options.isVerbose) {
5916
+ console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
5917
+ }
5918
+
5919
+ if (!rawResponse.choices[0]) {
5920
+ throw new PipelineExecutionError('No choises from Anthropic Claude');
5921
+ }
5922
+
5923
+ if (rawResponse.choices.length > 1) {
5924
+ // TODO: This should be maybe only warning
5925
+ throw new PipelineExecutionError('More than one choise from Anthropic Claude');
5926
+ }
5927
+
5928
+ const resultContent = rawResponse.choices[0].text;
5929
+ // eslint-disable-next-line prefer-const
5930
+ complete = getCurrentIsoDate();
5931
+ const usage = { price: 'UNKNOWN', inputTokens: 0, outputTokens: 0 /* <- TODO: [๐Ÿž] Compute usage * / } satisfies PromptResultUsage;
5932
+
5933
+
5934
+
5935
+ return {
5936
+ content: resultContent,
5937
+ modelName: rawResponse.model || model,
5938
+ timing: {
5939
+ start,
5940
+ complete,
5941
+ },
5942
+ usage,
5943
+ rawResponse,
5944
+ // <- [๐Ÿคนโ€โ™‚๏ธ]
5945
+ };
5946
+ }
5947
+ */
5948
+ // <- Note: [๐Ÿค–] callXxxModel
5949
+ /**
5950
+ * Get the model that should be used as default
5951
+ */
5952
+ AnthropicClaudeExecutionTools.prototype.getDefaultModel = function (defaultModelName) {
5953
+ var model = ANTHROPIC_CLAUDE_MODELS.find(function (_a) {
5954
+ var modelName = _a.modelName;
5955
+ return modelName.startsWith(defaultModelName);
5956
+ });
5957
+ if (model === undefined) {
5958
+ throw new UnexpectedError(spaceTrim(function (block) {
5959
+ return "\n Cannot find model in OpenAI models with name \"".concat(defaultModelName, "\" which should be used as default.\n\n Available models:\n ").concat(block(ANTHROPIC_CLAUDE_MODELS.map(function (_a) {
5960
+ var modelName = _a.modelName;
5961
+ return "- \"".concat(modelName, "\"");
5962
+ }).join('\n')), "\n\n ");
5963
+ }));
5964
+ }
5965
+ return model;
5966
+ };
5967
+ /**
5968
+ * Default model for chat variant.
5969
+ */
5970
+ AnthropicClaudeExecutionTools.prototype.getDefaultChatModel = function () {
5971
+ return this.getDefaultModel('claude-3-opus');
5972
+ };
5973
+ // <- Note: [๐Ÿค–] getDefaultXxxModel
5974
+ /**
5975
+ * List all available Anthropic Claude models that can be used
5976
+ */
5977
+ AnthropicClaudeExecutionTools.prototype.listModels = function () {
5978
+ return ANTHROPIC_CLAUDE_MODELS;
5979
+ };
5980
+ return AnthropicClaudeExecutionTools;
5981
+ }());
5982
+ /**
5983
+ * TODO: !!!! [๐Ÿ†] JSON mode
5984
+ * TODO: [๐Ÿง ] Maybe handle errors via transformAnthropicError (like transformAzureError)
5985
+ * TODO: Maybe Create some common util for callChatModel and callCompletionModel
5986
+ * TODO: Maybe make custom OpenaiError
5987
+ * TODO: [๐Ÿง ][๐Ÿˆ] Maybe use `isDeterministic` from options
5988
+ */
5989
+
5990
+ /**
5991
+ * List of available OpenAI models with pricing
5992
+ *
5993
+ * Note: Done at 2024-05-20
5994
+ *
5995
+ * @see https://platform.openai.com/docs/models/
5996
+ * @see https://openai.com/api/pricing/
5997
+ */
5998
+ var OPENAI_MODELS = [
5999
+ /*/
6000
+ {
6001
+ modelTitle: 'dall-e-3',
6002
+ modelName: 'dall-e-3',
6003
+ },
6004
+ /**/
6005
+ /*/
6006
+ {
6007
+ modelTitle: 'whisper-1',
6008
+ modelName: 'whisper-1',
6009
+ },
6010
+ /**/
6011
+ /**/
6012
+ {
6013
+ modelVariant: 'COMPLETION',
6014
+ modelTitle: 'davinci-002',
6015
+ modelName: 'davinci-002',
6016
+ pricing: {
6017
+ prompt: computeUsage("$2.00 / 1M tokens"),
6018
+ output: computeUsage("$2.00 / 1M tokens"), // <- not sure
6019
+ },
6020
+ },
6021
+ /**/
6022
+ /*/
6023
+ {
6024
+ modelTitle: 'dall-e-2',
6025
+ modelName: 'dall-e-2',
6026
+ },
6027
+ /**/
6028
+ /**/
6029
+ {
6030
+ modelVariant: 'CHAT',
6031
+ modelTitle: 'gpt-3.5-turbo-16k',
6032
+ modelName: 'gpt-3.5-turbo-16k',
6033
+ pricing: {
6034
+ prompt: computeUsage("$3.00 / 1M tokens"),
6035
+ output: computeUsage("$4.00 / 1M tokens"),
6036
+ },
6037
+ },
6038
+ /**/
6039
+ /*/
6040
+ {
6041
+ modelTitle: 'tts-1-hd-1106',
6042
+ modelName: 'tts-1-hd-1106',
6043
+ },
6044
+ /**/
6045
+ /*/
6046
+ {
6047
+ modelTitle: 'tts-1-hd',
6048
+ modelName: 'tts-1-hd',
6049
+ },
6050
+ /**/
6051
+ /**/
6052
+ {
6053
+ modelVariant: 'CHAT',
6054
+ modelTitle: 'gpt-4',
6055
+ modelName: 'gpt-4',
6056
+ pricing: {
6057
+ prompt: computeUsage("$30.00 / 1M tokens"),
6058
+ output: computeUsage("$60.00 / 1M tokens"),
6059
+ },
6060
+ },
6061
+ /**/
6062
+ /**/
6063
+ {
6064
+ modelVariant: 'CHAT',
6065
+ modelTitle: 'gpt-4-32k',
6066
+ modelName: 'gpt-4-32k',
6067
+ pricing: {
6068
+ prompt: computeUsage("$60.00 / 1M tokens"),
6069
+ output: computeUsage("$120.00 / 1M tokens"),
6070
+ },
6071
+ },
6072
+ /**/
6073
+ /*/
6074
+ {
6075
+ modelVariant: 'CHAT',
6076
+ modelTitle: 'gpt-4-0613',
6077
+ modelName: 'gpt-4-0613',
6078
+ pricing: {
6079
+ prompt: computeUsage(` / 1M tokens`),
6080
+ output: computeUsage(` / 1M tokens`),
6081
+ },
6082
+ },
6083
+ /**/
6084
+ /**/
6085
+ {
6086
+ modelVariant: 'CHAT',
6087
+ modelTitle: 'gpt-4-turbo-2024-04-09',
6088
+ modelName: 'gpt-4-turbo-2024-04-09',
6089
+ pricing: {
6090
+ prompt: computeUsage("$10.00 / 1M tokens"),
6091
+ output: computeUsage("$30.00 / 1M tokens"),
6092
+ },
6093
+ },
6094
+ /**/
6095
+ /**/
6096
+ {
6097
+ modelVariant: 'CHAT',
6098
+ modelTitle: 'gpt-3.5-turbo-1106',
6099
+ modelName: 'gpt-3.5-turbo-1106',
6100
+ pricing: {
6101
+ prompt: computeUsage("$1.00 / 1M tokens"),
6102
+ output: computeUsage("$2.00 / 1M tokens"),
6103
+ },
6104
+ },
6105
+ /**/
6106
+ /**/
6107
+ {
6108
+ modelVariant: 'CHAT',
6109
+ modelTitle: 'gpt-4-turbo',
6110
+ modelName: 'gpt-4-turbo',
6111
+ pricing: {
6112
+ prompt: computeUsage("$10.00 / 1M tokens"),
6113
+ output: computeUsage("$30.00 / 1M tokens"),
6114
+ },
6115
+ },
6116
+ /**/
6117
+ /**/
6118
+ {
6119
+ modelVariant: 'COMPLETION',
6120
+ modelTitle: 'gpt-3.5-turbo-instruct-0914',
6121
+ modelName: 'gpt-3.5-turbo-instruct-0914',
6122
+ pricing: {
6123
+ prompt: computeUsage("$1.50 / 1M tokens"),
6124
+ output: computeUsage("$2.00 / 1M tokens"), // <- For gpt-3.5-turbo-instruct
6125
+ },
6126
+ },
6127
+ /**/
6128
+ /**/
6129
+ {
6130
+ modelVariant: 'COMPLETION',
6131
+ modelTitle: 'gpt-3.5-turbo-instruct',
6132
+ modelName: 'gpt-3.5-turbo-instruct',
6133
+ pricing: {
6134
+ prompt: computeUsage("$1.50 / 1M tokens"),
6135
+ output: computeUsage("$2.00 / 1M tokens"),
6136
+ },
6137
+ },
6138
+ /**/
6139
+ /*/
6140
+ {
6141
+ modelTitle: 'tts-1',
6142
+ modelName: 'tts-1',
6143
+ },
6144
+ /**/
6145
+ /**/
6146
+ {
6147
+ modelVariant: 'CHAT',
6148
+ modelTitle: 'gpt-3.5-turbo',
6149
+ modelName: 'gpt-3.5-turbo',
6150
+ pricing: {
6151
+ prompt: computeUsage("$3.00 / 1M tokens"),
6152
+ output: computeUsage("$6.00 / 1M tokens"), // <- Not sure, refer to gpt-3.5-turbo in Fine-tuning models
6153
+ },
6154
+ },
6155
+ /**/
6156
+ /**/
6157
+ {
6158
+ modelVariant: 'CHAT',
6159
+ modelTitle: 'gpt-3.5-turbo-0301',
6160
+ modelName: 'gpt-3.5-turbo-0301',
6161
+ pricing: {
6162
+ prompt: computeUsage("$1.50 / 1M tokens"),
6163
+ output: computeUsage("$2.00 / 1M tokens"),
6164
+ },
6165
+ },
6166
+ /**/
6167
+ /**/
6168
+ {
6169
+ modelVariant: 'COMPLETION',
6170
+ modelTitle: 'babbage-002',
6171
+ modelName: 'babbage-002',
6172
+ pricing: {
6173
+ prompt: computeUsage("$0.40 / 1M tokens"),
6174
+ output: computeUsage("$0.40 / 1M tokens"), // <- Not sure
6175
+ },
6176
+ },
6177
+ /**/
6178
+ /**/
6179
+ {
6180
+ modelVariant: 'CHAT',
6181
+ modelTitle: 'gpt-4-1106-preview',
6182
+ modelName: 'gpt-4-1106-preview',
6183
+ pricing: {
6184
+ prompt: computeUsage("$10.00 / 1M tokens"),
6185
+ output: computeUsage("$30.00 / 1M tokens"),
6186
+ },
6187
+ },
6188
+ /**/
6189
+ /**/
6190
+ {
6191
+ modelVariant: 'CHAT',
6192
+ modelTitle: 'gpt-4-0125-preview',
6193
+ modelName: 'gpt-4-0125-preview',
6194
+ pricing: {
6195
+ prompt: computeUsage("$10.00 / 1M tokens"),
6196
+ output: computeUsage("$30.00 / 1M tokens"),
6197
+ },
6198
+ },
6199
+ /**/
6200
+ /*/
6201
+ {
6202
+ modelTitle: 'tts-1-1106',
6203
+ modelName: 'tts-1-1106',
6204
+ },
6205
+ /**/
6206
+ /**/
6207
+ {
6208
+ modelVariant: 'CHAT',
6209
+ modelTitle: 'gpt-3.5-turbo-0125',
6210
+ modelName: 'gpt-3.5-turbo-0125',
6211
+ pricing: {
6212
+ prompt: computeUsage("$0.50 / 1M tokens"),
6213
+ output: computeUsage("$1.50 / 1M tokens"),
6214
+ },
6215
+ },
6216
+ /**/
6217
+ /**/
6218
+ {
6219
+ modelVariant: 'CHAT',
6220
+ modelTitle: 'gpt-4-turbo-preview',
6221
+ modelName: 'gpt-4-turbo-preview',
6222
+ pricing: {
6223
+ prompt: computeUsage("$10.00 / 1M tokens"),
6224
+ output: computeUsage("$30.00 / 1M tokens"), // <- Not sure, just for gpt-4-turbo
6225
+ },
6226
+ },
6227
+ /**/
6228
+ /**/
6229
+ {
6230
+ modelVariant: 'EMBEDDING',
6231
+ modelTitle: 'text-embedding-3-large',
6232
+ modelName: 'text-embedding-3-large',
6233
+ pricing: {
6234
+ prompt: computeUsage("$0.13 / 1M tokens"),
6235
+ // TODO: [๐Ÿ] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
6236
+ output: 0, // <- Note: [๐Ÿ†–] In Embedding models you dont pay for output
6237
+ },
6238
+ },
6239
+ /**/
6240
+ /**/
6241
+ {
6242
+ modelVariant: 'EMBEDDING',
6243
+ modelTitle: 'text-embedding-3-small',
6244
+ modelName: 'text-embedding-3-small',
6245
+ pricing: {
6246
+ prompt: computeUsage("$0.02 / 1M tokens"),
6247
+ // TODO: [๐Ÿ] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
6248
+ output: 0, // <- Note: [๐Ÿ†–] In Embedding models you dont pay for output
6249
+ },
6250
+ },
6251
+ /**/
6252
+ /**/
6253
+ {
6254
+ modelVariant: 'CHAT',
6255
+ modelTitle: 'gpt-3.5-turbo-0613',
6256
+ modelName: 'gpt-3.5-turbo-0613',
6257
+ pricing: {
6258
+ prompt: computeUsage("$1.50 / 1M tokens"),
6259
+ output: computeUsage("$2.00 / 1M tokens"),
6260
+ },
6261
+ },
6262
+ /**/
6263
+ /**/
6264
+ {
6265
+ modelVariant: 'EMBEDDING',
6266
+ modelTitle: 'text-embedding-ada-002',
6267
+ modelName: 'text-embedding-ada-002',
6268
+ pricing: {
6269
+ prompt: computeUsage("$0.1 / 1M tokens"),
6270
+ // TODO: [๐Ÿ] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
6271
+ output: 0, // <- Note: [๐Ÿ†–] In Embedding models you dont pay for output
6272
+ },
6273
+ },
6274
+ /**/
6275
+ /*/
6276
+ {
6277
+ modelVariant: 'CHAT',
6278
+ modelTitle: 'gpt-4-1106-vision-preview',
6279
+ modelName: 'gpt-4-1106-vision-preview',
6280
+ },
6281
+ /**/
6282
+ /*/
6283
+ {
6284
+ modelVariant: 'CHAT',
6285
+ modelTitle: 'gpt-4-vision-preview',
6286
+ modelName: 'gpt-4-vision-preview',
6287
+ pricing: {
6288
+ prompt: computeUsage(`$10.00 / 1M tokens`),
6289
+ output: computeUsage(`$30.00 / 1M tokens`),
6290
+ },
6291
+ },
6292
+ /**/
6293
+ /**/
6294
+ {
6295
+ modelVariant: 'CHAT',
6296
+ modelTitle: 'gpt-4o-2024-05-13',
6297
+ modelName: 'gpt-4o-2024-05-13',
6298
+ pricing: {
6299
+ prompt: computeUsage("$5.00 / 1M tokens"),
6300
+ output: computeUsage("$15.00 / 1M tokens"),
6301
+ },
6302
+ },
6303
+ /**/
6304
+ /**/
6305
+ {
6306
+ modelVariant: 'CHAT',
6307
+ modelTitle: 'gpt-4o',
6308
+ modelName: 'gpt-4o',
6309
+ pricing: {
6310
+ prompt: computeUsage("$5.00 / 1M tokens"),
6311
+ output: computeUsage("$15.00 / 1M tokens"),
6312
+ },
6313
+ },
6314
+ /**/
6315
+ /**/
6316
+ {
6317
+ modelVariant: 'CHAT',
6318
+ modelTitle: 'gpt-3.5-turbo-16k-0613',
6319
+ modelName: 'gpt-3.5-turbo-16k-0613',
6320
+ pricing: {
6321
+ prompt: computeUsage("$3.00 / 1M tokens"),
6322
+ output: computeUsage("$4.00 / 1M tokens"),
6323
+ },
6324
+ },
6325
+ /**/
6326
+ ];
6327
+ /**
6328
+ * Note: [🤖] Add models of new variant
6329
+ * TODO: [🧠] Some mechanism to propagate uncertainty
6330
+ * TODO: [🕚][👮‍♀️] Make this list dynamic - model names can be listed dynamically, but not modelVariant, legacy status, context length, or pricing
6331
+ * TODO: [🧠][👮‍♀️] Put more info here, like description, isVision, trainingDateCutoff, languages, strengths (top-level performance, intelligence, fluency, and understanding), contextWindow,...
6332
+ * @see https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4
6333
+ * @see https://openai.com/api/pricing/
6334
+ * @see /other/playground/playground.ts
6335
+ * TODO: [๐Ÿ“] Make better
6336
+ * TODO: Change model titles to human-readable form, e.g. "gpt-4-turbo-2024-04-09" -> "GPT-4 Turbo (2024-04-09)"
6337
+ * TODO: [🚸] Not all models are compatible with JSON mode, add this information here and use it
6338
+ */
6339
+
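+ /**
+  * Note: A minimal sketch (illustrative only, not part of the published API) of how the
+  * `OPENAI_MODELS` list and its per-token `pricing` values can be combined to estimate
+  * the cost of a call; `estimateCost` is a hypothetical helper name.
+  *
+  * @example
+  * function estimateCost(modelName, inputTokens, outputTokens) {
+  *     var modelInfo = OPENAI_MODELS.find(function (model) { return model.modelName === modelName; });
+  *     if (modelInfo === undefined || modelInfo.pricing === undefined) {
+  *         return undefined; // <- Unknown model or unknown pricing
+  *     }
+  *     return inputTokens * modelInfo.pricing.prompt + outputTokens * modelInfo.pricing.output;
+  * }
+  * estimateCost('gpt-4o', 1000, 500); // -> estimated price in USD
+  */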
6340
+ /**
6341
+ * Computes the usage of the OpenAI API based on the response from OpenAI
6342
+ *
6343
+ * @param promptContent The content of the prompt
6344
+ * @param resultContent The content of the result (for embedding or failed prompts, pass an empty string)
6345
+ * @param rawResponse The raw response from OpenAI API
6346
+ * @throws {PipelineExecutionError} If the usage is not defined in the response from OpenAI
6347
+ * @private internal util of `OpenAiExecutionTools`
6348
+ */
6349
+ function computeOpenaiUsage(promptContent, // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
6350
+ resultContent, rawResponse) {
6351
+ var _a, _b;
6352
+ if (rawResponse.usage === undefined) {
6353
+ throw new PipelineExecutionError('The usage is not defined in the response from OpenAI');
6354
+ }
6355
+ if (((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.prompt_tokens) === undefined) {
6356
+ throw new PipelineExecutionError('In OpenAI response `usage.prompt_tokens` is not defined');
6357
+ }
6358
+ var inputTokens = rawResponse.usage.prompt_tokens;
6359
+ var outputTokens = ((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completion_tokens) || 0;
6360
+ var modelInfo = OPENAI_MODELS.find(function (model) { return model.modelName === rawResponse.model; });
6361
+ var price;
6362
+ if (modelInfo === undefined || modelInfo.pricing === undefined) {
6363
+ price = uncertainNumber();
6364
+ }
6365
+ else {
6366
+ price = uncertainNumber(inputTokens * modelInfo.pricing.prompt + outputTokens * modelInfo.pricing.output);
6367
+ }
6368
+ return {
6369
+ price: price,
6370
+ input: __assign({ tokensCount: uncertainNumber(rawResponse.usage.prompt_tokens) }, computeUsageCounts(promptContent)),
6371
+ output: __assign({ tokensCount: uncertainNumber(outputTokens) }, computeUsageCounts(resultContent)),
6372
+ };
6373
+ }
6374
+
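+ /**
+  * Note: A hedged usage sketch of `computeOpenaiUsage` (illustrative only; the response
+  * object below is abbreviated, real ones come from the OpenAI SDK):
+  *
+  * @example
+  * var usage = computeOpenaiUsage('What is the capital of France?', 'Paris.', {
+  *     model: 'gpt-4o',
+  *     usage: { prompt_tokens: 7, completion_tokens: 2 },
+  * });
+  * // -> `usage.price` is computed from the OPENAI_MODELS pricing entry for "gpt-4o"
+  */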
6375
+ /**
6376
+ * Execution Tools for calling OpenAI API.
6377
+ */
6378
+ var OpenAiExecutionTools = /** @class */ (function () {
6379
+ /**
6380
+ * Creates OpenAI Execution Tools.
6381
+ *
6382
+ * @param options Options for the tools; the relevant ones are passed directly to the OpenAI client
6383
+ */
6384
+ function OpenAiExecutionTools(options) {
6385
+ if (options === void 0) { options = {}; }
6386
+ this.options = options;
6387
+ // Note: Passing only OpenAI-relevant options to the OpenAI constructor
6388
+ var openAiOptions = __assign({}, options);
6389
+ delete openAiOptions.isVerbose;
6390
+ delete openAiOptions.user;
6391
+ this.client = new OpenAI(__assign({}, openAiOptions));
6392
+ }
6393
+ Object.defineProperty(OpenAiExecutionTools.prototype, "title", {
6394
+ get: function () {
6395
+ return 'OpenAI';
6396
+ },
6397
+ enumerable: false,
6398
+ configurable: true
6399
+ });
6400
+ Object.defineProperty(OpenAiExecutionTools.prototype, "description", {
6401
+ get: function () {
6402
+ return 'Use all models provided by OpenAI';
6403
+ },
6404
+ enumerable: false,
6405
+ configurable: true
6406
+ });
6407
+ /**
6408
+ * Calls OpenAI API to use a chat model.
6409
+ */
6410
+ OpenAiExecutionTools.prototype.callChatModel = function (prompt) {
6411
+ return __awaiter(this, void 0, void 0, function () {
6412
+ var content, parameters, modelRequirements, expectFormat, model, modelSettings, rawRequest, start, complete, rawResponse, resultContent, usage;
6413
+ return __generator(this, function (_a) {
6414
+ switch (_a.label) {
6415
+ case 0:
6416
+ if (this.options.isVerbose) {
6417
+ console.info('💬 OpenAI callChatModel call', { prompt: prompt });
6418
+ }
6419
+ content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements, expectFormat = prompt.expectFormat;
6420
+ // TODO: [☂] Use more of the modelRequirements here
6421
+ if (modelRequirements.modelVariant !== 'CHAT') {
6422
+ throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
6423
+ }
6424
+ model = modelRequirements.modelName || this.getDefaultChatModel().modelName;
6425
+ modelSettings = {
6426
+ model: model,
6427
+ max_tokens: modelRequirements.maxTokens,
6428
+ // <- TODO: [🌾] Make some global max cap for maxTokens
6429
+ temperature: modelRequirements.temperature,
6430
+ // <- TODO: [๐Ÿˆ] Use `seed` here AND/OR use `isDeterministic` for the entire execution tools
6431
+ // <- Note: [🧆]
6432
+ };
6433
+ if (expectFormat === 'JSON') {
6434
+ modelSettings.response_format = {
6435
+ type: 'json_object',
6436
+ };
6437
+ }
6438
+ rawRequest = __assign(__assign({}, modelSettings), { messages: __spreadArray(__spreadArray([], __read((modelRequirements.systemMessage === undefined
6439
+ ? []
6440
+ : [
6441
+ {
6442
+ role: 'system',
6443
+ content: modelRequirements.systemMessage,
6444
+ },
6445
+ ])), false), [
6446
+ {
6447
+ role: 'user',
6448
+ content: replaceParameters(content, parameters),
6449
+ },
6450
+ ], false), user: this.options.user });
6451
+ start = getCurrentIsoDate();
6452
+ if (this.options.isVerbose) {
6453
+ console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
6454
+ }
6455
+ return [4 /*yield*/, this.client.chat.completions.create(rawRequest)];
6456
+ case 1:
6457
+ rawResponse = _a.sent();
6458
+ if (this.options.isVerbose) {
6459
+ console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
6460
+ }
6461
+ if (!rawResponse.choices[0]) {
6462
+ throw new PipelineExecutionError('No choices from OpenAI');
6463
+ }
6464
+ if (rawResponse.choices.length > 1) {
6465
+ // TODO: This should maybe be only a warning
6466
+ throw new PipelineExecutionError('More than one choice from OpenAI');
6467
+ }
6468
+ resultContent = rawResponse.choices[0].message.content;
6469
+ // eslint-disable-next-line prefer-const
6470
+ complete = getCurrentIsoDate();
6471
+ usage = computeOpenaiUsage(content, resultContent || '', rawResponse);
6472
+ if (resultContent === null) {
6473
+ throw new PipelineExecutionError('No response message from OpenAI');
6474
+ }
6475
+ return [2 /*return*/, {
6476
+ content: resultContent,
6477
+ modelName: rawResponse.model || model,
6478
+ timing: {
6479
+ start: start,
6480
+ complete: complete,
6481
+ },
6482
+ usage: usage,
6483
+ rawResponse: rawResponse,
6484
+ // <- [🤹‍♂️]
6485
+ }];
6486
+ }
6487
+ });
6488
+ });
6489
+ };
6490
+ /**
6491
+ * Calls OpenAI API to use a completion model.
6492
+ */
6493
+ OpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
6494
+ return __awaiter(this, void 0, void 0, function () {
6495
+ var content, parameters, modelRequirements, model, modelSettings, rawRequest, start, complete, rawResponse, resultContent, usage;
6496
+ return __generator(this, function (_a) {
6497
+ switch (_a.label) {
6498
+ case 0:
6499
+ if (this.options.isVerbose) {
6500
+ console.info('🖋 OpenAI callCompletionModel call', { prompt: prompt });
6501
+ }
6502
+ content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
6503
+ // TODO: [☂] Use more of the modelRequirements here
6504
+ if (modelRequirements.modelVariant !== 'COMPLETION') {
6505
+ throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
6506
+ }
6507
+ model = modelRequirements.modelName || this.getDefaultCompletionModel().modelName;
6508
+ modelSettings = {
6509
+ model: model,
6510
+ max_tokens: modelRequirements.maxTokens || 2000,
6511
+ // <- TODO: [🌾] Make some global max cap for maxTokens
6512
+ temperature: modelRequirements.temperature,
6513
+ // <- TODO: [๐Ÿˆ] Use `seed` here AND/OR use `isDeterministic` for the entire execution tools
6514
+ // <- Note: [🧆]
6515
+ };
6516
+ rawRequest = __assign(__assign({}, modelSettings), { prompt: replaceParameters(content, parameters), user: this.options.user });
6517
+ start = getCurrentIsoDate();
6518
+ if (this.options.isVerbose) {
6519
+ console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
6520
+ }
6521
+ return [4 /*yield*/, this.client.completions.create(rawRequest)];
6522
+ case 1:
6523
+ rawResponse = _a.sent();
6524
+ if (this.options.isVerbose) {
6525
+ console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
6526
+ }
6527
+ if (!rawResponse.choices[0]) {
6528
+ throw new PipelineExecutionError('No choices from OpenAI');
6529
+ }
6530
+ if (rawResponse.choices.length > 1) {
6531
+ // TODO: This should maybe be only a warning
6532
+ throw new PipelineExecutionError('More than one choice from OpenAI');
6533
+ }
6534
+ resultContent = rawResponse.choices[0].text;
6535
+ // eslint-disable-next-line prefer-const
6536
+ complete = getCurrentIsoDate();
6537
+ usage = computeOpenaiUsage(content, resultContent || '', rawResponse);
6538
+ return [2 /*return*/, {
6539
+ content: resultContent,
6540
+ modelName: rawResponse.model || model,
6541
+ timing: {
6542
+ start: start,
6543
+ complete: complete,
6544
+ },
6545
+ usage: usage,
6546
+ rawResponse: rawResponse,
6547
+ // <- [🤹‍♂️]
6548
+ }];
6549
+ }
6550
+ });
6551
+ });
6552
+ };
6553
+ /**
6554
+ * Calls OpenAI API to use an embedding model.
6555
+ */
6556
+ OpenAiExecutionTools.prototype.callEmbeddingModel = function (prompt) {
6557
+ return __awaiter(this, void 0, void 0, function () {
6558
+ var content, parameters, modelRequirements, model, rawRequest, start, complete, rawResponse, resultContent, usage;
6559
+ return __generator(this, function (_a) {
6560
+ switch (_a.label) {
6561
+ case 0:
6562
+ if (this.options.isVerbose) {
6563
+ console.info('🖋 OpenAI callEmbeddingModel call', { prompt: prompt });
6564
+ }
6565
+ content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
6566
+ // TODO: [โ˜‚] Use here more modelRequirements
6567
+ if (modelRequirements.modelVariant !== 'EMBEDDING') {
6568
+ throw new PipelineExecutionError('Use callEmbeddingModel only for EMBEDDING variant');
6569
+ }
6570
+ model = modelRequirements.modelName || this.getDefaultEmbeddingModel().modelName;
6571
+ rawRequest = {
6572
+ input: replaceParameters(content, parameters),
6573
+ model: model,
6574
+ // TODO: !!!! Test model 3 and dimensions
6575
+ };
6576
+ start = getCurrentIsoDate();
6577
+ if (this.options.isVerbose) {
6578
+ console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
6579
+ }
6580
+ return [4 /*yield*/, this.client.embeddings.create(rawRequest)];
6581
+ case 1:
6582
+ rawResponse = _a.sent();
6583
+ if (this.options.isVerbose) {
6584
+ console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
6585
+ }
6586
+ if (rawResponse.data.length !== 1) {
6587
+ throw new PipelineExecutionError("Expected exactly 1 data item in response, got ".concat(rawResponse.data.length));
6588
+ }
6589
+ resultContent = rawResponse.data[0].embedding;
6590
+ // eslint-disable-next-line prefer-const
6591
+ complete = getCurrentIsoDate();
6592
+ usage = computeOpenaiUsage(content, '', rawResponse);
6593
+ return [2 /*return*/, {
6594
+ content: resultContent,
6595
+ modelName: rawResponse.model || model,
6596
+ timing: {
6597
+ start: start,
6598
+ complete: complete,
6599
+ },
6600
+ usage: usage,
6601
+ rawResponse: rawResponse,
6602
+ // <- [🤹‍♂️]
6603
+ }];
6604
+ }
6605
+ });
6606
+ });
6607
+ };
6608
+ // <- Note: [🤖] callXxxModel
6609
+ /**
6610
+ * Get the model that should be used as default
6611
+ */
6612
+ OpenAiExecutionTools.prototype.getDefaultModel = function (defaultModelName) {
6613
+ var model = OPENAI_MODELS.find(function (_a) {
6614
+ var modelName = _a.modelName;
6615
+ return modelName === defaultModelName;
6616
+ });
6617
+ if (model === undefined) {
6618
+ throw new UnexpectedError(spaceTrim(function (block) {
6619
+ return "\n Cannot find model in OpenAI models with name \"".concat(defaultModelName, "\" which should be used as default.\n\n Available models:\n ").concat(block(OPENAI_MODELS.map(function (_a) {
6620
+ var modelName = _a.modelName;
6621
+ return "- \"".concat(modelName, "\"");
6622
+ }).join('\n')), "\n\n ");
6623
+ }));
6624
+ }
6625
+ return model;
6626
+ };
6627
+ /**
6628
+ * Default model for chat variant.
6629
+ */
6630
+ OpenAiExecutionTools.prototype.getDefaultChatModel = function () {
6631
+ return this.getDefaultModel('gpt-4o');
6632
+ };
6633
+ /**
6634
+ * Default model for completion variant.
6635
+ */
6636
+ OpenAiExecutionTools.prototype.getDefaultCompletionModel = function () {
6637
+ return this.getDefaultModel('gpt-3.5-turbo-instruct');
6638
+ };
6639
+ /**
6640
+ * Default model for embedding variant.
6641
+ */
6642
+ OpenAiExecutionTools.prototype.getDefaultEmbeddingModel = function () {
6643
+ return this.getDefaultModel('text-embedding-3-large');
6644
+ };
6645
+ // <- Note: [🤖] getDefaultXxxModel
6646
+ /**
6647
+ * List all available OpenAI models that can be used
6648
+ */
6649
+ OpenAiExecutionTools.prototype.listModels = function () {
6650
+ /*
6651
+ Note: Dynamic listing of the models
6652
+ const models = await this.client.models.list({});
6653
+
6654
+ console.log({ models });
6655
+ console.log(models.data);
6656
+ */
6657
+ return OPENAI_MODELS;
6658
+ };
6659
+ return OpenAiExecutionTools;
6660
+ }());
6661
+ /**
6662
+ * TODO: [🧠][🧙‍♂️] Maybe there can be some wizard for those who want to use just OpenAI
6663
+ * TODO: Maybe create some common util for callChatModel and callCompletionModel
6664
+ * TODO: Maybe make a custom OpenaiError
6665
+ * TODO: [🧠][๐Ÿˆ] Maybe use `isDeterministic` from options
6666
+ */
6667
+
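+ /**
+  * Note: A minimal usage sketch of `OpenAiExecutionTools` (illustrative only; the API key
+  * is a placeholder and the prompt object is reduced to the fields read above):
+  *
+  * @example
+  * var tools = new OpenAiExecutionTools({ apiKey: 'sk-...', isVerbose: true });
+  * var result = await tools.callChatModel({
+  *     content: 'Hello {name}!',
+  *     parameters: { name: 'world' },
+  *     modelRequirements: { modelVariant: 'CHAT', modelName: 'gpt-4o' },
+  * });
+  * // -> result.content, result.usage, result.timing
+  */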
6668
+ /**
6669
+ * @@@
6670
+ *
6671
+ * Note: This function is not cached, every call creates a new instance of `LlmExecutionTools`
6672
+ *
6673
+ * It looks for environment variables:
6674
+ * - `process.env.OPENAI_API_KEY`
6675
+ * - `process.env.ANTHROPIC_CLAUDE_API_KEY`
6676
+ *
6677
+ * @returns @@@
6678
+ */
6679
+ function createLlmToolsFromEnv(options) {
6680
+ if (options === void 0) { options = {}; }
6681
+ if (!isRunningInNode()) {
6682
+ throw new EnvironmentMismatchError('Function `createLlmToolsFromEnv` works only in Node.js environment');
6683
+ }
6684
+ var _a = options.isVerbose, isVerbose = _a === void 0 ? false : _a;
6685
+ var llmTools = [];
6686
+ if (typeof process.env.OPENAI_API_KEY === 'string') {
6687
+ llmTools.push(new OpenAiExecutionTools({
6688
+ isVerbose: isVerbose,
6689
+ apiKey: process.env.OPENAI_API_KEY,
6690
+ }));
6691
+ }
6692
+ if (typeof process.env.ANTHROPIC_CLAUDE_API_KEY === 'string') {
6693
+ llmTools.push(new AnthropicClaudeExecutionTools({
6694
+ isVerbose: isVerbose,
6695
+ apiKey: process.env.ANTHROPIC_CLAUDE_API_KEY,
6696
+ }));
6697
+ }
6698
+ if (llmTools.length === 0) {
6699
+ throw new Error(spaceTrim("\n No LLM tools found in the environment\n\n Please set one of environment variables:\n - OPENAI_API_KEY\n - ANTHROPIC_CLAUDE_API_KEY\n "));
6700
+ }
6701
+ else if (llmTools.length === 1) {
6702
+ return llmTools[0];
6703
+ }
6704
+ else {
6705
+ return joinLlmExecutionTools.apply(void 0, __spreadArray([], __read(llmTools), false));
6706
+ }
6707
+ }
6708
+ /**
6709
+ * TODO: [🔼] !!! Export via `@promptbook/node`
6710
+ * TODO: @@@ write discussion about this - wizard
6711
+ * TODO: Add Azure
6712
+ * TODO: [🧠] Which name is better `createLlmToolsFromEnv` or `createLlmToolsFromEnvironment`?
6713
+ * TODO: [🧠] Is there some meaningful way to test this util
6714
+ * TODO: [🧠] Maybe pass env as argument
5604
6715
  * Note: [🟢] This code should never be published outside of `@promptbook/node`
5605
6716
  */
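+ /**
+  * Note: A minimal usage sketch of `createLlmToolsFromEnv` (illustrative only; assumes at
+  * least one of the environment variables listed above is set):
+  *
+  * @example
+  * var llmTools = createLlmToolsFromEnv({ isVerbose: true });
+  * // -> a single `LlmExecutionTools` instance, or all found providers joined via `joinLlmExecutionTools`
+  */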
5606
6717
 
5607
- export { PROMPTBOOK_VERSION, createCollectionFromDirectory };
6718
+ export { PROMPTBOOK_VERSION, createCollectionFromDirectory, createLlmToolsFromEnv };
5608
6719
  //# sourceMappingURL=index.es.js.map