@promptbook/node 0.66.0-7 → 0.66.0-9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. package/esm/index.es.js +196 -1698
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/anthropic-claude.index.d.ts +0 -2
  4. package/esm/typings/src/_packages/azure-openai.index.d.ts +4 -0
  5. package/esm/typings/src/_packages/cli.index.d.ts +8 -0
  6. package/esm/typings/src/_packages/core.index.d.ts +2 -0
  7. package/esm/typings/src/_packages/types.index.d.ts +22 -14
  8. package/esm/typings/src/_packages/utils.index.d.ts +7 -7
  9. package/esm/typings/src/execution/LlmExecutionToolsConstructor.d.ts +1 -1
  10. package/esm/typings/src/llm-providers/_common/$llmToolsMetadataRegister.d.ts +2 -2
  11. package/esm/typings/src/llm-providers/_common/$llmToolsRegister.d.ts +2 -2
  12. package/esm/typings/src/llm-providers/_common/$registeredLlmToolsMessage.d.ts +9 -0
  13. package/esm/typings/src/llm-providers/_common/LlmToolsConfiguration.d.ts +1 -1
  14. package/esm/typings/src/llm-providers/_common/LlmToolsMetadata.d.ts +1 -1
  15. package/esm/typings/src/llm-providers/_common/createLlmToolsFromConfigurationFromEnv.d.ts +1 -0
  16. package/esm/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts +1 -1
  17. package/esm/typings/src/llm-providers/anthropic-claude/register-constructor.d.ts +1 -0
  18. package/esm/typings/src/llm-providers/azure-openai/createAzureOpenAiExecutionTools.d.ts +15 -0
  19. package/esm/typings/src/llm-providers/azure-openai/register-configuration.d.ts +9 -0
  20. package/esm/typings/src/llm-providers/azure-openai/register-constructor.d.ts +12 -0
  21. package/esm/typings/src/llm-providers/openai/register-constructor.d.ts +1 -0
  22. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +1 -0
  23. package/esm/typings/src/llm-providers/remote/interfaces/{Promptbook_Server_Error.d.ts → PromptbookServer_Error.d.ts} +1 -1
  24. package/esm/typings/src/llm-providers/remote/interfaces/PromptbookServer_ListModels_Request.d.ts +34 -0
  25. package/esm/typings/src/llm-providers/remote/interfaces/PromptbookServer_ListModels_Response.d.ts +15 -0
  26. package/esm/typings/src/llm-providers/remote/interfaces/{Promptbook_Server_Progress.d.ts → PromptbookServer_Prompt_Progress.d.ts} +1 -1
  27. package/esm/typings/src/llm-providers/remote/interfaces/{Promptbook_Server_Request.d.ts → PromptbookServer_Prompt_Request.d.ts} +15 -3
  28. package/esm/typings/src/llm-providers/remote/interfaces/{Promptbook_Server_Response.d.ts → PromptbookServer_Prompt_Response.d.ts} +1 -1
  29. package/esm/typings/src/llm-providers/remote/interfaces/RemoteLlmExecutionToolsOptions.d.ts +1 -7
  30. package/esm/typings/src/llm-providers/remote/startRemoteServer.d.ts +1 -0
  31. package/esm/typings/src/utils/{Register.d.ts → $Register.d.ts} +6 -2
  32. package/esm/typings/src/utils/environment/{getGlobalScope.d.ts → $getGlobalScope.d.ts} +1 -1
  33. package/esm/typings/src/utils/organization/f.d.ts +6 -0
  34. package/package.json +2 -6
  35. package/umd/index.umd.js +200 -1700
  36. package/umd/index.umd.js.map +1 -1
  37. package/esm/typings/src/llm-providers/_common/config.d.ts +0 -14
  38. package/esm/typings/src/llm-providers/anthropic-claude/register1.d.ts +0 -4
  39. /package/esm/typings/src/llm-providers/mocked/{fakeTextToExpectations.d.ts → $fakeTextToExpectations.d.ts} +0 -0
  40. /package/esm/typings/src/utils/{currentDate.d.ts → $currentDate.d.ts} +0 -0
  41. /package/esm/typings/src/utils/environment/{isRunningInBrowser.d.ts → $isRunningInBrowser.d.ts} +0 -0
  42. /package/esm/typings/src/utils/environment/{isRunningInNode.d.ts → $isRunningInNode.d.ts} +0 -0
  43. /package/esm/typings/src/utils/environment/{isRunningInWebWorker.d.ts → $isRunningInWebWorker.d.ts} +0 -0
  44. /package/esm/typings/src/utils/files/{isDirectoryExisting.d.ts → $isDirectoryExisting.d.ts} +0 -0
  45. /package/esm/typings/src/utils/files/{isDirectoryExisting.test.d.ts → $isDirectoryExisting.test.d.ts} +0 -0
  46. /package/esm/typings/src/utils/files/{isFileExisting.d.ts → $isFileExisting.d.ts} +0 -0
  47. /package/esm/typings/src/utils/files/{isFileExisting.test.d.ts → $isFileExisting.test.d.ts} +0 -0
  48. /package/esm/typings/src/utils/files/{listAllFiles.d.ts → $listAllFiles.d.ts} +0 -0
  49. /package/esm/typings/src/utils/files/{listAllFiles.test.d.ts → $listAllFiles.test.d.ts} +0 -0
  50. /package/esm/typings/src/utils/random/{randomSeed.d.ts → $randomSeed.d.ts} +0 -0
package/esm/index.es.js CHANGED
@@ -8,16 +8,12 @@ import hexEncoder from 'crypto-js/enc-hex';
  import sha256 from 'crypto-js/sha256';
  import { join } from 'path/posix';
  import * as dotenv from 'dotenv';
- import { io } from 'socket.io-client';
- import Anthropic from '@anthropic-ai/sdk';
- import { OpenAIClient, AzureKeyCredential } from '@azure/openai';
- import OpenAI from 'openai';

  // ⚠️ WARNING: This code has been generated so that any manual changes will be overwritten
  /**
  * The version of the Promptbook library
  */
- var PROMPTBOOK_VERSION = '0.66.0-6';
+ var PROMPTBOOK_VERSION = '0.66.0-8';
  // TODO: !!!! List here all the versions and annotate + put into script

  /*! *****************************************************************************
@@ -185,6 +181,26 @@ function deepFreezeWithSameType(objectValue) {
  * TODO: [🧠] Is there a way how to meaningfully test this utility
  */

+ /**
+ * Returns the same value that is passed as argument.
+ * No side effects.
+ *
+ * Note: It can be usefull for:
+ *
+ * 1) Leveling indentation
+ * 2) Putting always-true or always-false conditions without getting eslint errors
+ *
+ * @param value any values
+ * @returns the same values
+ * @private within the repository
+ */
+ function just(value) {
+ if (value === undefined) {
+ return undefined;
+ }
+ return value;
+ }
+
  // <- TODO: [🧠] Better system for generator warnings - not always "code" and "by `@promptbook/cli`"
  /**
  * The maximum number of iterations for a loops
@@ -696,7 +712,7 @@ function forEachAsync(array, options, callbackfunction) {
  });
  }

- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.66.0-6",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.66.0-6",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.66.0-6",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.66.0-6",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the 
persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.66.0-8",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.66.0-8",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.66.0-8",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.66.0-8",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the 
persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];

  /**
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
@@ -1924,8 +1940,37 @@ var MultipleLlmExecutionTools = /** @class */ (function () {
  */
  MultipleLlmExecutionTools.prototype.checkConfiguration = function () {
  return __awaiter(this, void 0, void 0, function () {
- return __generator(this, function (_a) {
- return [2 /*return*/];
+ var _a, _b, llmExecutionTools, e_1_1;
+ var e_1, _c;
+ return __generator(this, function (_d) {
+ switch (_d.label) {
+ case 0:
+ _d.trys.push([0, 5, 6, 7]);
+ _a = __values(this.llmExecutionTools), _b = _a.next();
+ _d.label = 1;
+ case 1:
+ if (!!_b.done) return [3 /*break*/, 4];
+ llmExecutionTools = _b.value;
+ return [4 /*yield*/, llmExecutionTools.checkConfiguration()];
+ case 2:
+ _d.sent();
+ _d.label = 3;
+ case 3:
+ _b = _a.next();
+ return [3 /*break*/, 1];
+ case 4: return [3 /*break*/, 7];
+ case 5:
+ e_1_1 = _d.sent();
+ e_1 = { error: e_1_1 };
+ return [3 /*break*/, 7];
+ case 6:
+ try {
+ if (_b && !_b.done && (_c = _a.return)) _c.call(_a);
+ }
+ finally { if (e_1) throw e_1.error; }
+ return [7 /*endfinally*/];
+ case 7: return [2 /*return*/];
+ }
  });
  });
  };
@@ -1935,8 +1980,8 @@ var MultipleLlmExecutionTools = /** @class */ (function () {
  */
  MultipleLlmExecutionTools.prototype.listModels = function () {
  return __awaiter(this, void 0, void 0, function () {
- var availableModels, _a, _b, llmExecutionTools, models, e_1_1;
- var e_1, _c;
+ var availableModels, _a, _b, llmExecutionTools, models, e_2_1;
+ var e_2, _c;
  return __generator(this, function (_d) {
  switch (_d.label) {
  case 0:
@@ -1959,14 +2004,14 @@ var MultipleLlmExecutionTools = /** @class */ (function () {
  return [3 /*break*/, 2];
  case 5: return [3 /*break*/, 8];
  case 6:
- e_1_1 = _d.sent();
- e_1 = { error: e_1_1 };
+ e_2_1 = _d.sent();
+ e_2 = { error: e_2_1 };
  return [3 /*break*/, 8];
  case 7:
  try {
  if (_b && !_b.done && (_c = _a.return)) _c.call(_a);
  }
- finally { if (e_1) throw e_1.error; }
+ finally { if (e_2) throw e_2.error; }
  return [7 /*endfinally*/];
  case 8: return [2 /*return*/, availableModels];
  }
@@ -1999,8 +2044,8 @@ var MultipleLlmExecutionTools = /** @class */ (function () {
  */
  MultipleLlmExecutionTools.prototype.callCommonModel = function (prompt) {
  return __awaiter(this, void 0, void 0, function () {
- var errors, _a, _b, llmExecutionTools, _c, error_1, e_2_1;
- var e_2, _d;
+ var errors, _a, _b, llmExecutionTools, _c, error_1, e_3_1;
+ var e_3, _d;
  var _this = this;
  return __generator(this, function (_e) {
  switch (_e.label) {
@@ -2056,14 +2101,14 @@ var MultipleLlmExecutionTools = /** @class */ (function () {
  return [3 /*break*/, 2];
  case 14: return [3 /*break*/, 17];
  case 15:
- e_2_1 = _e.sent();
- e_2 = { error: e_2_1 };
+ e_3_1 = _e.sent();
+ e_3 = { error: e_3_1 };
  return [3 /*break*/, 17];
  case 16:
  try {
  if (_b && !_b.done && (_d = _a.return)) _d.call(_a);
  }
- finally { if (e_2) throw e_2.error; }
+ finally { if (e_3) throw e_3.error; }
  return [7 /*endfinally*/];
  case 17:
  if (errors.length === 1) {
@@ -6306,7 +6351,7 @@ var EnvironmentMismatchError = /** @class */ (function (_super) {
  /**
  * @@@
  *
- * Note: `$` is used to indicate that this function is not a pure function - it access global
+ * Note: `$` is used to indicate that this function is not a pure function - it access global scope
  *
  * @public exported from `@promptbook/utils`
  */
@@ -6320,41 +6365,45 @@ function $getGlobalScope() {
  /**
  * Register is @@@
  *
+ * Note: `$` is used to indicate that this function is not a pure function - it accesses and adds variables in global scope.
+ *
  * @private internal utility, exported are only signleton instances of this class
  */
- var Register = /** @class */ (function () {
- function Register(storage) {
- this.storage = storage;
+ var $Register = /** @class */ (function () {
+ function $Register(storageName) {
+ this.storageName = storageName;
+ storageName = "_promptbook_".concat(storageName);
+ var globalScope = $getGlobalScope();
+ if (globalScope[storageName] === undefined) {
+ globalScope[storageName] = [];
+ }
+ else if (!Array.isArray(globalScope[storageName])) {
+ throw new UnexpectedError("Expected (global) ".concat(storageName, " to be an array, but got ").concat(typeof globalScope[storageName]));
+ }
+ this.storage = globalScope[storageName];
  }
- Register.prototype.list = function () {
+ $Register.prototype.list = function () {
  // <- TODO: ReadonlyDeep<Array<TRegistered>>
  return this.storage;
  };
- Register.prototype.register = function (registered) {
+ $Register.prototype.register = function (registered) {
  // <- TODO: What to return here
  var packageName = registered.packageName, className = registered.className;
  var existingRegistrationIndex = this.storage.findIndex(function (item) { return item.packageName === packageName && item.className === className; });
  var existingRegistration = this.storage[existingRegistrationIndex];
- if (existingRegistration) {
- console.warn("!!!!!! Re-registering ".concat(packageName, ".").concat(className, " again"));
- this.storage[existingRegistrationIndex] = registered;
+ // TODO: !!!!!! Global IS_VERBOSE mode
+ if (!existingRegistration) {
+ console.warn("[\uD83D\uDCE6] Registering `".concat(packageName, ".").concat(className, "` to `").concat(this.storageName, "`"));
+ this.storage.push(registered);
  }
  else {
- this.storage.push(registered);
+ console.warn("[\uD83D\uDCE6] Re-registering `".concat(packageName, ".").concat(className, "` to `").concat(this.storageName, "`"));
+ this.storage[existingRegistrationIndex] = registered;
  }
  };
- return Register;
+ return $Register;
  }());

- // TODO: !!!!!! Move this logic to Register and rename to $Register
- var globalScope = $getGlobalScope();
- if (globalScope.$llmToolsMetadataRegister === undefined) {
- globalScope.$llmToolsMetadataRegister = [];
- }
- else if (!Array.isArray(globalScope.$llmToolsMetadataRegister)) {
- throw new UnexpectedError("Expected $llmToolsMetadataRegister to be an array, but got ".concat(typeof globalScope.$llmToolsMetadataRegister));
- }
- var _ = globalScope.$llmToolsMetadataRegister;
  /**
  * @@@
  *
@@ -6362,8 +6411,7 @@ var _ = globalScope.$llmToolsMetadataRegister;
  * @singleton Only one instance of each register is created per build, but thare can be more @@@
  * @public exported from `@promptbook/core`
  */
- var $llmToolsMetadataRegister = new Register(_);
- $getGlobalScope().$llmToolsMetadataRegister;
+ var $llmToolsMetadataRegister = new $Register('llm_tools_metadata');

  /**
  * @@@
@@ -6389,6 +6437,7 @@ function createLlmToolsFromConfigurationFromEnv() {
  return llmToolsConfiguration;
  }
  /**
+ * TODO: [🧠][🪁] Maybe do allow to do auto-install if package not registered and not found
  * TODO: Add Azure OpenAI
  * TODO: [🧠][🍛]
  * TODO: [🧠] Is there some meaningfull way how to test this util
@@ -6399,1682 +6448,131 @@ function createLlmToolsFromConfigurationFromEnv() {
6399
6448
  */
6400
6449
 
6401
6450
  /**
6402
- * Remote server is a proxy server that uses its execution tools internally and exposes the executor interface externally.
6403
- *
6404
- * You can simply use `RemoteExecutionTools` on client-side javascript and connect to your remote server.
6405
- * This is useful to make all logic on browser side but not expose your API keys or no need to use customer's GPU.
6406
- *
6407
- * @see https://github.com/webgptorg/promptbook#remote-server
6408
- * @public exported from `@promptbook/remote-client`
6409
- */
6410
- var RemoteLlmExecutionTools = /** @class */ (function () {
6411
- function RemoteLlmExecutionTools(options) {
6412
- this.options = options;
6413
- }
6414
- Object.defineProperty(RemoteLlmExecutionTools.prototype, "title", {
6415
- get: function () {
6416
- // TODO: [🧠] Maybe fetch title+description from the remote server (as well as if model methods are defined)
6417
- return 'Remote server';
6418
- },
6419
- enumerable: false,
6420
- configurable: true
6421
- });
6422
- Object.defineProperty(RemoteLlmExecutionTools.prototype, "description", {
6423
- get: function () {
6424
- return 'Use all models by your remote server';
6425
- },
6426
- enumerable: false,
6427
- configurable: true
6428
- });
6429
- /**
6430
- * Check the configuration of all execution tools
6431
- */
6432
- RemoteLlmExecutionTools.prototype.checkConfiguration = function () {
6433
- return __awaiter(this, void 0, void 0, function () {
6434
- return __generator(this, function (_a) {
6435
- return [2 /*return*/];
6436
- });
6437
- });
6438
- };
6439
- /**
6440
- * List all available models that can be used
6441
- */
6442
- RemoteLlmExecutionTools.prototype.listModels = function () {
6443
- return __awaiter(this, void 0, void 0, function () {
6444
- return __generator(this, function (_a) {
6445
- return [2 /*return*/, (this.options.models ||
6446
- [
6447
- /* !!!!!! */
6448
- ])];
6449
- });
6450
- });
6451
- };
6452
- /**
6453
- * Creates a connection to the remote proxy server.
6454
- */
6455
- RemoteLlmExecutionTools.prototype.makeConnection = function () {
6456
- var _this = this;
6457
- return new Promise(
6458
- // <- TODO: [🧱] Implement in a functional (not new Class) way
6459
- function (resolve, reject) {
6460
- var socket = io(_this.options.remoteUrl, {
6461
- path: _this.options.path,
6462
- // path: `${this.remoteUrl.pathname}/socket.io`,
6463
- transports: [/*'websocket', <- TODO: [🌬] Make websocket transport work */ 'polling'],
6464
- });
6465
- // console.log('Connecting to', this.options.remoteUrl.href, { socket });
6466
- socket.on('connect', function () {
6467
- resolve(socket);
6468
- });
6469
- // TODO: !!!! Better timeout handling
6470
- setTimeout(function () {
6471
- reject(new Error("Timeout while connecting to ".concat(_this.options.remoteUrl)));
6472
- }, 1000 /* <- TODO: Timeout to config */);
6473
- });
6474
- };
6475
- /**
6476
- * Calls remote proxy server to use a chat model
6477
- */
6478
- RemoteLlmExecutionTools.prototype.callChatModel = function (prompt) {
6479
- if (this.options.isVerbose) {
6480
- console.info("\uD83D\uDD8B Remote callChatModel call");
6481
- }
6482
- return /* not await */ this.callCommonModel(prompt);
6483
- };
6484
- /**
6485
- * Calls remote proxy server to use a completion model
6486
- */
6487
- RemoteLlmExecutionTools.prototype.callCompletionModel = function (prompt) {
6488
- if (this.options.isVerbose) {
6489
- console.info("\uD83D\uDCAC Remote callCompletionModel call");
6490
- }
6491
- return /* not await */ this.callCommonModel(prompt);
6492
- };
6493
- /**
6494
- * Calls remote proxy server to use a embedding model
6495
- */
6496
- RemoteLlmExecutionTools.prototype.callEmbeddingModel = function (prompt) {
6497
- if (this.options.isVerbose) {
6498
- console.info("\uD83D\uDCAC Remote callEmbeddingModel call");
6499
- }
6500
- return /* not await */ this.callCommonModel(prompt);
6501
- };
6502
- // <- Note: [🤖] callXxxModel
6503
- /**
6504
- * Calls remote proxy server to use both completion or chat model
6505
- */
6506
- RemoteLlmExecutionTools.prototype.callCommonModel = function (prompt) {
6507
- return __awaiter(this, void 0, void 0, function () {
6508
- var socket, promptResult;
6509
- return __generator(this, function (_a) {
6510
- switch (_a.label) {
6511
- case 0: return [4 /*yield*/, this.makeConnection()];
6512
- case 1:
6513
- socket = _a.sent();
6514
- if (this.options.isAnonymous) {
6515
- socket.emit('request', {
6516
- llmToolsConfiguration: this.options.llmToolsConfiguration,
6517
- prompt: prompt,
6518
- // <- TODO: [🛫] `prompt` is NOT fully serializable as JSON, it contains functions which are not serializable
6519
- });
6520
- }
6521
- else {
6522
- socket.emit('request', {
6523
- clientId: this.options.clientId,
6524
- prompt: prompt,
6525
- // <- TODO: [🛫] `prompt` is NOT fully serializable as JSON, it contains functions which are not serializable
6526
- });
6527
- }
6528
- return [4 /*yield*/, new Promise(function (resolve, reject) {
6529
- socket.on('response', function (response) {
6530
- resolve(response.promptResult);
6531
- socket.disconnect();
6532
- });
6533
- socket.on('error', function (error) {
6534
- reject(new PipelineExecutionError(error.errorMessage));
6535
- socket.disconnect();
6536
- });
6537
- })];
6538
- case 2:
6539
- promptResult = _a.sent();
6540
- socket.disconnect();
6541
- return [2 /*return*/, promptResult];
6542
- }
6543
- });
6544
- });
6545
- };
6546
- return RemoteLlmExecutionTools;
6547
- }());
6548
- /**
6549
- * TODO: [🍓] Allow to list compatible models with each variant
6550
- * TODO: [🗯] RemoteLlmExecutionTools should extend Destroyable and implement IDestroyable
6551
- * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
6552
- * TODO: [🧠] Maybe remove `@promptbook/remote-client` and just use `@promptbook/core`
6553
- */
6554
-
6555
- /**
6556
- * Function computeUsage will create price per one token based on the string value found on openai page
6557
- *
6558
- * @private within the repository, used only as internal helper for `OPENAI_MODELS`
6559
- */
6560
- function computeUsage(value) {
6561
- var _a = __read(value.split(' / '), 2), price = _a[0], tokens = _a[1];
6562
- return parseFloat(price.replace('$', '')) / parseFloat(tokens.replace('M tokens', '')) / 1000000;
6563
- }
6564
-
6565
- /**
6566
- * List of available Anthropic Claude models with pricing
6567
- *
6568
- * Note: Done at 2024-08-16
6569
- *
6570
- * @see https://docs.anthropic.com/en/docs/models-overview
6571
- * @public exported from `@promptbook/anthropic-claude`
6572
- */
6573
- var ANTHROPIC_CLAUDE_MODELS = [
6574
- {
6575
- modelVariant: 'CHAT',
6576
- modelTitle: 'Claude 3.5 Sonnet',
6577
- modelName: 'claude-3-5-sonnet-20240620',
6578
- pricing: {
6579
- prompt: computeUsage("$3.00 / 1M tokens"),
6580
- output: computeUsage("$15.00 / 1M tokens"),
6581
- },
6582
- },
6583
- {
6584
- modelVariant: 'CHAT',
6585
- modelTitle: 'Claude 3 Opus',
6586
- modelName: 'claude-3-opus-20240229',
6587
- pricing: {
6588
- prompt: computeUsage("$15.00 / 1M tokens"),
6589
- output: computeUsage("$75.00 / 1M tokens"),
6590
- },
6591
- },
6592
- {
6593
- modelVariant: 'CHAT',
6594
- modelTitle: 'Claude 3 Sonnet',
6595
- modelName: 'claude-3-sonnet-20240229',
6596
- pricing: {
6597
- prompt: computeUsage("$3.00 / 1M tokens"),
6598
- output: computeUsage("$15.00 / 1M tokens"),
6599
- },
6600
- },
6601
- {
6602
- modelVariant: 'CHAT',
6603
- modelTitle: 'Claude 3 Haiku',
6604
- modelName: ' claude-3-haiku-20240307',
6605
- pricing: {
6606
- prompt: computeUsage("$0.25 / 1M tokens"),
6607
- output: computeUsage("$1.25 / 1M tokens"),
6608
- },
6609
- },
6610
- {
6611
- modelVariant: 'CHAT',
6612
- modelTitle: 'Claude 2.1',
6613
- modelName: 'claude-2.1',
6614
- pricing: {
6615
- prompt: computeUsage("$8.00 / 1M tokens"),
6616
- output: computeUsage("$24.00 / 1M tokens"),
6617
- },
6618
- },
6619
- {
6620
- modelVariant: 'CHAT',
6621
- modelTitle: 'Claude 2',
6622
- modelName: 'claude-2.0',
6623
- pricing: {
6624
- prompt: computeUsage("$8.00 / 1M tokens"),
6625
- output: computeUsage("$24.00 / 1M tokens"),
6626
- },
6627
- },
6628
- {
6629
- modelVariant: 'CHAT',
6630
- modelTitle: ' Claude Instant 1.2',
6631
- modelName: 'claude-instant-1.2',
6632
- pricing: {
6633
- prompt: computeUsage("$0.80 / 1M tokens"),
6634
- output: computeUsage("$2.40 / 1M tokens"),
6635
- },
6636
- },
6637
- // TODO: !!! Claude 1 and 2 has also completion versions - ask Hoagy
6638
- ];
6639
- /**
6640
- * Note: [🤖] Add models of new variant
6641
- * TODO: [🧠] !!! Add embedding models OR Anthropic has only chat+completion models?
6642
- * TODO: [🧠] Some mechanism to propagate unsureness
6643
- * TODO: [🧠][👮‍♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...
6644
- * TODO: [🎰] Some mechanism to auto-update available models
6645
- */
6646
-
6647
- /**
6648
- * Get current date in ISO 8601 format
6451
+ * @@@
6649
6452
  *
6650
- * @private internal utility
6453
+ * Note: `$` is used to indicate that this interacts with the global scope
6454
+ * @singleton Only one instance of each register is created per build, but thare can be more @@@
6455
+ * @public exported from `@promptbook/core`
6651
6456
  */
6652
- function getCurrentIsoDate() {
6653
- return new Date().toISOString();
6654
- }
6457
+ var $llmToolsRegister = new $Register('llm_execution_tools_constructors');
6655
6458
 
6656
6459
  /**
6657
- * Helper of usage compute
6460
+ * Creates a message with all registered LLM tools
6658
6461
  *
6659
- * @param content the content of prompt or response
6660
- * @returns part of PromptResultUsageCounts
6462
+ * Note: This function is used to create a (error) message when there is no constructor for some LLM provider
6661
6463
  *
6662
- * @private internal utility of LlmExecutionTools
6464
+ * @private internal function of `createLlmToolsFromConfiguration` and `createLlmToolsFromEnv`
6663
6465
  */
6664
- function computeUsageCounts(content) {
6665
- return {
6666
- charactersCount: { value: countCharacters(content) },
6667
- wordsCount: { value: countWords(content) },
6668
- sentencesCount: { value: countSentences(content) },
6669
- linesCount: { value: countLines(content) },
6670
- paragraphsCount: { value: countParagraphs(content) },
6671
- pagesCount: { value: countPages(content) },
6466
+ function $registeredLlmToolsMessage() {
6467
+ var e_1, _a, e_2, _b;
6468
+ /**
6469
+ * Mixes registered LLM tools from $llmToolsMetadataRegister and $llmToolsRegister
6470
+ */
6471
+ var all = [];
6472
+ var _loop_1 = function (packageName, className) {
6473
+ if (all.some(function (item) { return item.packageName === packageName && item.className === className; })) {
6474
+ return "continue";
6475
+ }
6476
+ all.push({ packageName: packageName, className: className });
6672
6477
  };
6673
- }
6674
-
6675
- /**
6676
- * Make UncertainNumber
6677
- *
6678
- * @param value
6679
- *
6680
- * @private utility for initializating UncertainNumber
6681
- */
6682
- function uncertainNumber(value) {
6683
- if (value === null || value === undefined || Number.isNaN(value)) {
6684
- return { value: 0, isUncertain: true };
6685
- }
6686
- return { value: value };
6687
- }
6688
-
6689
- /**
6690
- * Computes the usage of the Anthropic Claude API based on the response from Anthropic Claude
6691
- *
6692
- * @param promptContent The content of the prompt
6693
- * @param resultContent The content of the result (for embedding prompts or failed prompts pass empty string)
6694
- * @param rawResponse The raw response from Anthropic Claude API
6695
- * @throws {PipelineExecutionError} If the usage is not defined in the response from Anthropic Claude
6696
- * @private internal utility of `AnthropicClaudeExecutionTools`
6697
- */
6698
- function computeAnthropicClaudeUsage(promptContent, // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
6699
- resultContent, rawResponse) {
6700
- var _a, _b;
6701
- if (rawResponse.usage === undefined) {
6702
- throw new PipelineExecutionError('The usage is not defined in the response from Anthropic Claude');
6478
+ try {
6479
+ for (var _c = __values($llmToolsMetadataRegister.list()), _d = _c.next(); !_d.done; _d = _c.next()) {
6480
+ var _e = _d.value, packageName = _e.packageName, className = _e.className;
6481
+ _loop_1(packageName, className);
6482
+ }
6703
6483
  }
6704
- if (((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.input_tokens) === undefined) {
6705
- throw new PipelineExecutionError('In Anthropic Claude response `usage.prompt_tokens` not defined');
6484
+ catch (e_1_1) { e_1 = { error: e_1_1 }; }
6485
+ finally {
6486
+ try {
6487
+ if (_d && !_d.done && (_a = _c.return)) _a.call(_c);
6488
+ }
6489
+ finally { if (e_1) throw e_1.error; }
6706
6490
  }
6707
- var inputTokens = rawResponse.usage.input_tokens;
6708
- var outputTokens = ((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.output_tokens) || 0;
6709
- var modelInfo = ANTHROPIC_CLAUDE_MODELS.find(function (model) { return model.modelName === rawResponse.model; });
6710
- var price;
6711
- if (modelInfo === undefined || modelInfo.pricing === undefined) {
6712
- price = uncertainNumber();
6491
+ var _loop_2 = function (packageName, className) {
6492
+ if (all.some(function (item) { return item.packageName === packageName && item.className === className; })) {
6493
+ return "continue";
6494
+ }
6495
+ all.push({ packageName: packageName, className: className });
6496
+ };
6497
+ try {
6498
+ for (var _f = __values($llmToolsRegister.list()), _g = _f.next(); !_g.done; _g = _f.next()) {
6499
+ var _h = _g.value, packageName = _h.packageName, className = _h.className;
6500
+ _loop_2(packageName, className);
6501
+ }
6713
6502
  }
6714
- else {
6715
- price = uncertainNumber(inputTokens * modelInfo.pricing.prompt + outputTokens * modelInfo.pricing.output);
6503
+ catch (e_2_1) { e_2 = { error: e_2_1 }; }
6504
+ finally {
6505
+ try {
6506
+ if (_g && !_g.done && (_b = _f.return)) _b.call(_f);
6507
+ }
6508
+ finally { if (e_2) throw e_2.error; }
6716
6509
  }
6717
- return {
6718
- price: price,
6719
- input: __assign({ tokensCount: uncertainNumber(rawResponse.usage.input_tokens) }, computeUsageCounts(promptContent)),
6720
- output: __assign({ tokensCount: uncertainNumber(outputTokens) }, computeUsageCounts(resultContent)),
6721
- };
6510
+ var metadata = all.map(function (metadata) {
6511
+ var isMetadataAviailable = $llmToolsMetadataRegister
6512
+ .list()
6513
+ .find(function (_a) {
6514
+ var packageName = _a.packageName, className = _a.className;
6515
+ return metadata.packageName === packageName && metadata.className === className;
6516
+ });
6517
+ var isInstalled = $llmToolsRegister
6518
+ .list()
6519
+ .find(function (_a) {
6520
+ var packageName = _a.packageName, className = _a.className;
6521
+ return metadata.packageName === packageName && metadata.className === className;
6522
+ });
6523
+ return __assign(__assign({}, metadata), { isMetadataAviailable: isMetadataAviailable, isInstalled: isInstalled });
6524
+ });
6525
+ return spaceTrim(function (block) { return "\n Available LLM providers are:\n ".concat(block(metadata
6526
+ .map(function (_a, i) {
6527
+ var packageName = _a.packageName, className = _a.className, isMetadataAviailable = _a.isMetadataAviailable, isInstalled = _a.isInstalled;
6528
+ var more;
6529
+ if (just(false)) {
6530
+ more = '';
6531
+ }
6532
+ else if (!isMetadataAviailable && !isInstalled) {
6533
+ // TODO: [🧠][🪁] Maybe do allow to do auto-install if package not registered and not found
6534
+ more = "(not installed and no metadata, looks like a unexpected behavior)";
6535
+ }
6536
+ else if (isMetadataAviailable && !isInstalled) {
6537
+ // TODO: [�][�]
6538
+ more = "(not installed)";
6539
+ }
6540
+ else if (!isMetadataAviailable && isInstalled) {
6541
+ more = "(no metadata, looks like a unexpected behavior)";
6542
+ }
6543
+ else if (isMetadataAviailable && isInstalled) {
6544
+ more = "(installed)";
6545
+ }
6546
+ else {
6547
+ more = "(unknown state, looks like a unexpected behavior)";
6548
+ }
6549
+ return "".concat(i + 1, ") `").concat(className, "` from `").concat(packageName, "` ").concat(more);
6550
+ })
6551
+ .join('\n')), "\n "); });
6722
6552
  }
6723
- /**
6724
- * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
6725
- */
6726
6553
 
6727
6554
  /**
6728
- * Execution Tools for calling Anthropic Claude API.
6555
+ * @@@
6556
+ *
6557
+ * Note: This function is not cached, every call creates new instance of `MultipleLlmExecutionTools`
6729
6558
  *
6730
- * @public exported from `@promptbook/anthropic-claude`
6731
- * @deprecated use `createAnthropicClaudeExecutionTools` instead
6559
+ * @returns @@@
6560
+ * @public exported from `@promptbook/core`
6732
6561
  */
6733
- var AnthropicClaudeExecutionTools = /** @class */ (function () {
6734
- /**
6735
- * Creates Anthropic Claude Execution Tools.
6736
- *
6737
- * @param options which are relevant are directly passed to the Anthropic Claude client
6738
- */
6739
- function AnthropicClaudeExecutionTools(options) {
6740
- if (options === void 0) { options = { isProxied: false }; }
6741
- this.options = options;
6742
- /**
6743
- * Anthropic Claude API client.
6744
- */
6745
- this.client = null;
6746
- }
6747
- Object.defineProperty(AnthropicClaudeExecutionTools.prototype, "title", {
6748
- get: function () {
6749
- return 'Anthropic Claude';
6750
- },
6751
- enumerable: false,
6752
- configurable: true
6753
- });
6754
- Object.defineProperty(AnthropicClaudeExecutionTools.prototype, "description", {
6755
- get: function () {
6756
- return 'Use all models provided by Anthropic Claude';
6757
- },
6758
- enumerable: false,
6759
- configurable: true
6760
- });
6761
- AnthropicClaudeExecutionTools.prototype.getClient = function () {
6762
- return __awaiter(this, void 0, void 0, function () {
6763
- var anthropicOptions;
6764
- return __generator(this, function (_a) {
6765
- if (this.client === null) {
6766
- anthropicOptions = __assign({}, this.options);
6767
- delete anthropicOptions.isVerbose;
6768
- delete anthropicOptions.isProxied;
6769
- this.client = new Anthropic(anthropicOptions);
6770
- }
6771
- return [2 /*return*/, this.client];
6772
- });
6562
+ function createLlmToolsFromConfiguration(configuration, options) {
6563
+ if (options === void 0) { options = {}; }
6564
+ var _a = options.isVerbose, isVerbose = _a === void 0 ? false : _a;
6565
+ var llmTools = configuration.map(function (llmConfiguration) {
6566
+ var registeredItem = $llmToolsRegister
6567
+ .list()
6568
+ .find(function (_a) {
6569
+ var packageName = _a.packageName, className = _a.className;
6570
+ return llmConfiguration.packageName === packageName && llmConfiguration.className === className;
6773
6571
  });
6774
- };
6775
- /**
6776
- * Check the `options` passed to `constructor`
6777
- */
6778
- AnthropicClaudeExecutionTools.prototype.checkConfiguration = function () {
6779
- return __awaiter(this, void 0, void 0, function () {
6780
- return __generator(this, function (_a) {
6781
- switch (_a.label) {
6782
- case 0: return [4 /*yield*/, this.getClient()];
6783
- case 1:
6784
- _a.sent();
6785
- return [2 /*return*/];
6786
- }
6787
- });
6788
- });
6789
- };
6790
- /**
6791
- * List all available Anthropic Claude models that can be used
6792
- */
6793
- AnthropicClaudeExecutionTools.prototype.listModels = function () {
6794
- return ANTHROPIC_CLAUDE_MODELS;
6795
- };
6796
- /**
6797
- * Calls Anthropic Claude API to use a chat model.
6798
- */
6799
- AnthropicClaudeExecutionTools.prototype.callChatModel = function (prompt) {
6800
- return __awaiter(this, void 0, void 0, function () {
6801
- var content, parameters, modelRequirements, client, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, contentBlock, resultContent, usage;
6802
- return __generator(this, function (_a) {
6803
- switch (_a.label) {
6804
- case 0:
6805
- if (this.options.isVerbose) {
6806
- console.info('💬 Anthropic Claude callChatModel call');
6807
- }
6808
- content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
6809
- return [4 /*yield*/, this.getClient()];
6810
- case 1:
6811
- client = _a.sent();
6812
- // TODO: [☂] Use here more modelRequirements
6813
- if (modelRequirements.modelVariant !== 'CHAT') {
6814
- throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
6815
- }
6816
- modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
6817
- rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
6818
- rawRequest = {
6819
- model: modelRequirements.modelName || this.getDefaultChatModel().modelName,
6820
- max_tokens: modelRequirements.maxTokens || 4096,
6821
- // <- TODO: [🌾] Make some global max cap for maxTokens
6822
- temperature: modelRequirements.temperature,
6823
- system: modelRequirements.systemMessage,
6824
- // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
6825
- // <- Note: [🧆]
6826
- messages: [
6827
- {
6828
- role: 'user',
6829
- content: rawPromptContent,
6830
- },
6831
- ],
6832
- // TODO: Is here some equivalent of user identification?> user: this.options.user,
6833
- };
6834
- start = getCurrentIsoDate();
6835
- if (this.options.isVerbose) {
6836
- console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
6837
- }
6838
- return [4 /*yield*/, client.messages.create(rawRequest)];
6839
- case 2:
6840
- rawResponse = _a.sent();
6841
- if (this.options.isVerbose) {
6842
- console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
6843
- }
6844
- if (!rawResponse.content[0]) {
6845
- throw new PipelineExecutionError('No content from Anthropic Claude');
6846
- }
6847
- if (rawResponse.content.length > 1) {
6848
- throw new PipelineExecutionError('More than one content blocks from Anthropic Claude');
6849
- }
6850
- contentBlock = rawResponse.content[0];
6851
- if (contentBlock.type !== 'text') {
6852
- throw new PipelineExecutionError("Returned content is not \"text\" type but \"".concat(contentBlock.type, "\""));
6853
- }
6854
- resultContent = contentBlock.text;
6855
- // eslint-disable-next-line prefer-const
6856
- complete = getCurrentIsoDate();
6857
- usage = computeAnthropicClaudeUsage(content, '', rawResponse);
6858
- return [2 /*return*/, {
6859
- content: resultContent,
6860
- modelName: rawResponse.model,
6861
- timing: {
6862
- start: start,
6863
- complete: complete,
6864
- },
6865
- usage: usage,
6866
- rawPromptContent: rawPromptContent,
6867
- rawRequest: rawRequest,
6868
- rawResponse: rawResponse,
6869
- // <- [🗯]
6870
- }];
6871
- }
6872
- });
6873
- });
6874
- };
6875
- /*
6876
- TODO: [👏]
6877
- public async callCompletionModel(
6878
- prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements'>,
6879
- ): Promise<PromptCompletionResult> {
6880
-
6881
- if (this.options.isVerbose) {
6882
- console.info('🖋 Anthropic Claude callCompletionModel call');
6883
- }
6884
-
6885
- const { content, parameters, modelRequirements } = prompt;
6886
-
6887
- // TODO: [☂] Use here more modelRequirements
6888
- if (modelRequirements.modelVariant !== 'COMPLETION') {
6889
- throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
6890
- }
6891
-
6892
- const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
6893
- const modelSettings = {
6894
- model: modelName,
6895
- max_tokens: modelRequirements.maxTokens || 2000, // <- Note: 2000 is for lagacy reasons
6896
- // <- TODO: [🌾] Make some global max cap for maxTokens
6897
- // <- TODO: Use here `systemMessage`, `temperature` and `seed`
6898
- };
6899
-
6900
- const rawRequest: xxxx.Completions.CompletionCreateParamsNonStreaming = {
6901
- ...modelSettings,
6902
- prompt: rawPromptContent,
6903
- user: this.options.user,
6904
- };
6905
- const start: string_date_iso8601 = getCurrentIsoDate();
6906
- let complete: string_date_iso8601;
6907
-
6908
- if (this.options.isVerbose) {
6909
- console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
6910
- }
6911
- const rawResponse = await this.client.completions.create(rawRequest);
6912
- if (this.options.isVerbose) {
6913
- console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
6914
- }
6915
-
6916
- if (!rawResponse.choices[0]) {
6917
- throw new PipelineExecutionError('No choises from Anthropic Claude');
6918
- }
6919
-
6920
- if (rawResponse.choices.length > 1) {
6921
- // TODO: This should be maybe only warning
6922
- throw new PipelineExecutionError('More than one choise from Anthropic Claude');
6923
- }
6924
-
6925
- const resultContent = rawResponse.choices[0].text;
6926
- // eslint-disable-next-line prefer-const
6927
- complete = getCurrentIsoDate();
6928
- const usage = { price: 'UNKNOWN', inputTokens: 0, outputTokens: 0 /* <- TODO: [🐞] Compute usage * / } satisfies PromptResultUsage;
6929
-
6930
-
6931
-
6932
- return {
6933
- content: resultContent,
6934
- modelName: rawResponse.model || model,
6935
- timing: {
6936
- start,
6937
- complete,
6938
- },
6939
- usage,
6940
- rawResponse,
6941
- // <- [🗯]
6942
- };
6943
- }
6944
- */
6945
- // <- Note: [🤖] callXxxModel
6946
- /**
6947
- * Get the model that should be used as default
6948
- */
6949
- AnthropicClaudeExecutionTools.prototype.getDefaultModel = function (defaultModelName) {
6950
- var model = ANTHROPIC_CLAUDE_MODELS.find(function (_a) {
6951
- var modelName = _a.modelName;
6952
- return modelName.startsWith(defaultModelName);
6953
- });
6954
- if (model === undefined) {
6955
- throw new UnexpectedError(spaceTrim(function (block) {
6956
- return "\n Cannot find model in OpenAI models with name \"".concat(defaultModelName, "\" which should be used as default.\n\n Available models:\n ").concat(block(ANTHROPIC_CLAUDE_MODELS.map(function (_a) {
6957
- var modelName = _a.modelName;
6958
- return "- \"".concat(modelName, "\"");
6959
- }).join('\n')), "\n\n ");
6960
- }));
6961
- }
6962
- return model;
6963
- };
6964
- /**
6965
- * Default model for chat variant.
6966
- */
6967
- AnthropicClaudeExecutionTools.prototype.getDefaultChatModel = function () {
6968
- return this.getDefaultModel('claude-3-opus');
6969
- };
6970
- return AnthropicClaudeExecutionTools;
6971
- }());
6972
- /**
6973
- * TODO: [🍆] JSON mode
6974
- * TODO: [🧠] Maybe handle errors via transformAnthropicError (like transformAzureError)
6975
- * TODO: Maybe Create some common util for callChatModel and callCompletionModel
6976
- * TODO: Maybe make custom OpenAiError
6977
- * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
6978
- * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
6979
- * TODO: [📅] Maybe instead of `RemoteLlmExecutionToolsOptions` use `proxyWithAnonymousRemoteServer` (if implemented)
6980
- */
6981
-
6982
- /**
6983
- * Execution Tools for calling Anthropic Claude API.
6984
- *
6985
- * @public exported from `@promptbook/anthropic-claude`
6986
- */
6987
- var createAnthropicClaudeExecutionTools = Object.assign(function (options) {
6988
- if (options.isProxied) {
6989
- return new RemoteLlmExecutionTools(__assign(__assign({}, options), { isAnonymous: true, llmToolsConfiguration: [
6990
- {
6991
- title: 'Anthropic Claude (proxied)',
6992
- packageName: '@promptbook/anthropic-claude',
6993
- className: 'AnthropicClaudeExecutionTools',
6994
- options: __assign(__assign({}, options), { isProxied: false }),
6995
- },
6996
- ], models: ANTHROPIC_CLAUDE_MODELS }));
6997
- }
6998
- return new AnthropicClaudeExecutionTools(options);
6999
- }, {
7000
- packageName: '@promptbook/anthropic-claude',
7001
- className: 'AnthropicClaudeExecutionTools',
7002
- });
7003
- /**
7004
- * TODO: [🧠] !!!! Make anonymous this with all LLM providers
7005
- * TODO: [🧠][🧱] !!!! Maybe change all `new AnthropicClaudeExecutionTools` -> `createAnthropicClaudeExecutionTools` in manual
7006
- * TODO: [🧠] Maybe auto-detect usage in browser and determine default value of `isProxied`
7007
- * TODO: [🦺] Is there some way how to put `packageName` and `className` on top and function definition on bottom?
7008
- * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
7009
- */
7010
-
- /**
- * List of available OpenAI models with pricing
- *
- * Note: Done at 2024-05-20
- *
- * @see https://platform.openai.com/docs/models/
- * @see https://openai.com/api/pricing/
- * @public exported from `@promptbook/openai`
- */
- var OPENAI_MODELS = [
- /*/
- {
- modelTitle: 'dall-e-3',
- modelName: 'dall-e-3',
- },
- /**/
- /*/
- {
- modelTitle: 'whisper-1',
- modelName: 'whisper-1',
- },
- /**/
- /**/
- {
- modelVariant: 'COMPLETION',
- modelTitle: 'davinci-002',
- modelName: 'davinci-002',
- pricing: {
- prompt: computeUsage("$2.00 / 1M tokens"),
- output: computeUsage("$2.00 / 1M tokens"), // <- not sure
- },
- },
- /**/
- /*/
- {
- modelTitle: 'dall-e-2',
- modelName: 'dall-e-2',
- },
- /**/
- /**/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-3.5-turbo-16k',
- modelName: 'gpt-3.5-turbo-16k',
- pricing: {
- prompt: computeUsage("$3.00 / 1M tokens"),
- output: computeUsage("$4.00 / 1M tokens"),
- },
- },
- /**/
- /*/
- {
- modelTitle: 'tts-1-hd-1106',
- modelName: 'tts-1-hd-1106',
- },
- /**/
- /*/
- {
- modelTitle: 'tts-1-hd',
- modelName: 'tts-1-hd',
- },
- /**/
- /**/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-4',
- modelName: 'gpt-4',
- pricing: {
- prompt: computeUsage("$30.00 / 1M tokens"),
- output: computeUsage("$60.00 / 1M tokens"),
- },
- },
- /**/
- /**/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-4-32k',
- modelName: 'gpt-4-32k',
- pricing: {
- prompt: computeUsage("$60.00 / 1M tokens"),
- output: computeUsage("$120.00 / 1M tokens"),
- },
- },
- /**/
- /*/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-4-0613',
- modelName: 'gpt-4-0613',
- pricing: {
- prompt: computeUsage(` / 1M tokens`),
- output: computeUsage(` / 1M tokens`),
- },
- },
- /**/
- /**/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-4-turbo-2024-04-09',
- modelName: 'gpt-4-turbo-2024-04-09',
- pricing: {
- prompt: computeUsage("$10.00 / 1M tokens"),
- output: computeUsage("$30.00 / 1M tokens"),
- },
- },
- /**/
- /**/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-3.5-turbo-1106',
- modelName: 'gpt-3.5-turbo-1106',
- pricing: {
- prompt: computeUsage("$1.00 / 1M tokens"),
- output: computeUsage("$2.00 / 1M tokens"),
- },
- },
- /**/
- /**/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-4-turbo',
- modelName: 'gpt-4-turbo',
- pricing: {
- prompt: computeUsage("$10.00 / 1M tokens"),
- output: computeUsage("$30.00 / 1M tokens"),
- },
- },
- /**/
- /**/
- {
- modelVariant: 'COMPLETION',
- modelTitle: 'gpt-3.5-turbo-instruct-0914',
- modelName: 'gpt-3.5-turbo-instruct-0914',
- pricing: {
- prompt: computeUsage("$1.50 / 1M tokens"),
- output: computeUsage("$2.00 / 1M tokens"), // <- For gpt-3.5-turbo-instruct
- },
- },
- /**/
- /**/
- {
- modelVariant: 'COMPLETION',
- modelTitle: 'gpt-3.5-turbo-instruct',
- modelName: 'gpt-3.5-turbo-instruct',
- pricing: {
- prompt: computeUsage("$1.50 / 1M tokens"),
- output: computeUsage("$2.00 / 1M tokens"),
- },
- },
- /**/
- /*/
- {
- modelTitle: 'tts-1',
- modelName: 'tts-1',
- },
- /**/
- /**/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-3.5-turbo',
- modelName: 'gpt-3.5-turbo',
- pricing: {
- prompt: computeUsage("$3.00 / 1M tokens"),
- output: computeUsage("$6.00 / 1M tokens"), // <- Not sure, refer to gpt-3.5-turbo in Fine-tuning models
- },
- },
- /**/
- /**/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-3.5-turbo-0301',
- modelName: 'gpt-3.5-turbo-0301',
- pricing: {
- prompt: computeUsage("$1.50 / 1M tokens"),
- output: computeUsage("$2.00 / 1M tokens"),
- },
- },
- /**/
- /**/
- {
- modelVariant: 'COMPLETION',
- modelTitle: 'babbage-002',
- modelName: 'babbage-002',
- pricing: {
- prompt: computeUsage("$0.40 / 1M tokens"),
- output: computeUsage("$0.40 / 1M tokens"), // <- Not sure
- },
- },
- /**/
- /**/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-4-1106-preview',
- modelName: 'gpt-4-1106-preview',
- pricing: {
- prompt: computeUsage("$10.00 / 1M tokens"),
- output: computeUsage("$30.00 / 1M tokens"),
- },
- },
- /**/
- /**/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-4-0125-preview',
- modelName: 'gpt-4-0125-preview',
- pricing: {
- prompt: computeUsage("$10.00 / 1M tokens"),
- output: computeUsage("$30.00 / 1M tokens"),
- },
- },
- /**/
- /*/
- {
- modelTitle: 'tts-1-1106',
- modelName: 'tts-1-1106',
- },
- /**/
- /**/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-3.5-turbo-0125',
- modelName: 'gpt-3.5-turbo-0125',
- pricing: {
- prompt: computeUsage("$0.50 / 1M tokens"),
- output: computeUsage("$1.50 / 1M tokens"),
- },
- },
- /**/
- /**/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-4-turbo-preview',
- modelName: 'gpt-4-turbo-preview',
- pricing: {
- prompt: computeUsage("$10.00 / 1M tokens"),
- output: computeUsage("$30.00 / 1M tokens"), // <- Not sure, just for gpt-4-turbo
- },
- },
- /**/
- /**/
- {
- modelVariant: 'EMBEDDING',
- modelTitle: 'text-embedding-3-large',
- modelName: 'text-embedding-3-large',
- pricing: {
- prompt: computeUsage("$0.13 / 1M tokens"),
- // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
- output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
- },
- },
- /**/
- /**/
- {
- modelVariant: 'EMBEDDING',
- modelTitle: 'text-embedding-3-small',
- modelName: 'text-embedding-3-small',
- pricing: {
- prompt: computeUsage("$0.02 / 1M tokens"),
- // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
- output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
- },
- },
- /**/
- /**/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-3.5-turbo-0613',
- modelName: 'gpt-3.5-turbo-0613',
- pricing: {
- prompt: computeUsage("$1.50 / 1M tokens"),
- output: computeUsage("$2.00 / 1M tokens"),
- },
- },
- /**/
- /**/
- {
- modelVariant: 'EMBEDDING',
- modelTitle: 'text-embedding-ada-002',
- modelName: 'text-embedding-ada-002',
- pricing: {
- prompt: computeUsage("$0.1 / 1M tokens"),
- // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
- output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
- },
- },
- /**/
- /*/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-4-1106-vision-preview',
- modelName: 'gpt-4-1106-vision-preview',
- },
- /**/
- /*/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-4-vision-preview',
- modelName: 'gpt-4-vision-preview',
- pricing: {
- prompt: computeUsage(`$10.00 / 1M tokens`),
- output: computeUsage(`$30.00 / 1M tokens`),
- },
- },
- /**/
- /**/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-4o-2024-05-13',
- modelName: 'gpt-4o-2024-05-13',
- pricing: {
- prompt: computeUsage("$5.00 / 1M tokens"),
- output: computeUsage("$15.00 / 1M tokens"),
- },
- },
- /**/
- /**/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-4o',
- modelName: 'gpt-4o',
- pricing: {
- prompt: computeUsage("$5.00 / 1M tokens"),
- output: computeUsage("$15.00 / 1M tokens"),
- },
- },
- /**/
- /**/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-3.5-turbo-16k-0613',
- modelName: 'gpt-3.5-turbo-16k-0613',
- pricing: {
- prompt: computeUsage("$3.00 / 1M tokens"),
- output: computeUsage("$4.00 / 1M tokens"),
- },
- },
- /**/
- ];
- /**
- * Note: [🤖] Add models of new variant
- * TODO: [🧠] Some mechanism to propagate unsureness
- * TODO: [🎰] Some mechanism to auto-update available models
- * TODO: [🎰][👮‍♀️] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing
- * TODO: [🧠][👮‍♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...
- * @see https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4
- * @see https://openai.com/api/pricing/
- * @see /other/playground/playground.ts
- * TODO: [🍓] Make better
- * TODO: Change model titles to human eg: "gpt-4-turbo-2024-04-09" -> "GPT-4 Turbo (2024-04-09)"
- * TODO: [🚸] Not all models are compatible with JSON mode, add this information here and use it
- */
-
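The entries above wrap their prices in `computeUsage('$X / 1M tokens')`. The helper itself is not shown in this hunk; assuming it only turns the human-readable string into a price per single token, the equivalent arithmetic is:

    // Sketch of the assumed behaviour of computeUsage (not the actual implementation):
    function computeUsageSketch(priceString) {
        // '$5.00 / 1M tokens' -> 5.00 / 1 000 000 = 0.000005 USD per token
        const dollars = Number(priceString.match(/\$([\d.]+)/)[1]);
        return dollars / 1_000_000;
    }

    console.log(computeUsageSketch('$5.00 / 1M tokens')); // 0.000005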
- /**
- * Execution Tools for calling Azure OpenAI API.
- *
- * @public exported from `@promptbook/azure-openai`
- */
- var AzureOpenAiExecutionTools = /** @class */ (function () {
- /**
- * Creates OpenAI Execution Tools.
- *
- * @param options which are relevant are directly passed to the OpenAI client
- */
- function AzureOpenAiExecutionTools(options) {
- this.options = options;
- /**
- * OpenAI Azure API client.
- */
- this.client = null;
- }
- Object.defineProperty(AzureOpenAiExecutionTools.prototype, "title", {
- get: function () {
- return 'Azure OpenAI';
- },
- enumerable: false,
- configurable: true
- });
- Object.defineProperty(AzureOpenAiExecutionTools.prototype, "description", {
- get: function () {
- return 'Use all models trained by OpenAI provided by Azure';
- },
- enumerable: false,
- configurable: true
- });
- AzureOpenAiExecutionTools.prototype.getClient = function () {
- return __awaiter(this, void 0, void 0, function () {
- return __generator(this, function (_a) {
- if (this.client === null) {
- this.client = new OpenAIClient("https://".concat(this.options.resourceName, ".openai.azure.com/"), new AzureKeyCredential(this.options.apiKey));
- }
- return [2 /*return*/, this.client];
- });
- });
- };
- /**
- * Check the `options` passed to `constructor`
- */
- AzureOpenAiExecutionTools.prototype.checkConfiguration = function () {
- return __awaiter(this, void 0, void 0, function () {
- return __generator(this, function (_a) {
- switch (_a.label) {
- case 0: return [4 /*yield*/, this.getClient()];
- case 1:
- _a.sent();
- return [2 /*return*/];
- }
- });
- });
- };
- /**
- * List all available Azure OpenAI models that can be used
- */
- AzureOpenAiExecutionTools.prototype.listModels = function () {
- return __awaiter(this, void 0, void 0, function () {
- return __generator(this, function (_a) {
- // TODO: !!! Do here some filtering which models are really available as deployment
- // @see https://management.azure.com/subscriptions/subscriptionId/resourceGroups/resourceGroupName/providers/Microsoft.CognitiveServices/accounts/accountName/deployments?api-version=2023-05-01
- return [2 /*return*/, OPENAI_MODELS.map(function (_a) {
- var modelTitle = _a.modelTitle, modelName = _a.modelName, modelVariant = _a.modelVariant;
- return ({
- modelTitle: "Azure ".concat(modelTitle),
- modelName: modelName,
- modelVariant: modelVariant,
- });
- })];
- });
- });
- };
- /**
- * Calls OpenAI API to use a chat model.
- */
- AzureOpenAiExecutionTools.prototype.callChatModel = function (prompt) {
- var _a, _b;
- return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, client, modelName, modelSettings, rawPromptContent, messages, start, complete, rawRequest, rawResponse, resultContent, usage, error_1;
- return __generator(this, function (_c) {
- switch (_c.label) {
- case 0:
- if (this.options.isVerbose) {
- console.info('💬 OpenAI callChatModel call');
- }
- content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
- return [4 /*yield*/, this.getClient()];
- case 1:
- client = _c.sent();
- // TODO: [☂] Use here more modelRequirements
- if (modelRequirements.modelVariant !== 'CHAT') {
- throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
- }
- _c.label = 2;
- case 2:
- _c.trys.push([2, 4, , 5]);
- modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
- modelSettings = {
- maxTokens: modelRequirements.maxTokens,
- // <- TODO: [🌾] Make some global max cap for maxTokens
- temperature: modelRequirements.temperature,
- user: this.options.user,
- // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
- // <- Note: [🧆]
- };
- rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
- messages = __spreadArray(__spreadArray([], __read((modelRequirements.systemMessage === undefined
- ? []
- : [
- {
- role: 'system',
- content: modelRequirements.systemMessage,
- },
- ])), false), [
- {
- role: 'user',
- content: rawPromptContent,
- },
- ], false);
- start = getCurrentIsoDate();
- complete = void 0;
- if (this.options.isVerbose) {
- console.info(colors.bgWhite('messages'), JSON.stringify(messages, null, 4));
- }
- rawRequest = [modelName, messages, modelSettings];
- return [4 /*yield*/, client.getChatCompletions.apply(client, __spreadArray([], __read(rawRequest), false))];
- case 3:
- rawResponse = _c.sent();
- if (this.options.isVerbose) {
- console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
- }
- if (!rawResponse.choices[0]) {
- throw new PipelineExecutionError('No choises from Azure OpenAI');
- }
- if (rawResponse.choices.length > 1) {
- // TODO: This should be maybe only warning
- throw new PipelineExecutionError('More than one choise from Azure OpenAI');
- }
- if (!rawResponse.choices[0].message || !rawResponse.choices[0].message.content) {
- throw new PipelineExecutionError('Empty response from Azure OpenAI');
- }
- resultContent = rawResponse.choices[0].message.content;
- // eslint-disable-next-line prefer-const
- complete = getCurrentIsoDate();
- usage = {
- price: uncertainNumber() /* <- TODO: [🐞] Compute usage */,
- input: __assign({ tokensCount: uncertainNumber((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.promptTokens) }, computeUsageCounts(prompt.content)),
- output: __assign({ tokensCount: uncertainNumber((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completionTokens) }, computeUsageCounts(prompt.content)),
- };
- return [2 /*return*/, {
- content: resultContent,
- modelName: modelName,
- timing: {
- start: start,
- complete: complete,
- },
- usage: usage,
- rawPromptContent: rawPromptContent,
- rawRequest: rawRequest,
- rawResponse: rawResponse,
- // <- [🗯]
- }];
- case 4:
- error_1 = _c.sent();
- throw this.transformAzureError(error_1);
- case 5: return [2 /*return*/];
- }
- });
- });
- };
- /**
- * Calls Azure OpenAI API to use a complete model.
- */
- AzureOpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
- var _a, _b;
- return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, client, modelName, modelSettings, start, complete, rawPromptContent, rawRequest, rawResponse, resultContent, usage, error_2;
- return __generator(this, function (_c) {
- switch (_c.label) {
- case 0:
- if (this.options.isVerbose) {
- console.info('🖋 OpenAI callCompletionModel call');
- }
- content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
- return [4 /*yield*/, this.getClient()];
- case 1:
- client = _c.sent();
- // TODO: [☂] Use here more modelRequirements
- if (modelRequirements.modelVariant !== 'COMPLETION') {
- throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
- }
- _c.label = 2;
- case 2:
- _c.trys.push([2, 4, , 5]);
- modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
- modelSettings = {
- maxTokens: modelRequirements.maxTokens || 2000,
- // <- TODO: [🌾] Make some global max cap for maxTokens
- temperature: modelRequirements.temperature,
- user: this.options.user,
- // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
- // <- Note: [🧆]
- };
- start = getCurrentIsoDate();
- complete = void 0;
- if (this.options.isVerbose) {
- console.info(colors.bgWhite('content'), JSON.stringify(content, null, 4));
- console.info(colors.bgWhite('parameters'), JSON.stringify(parameters, null, 4));
- }
- rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
- rawRequest = [
- modelName,
- [rawPromptContent],
- modelSettings,
- ];
- return [4 /*yield*/, client.getCompletions.apply(client, __spreadArray([], __read(rawRequest), false))];
- case 3:
- rawResponse = _c.sent();
- if (this.options.isVerbose) {
- console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
- }
- if (!rawResponse.choices[0]) {
- throw new PipelineExecutionError('No choises from OpenAI');
- }
- if (rawResponse.choices.length > 1) {
- // TODO: This should be maybe only warning
- throw new PipelineExecutionError('More than one choise from OpenAI');
- }
- resultContent = rawResponse.choices[0].text;
- // eslint-disable-next-line prefer-const
- complete = getCurrentIsoDate();
- usage = {
- price: uncertainNumber() /* <- TODO: [🐞] Compute usage */,
- input: __assign({ tokensCount: uncertainNumber((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.promptTokens) }, computeUsageCounts(prompt.content)),
- output: __assign({ tokensCount: uncertainNumber((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completionTokens) }, computeUsageCounts(prompt.content)),
- };
- return [2 /*return*/, {
- content: resultContent,
- modelName: modelName,
- timing: {
- start: start,
- complete: complete,
- },
- usage: usage,
- rawPromptContent: rawPromptContent,
- rawRequest: rawRequest,
- rawResponse: rawResponse,
- // <- [🗯]
- }];
- case 4:
- error_2 = _c.sent();
- throw this.transformAzureError(error_2);
- case 5: return [2 /*return*/];
- }
- });
- });
- };
- // <- Note: [🤖] callXxxModel
- /**
- * Changes Azure error (which is not propper Error but object) to propper Error
- */
- AzureOpenAiExecutionTools.prototype.transformAzureError = function (azureError) {
- if (typeof azureError !== 'object' || azureError === null) {
- return new PipelineExecutionError("Unknown Azure OpenAI error");
- }
- var code = azureError.code, message = azureError.message;
- return new PipelineExecutionError("".concat(code, ": ").concat(message));
- };
- return AzureOpenAiExecutionTools;
- }());
- /**
- * TODO: Maybe Create some common util for callChatModel and callCompletionModel
- * TODO: Maybe make custom AzureOpenAiError
- * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
- * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
- */
-
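The removed class builds its endpoint as `https://<resourceName>.openai.azure.com/`, authenticates with `AzureKeyCredential(apiKey)` and falls back to `deploymentName` when a prompt does not pin a `modelName`. A construction sketch using only the option names visible above (the `createAzureOpenAiExecutionTools` factory added in this release presumably accepts the same options):

    import { AzureOpenAiExecutionTools } from '@promptbook/azure-openai';

    const azureTools = new AzureOpenAiExecutionTools({
        resourceName: 'my-resource', // -> https://my-resource.openai.azure.com/
        deploymentName: 'gpt-4o', // used when the prompt has no modelName
        apiKey: process.env.AZURE_OPENAI_API_KEY,
        isVerbose: true,
    });

    await azureTools.checkConfiguration(); // lazily creates the OpenAIClient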
- /**
- * Computes the usage of the OpenAI API based on the response from OpenAI
- *
- * @param promptContent The content of the prompt
- * @param resultContent The content of the result (for embedding prompts or failed prompts pass empty string)
- * @param rawResponse The raw response from OpenAI API
- * @throws {PipelineExecutionError} If the usage is not defined in the response from OpenAI
- * @private internal utility of `OpenAiExecutionTools`
- */
- function computeOpenAiUsage(promptContent, // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
- resultContent, rawResponse) {
- var _a, _b;
- if (rawResponse.usage === undefined) {
- throw new PipelineExecutionError('The usage is not defined in the response from OpenAI');
- }
- if (((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.prompt_tokens) === undefined) {
- throw new PipelineExecutionError('In OpenAI response `usage.prompt_tokens` not defined');
- }
- var inputTokens = rawResponse.usage.prompt_tokens;
- var outputTokens = ((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completion_tokens) || 0;
- var modelInfo = OPENAI_MODELS.find(function (model) { return model.modelName === rawResponse.model; });
- var price;
- if (modelInfo === undefined || modelInfo.pricing === undefined) {
- price = uncertainNumber();
- }
- else {
- price = uncertainNumber(inputTokens * modelInfo.pricing.prompt + outputTokens * modelInfo.pricing.output);
- }
- return {
- price: price,
- input: __assign({ tokensCount: uncertainNumber(rawResponse.usage.prompt_tokens) }, computeUsageCounts(promptContent)),
- output: __assign({ tokensCount: uncertainNumber(outputTokens) }, computeUsageCounts(resultContent)),
- };
- }
- /**
- * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
- */
-
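Combined with the per-token prices from `OPENAI_MODELS`, the price formula above is simply `inputTokens * pricing.prompt + outputTokens * pricing.output`. A worked example for `gpt-4o` ($5.00 / 1M prompt tokens, $15.00 / 1M output tokens):

    // 1 000 prompt tokens and 500 completion tokens on gpt-4o:
    const price = 1000 * (5.00 / 1_000_000) + 500 * (15.00 / 1_000_000);
    console.log(price); // ~0.0125 USD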
- /**
- * Execution Tools for calling OpenAI API
- *
- * @public exported from `@promptbook/openai`
- */
- var OpenAiExecutionTools = /** @class */ (function () {
- /**
- * Creates OpenAI Execution Tools.
- *
- * @param options which are relevant are directly passed to the OpenAI client
- */
- function OpenAiExecutionTools(options) {
- if (options === void 0) { options = {}; }
- this.options = options;
- /**
- * OpenAI API client.
- */
- this.client = null;
- }
- Object.defineProperty(OpenAiExecutionTools.prototype, "title", {
- get: function () {
- return 'OpenAI';
- },
- enumerable: false,
- configurable: true
- });
- Object.defineProperty(OpenAiExecutionTools.prototype, "description", {
- get: function () {
- return 'Use all models provided by OpenAI';
- },
- enumerable: false,
- configurable: true
- });
- OpenAiExecutionTools.prototype.getClient = function () {
- return __awaiter(this, void 0, void 0, function () {
- var openAiOptions;
- return __generator(this, function (_a) {
- if (this.client === null) {
- openAiOptions = __assign({}, this.options);
- delete openAiOptions.isVerbose;
- delete openAiOptions.user;
- this.client = new OpenAI(__assign({}, openAiOptions));
- }
- return [2 /*return*/, this.client];
- });
- });
- };
- /**
- * Check the `options` passed to `constructor`
- */
- OpenAiExecutionTools.prototype.checkConfiguration = function () {
- return __awaiter(this, void 0, void 0, function () {
- return __generator(this, function (_a) {
- switch (_a.label) {
- case 0: return [4 /*yield*/, this.getClient()];
- case 1:
- _a.sent();
- return [2 /*return*/];
- }
- });
- });
- };
- /**
- * List all available OpenAI models that can be used
- */
- OpenAiExecutionTools.prototype.listModels = function () {
- /*
- Note: Dynamic lising of the models
- const models = await this.openai.models.list({});
-
- console.log({ models });
- console.log(models.data);
- */
- return OPENAI_MODELS;
- };
- /**
- * Calls OpenAI API to use a chat model.
- */
- OpenAiExecutionTools.prototype.callChatModel = function (prompt) {
- return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, expectFormat, client, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
- return __generator(this, function (_a) {
- switch (_a.label) {
- case 0:
- if (this.options.isVerbose) {
- console.info('💬 OpenAI callChatModel call', { prompt: prompt });
- }
- content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements, expectFormat = prompt.expectFormat;
- return [4 /*yield*/, this.getClient()];
- case 1:
- client = _a.sent();
- // TODO: [☂] Use here more modelRequirements
- if (modelRequirements.modelVariant !== 'CHAT') {
- throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
- }
- modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
- modelSettings = {
- model: modelName,
- max_tokens: modelRequirements.maxTokens,
- // <- TODO: [🌾] Make some global max cap for maxTokens
- temperature: modelRequirements.temperature,
- // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
- // <- Note: [🧆]
- };
- if (expectFormat === 'JSON') {
- modelSettings.response_format = {
- type: 'json_object',
- };
- }
- rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
- rawRequest = __assign(__assign({}, modelSettings), { messages: __spreadArray(__spreadArray([], __read((modelRequirements.systemMessage === undefined
- ? []
- : [
- {
- role: 'system',
- content: modelRequirements.systemMessage,
- },
- ])), false), [
- {
- role: 'user',
- content: rawPromptContent,
- },
- ], false), user: this.options.user });
- start = getCurrentIsoDate();
- if (this.options.isVerbose) {
- console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
- }
- return [4 /*yield*/, client.chat.completions.create(rawRequest)];
- case 2:
- rawResponse = _a.sent();
- if (this.options.isVerbose) {
- console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
- }
- if (!rawResponse.choices[0]) {
- throw new PipelineExecutionError('No choises from OpenAI');
- }
- if (rawResponse.choices.length > 1) {
- // TODO: This should be maybe only warning
- throw new PipelineExecutionError('More than one choise from OpenAI');
- }
- resultContent = rawResponse.choices[0].message.content;
- // eslint-disable-next-line prefer-const
- complete = getCurrentIsoDate();
- usage = computeOpenAiUsage(content, resultContent || '', rawResponse);
- if (resultContent === null) {
- throw new PipelineExecutionError('No response message from OpenAI');
- }
- return [2 /*return*/, {
- content: resultContent,
- modelName: rawResponse.model || modelName,
- timing: {
- start: start,
- complete: complete,
- },
- usage: usage,
- rawPromptContent: rawPromptContent,
- rawRequest: rawRequest,
- rawResponse: rawResponse,
- // <- [🗯]
- }];
- }
- });
- });
- };
- /**
- * Calls OpenAI API to use a complete model.
- */
- OpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
- return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, client, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
- return __generator(this, function (_a) {
- switch (_a.label) {
- case 0:
- if (this.options.isVerbose) {
- console.info('🖋 OpenAI callCompletionModel call', { prompt: prompt });
- }
- content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
- return [4 /*yield*/, this.getClient()];
- case 1:
- client = _a.sent();
- // TODO: [☂] Use here more modelRequirements
- if (modelRequirements.modelVariant !== 'COMPLETION') {
- throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
- }
- modelName = modelRequirements.modelName || this.getDefaultCompletionModel().modelName;
- modelSettings = {
- model: modelName,
- max_tokens: modelRequirements.maxTokens || 2000,
- // <- TODO: [🌾] Make some global max cap for maxTokens
- temperature: modelRequirements.temperature,
- // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
- // <- Note: [🧆]
- };
- rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
- rawRequest = __assign(__assign({}, modelSettings), { prompt: rawPromptContent, user: this.options.user });
- start = getCurrentIsoDate();
- if (this.options.isVerbose) {
- console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
- }
- return [4 /*yield*/, client.completions.create(rawRequest)];
- case 2:
- rawResponse = _a.sent();
- if (this.options.isVerbose) {
- console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
- }
- if (!rawResponse.choices[0]) {
- throw new PipelineExecutionError('No choises from OpenAI');
- }
- if (rawResponse.choices.length > 1) {
- // TODO: This should be maybe only warning
- throw new PipelineExecutionError('More than one choise from OpenAI');
- }
- resultContent = rawResponse.choices[0].text;
- // eslint-disable-next-line prefer-const
- complete = getCurrentIsoDate();
- usage = computeOpenAiUsage(content, resultContent || '', rawResponse);
- return [2 /*return*/, {
- content: resultContent,
- modelName: rawResponse.model || modelName,
- timing: {
- start: start,
- complete: complete,
- },
- usage: usage,
- rawPromptContent: rawPromptContent,
- rawRequest: rawRequest,
- rawResponse: rawResponse,
- // <- [🗯]
- }];
- }
- });
- });
- };
- /**
- * Calls OpenAI API to use a embedding model
- */
- OpenAiExecutionTools.prototype.callEmbeddingModel = function (prompt) {
- return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, client, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
- return __generator(this, function (_a) {
- switch (_a.label) {
- case 0:
- if (this.options.isVerbose) {
- console.info('🖋 OpenAI embedding call', { prompt: prompt });
- }
- content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
- return [4 /*yield*/, this.getClient()];
- case 1:
- client = _a.sent();
- // TODO: [☂] Use here more modelRequirements
- if (modelRequirements.modelVariant !== 'EMBEDDING') {
- throw new PipelineExecutionError('Use embed only for EMBEDDING variant');
- }
- modelName = modelRequirements.modelName || this.getDefaultEmbeddingModel().modelName;
- rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
- rawRequest = {
- input: rawPromptContent,
- model: modelName,
- };
- start = getCurrentIsoDate();
- if (this.options.isVerbose) {
- console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
- }
- return [4 /*yield*/, client.embeddings.create(rawRequest)];
- case 2:
- rawResponse = _a.sent();
- if (this.options.isVerbose) {
- console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
- }
- if (rawResponse.data.length !== 1) {
- throw new PipelineExecutionError("Expected exactly 1 data item in response, got ".concat(rawResponse.data.length));
- }
- resultContent = rawResponse.data[0].embedding;
- // eslint-disable-next-line prefer-const
- complete = getCurrentIsoDate();
- usage = computeOpenAiUsage(content, '', rawResponse);
- return [2 /*return*/, {
- content: resultContent,
- modelName: rawResponse.model || modelName,
- timing: {
- start: start,
- complete: complete,
- },
- usage: usage,
- rawPromptContent: rawPromptContent,
- rawRequest: rawRequest,
- rawResponse: rawResponse,
- // <- [🗯]
- }];
- }
- });
- });
- };
- // <- Note: [🤖] callXxxModel
- /**
- * Get the model that should be used as default
- */
- OpenAiExecutionTools.prototype.getDefaultModel = function (defaultModelName) {
- var model = OPENAI_MODELS.find(function (_a) {
- var modelName = _a.modelName;
- return modelName === defaultModelName;
- });
- if (model === undefined) {
- throw new UnexpectedError(spaceTrim(function (block) {
- return "\n Cannot find model in OpenAI models with name \"".concat(defaultModelName, "\" which should be used as default.\n\n Available models:\n ").concat(block(OPENAI_MODELS.map(function (_a) {
- var modelName = _a.modelName;
- return "- \"".concat(modelName, "\"");
- }).join('\n')), "\n\n ");
- }));
- }
- return model;
- };
- /**
- * Default model for chat variant.
- */
- OpenAiExecutionTools.prototype.getDefaultChatModel = function () {
- return this.getDefaultModel('gpt-4o');
- };
- /**
- * Default model for completion variant.
- */
- OpenAiExecutionTools.prototype.getDefaultCompletionModel = function () {
- return this.getDefaultModel('gpt-3.5-turbo-instruct');
- };
- /**
- * Default model for completion variant.
- */
- OpenAiExecutionTools.prototype.getDefaultEmbeddingModel = function () {
- return this.getDefaultModel('text-embedding-3-large');
- };
- return OpenAiExecutionTools;
- }());
- /**
- * TODO: [🧠][🧙‍♂️] Maybe there can be some wizzard for thoose who want to use just OpenAI
- * TODO: Maybe Create some common util for callChatModel and callCompletionModel
- * TODO: Maybe make custom OpenAiError
- * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
- * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
- */
-
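The class above resolves defaults through `getDefaultModel`: `gpt-4o` for chat, `gpt-3.5-turbo-instruct` for completion and `text-embedding-3-large` for embeddings, and each call destructures `content`, `parameters` and `modelRequirements` from the prompt. A call sketch under those assumptions (the exact `Prompt` shape and the `{parameter}` templating live in the package typings and in `replaceParameters`, not in this hunk):

    const openAiTools = new OpenAiExecutionTools({ apiKey: process.env.OPENAI_API_KEY });

    const result = await openAiTools.callChatModel({
        content: 'Write a haiku about {topic}',
        parameters: { topic: 'package diffs' },
        modelRequirements: { modelVariant: 'CHAT' }, // no modelName -> defaults to 'gpt-4o'
    });

    console.log(result.content, result.usage.price);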
- /**
- * Execution Tools for calling OpenAI API
- *
- * @public exported from `@promptbook/openai`
- */
- var createOpenAiExecutionTools = Object.assign(function (options) {
- // TODO: !!!!!! If browser, auto add `dangerouslyAllowBrowser`
- return new OpenAiExecutionTools(options);
- }, {
- packageName: '@promptbook/openai',
- className: 'OpenAiExecutionTools',
- });
- /**
- * TODO: [🦺] Is there some way how to put `packageName` and `className` on top and function definition on bottom?
- * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
- */
-
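The factory simply forwards its options to `new OpenAiExecutionTools(options)` and tags itself with `packageName` and `className`, which is what the configuration lookup below matches on. Equivalent usage:

    import { createOpenAiExecutionTools } from '@promptbook/openai';

    const tools = createOpenAiExecutionTools({ apiKey: process.env.OPENAI_API_KEY });
    // Same as `new OpenAiExecutionTools({ apiKey: ... })`, but discoverable by its className.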
- /**
- * @@@
- *
- * TODO: !!!!!! Remove EXECUTION_TOOLS_CLASSES and use $llmToolsRegister instead
- *
- * @private internal type for `createLlmToolsFromConfiguration`
- */
- var EXECUTION_TOOLS_CLASSES = {
- createOpenAiExecutionTools: createOpenAiExecutionTools,
- createAnthropicClaudeExecutionTools: createAnthropicClaudeExecutionTools,
- createAzureOpenAiExecutionTools: function (options) {
- return new AzureOpenAiExecutionTools(
- // <- TODO: [🧱] Implement in a functional (not new Class) way
- options);
- },
- // <- Note: [🦑] Add here new LLM provider
- };
- /**
- * TODO: !!!!!!! Make global register for this
- * TODO: [🧠][🎌] Adding this should be responsibility of each provider package NOT this one central place
- */
-
- /**
- * @@@
- *
- * Note: This function is not cached, every call creates new instance of `MultipleLlmExecutionTools`
- *
- * @returns @@@
- * @public exported from `@promptbook/core`
- */
- function createLlmToolsFromConfiguration(configuration, options) {
- if (options === void 0) { options = {}; }
- var _a = options.isVerbose, isVerbose = _a === void 0 ? false : _a;
- var llmTools = configuration.map(function (llmConfiguration) {
- var constructor = EXECUTION_TOOLS_CLASSES["create".concat(llmConfiguration.className)];
- if (!constructor) {
- throw new Error(spaceTrim(function (block) { return "\n There is no constructor for LLM provider `".concat(llmConfiguration.className, "`\n\n\n @@@\n\n Available constructors are:\n ").concat(block('@@@'), "\n\n\n "); }));
+ if (registeredItem === undefined) {
+ throw new Error(spaceTrim(function (block) { return "\n There is no constructor for LLM provider `".concat(llmConfiguration.className, "` from `").concat(llmConfiguration.packageName, "`\n\n You have probably forgotten install and import the provider package.\n To fix this issue, you can:\n\n Install:\n\n > npm install ").concat(llmConfiguration.packageName, "\n\n And import:\n\n > import '").concat(llmConfiguration.packageName, "';\n\n\n ").concat(block($registeredLlmToolsMessage()), "\n "); }));
  }
- return constructor(__assign({ isVerbose: isVerbose }, llmConfiguration.options));
+ return registeredItem(__assign({ isVerbose: isVerbose }, llmConfiguration.options));
  });
  return joinLlmExecutionTools.apply(void 0, __spreadArray([], __read(llmTools), false));
  }
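The added lines swap the hard-coded `EXECUTION_TOOLS_CLASSES` lookup for a registered item (per the new `$llmToolsRegister`), and the new error text spells out the recovery steps. In code form, the fix it suggests looks like this (provider package chosen only as an example):

    // 1. Install the provider package:
    //    > npm install @promptbook/openai
    // 2. Import it once so its constructor registers itself:
    import '@promptbook/openai';
    // 3. After that, createLlmToolsFromConfiguration(configuration) can resolve 'OpenAiExecutionTools'.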
@@ -8109,7 +6607,7 @@ function createLlmToolsFromEnv(options) {
  var configuration = createLlmToolsFromConfigurationFromEnv();
  if (configuration.length === 0) {
  // TODO: [🥃]
- throw new Error(spaceTrim("\n No LLM tools found in the environment\n\n !!!!!!!@@@@You have maybe forgotten to two things:\n !!!!!!! List all available LLM tools in your environment\n - Azure \n - OpenAI (not imported)\n\n Please set one of environment variables:\n - OPENAI_API_KEY\n - ANTHROPIC_CLAUDE_API_KEY\n "));
+ throw new Error(spaceTrim(function (block) { return "\n No LLM tools found in the environment\n\n Please set one of environment variables:\n - OPENAI_API_KEY\n - ANTHROPIC_CLAUDE_API_KEY\n\n ".concat(block($registeredLlmToolsMessage()), "}\n "); }));
  }
  return createLlmToolsFromConfiguration(configuration, options);
  }
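The reworked message also lists the environment variables the env-based factory looks for. A minimal sketch of that path, assuming the provider package is imported so it is registered (the import specifier for `createLlmToolsFromEnv` is an assumption, check the package exports):

    // Shell or .env:
    //   OPENAI_API_KEY=sk-...
    import '@promptbook/openai';
    import { createLlmToolsFromEnv } from '@promptbook/node';

    const llmTools = createLlmToolsFromEnv({ isVerbose: true });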