@promptbook/node 0.66.0-8 → 0.66.0-9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. package/esm/index.es.js +193 -1696
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/anthropic-claude.index.d.ts +0 -2
  4. package/esm/typings/src/_packages/cli.index.d.ts +6 -0
  5. package/esm/typings/src/_packages/types.index.d.ts +22 -14
  6. package/esm/typings/src/_packages/utils.index.d.ts +7 -7
  7. package/esm/typings/src/execution/LlmExecutionToolsConstructor.d.ts +1 -1
  8. package/esm/typings/src/llm-providers/_common/$llmToolsMetadataRegister.d.ts +2 -2
  9. package/esm/typings/src/llm-providers/_common/$llmToolsRegister.d.ts +2 -2
  10. package/esm/typings/src/llm-providers/_common/$registeredLlmToolsMessage.d.ts +9 -0
  11. package/esm/typings/src/llm-providers/_common/LlmToolsConfiguration.d.ts +1 -1
  12. package/esm/typings/src/llm-providers/_common/LlmToolsMetadata.d.ts +1 -1
  13. package/esm/typings/src/llm-providers/_common/createLlmToolsFromConfigurationFromEnv.d.ts +1 -0
  14. package/esm/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts +1 -1
  15. package/esm/typings/src/llm-providers/anthropic-claude/register-constructor.d.ts +1 -0
  16. package/esm/typings/src/llm-providers/azure-openai/register-constructor.d.ts +1 -0
  17. package/esm/typings/src/llm-providers/openai/register-constructor.d.ts +1 -0
  18. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +1 -0
  19. package/esm/typings/src/llm-providers/remote/interfaces/{Promptbook_Server_Error.d.ts → PromptbookServer_Error.d.ts} +1 -1
  20. package/esm/typings/src/llm-providers/remote/interfaces/PromptbookServer_ListModels_Request.d.ts +34 -0
  21. package/esm/typings/src/llm-providers/remote/interfaces/PromptbookServer_ListModels_Response.d.ts +15 -0
  22. package/esm/typings/src/llm-providers/remote/interfaces/{Promptbook_Server_Progress.d.ts → PromptbookServer_Prompt_Progress.d.ts} +1 -1
  23. package/esm/typings/src/llm-providers/remote/interfaces/{Promptbook_Server_Request.d.ts → PromptbookServer_Prompt_Request.d.ts} +15 -3
  24. package/esm/typings/src/llm-providers/remote/interfaces/{Promptbook_Server_Response.d.ts → PromptbookServer_Prompt_Response.d.ts} +1 -1
  25. package/esm/typings/src/llm-providers/remote/interfaces/RemoteLlmExecutionToolsOptions.d.ts +1 -7
  26. package/esm/typings/src/llm-providers/remote/startRemoteServer.d.ts +1 -0
  27. package/esm/typings/src/utils/{Register.d.ts → $Register.d.ts} +6 -2
  28. package/esm/typings/src/utils/environment/{getGlobalScope.d.ts → $getGlobalScope.d.ts} +1 -1
  29. package/esm/typings/src/utils/organization/f.d.ts +6 -0
  30. package/package.json +2 -6
  31. package/umd/index.umd.js +197 -1698
  32. package/umd/index.umd.js.map +1 -1
  33. package/esm/typings/src/llm-providers/_common/config.d.ts +0 -14
  34. package/esm/typings/src/llm-providers/anthropic-claude/register1.d.ts +0 -4
  35. package/esm/typings/src/llm-providers/mocked/{fakeTextToExpectations.d.ts → $fakeTextToExpectations.d.ts} +0 -0
  36. package/esm/typings/src/utils/{currentDate.d.ts → $currentDate.d.ts} +0 -0
  37. package/esm/typings/src/utils/environment/{isRunningInBrowser.d.ts → $isRunningInBrowser.d.ts} +0 -0
  38. package/esm/typings/src/utils/environment/{isRunningInNode.d.ts → $isRunningInNode.d.ts} +0 -0
  39. package/esm/typings/src/utils/environment/{isRunningInWebWorker.d.ts → $isRunningInWebWorker.d.ts} +0 -0
  40. package/esm/typings/src/utils/files/{isDirectoryExisting.d.ts → $isDirectoryExisting.d.ts} +0 -0
  41. package/esm/typings/src/utils/files/{isDirectoryExisting.test.d.ts → $isDirectoryExisting.test.d.ts} +0 -0
  42. package/esm/typings/src/utils/files/{isFileExisting.d.ts → $isFileExisting.d.ts} +0 -0
  43. package/esm/typings/src/utils/files/{isFileExisting.test.d.ts → $isFileExisting.test.d.ts} +0 -0
  44. package/esm/typings/src/utils/files/{listAllFiles.d.ts → $listAllFiles.d.ts} +0 -0
  45. package/esm/typings/src/utils/files/{listAllFiles.test.d.ts → $listAllFiles.test.d.ts} +0 -0
  46. package/esm/typings/src/utils/random/{randomSeed.d.ts → $randomSeed.d.ts} +0 -0
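The central change in this version is the `Register` → `$Register` rename: registry storage now lives on the global scope under a `_promptbook_*` key, so multiple bundled copies of the library share one register (the `$` prefix marks functions and classes that are not pure and touch the global scope). A minimal TypeScript sketch of that pattern, reconstructed from the bundled UMD output shown below — names mirror the shipped code, but this is an illustration, not the package source; the error class is simplified here:

type Registration = { packageName: string; className: string };

class $Register<TRegistered extends Registration> {
    private readonly storage: Array<TRegistered>;

    public constructor(private readonly storageName: string) {
        // One shared array per storageName, kept on globalThis so that every
        // copy of the library in the process sees the same registrations
        const key = `_promptbook_${storageName}`;
        const globalScope = globalThis as unknown as Record<string, Array<TRegistered>>;

        if (globalScope[key] === undefined) {
            globalScope[key] = [];
        } else if (!Array.isArray(globalScope[key])) {
            // The shipped code throws `UnexpectedError` here; simplified in this sketch
            throw new Error(`Expected (global) ${key} to be an array, but got ${typeof globalScope[key]}`);
        }

        this.storage = globalScope[key];
    }

    public list(): ReadonlyArray<TRegistered> {
        return this.storage;
    }

    public register(registered: TRegistered): void {
        // Re-registering the same packageName + className replaces the old entry
        const { packageName, className } = registered;
        const existingIndex = this.storage.findIndex(
            (item) => item.packageName === packageName && item.className === className,
        );

        if (existingIndex === -1) {
            this.storage.push(registered);
        } else {
            this.storage[existingIndex] = registered;
        }
    }
}

// Usage as in this release: registers are singletons keyed by storage name
const $llmToolsMetadataRegister = new $Register<Registration>('llm_tools_metadata');
const $llmToolsRegister = new $Register<Registration>('llm_execution_tools_constructors');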
package/umd/index.umd.js CHANGED
@@ -1,8 +1,8 @@
  (function (global, factory) {
- typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('colors'), require('fs/promises'), require('path'), require('spacetrim'), require('prettier'), require('prettier/parser-html'), require('crypto-js/enc-hex'), require('crypto-js/sha256'), require('path/posix'), require('dotenv'), require('socket.io-client'), require('@anthropic-ai/sdk'), require('@azure/openai'), require('openai')) :
- typeof define === 'function' && define.amd ? define(['exports', 'colors', 'fs/promises', 'path', 'spacetrim', 'prettier', 'prettier/parser-html', 'crypto-js/enc-hex', 'crypto-js/sha256', 'path/posix', 'dotenv', 'socket.io-client', '@anthropic-ai/sdk', '@azure/openai', 'openai'], factory) :
- (global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global["promptbook-node"] = {}, global.colors, global.promises, global.path, global.spaceTrim, global.prettier, global.parserHtml, global.hexEncoder, global.sha256, global.posix, global.dotenv, global.socket_ioClient, global.Anthropic, global.openai, global.OpenAI));
- })(this, (function (exports, colors, promises, path, spaceTrim, prettier, parserHtml, hexEncoder, sha256, posix, dotenv, socket_ioClient, Anthropic, openai, OpenAI) { 'use strict';
+ typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('colors'), require('fs/promises'), require('path'), require('spacetrim'), require('prettier'), require('prettier/parser-html'), require('crypto-js/enc-hex'), require('crypto-js/sha256'), require('path/posix'), require('dotenv')) :
+ typeof define === 'function' && define.amd ? define(['exports', 'colors', 'fs/promises', 'path', 'spacetrim', 'prettier', 'prettier/parser-html', 'crypto-js/enc-hex', 'crypto-js/sha256', 'path/posix', 'dotenv'], factory) :
+ (global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global["promptbook-node"] = {}, global.colors, global.promises, global.path, global.spaceTrim, global.prettier, global.parserHtml, global.hexEncoder, global.sha256, global.posix, global.dotenv));
+ })(this, (function (exports, colors, promises, path, spaceTrim, prettier, parserHtml, hexEncoder, sha256, posix, dotenv) { 'use strict';

  function _interopDefaultLegacy (e) { return e && typeof e === 'object' && 'default' in e ? e : { 'default': e }; }

@@ -30,14 +30,12 @@
  var hexEncoder__default = /*#__PURE__*/_interopDefaultLegacy(hexEncoder);
  var sha256__default = /*#__PURE__*/_interopDefaultLegacy(sha256);
  var dotenv__namespace = /*#__PURE__*/_interopNamespace(dotenv);
- var Anthropic__default = /*#__PURE__*/_interopDefaultLegacy(Anthropic);
- var OpenAI__default = /*#__PURE__*/_interopDefaultLegacy(OpenAI);

  // ⚠️ WARNING: This code has been generated so that any manual changes will be overwritten
  /**
  * The version of the Promptbook library
  */
- var PROMPTBOOK_VERSION = '0.66.0-7';
+ var PROMPTBOOK_VERSION = '0.66.0-8';
  // TODO: !!!! List here all the versions and annotate + put into script

  /*! *****************************************************************************
@@ -205,6 +203,26 @@
  * TODO: [🧠] Is there a way how to meaningfully test this utility
  */

+ /**
+ * Returns the same value that is passed as argument.
+ * No side effects.
+ *
+ * Note: It can be usefull for:
+ *
+ * 1) Leveling indentation
+ * 2) Putting always-true or always-false conditions without getting eslint errors
+ *
+ * @param value any values
+ * @returns the same values
+ * @private within the repository
+ */
+ function just(value) {
+ if (value === undefined) {
+ return undefined;
+ }
+ return value;
+ }
+
  // <- TODO: [🧠] Better system for generator warnings - not always "code" and "by `@promptbook/cli`"
  /**
  * The maximum number of iterations for a loops
@@ -716,7 +734,7 @@
  });
  }

- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.66.0-7",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.66.0-7",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.66.0-7",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.66.0-7",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the 
persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.66.0-8",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.66.0-8",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.66.0-8",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.66.0-8",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the 
persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];

  /**
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
@@ -1944,8 +1962,37 @@
  */
  MultipleLlmExecutionTools.prototype.checkConfiguration = function () {
  return __awaiter(this, void 0, void 0, function () {
- return __generator(this, function (_a) {
- return [2 /*return*/];
+ var _a, _b, llmExecutionTools, e_1_1;
+ var e_1, _c;
+ return __generator(this, function (_d) {
+ switch (_d.label) {
+ case 0:
+ _d.trys.push([0, 5, 6, 7]);
+ _a = __values(this.llmExecutionTools), _b = _a.next();
+ _d.label = 1;
+ case 1:
+ if (!!_b.done) return [3 /*break*/, 4];
+ llmExecutionTools = _b.value;
+ return [4 /*yield*/, llmExecutionTools.checkConfiguration()];
+ case 2:
+ _d.sent();
+ _d.label = 3;
+ case 3:
+ _b = _a.next();
+ return [3 /*break*/, 1];
+ case 4: return [3 /*break*/, 7];
+ case 5:
+ e_1_1 = _d.sent();
+ e_1 = { error: e_1_1 };
+ return [3 /*break*/, 7];
+ case 6:
+ try {
+ if (_b && !_b.done && (_c = _a.return)) _c.call(_a);
+ }
+ finally { if (e_1) throw e_1.error; }
+ return [7 /*endfinally*/];
+ case 7: return [2 /*return*/];
+ }
  });
  });
  };
@@ -1955,8 +2002,8 @@
  */
  MultipleLlmExecutionTools.prototype.listModels = function () {
  return __awaiter(this, void 0, void 0, function () {
- var availableModels, _a, _b, llmExecutionTools, models, e_1_1;
- var e_1, _c;
+ var availableModels, _a, _b, llmExecutionTools, models, e_2_1;
+ var e_2, _c;
  return __generator(this, function (_d) {
  switch (_d.label) {
  case 0:
@@ -1979,14 +2026,14 @@
  return [3 /*break*/, 2];
  case 5: return [3 /*break*/, 8];
  case 6:
- e_1_1 = _d.sent();
- e_1 = { error: e_1_1 };
+ e_2_1 = _d.sent();
+ e_2 = { error: e_2_1 };
  return [3 /*break*/, 8];
  case 7:
  try {
  if (_b && !_b.done && (_c = _a.return)) _c.call(_a);
  }
- finally { if (e_1) throw e_1.error; }
+ finally { if (e_2) throw e_2.error; }
  return [7 /*endfinally*/];
  case 8: return [2 /*return*/, availableModels];
  }
@@ -2019,8 +2066,8 @@
  */
  MultipleLlmExecutionTools.prototype.callCommonModel = function (prompt) {
  return __awaiter(this, void 0, void 0, function () {
- var errors, _a, _b, llmExecutionTools, _c, error_1, e_2_1;
- var e_2, _d;
+ var errors, _a, _b, llmExecutionTools, _c, error_1, e_3_1;
+ var e_3, _d;
  var _this = this;
  return __generator(this, function (_e) {
  switch (_e.label) {
@@ -2076,14 +2123,14 @@
  return [3 /*break*/, 2];
  case 14: return [3 /*break*/, 17];
  case 15:
- e_2_1 = _e.sent();
- e_2 = { error: e_2_1 };
+ e_3_1 = _e.sent();
+ e_3 = { error: e_3_1 };
  return [3 /*break*/, 17];
  case 16:
  try {
  if (_b && !_b.done && (_d = _a.return)) _d.call(_a);
  }
- finally { if (e_2) throw e_2.error; }
+ finally { if (e_3) throw e_3.error; }
  return [7 /*endfinally*/];
  case 17:
  if (errors.length === 1) {
@@ -6326,7 +6373,7 @@
  /**
  * @@@
  *
- * Note: `$` is used to indicate that this function is not a pure function - it access global
+ * Note: `$` is used to indicate that this function is not a pure function - it access global scope
  *
  * @public exported from `@promptbook/utils`
  */
@@ -6340,42 +6387,45 @@
  /**
  * Register is @@@
  *
+ * Note: `$` is used to indicate that this function is not a pure function - it accesses and adds variables in global scope.
+ *
  * @private internal utility, exported are only signleton instances of this class
  */
- var Register = /** @class */ (function () {
- function Register(storage) {
- this.storage = storage;
+ var $Register = /** @class */ (function () {
+ function $Register(storageName) {
+ this.storageName = storageName;
+ storageName = "_promptbook_".concat(storageName);
+ var globalScope = $getGlobalScope();
+ if (globalScope[storageName] === undefined) {
+ globalScope[storageName] = [];
+ }
+ else if (!Array.isArray(globalScope[storageName])) {
+ throw new UnexpectedError("Expected (global) ".concat(storageName, " to be an array, but got ").concat(typeof globalScope[storageName]));
+ }
+ this.storage = globalScope[storageName];
  }
- Register.prototype.list = function () {
+ $Register.prototype.list = function () {
  // <- TODO: ReadonlyDeep<Array<TRegistered>>
  return this.storage;
  };
- Register.prototype.register = function (registered) {
+ $Register.prototype.register = function (registered) {
  // <- TODO: What to return here
  var packageName = registered.packageName, className = registered.className;
  var existingRegistrationIndex = this.storage.findIndex(function (item) { return item.packageName === packageName && item.className === className; });
  var existingRegistration = this.storage[existingRegistrationIndex];
+ // TODO: !!!!!! Global IS_VERBOSE mode
  if (!existingRegistration) {
- console.warn("[\uD83D\uDCE6] Registering ".concat(packageName, ".").concat(className, " again"));
+ console.warn("[\uD83D\uDCE6] Registering `".concat(packageName, ".").concat(className, "` to `").concat(this.storageName, "`"));
  this.storage.push(registered);
  }
  else {
- console.warn("[\uD83D\uDCE6] Re-registering ".concat(packageName, ".").concat(className, " again"));
+ console.warn("[\uD83D\uDCE6] Re-registering `".concat(packageName, ".").concat(className, "` to `").concat(this.storageName, "`"));
  this.storage[existingRegistrationIndex] = registered;
  }
  };
- return Register;
+ return $Register;
  }());

- // TODO: !!!!!! Move this logic to Register and rename to $Register
- var globalScope = $getGlobalScope();
- if (globalScope.$llmToolsMetadataRegister === undefined) {
- globalScope.$llmToolsMetadataRegister = [];
- }
- else if (!Array.isArray(globalScope.$llmToolsMetadataRegister)) {
- throw new UnexpectedError("Expected $llmToolsMetadataRegister to be an array, but got ".concat(typeof globalScope.$llmToolsMetadataRegister));
- }
- var _ = globalScope.$llmToolsMetadataRegister;
  /**
  * @@@
  *
@@ -6383,8 +6433,7 @@
  * @singleton Only one instance of each register is created per build, but thare can be more @@@
  * @public exported from `@promptbook/core`
  */
- var $llmToolsMetadataRegister = new Register(_);
- $getGlobalScope().$llmToolsMetadataRegister;
+ var $llmToolsMetadataRegister = new $Register('llm_tools_metadata');

  /**
  * @@@
@@ -6410,6 +6459,7 @@
  return llmToolsConfiguration;
  }
  /**
+ * TODO: [🧠][🪁] Maybe do allow to do auto-install if package not registered and not found
  * TODO: Add Azure OpenAI
  * TODO: [🧠][🍛]
  * TODO: [🧠] Is there some meaningfull way how to test this util
@@ -6420,1682 +6470,131 @@
  */

  /**
- * Remote server is a proxy server that uses its execution tools internally and exposes the executor interface externally.
- *
- * You can simply use `RemoteExecutionTools` on client-side javascript and connect to your remote server.
- * This is useful to make all logic on browser side but not expose your API keys or no need to use customer's GPU.
- *
- * @see https://github.com/webgptorg/promptbook#remote-server
- * @public exported from `@promptbook/remote-client`
- */
- var RemoteLlmExecutionTools = /** @class */ (function () {
- function RemoteLlmExecutionTools(options) {
- this.options = options;
- }
- Object.defineProperty(RemoteLlmExecutionTools.prototype, "title", {
- get: function () {
- // TODO: [🧠] Maybe fetch title+description from the remote server (as well as if model methods are defined)
- return 'Remote server';
- },
- enumerable: false,
- configurable: true
- });
- Object.defineProperty(RemoteLlmExecutionTools.prototype, "description", {
- get: function () {
- return 'Use all models by your remote server';
- },
- enumerable: false,
- configurable: true
- });
- /**
- * Check the configuration of all execution tools
- */
- RemoteLlmExecutionTools.prototype.checkConfiguration = function () {
- return __awaiter(this, void 0, void 0, function () {
- return __generator(this, function (_a) {
- return [2 /*return*/];
- });
- });
- };
- /**
- * List all available models that can be used
- */
- RemoteLlmExecutionTools.prototype.listModels = function () {
- return __awaiter(this, void 0, void 0, function () {
- return __generator(this, function (_a) {
- return [2 /*return*/, (this.options.models ||
- [
- /* !!!!!! */
- ])];
- });
- });
- };
- /**
- * Creates a connection to the remote proxy server.
- */
- RemoteLlmExecutionTools.prototype.makeConnection = function () {
- var _this = this;
- return new Promise(
- // <- TODO: [🧱] Implement in a functional (not new Class) way
- function (resolve, reject) {
- var socket = socket_ioClient.io(_this.options.remoteUrl, {
- path: _this.options.path,
- // path: `${this.remoteUrl.pathname}/socket.io`,
- transports: [/*'websocket', <- TODO: [🌬] Make websocket transport work */ 'polling'],
- });
- // console.log('Connecting to', this.options.remoteUrl.href, { socket });
- socket.on('connect', function () {
- resolve(socket);
- });
- // TODO: !!!! Better timeout handling
- setTimeout(function () {
- reject(new Error("Timeout while connecting to ".concat(_this.options.remoteUrl)));
- }, 1000 /* <- TODO: Timeout to config */);
- });
- };
- /**
- * Calls remote proxy server to use a chat model
- */
- RemoteLlmExecutionTools.prototype.callChatModel = function (prompt) {
- if (this.options.isVerbose) {
- console.info("\uD83D\uDD8B Remote callChatModel call");
- }
- return /* not await */ this.callCommonModel(prompt);
- };
- /**
- * Calls remote proxy server to use a completion model
- */
- RemoteLlmExecutionTools.prototype.callCompletionModel = function (prompt) {
- if (this.options.isVerbose) {
- console.info("\uD83D\uDCAC Remote callCompletionModel call");
- }
- return /* not await */ this.callCommonModel(prompt);
- };
- /**
- * Calls remote proxy server to use a embedding model
- */
- RemoteLlmExecutionTools.prototype.callEmbeddingModel = function (prompt) {
- if (this.options.isVerbose) {
- console.info("\uD83D\uDCAC Remote callEmbeddingModel call");
- }
- return /* not await */ this.callCommonModel(prompt);
- };
- // <- Note: [🤖] callXxxModel
- /**
- * Calls remote proxy server to use both completion or chat model
- */
- RemoteLlmExecutionTools.prototype.callCommonModel = function (prompt) {
- return __awaiter(this, void 0, void 0, function () {
- var socket, promptResult;
- return __generator(this, function (_a) {
- switch (_a.label) {
- case 0: return [4 /*yield*/, this.makeConnection()];
- case 1:
- socket = _a.sent();
- if (this.options.isAnonymous) {
- socket.emit('request', {
- llmToolsConfiguration: this.options.llmToolsConfiguration,
- prompt: prompt,
- // <- TODO: [🛫] `prompt` is NOT fully serializable as JSON, it contains functions which are not serializable
- });
- }
- else {
- socket.emit('request', {
- clientId: this.options.clientId,
- prompt: prompt,
- // <- TODO: [🛫] `prompt` is NOT fully serializable as JSON, it contains functions which are not serializable
- });
- }
- return [4 /*yield*/, new Promise(function (resolve, reject) {
- socket.on('response', function (response) {
- resolve(response.promptResult);
- socket.disconnect();
- });
- socket.on('error', function (error) {
- reject(new PipelineExecutionError(error.errorMessage));
- socket.disconnect();
- });
- })];
- case 2:
- promptResult = _a.sent();
- socket.disconnect();
- return [2 /*return*/, promptResult];
- }
- });
- });
- };
- return RemoteLlmExecutionTools;
- }());
- /**
- * TODO: [🍓] Allow to list compatible models with each variant
- * TODO: [🗯] RemoteLlmExecutionTools should extend Destroyable and implement IDestroyable
- * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
- * TODO: [🧠] Maybe remove `@promptbook/remote-client` and just use `@promptbook/core`
- */
-
- /**
- * Function computeUsage will create price per one token based on the string value found on openai page
- *
- * @private within the repository, used only as internal helper for `OPENAI_MODELS`
- */
- function computeUsage(value) {
- var _a = __read(value.split(' / '), 2), price = _a[0], tokens = _a[1];
- return parseFloat(price.replace('$', '')) / parseFloat(tokens.replace('M tokens', '')) / 1000000;
- }
-
- /**
- * List of available Anthropic Claude models with pricing
- *
- * Note: Done at 2024-08-16
- *
- * @see https://docs.anthropic.com/en/docs/models-overview
- * @public exported from `@promptbook/anthropic-claude`
- */
- var ANTHROPIC_CLAUDE_MODELS = [
- {
- modelVariant: 'CHAT',
- modelTitle: 'Claude 3.5 Sonnet',
- modelName: 'claude-3-5-sonnet-20240620',
- pricing: {
- prompt: computeUsage("$3.00 / 1M tokens"),
- output: computeUsage("$15.00 / 1M tokens"),
- },
- },
- {
- modelVariant: 'CHAT',
- modelTitle: 'Claude 3 Opus',
- modelName: 'claude-3-opus-20240229',
- pricing: {
- prompt: computeUsage("$15.00 / 1M tokens"),
- output: computeUsage("$75.00 / 1M tokens"),
- },
- },
- {
- modelVariant: 'CHAT',
- modelTitle: 'Claude 3 Sonnet',
- modelName: 'claude-3-sonnet-20240229',
- pricing: {
- prompt: computeUsage("$3.00 / 1M tokens"),
- output: computeUsage("$15.00 / 1M tokens"),
- },
- },
- {
- modelVariant: 'CHAT',
- modelTitle: 'Claude 3 Haiku',
- modelName: ' claude-3-haiku-20240307',
- pricing: {
- prompt: computeUsage("$0.25 / 1M tokens"),
- output: computeUsage("$1.25 / 1M tokens"),
- },
- },
- {
- modelVariant: 'CHAT',
- modelTitle: 'Claude 2.1',
- modelName: 'claude-2.1',
- pricing: {
- prompt: computeUsage("$8.00 / 1M tokens"),
- output: computeUsage("$24.00 / 1M tokens"),
- },
- },
- {
- modelVariant: 'CHAT',
- modelTitle: 'Claude 2',
- modelName: 'claude-2.0',
- pricing: {
- prompt: computeUsage("$8.00 / 1M tokens"),
- output: computeUsage("$24.00 / 1M tokens"),
- },
- },
- {
- modelVariant: 'CHAT',
- modelTitle: ' Claude Instant 1.2',
- modelName: 'claude-instant-1.2',
- pricing: {
- prompt: computeUsage("$0.80 / 1M tokens"),
- output: computeUsage("$2.40 / 1M tokens"),
- },
- },
- // TODO: !!! Claude 1 and 2 has also completion versions - ask Hoagy
- ];
- /**
- * Note: [🤖] Add models of new variant
- * TODO: [🧠] !!! Add embedding models OR Anthropic has only chat+completion models?
- * TODO: [🧠] Some mechanism to propagate unsureness
- * TODO: [🧠][👮‍♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...
- * TODO: [🎰] Some mechanism to auto-update available models
- */
-
- /**
- * Get current date in ISO 8601 format
+ * @@@
  *
- * @private internal utility
+ * Note: `$` is used to indicate that this interacts with the global scope
+ * @singleton Only one instance of each register is created per build, but thare can be more @@@
+ * @public exported from `@promptbook/core`
  */
- function getCurrentIsoDate() {
- return new Date().toISOString();
- }
+ var $llmToolsRegister = new $Register('llm_execution_tools_constructors');

  /**
- * Helper of usage compute
+ * Creates a message with all registered LLM tools
  *
- * @param content the content of prompt or response
- * @returns part of PromptResultUsageCounts
+ * Note: This function is used to create a (error) message when there is no constructor for some LLM provider
  *
- * @private internal utility of LlmExecutionTools
+ * @private internal function of `createLlmToolsFromConfiguration` and `createLlmToolsFromEnv`
  */
- function computeUsageCounts(content) {
- return {
- charactersCount: { value: countCharacters(content) },
- wordsCount: { value: countWords(content) },
- sentencesCount: { value: countSentences(content) },
- linesCount: { value: countLines(content) },
- paragraphsCount: { value: countParagraphs(content) },
- pagesCount: { value: countPages(content) },
+ function $registeredLlmToolsMessage() {
+ var e_1, _a, e_2, _b;
+ /**
+ * Mixes registered LLM tools from $llmToolsMetadataRegister and $llmToolsRegister
+ */
+ var all = [];
+ var _loop_1 = function (packageName, className) {
+ if (all.some(function (item) { return item.packageName === packageName && item.className === className; })) {
+ return "continue";
+ }
+ all.push({ packageName: packageName, className: className });
  };
- }
-
- /**
- * Make UncertainNumber
- *
- * @param value
- *
- * @private utility for initializating UncertainNumber
- */
- function uncertainNumber(value) {
- if (value === null || value === undefined || Number.isNaN(value)) {
- return { value: 0, isUncertain: true };
- }
- return { value: value };
- }
-
- /**
- * Computes the usage of the Anthropic Claude API based on the response from Anthropic Claude
- *
- * @param promptContent The content of the prompt
- * @param resultContent The content of the result (for embedding prompts or failed prompts pass empty string)
- * @param rawResponse The raw response from Anthropic Claude API
- * @throws {PipelineExecutionError} If the usage is not defined in the response from Anthropic Claude
- * @private internal utility of `AnthropicClaudeExecutionTools`
- */
- function computeAnthropicClaudeUsage(promptContent, // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
- resultContent, rawResponse) {
- var _a, _b;
- if (rawResponse.usage === undefined) {
- throw new PipelineExecutionError('The usage is not defined in the response from Anthropic Claude');
+ try {
+ for (var _c = __values($llmToolsMetadataRegister.list()), _d = _c.next(); !_d.done; _d = _c.next()) {
+ var _e = _d.value, packageName = _e.packageName, className = _e.className;
+ _loop_1(packageName, className);
+ }
  }
- if (((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.input_tokens) === undefined) {
- throw new PipelineExecutionError('In Anthropic Claude response `usage.prompt_tokens` not defined');
+ catch (e_1_1) { e_1 = { error: e_1_1 }; }
+ finally {
+ try {
+ if (_d && !_d.done && (_a = _c.return)) _a.call(_c);
+ }
+ finally { if (e_1) throw e_1.error; }
  }
- var inputTokens = rawResponse.usage.input_tokens;
- var outputTokens = ((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.output_tokens) || 0;
- var modelInfo = ANTHROPIC_CLAUDE_MODELS.find(function (model) { return model.modelName === rawResponse.model; });
- var price;
- if (modelInfo === undefined || modelInfo.pricing === undefined) {
- price = uncertainNumber();
+ var _loop_2 = function (packageName, className) {
+ if (all.some(function (item) { return item.packageName === packageName && item.className === className; })) {
+ return "continue";
+ }
+ all.push({ packageName: packageName, className: className });
+ };
+ try {
+ for (var _f = __values($llmToolsRegister.list()), _g = _f.next(); !_g.done; _g = _f.next()) {
+ var _h = _g.value, packageName = _h.packageName, className = _h.className;
+ _loop_2(packageName, className);
+ }
  }
- else {
- price = uncertainNumber(inputTokens * modelInfo.pricing.prompt + outputTokens * modelInfo.pricing.output);
+ catch (e_2_1) { e_2 = { error: e_2_1 }; }
+ finally {
+ try {
+ if (_g && !_g.done && (_b = _f.return)) _b.call(_f);
+ }
+ finally { if (e_2) throw e_2.error; }
  }
- return {
- price: price,
- input: __assign({ tokensCount: uncertainNumber(rawResponse.usage.input_tokens) }, computeUsageCounts(promptContent)),
- output: __assign({ tokensCount: uncertainNumber(outputTokens) }, computeUsageCounts(resultContent)),
- };
+ var metadata = all.map(function (metadata) {
+ var isMetadataAviailable = $llmToolsMetadataRegister
+ .list()
+ .find(function (_a) {
+ var packageName = _a.packageName, className = _a.className;
+ return metadata.packageName === packageName && metadata.className === className;
+ });
+ var isInstalled = $llmToolsRegister
+ .list()
+ .find(function (_a) {
+ var packageName = _a.packageName, className = _a.className;
+ return metadata.packageName === packageName && metadata.className === className;
+ });
+ return __assign(__assign({}, metadata), { isMetadataAviailable: isMetadataAviailable, isInstalled: isInstalled });
+ });
+ return spaceTrim__default["default"](function (block) { return "\n Available LLM providers are:\n ".concat(block(metadata
+ .map(function (_a, i) {
+ var packageName = _a.packageName, className = _a.className, isMetadataAviailable = _a.isMetadataAviailable, isInstalled = _a.isInstalled;
+ var more;
+ if (just(false)) {
+ more = '';
+ }
+ else if (!isMetadataAviailable && !isInstalled) {
+ // TODO: [🧠][🪁] Maybe do allow to do auto-install if package not registered and not found
+ more = "(not installed and no metadata, looks like a unexpected behavior)";
+ }
+ else if (isMetadataAviailable && !isInstalled) {
+ // TODO: [🧠][🪁]
+ more = "(not installed)";
+ }
+ else if (!isMetadataAviailable && isInstalled) {
+ more = "(no metadata, looks like a unexpected behavior)";
+ }
+ else if (isMetadataAviailable && isInstalled) {
+ more = "(installed)";
+ }
+ else {
+ more = "(unknown state, looks like a unexpected behavior)";
+ }
+ return "".concat(i + 1, ") `").concat(className, "` from `").concat(packageName, "` ").concat(more);
+ })
+ .join('\n')), "\n "); });
  }
- /**
- * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
- */

  /**
- * Execution Tools for calling Anthropic Claude API.
+ * @@@
+ *
+ * Note: This function is not cached, every call creates new instance of `MultipleLlmExecutionTools`
  *
- * @public exported from `@promptbook/anthropic-claude`
- * @deprecated use `createAnthropicClaudeExecutionTools` instead
+ * @returns @@@
+ * @public exported from `@promptbook/core`
  */
- var AnthropicClaudeExecutionTools = /** @class */ (function () {
- /**
- * Creates Anthropic Claude Execution Tools.
- *
- * @param options which are relevant are directly passed to the Anthropic Claude client
- */
- function AnthropicClaudeExecutionTools(options) {
- if (options === void 0) { options = { isProxied: false }; }
- this.options = options;
- /**
- * Anthropic Claude API client.
- */
- this.client = null;
- }
- Object.defineProperty(AnthropicClaudeExecutionTools.prototype, "title", {
- get: function () {
- return 'Anthropic Claude';
- },
- enumerable: false,
- configurable: true
- });
- Object.defineProperty(AnthropicClaudeExecutionTools.prototype, "description", {
- get: function () {
- return 'Use all models provided by Anthropic Claude';
- },
- enumerable: false,
- configurable: true
- });
- AnthropicClaudeExecutionTools.prototype.getClient = function () {
- return __awaiter(this, void 0, void 0, function () {
- var anthropicOptions;
- return __generator(this, function (_a) {
- if (this.client === null) {
- anthropicOptions = __assign({}, this.options);
- delete anthropicOptions.isVerbose;
- delete anthropicOptions.isProxied;
- this.client = new Anthropic__default["default"](anthropicOptions);
- }
- return [2 /*return*/, this.client];
- });
- });
- };
- /**
- * Check the `options` passed to `constructor`
- */
- AnthropicClaudeExecutionTools.prototype.checkConfiguration = function () {
- return __awaiter(this, void 0, void 0, function () {
- return __generator(this, function (_a) {
- switch (_a.label) {
- case 0: return [4 /*yield*/, this.getClient()];
- case 1:
- _a.sent();
- return [2 /*return*/];
- }
- });
+ function createLlmToolsFromConfiguration(configuration, options) {
+ if (options === void 0) { options = {}; }
+ var _a = options.isVerbose, isVerbose = _a === void 0 ? false : _a;
+ var llmTools = configuration.map(function (llmConfiguration) {
+ var registeredItem = $llmToolsRegister
+ .list()
+ .find(function (_a) {
+ var packageName = _a.packageName, className = _a.className;
+ return llmConfiguration.packageName === packageName && llmConfiguration.className === className;
  });
- };
- /**
- * List all available Anthropic Claude models that can be used
- */
- AnthropicClaudeExecutionTools.prototype.listModels = function () {
- return ANTHROPIC_CLAUDE_MODELS;
- };
- /**
- * Calls Anthropic Claude API to use a chat model.
- */
- AnthropicClaudeExecutionTools.prototype.callChatModel = function (prompt) {
- return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, client, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, contentBlock, resultContent, usage;
- return __generator(this, function (_a) {
- switch (_a.label) {
- case 0:
- if (this.options.isVerbose) {
- console.info('💬 Anthropic Claude callChatModel call');
- }
- content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
- return [4 /*yield*/, this.getClient()];
- case 1:
- client = _a.sent();
- // TODO: [☂] Use here more modelRequirements
- if (modelRequirements.modelVariant !== 'CHAT') {
- throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
- }
- modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
- rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
- rawRequest = {
- model: modelRequirements.modelName || this.getDefaultChatModel().modelName,
- max_tokens: modelRequirements.maxTokens || 4096,
- // <- TODO: [🌾] Make some global max cap for maxTokens
- temperature: modelRequirements.temperature,
- system: modelRequirements.systemMessage,
- // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
- // <- Note: [🧆]
- messages: [
- {
- role: 'user',
- content: rawPromptContent,
- },
- ],
- // TODO: Is here some equivalent of user identification?> user: this.options.user,
- };
- start = getCurrentIsoDate();
- if (this.options.isVerbose) {
- console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
- }
- return [4 /*yield*/, client.messages.create(rawRequest)];
- case 2:
- rawResponse = _a.sent();
- if (this.options.isVerbose) {
- console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
- }
- if (!rawResponse.content[0]) {
- throw new PipelineExecutionError('No content from Anthropic Claude');
- }
- if (rawResponse.content.length > 1) {
- throw new PipelineExecutionError('More than one content blocks from Anthropic Claude');
- }
- contentBlock = rawResponse.content[0];
- if (contentBlock.type !== 'text') {
- throw new PipelineExecutionError("Returned content is not \"text\" type but \"".concat(contentBlock.type, "\""));
- }
- resultContent = contentBlock.text;
- // eslint-disable-next-line prefer-const
- complete = getCurrentIsoDate();
- usage = computeAnthropicClaudeUsage(content, '', rawResponse);
- return [2 /*return*/, {
- content: resultContent,
- modelName: rawResponse.model,
- timing: {
- start: start,
- complete: complete,
- },
- usage: usage,
- rawPromptContent: rawPromptContent,
- rawRequest: rawRequest,
- rawResponse: rawResponse,
- // <- [🗯]
- }];
- }
- });
- });
- };
- /*
6897
- TODO: [👏]
6898
- public async callCompletionModel(
6899
- prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements'>,
6900
- ): Promise<PromptCompletionResult> {
6901
-
6902
- if (this.options.isVerbose) {
6903
- console.info('🖋 Anthropic Claude callCompletionModel call');
6904
- }
6905
-
6906
- const { content, parameters, modelRequirements } = prompt;
6907
-
6908
- // TODO: [☂] Use here more modelRequirements
6909
- if (modelRequirements.modelVariant !== 'COMPLETION') {
6910
- throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
6911
- }
6912
-
6913
- const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
6914
- const modelSettings = {
6915
- model: modelName,
6916
- max_tokens: modelRequirements.maxTokens || 2000, // <- Note: 2000 is for lagacy reasons
6917
- // <- TODO: [🌾] Make some global max cap for maxTokens
6918
- // <- TODO: Use here `systemMessage`, `temperature` and `seed`
6919
- };
6920
-
6921
- const rawRequest: xxxx.Completions.CompletionCreateParamsNonStreaming = {
6922
- ...modelSettings,
6923
- prompt: rawPromptContent,
6924
- user: this.options.user,
6925
- };
6926
- const start: string_date_iso8601 = getCurrentIsoDate();
6927
- let complete: string_date_iso8601;
6928
-
6929
- if (this.options.isVerbose) {
6930
- console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
6931
- }
6932
- const rawResponse = await this.client.completions.create(rawRequest);
6933
- if (this.options.isVerbose) {
6934
- console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
6935
- }
6936
-
6937
- if (!rawResponse.choices[0]) {
6938
- throw new PipelineExecutionError('No choises from Anthropic Claude');
6939
- }
6940
-
6941
- if (rawResponse.choices.length > 1) {
6942
- // TODO: This should be maybe only warning
6943
- throw new PipelineExecutionError('More than one choise from Anthropic Claude');
6944
- }
6945
-
6946
- const resultContent = rawResponse.choices[0].text;
6947
- // eslint-disable-next-line prefer-const
6948
- complete = getCurrentIsoDate();
6949
- const usage = { price: 'UNKNOWN', inputTokens: 0, outputTokens: 0 /* <- TODO: [🐞] Compute usage * / } satisfies PromptResultUsage;
6950
-
6951
-
6952
-
6953
- return {
6954
- content: resultContent,
6955
- modelName: rawResponse.model || model,
6956
- timing: {
6957
- start,
6958
- complete,
6959
- },
6960
- usage,
6961
- rawResponse,
6962
- // <- [🗯]
6963
- };
6964
- }
6965
- */
6966
- // <- Note: [🤖] callXxxModel
6967
- /**
6968
- * Get the model that should be used as default
6969
- */
6970
- AnthropicClaudeExecutionTools.prototype.getDefaultModel = function (defaultModelName) {
6971
- var model = ANTHROPIC_CLAUDE_MODELS.find(function (_a) {
6972
- var modelName = _a.modelName;
6973
- return modelName.startsWith(defaultModelName);
6974
- });
6975
- if (model === undefined) {
6976
- throw new UnexpectedError(spaceTrim__default["default"](function (block) {
6977
- return "\n Cannot find model in OpenAI models with name \"".concat(defaultModelName, "\" which should be used as default.\n\n Available models:\n ").concat(block(ANTHROPIC_CLAUDE_MODELS.map(function (_a) {
6978
- var modelName = _a.modelName;
6979
- return "- \"".concat(modelName, "\"");
6980
- }).join('\n')), "\n\n ");
6981
- }));
6982
- }
6983
- return model;
6984
- };
6985
- /**
6986
- * Default model for chat variant.
6987
- */
6988
- AnthropicClaudeExecutionTools.prototype.getDefaultChatModel = function () {
6989
- return this.getDefaultModel('claude-3-opus');
6990
- };
6991
- return AnthropicClaudeExecutionTools;
6992
- }());
6993
- /**
6994
- * TODO: [🍆] JSON mode
6995
- * TODO: [🧠] Maybe handle errors via transformAnthropicError (like transformAzureError)
6996
- * TODO: Maybe Create some common util for callChatModel and callCompletionModel
6997
- * TODO: Maybe make custom OpenAiError
6998
- * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
6999
- * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
7000
- * TODO: [📅] Maybe instead of `RemoteLlmExecutionToolsOptions` use `proxyWithAnonymousRemoteServer` (if implemented)
7001
- */
7002
-
7003
- /**
7004
- * Execution Tools for calling Anthropic Claude API.
7005
- *
7006
- * @public exported from `@promptbook/anthropic-claude`
7007
- */
7008
- var createAnthropicClaudeExecutionTools = Object.assign(function (options) {
7009
- if (options.isProxied) {
7010
- return new RemoteLlmExecutionTools(__assign(__assign({}, options), { isAnonymous: true, llmToolsConfiguration: [
7011
- {
7012
- title: 'Anthropic Claude (proxied)',
7013
- packageName: '@promptbook/anthropic-claude',
7014
- className: 'AnthropicClaudeExecutionTools',
7015
- options: __assign(__assign({}, options), { isProxied: false }),
7016
- },
7017
- ], models: ANTHROPIC_CLAUDE_MODELS }));
7018
- }
7019
- return new AnthropicClaudeExecutionTools(options);
7020
- }, {
7021
- packageName: '@promptbook/anthropic-claude',
7022
- className: 'AnthropicClaudeExecutionTools',
7023
- });
7024
- /**
7025
- * TODO: [🧠] !!!! Make anonymous this with all LLM providers
7026
- * TODO: [🧠][🧱] !!!! Maybe change all `new AnthropicClaudeExecutionTools` -> `createAnthropicClaudeExecutionTools` in manual
7027
- * TODO: [🧠] Maybe auto-detect usage in browser and determine default value of `isProxied`
7028
- * TODO: [🦺] Is there some way to put `packageName` and `className` on top and the function definition at the bottom?
7029
- * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
7030
- */
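A hedged usage sketch of the `createAnthropicClaudeExecutionTools` factory above: with `isProxied: true` the options are wrapped in `RemoteLlmExecutionTools` with `isAnonymous: true`, otherwise they go straight to `new AnthropicClaudeExecutionTools`. The `remoteUrl` field is an assumption about `RemoteLlmExecutionToolsOptions`, not something this diff confirms:

```ts
import { createAnthropicClaudeExecutionTools } from '@promptbook/anthropic-claude';

const claudeTools = createAnthropicClaudeExecutionTools({
    apiKey: process.env.ANTHROPIC_CLAUDE_API_KEY,
    isProxied: true, // <- routes calls through RemoteLlmExecutionTools with isAnonymous: true
    remoteUrl: 'https://promptbook.example.com', // <- hypothetical remote-server option
});
```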
7031
-
7032
- /**
7033
- * List of available OpenAI models with pricing
7034
- *
7035
- * Note: Data as of 2024-05-20
7036
- *
7037
- * @see https://platform.openai.com/docs/models/
7038
- * @see https://openai.com/api/pricing/
7039
- * @public exported from `@promptbook/openai`
7040
- */
7041
- var OPENAI_MODELS = [
7042
- /*/
7043
- {
7044
- modelTitle: 'dall-e-3',
7045
- modelName: 'dall-e-3',
7046
- },
7047
- /**/
7048
- /*/
7049
- {
7050
- modelTitle: 'whisper-1',
7051
- modelName: 'whisper-1',
7052
- },
7053
- /**/
7054
- /**/
7055
- {
7056
- modelVariant: 'COMPLETION',
7057
- modelTitle: 'davinci-002',
7058
- modelName: 'davinci-002',
7059
- pricing: {
7060
- prompt: computeUsage("$2.00 / 1M tokens"),
7061
- output: computeUsage("$2.00 / 1M tokens"), // <- not sure
7062
- },
7063
- },
7064
- /**/
7065
- /*/
7066
- {
7067
- modelTitle: 'dall-e-2',
7068
- modelName: 'dall-e-2',
7069
- },
7070
- /**/
7071
- /**/
7072
- {
7073
- modelVariant: 'CHAT',
7074
- modelTitle: 'gpt-3.5-turbo-16k',
7075
- modelName: 'gpt-3.5-turbo-16k',
7076
- pricing: {
7077
- prompt: computeUsage("$3.00 / 1M tokens"),
7078
- output: computeUsage("$4.00 / 1M tokens"),
7079
- },
7080
- },
7081
- /**/
7082
- /*/
7083
- {
7084
- modelTitle: 'tts-1-hd-1106',
7085
- modelName: 'tts-1-hd-1106',
7086
- },
7087
- /**/
7088
- /*/
7089
- {
7090
- modelTitle: 'tts-1-hd',
7091
- modelName: 'tts-1-hd',
7092
- },
7093
- /**/
7094
- /**/
7095
- {
7096
- modelVariant: 'CHAT',
7097
- modelTitle: 'gpt-4',
7098
- modelName: 'gpt-4',
7099
- pricing: {
7100
- prompt: computeUsage("$30.00 / 1M tokens"),
7101
- output: computeUsage("$60.00 / 1M tokens"),
7102
- },
7103
- },
7104
- /**/
7105
- /**/
7106
- {
7107
- modelVariant: 'CHAT',
7108
- modelTitle: 'gpt-4-32k',
7109
- modelName: 'gpt-4-32k',
7110
- pricing: {
7111
- prompt: computeUsage("$60.00 / 1M tokens"),
7112
- output: computeUsage("$120.00 / 1M tokens"),
7113
- },
7114
- },
7115
- /**/
7116
- /*/
7117
- {
7118
- modelVariant: 'CHAT',
7119
- modelTitle: 'gpt-4-0613',
7120
- modelName: 'gpt-4-0613',
7121
- pricing: {
7122
- prompt: computeUsage(` / 1M tokens`),
7123
- output: computeUsage(` / 1M tokens`),
7124
- },
7125
- },
7126
- /**/
7127
- /**/
7128
- {
7129
- modelVariant: 'CHAT',
7130
- modelTitle: 'gpt-4-turbo-2024-04-09',
7131
- modelName: 'gpt-4-turbo-2024-04-09',
7132
- pricing: {
7133
- prompt: computeUsage("$10.00 / 1M tokens"),
7134
- output: computeUsage("$30.00 / 1M tokens"),
7135
- },
7136
- },
7137
- /**/
7138
- /**/
7139
- {
7140
- modelVariant: 'CHAT',
7141
- modelTitle: 'gpt-3.5-turbo-1106',
7142
- modelName: 'gpt-3.5-turbo-1106',
7143
- pricing: {
7144
- prompt: computeUsage("$1.00 / 1M tokens"),
7145
- output: computeUsage("$2.00 / 1M tokens"),
7146
- },
7147
- },
7148
- /**/
7149
- /**/
7150
- {
7151
- modelVariant: 'CHAT',
7152
- modelTitle: 'gpt-4-turbo',
7153
- modelName: 'gpt-4-turbo',
7154
- pricing: {
7155
- prompt: computeUsage("$10.00 / 1M tokens"),
7156
- output: computeUsage("$30.00 / 1M tokens"),
7157
- },
7158
- },
7159
- /**/
7160
- /**/
7161
- {
7162
- modelVariant: 'COMPLETION',
7163
- modelTitle: 'gpt-3.5-turbo-instruct-0914',
7164
- modelName: 'gpt-3.5-turbo-instruct-0914',
7165
- pricing: {
7166
- prompt: computeUsage("$1.50 / 1M tokens"),
7167
- output: computeUsage("$2.00 / 1M tokens"), // <- For gpt-3.5-turbo-instruct
7168
- },
7169
- },
7170
- /**/
7171
- /**/
7172
- {
7173
- modelVariant: 'COMPLETION',
7174
- modelTitle: 'gpt-3.5-turbo-instruct',
7175
- modelName: 'gpt-3.5-turbo-instruct',
7176
- pricing: {
7177
- prompt: computeUsage("$1.50 / 1M tokens"),
7178
- output: computeUsage("$2.00 / 1M tokens"),
7179
- },
7180
- },
7181
- /**/
7182
- /*/
7183
- {
7184
- modelTitle: 'tts-1',
7185
- modelName: 'tts-1',
7186
- },
7187
- /**/
7188
- /**/
7189
- {
7190
- modelVariant: 'CHAT',
7191
- modelTitle: 'gpt-3.5-turbo',
7192
- modelName: 'gpt-3.5-turbo',
7193
- pricing: {
7194
- prompt: computeUsage("$3.00 / 1M tokens"),
7195
- output: computeUsage("$6.00 / 1M tokens"), // <- Not sure, refer to gpt-3.5-turbo in Fine-tuning models
7196
- },
7197
- },
7198
- /**/
7199
- /**/
7200
- {
7201
- modelVariant: 'CHAT',
7202
- modelTitle: 'gpt-3.5-turbo-0301',
7203
- modelName: 'gpt-3.5-turbo-0301',
7204
- pricing: {
7205
- prompt: computeUsage("$1.50 / 1M tokens"),
7206
- output: computeUsage("$2.00 / 1M tokens"),
7207
- },
7208
- },
7209
- /**/
7210
- /**/
7211
- {
7212
- modelVariant: 'COMPLETION',
7213
- modelTitle: 'babbage-002',
7214
- modelName: 'babbage-002',
7215
- pricing: {
7216
- prompt: computeUsage("$0.40 / 1M tokens"),
7217
- output: computeUsage("$0.40 / 1M tokens"), // <- Not sure
7218
- },
7219
- },
7220
- /**/
7221
- /**/
7222
- {
7223
- modelVariant: 'CHAT',
7224
- modelTitle: 'gpt-4-1106-preview',
7225
- modelName: 'gpt-4-1106-preview',
7226
- pricing: {
7227
- prompt: computeUsage("$10.00 / 1M tokens"),
7228
- output: computeUsage("$30.00 / 1M tokens"),
7229
- },
7230
- },
7231
- /**/
7232
- /**/
7233
- {
7234
- modelVariant: 'CHAT',
7235
- modelTitle: 'gpt-4-0125-preview',
7236
- modelName: 'gpt-4-0125-preview',
7237
- pricing: {
7238
- prompt: computeUsage("$10.00 / 1M tokens"),
7239
- output: computeUsage("$30.00 / 1M tokens"),
7240
- },
7241
- },
7242
- /**/
7243
- /*/
7244
- {
7245
- modelTitle: 'tts-1-1106',
7246
- modelName: 'tts-1-1106',
7247
- },
7248
- /**/
7249
- /**/
7250
- {
7251
- modelVariant: 'CHAT',
7252
- modelTitle: 'gpt-3.5-turbo-0125',
7253
- modelName: 'gpt-3.5-turbo-0125',
7254
- pricing: {
7255
- prompt: computeUsage("$0.50 / 1M tokens"),
7256
- output: computeUsage("$1.50 / 1M tokens"),
7257
- },
7258
- },
7259
- /**/
7260
- /**/
7261
- {
7262
- modelVariant: 'CHAT',
7263
- modelTitle: 'gpt-4-turbo-preview',
7264
- modelName: 'gpt-4-turbo-preview',
7265
- pricing: {
7266
- prompt: computeUsage("$10.00 / 1M tokens"),
7267
- output: computeUsage("$30.00 / 1M tokens"), // <- Not sure, just for gpt-4-turbo
7268
- },
7269
- },
7270
- /**/
7271
- /**/
7272
- {
7273
- modelVariant: 'EMBEDDING',
7274
- modelTitle: 'text-embedding-3-large',
7275
- modelName: 'text-embedding-3-large',
7276
- pricing: {
7277
- prompt: computeUsage("$0.13 / 1M tokens"),
7278
- // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
7279
- output: 0, // <- Note: [🆖] In Embedding models you don't pay for output
7280
- },
7281
- },
7282
- /**/
7283
- /**/
7284
- {
7285
- modelVariant: 'EMBEDDING',
7286
- modelTitle: 'text-embedding-3-small',
7287
- modelName: 'text-embedding-3-small',
7288
- pricing: {
7289
- prompt: computeUsage("$0.02 / 1M tokens"),
7290
- // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
7291
- output: 0, // <- Note: [🆖] In Embedding models you don't pay for output
7292
- },
7293
- },
7294
- /**/
7295
- /**/
7296
- {
7297
- modelVariant: 'CHAT',
7298
- modelTitle: 'gpt-3.5-turbo-0613',
7299
- modelName: 'gpt-3.5-turbo-0613',
7300
- pricing: {
7301
- prompt: computeUsage("$1.50 / 1M tokens"),
7302
- output: computeUsage("$2.00 / 1M tokens"),
7303
- },
7304
- },
7305
- /**/
7306
- /**/
7307
- {
7308
- modelVariant: 'EMBEDDING',
7309
- modelTitle: 'text-embedding-ada-002',
7310
- modelName: 'text-embedding-ada-002',
7311
- pricing: {
7312
- prompt: computeUsage("$0.1 / 1M tokens"),
7313
- // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
7314
- output: 0, // <- Note: [🆖] In Embedding models you don't pay for output
7315
- },
7316
- },
7317
- /**/
7318
- /*/
7319
- {
7320
- modelVariant: 'CHAT',
7321
- modelTitle: 'gpt-4-1106-vision-preview',
7322
- modelName: 'gpt-4-1106-vision-preview',
7323
- },
7324
- /**/
7325
- /*/
7326
- {
7327
- modelVariant: 'CHAT',
7328
- modelTitle: 'gpt-4-vision-preview',
7329
- modelName: 'gpt-4-vision-preview',
7330
- pricing: {
7331
- prompt: computeUsage(`$10.00 / 1M tokens`),
7332
- output: computeUsage(`$30.00 / 1M tokens`),
7333
- },
7334
- },
7335
- /**/
7336
- /**/
7337
- {
7338
- modelVariant: 'CHAT',
7339
- modelTitle: 'gpt-4o-2024-05-13',
7340
- modelName: 'gpt-4o-2024-05-13',
7341
- pricing: {
7342
- prompt: computeUsage("$5.00 / 1M tokens"),
7343
- output: computeUsage("$15.00 / 1M tokens"),
7344
- },
7345
- },
7346
- /**/
7347
- /**/
7348
- {
7349
- modelVariant: 'CHAT',
7350
- modelTitle: 'gpt-4o',
7351
- modelName: 'gpt-4o',
7352
- pricing: {
7353
- prompt: computeUsage("$5.00 / 1M tokens"),
7354
- output: computeUsage("$15.00 / 1M tokens"),
7355
- },
7356
- },
7357
- /**/
7358
- /**/
7359
- {
7360
- modelVariant: 'CHAT',
7361
- modelTitle: 'gpt-3.5-turbo-16k-0613',
7362
- modelName: 'gpt-3.5-turbo-16k-0613',
7363
- pricing: {
7364
- prompt: computeUsage("$3.00 / 1M tokens"),
7365
- output: computeUsage("$4.00 / 1M tokens"),
7366
- },
7367
- },
7368
- /**/
7369
- ];
7370
- /**
7371
- * Note: [🤖] Add models of new variant
7372
- * TODO: [🧠] Some mechanism to propagate unsureness
7373
- * TODO: [🎰] Some mechanism to auto-update available models
7374
- * TODO: [🎰][👮‍♀️] Make this list dynamic - modelNames can be listed dynamically, but not modelVariant, legacy status, context length, or pricing
7375
- * TODO: [🧠][👮‍♀️] Put more info here, like description, isVision, trainingDateCutoff, languages, strengths (top-level performance, intelligence, fluency, and understanding), contextWindow, ...
7376
- * @see https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4
7377
- * @see https://openai.com/api/pricing/
7378
- * @see /other/playground/playground.ts
7379
- * TODO: [🍓] Make better
7380
- * TODO: Change model titles to human-readable ones, e.g. "gpt-4-turbo-2024-04-09" -> "GPT-4 Turbo (2024-04-09)"
7381
- * TODO: [🚸] Not all models are compatible with JSON mode, add this information here and use it
7382
- */
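Each `computeUsage("$X / 1M tokens")` entry above apparently turns a human-readable rate into a per-token price that `computeOpenAiUsage` (below) can multiply by token counts. A minimal sketch of such a parser; the real `computeUsage` is internal to Promptbook, so this is an illustration, not its source:

```ts
// Hypothetical re-implementation of a computeUsage-style parser.
function computeUsageSketch(rate: string): number {
    const match = /^\$([\d.]+)\s*\/\s*1M tokens$/.exec(rate.trim());
    if (match === null) {
        throw new Error(`Cannot parse rate "${rate}"`);
    }
    return Number(match[1]) / 1_000_000; // <- USD per single token
}

computeUsageSketch('$5.00 / 1M tokens'); // -> 0.000005
```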
7383
-
7384
- /**
7385
- * Execution Tools for calling Azure OpenAI API.
7386
- *
7387
- * @public exported from `@promptbook/azure-openai`
7388
- */
7389
- var AzureOpenAiExecutionTools = /** @class */ (function () {
7390
- /**
7391
- * Creates Azure OpenAI Execution Tools.
7392
- *
7393
- * @param options Relevant options are passed directly to the Azure OpenAI client
7394
- */
7395
- function AzureOpenAiExecutionTools(options) {
7396
- this.options = options;
7397
- /**
7398
- * Azure OpenAI API client.
7399
- */
7400
- this.client = null;
7401
- }
7402
- Object.defineProperty(AzureOpenAiExecutionTools.prototype, "title", {
7403
- get: function () {
7404
- return 'Azure OpenAI';
7405
- },
7406
- enumerable: false,
7407
- configurable: true
7408
- });
7409
- Object.defineProperty(AzureOpenAiExecutionTools.prototype, "description", {
7410
- get: function () {
7411
- return 'Use all models trained by OpenAI, provided by Azure';
7412
- },
7413
- enumerable: false,
7414
- configurable: true
7415
- });
7416
- AzureOpenAiExecutionTools.prototype.getClient = function () {
7417
- return __awaiter(this, void 0, void 0, function () {
7418
- return __generator(this, function (_a) {
7419
- if (this.client === null) {
7420
- this.client = new openai.OpenAIClient("https://".concat(this.options.resourceName, ".openai.azure.com/"), new openai.AzureKeyCredential(this.options.apiKey));
7421
- }
7422
- return [2 /*return*/, this.client];
7423
- });
7424
- });
7425
- };
7426
- /**
7427
- * Check the `options` passed to `constructor`
7428
- */
7429
- AzureOpenAiExecutionTools.prototype.checkConfiguration = function () {
7430
- return __awaiter(this, void 0, void 0, function () {
7431
- return __generator(this, function (_a) {
7432
- switch (_a.label) {
7433
- case 0: return [4 /*yield*/, this.getClient()];
7434
- case 1:
7435
- _a.sent();
7436
- return [2 /*return*/];
7437
- }
7438
- });
7439
- });
7440
- };
7441
- /**
7442
- * List all available Azure OpenAI models that can be used
7443
- */
7444
- AzureOpenAiExecutionTools.prototype.listModels = function () {
7445
- return __awaiter(this, void 0, void 0, function () {
7446
- return __generator(this, function (_a) {
7447
- // TODO: !!! Filter here which models are really available as deployments
7448
- // @see https://management.azure.com/subscriptions/subscriptionId/resourceGroups/resourceGroupName/providers/Microsoft.CognitiveServices/accounts/accountName/deployments?api-version=2023-05-01
7449
- return [2 /*return*/, OPENAI_MODELS.map(function (_a) {
7450
- var modelTitle = _a.modelTitle, modelName = _a.modelName, modelVariant = _a.modelVariant;
7451
- return ({
7452
- modelTitle: "Azure ".concat(modelTitle),
7453
- modelName: modelName,
7454
- modelVariant: modelVariant,
7455
- });
7456
- })];
7457
- });
7458
- });
7459
- };
7460
- /**
7461
- * Calls Azure OpenAI API to use a chat model.
7462
- */
7463
- AzureOpenAiExecutionTools.prototype.callChatModel = function (prompt) {
7464
- var _a, _b;
7465
- return __awaiter(this, void 0, void 0, function () {
7466
- var content, parameters, modelRequirements, client, modelName, modelSettings, rawPromptContent, messages, start, complete, rawRequest, rawResponse, resultContent, usage, error_1;
7467
- return __generator(this, function (_c) {
7468
- switch (_c.label) {
7469
- case 0:
7470
- if (this.options.isVerbose) {
7471
- console.info('💬 Azure OpenAI callChatModel call');
7472
- }
7473
- content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
7474
- return [4 /*yield*/, this.getClient()];
7475
- case 1:
7476
- client = _c.sent();
7477
- // TODO: [☂] Use here more modelRequirements
7478
- if (modelRequirements.modelVariant !== 'CHAT') {
7479
- throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
7480
- }
7481
- _c.label = 2;
7482
- case 2:
7483
- _c.trys.push([2, 4, , 5]);
7484
- modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
7485
- modelSettings = {
7486
- maxTokens: modelRequirements.maxTokens,
7487
- // <- TODO: [🌾] Make some global max cap for maxTokens
7488
- temperature: modelRequirements.temperature,
7489
- user: this.options.user,
7490
- // <- TODO: [🈁] Use `seed` here AND/OR use `isDeterministic` for the entire execution tools
7491
- // <- Note: [🧆]
7492
- };
7493
- rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
7494
- messages = __spreadArray(__spreadArray([], __read((modelRequirements.systemMessage === undefined
7495
- ? []
7496
- : [
7497
- {
7498
- role: 'system',
7499
- content: modelRequirements.systemMessage,
7500
- },
7501
- ])), false), [
7502
- {
7503
- role: 'user',
7504
- content: rawPromptContent,
7505
- },
7506
- ], false);
7507
- start = getCurrentIsoDate();
7508
- complete = void 0;
7509
- if (this.options.isVerbose) {
7510
- console.info(colors__default["default"].bgWhite('messages'), JSON.stringify(messages, null, 4));
7511
- }
7512
- rawRequest = [modelName, messages, modelSettings];
7513
- return [4 /*yield*/, client.getChatCompletions.apply(client, __spreadArray([], __read(rawRequest), false))];
7514
- case 3:
7515
- rawResponse = _c.sent();
7516
- if (this.options.isVerbose) {
7517
- console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
7518
- }
7519
- if (!rawResponse.choices[0]) {
7520
- throw new PipelineExecutionError('No choices from Azure OpenAI');
7521
- }
7522
- if (rawResponse.choices.length > 1) {
7523
- // TODO: This should maybe be only a warning
7524
- throw new PipelineExecutionError('More than one choice from Azure OpenAI');
7525
- }
7526
- if (!rawResponse.choices[0].message || !rawResponse.choices[0].message.content) {
7527
- throw new PipelineExecutionError('Empty response from Azure OpenAI');
7528
- }
7529
- resultContent = rawResponse.choices[0].message.content;
7530
- // eslint-disable-next-line prefer-const
7531
- complete = getCurrentIsoDate();
7532
- usage = {
7533
- price: uncertainNumber() /* <- TODO: [🐞] Compute usage */,
7534
- input: __assign({ tokensCount: uncertainNumber((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.promptTokens) }, computeUsageCounts(prompt.content)),
7535
- output: __assign({ tokensCount: uncertainNumber((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completionTokens) }, computeUsageCounts(resultContent)),
7536
- };
7537
- return [2 /*return*/, {
7538
- content: resultContent,
7539
- modelName: modelName,
7540
- timing: {
7541
- start: start,
7542
- complete: complete,
7543
- },
7544
- usage: usage,
7545
- rawPromptContent: rawPromptContent,
7546
- rawRequest: rawRequest,
7547
- rawResponse: rawResponse,
7548
- // <- [🗯]
7549
- }];
7550
- case 4:
7551
- error_1 = _c.sent();
7552
- throw this.transformAzureError(error_1);
7553
- case 5: return [2 /*return*/];
7554
- }
7555
- });
7556
- });
7557
- };
7558
- /**
7559
- * Calls Azure OpenAI API to use a completion model.
7560
- */
7561
- AzureOpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
7562
- var _a, _b;
7563
- return __awaiter(this, void 0, void 0, function () {
7564
- var content, parameters, modelRequirements, client, modelName, modelSettings, start, complete, rawPromptContent, rawRequest, rawResponse, resultContent, usage, error_2;
7565
- return __generator(this, function (_c) {
7566
- switch (_c.label) {
7567
- case 0:
7568
- if (this.options.isVerbose) {
7569
- console.info('🖋 Azure OpenAI callCompletionModel call');
7570
- }
7571
- content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
7572
- return [4 /*yield*/, this.getClient()];
7573
- case 1:
7574
- client = _c.sent();
7575
- // TODO: [☂] Use here more modelRequirements
7576
- if (modelRequirements.modelVariant !== 'COMPLETION') {
7577
- throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
7578
- }
7579
- _c.label = 2;
7580
- case 2:
7581
- _c.trys.push([2, 4, , 5]);
7582
- modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
7583
- modelSettings = {
7584
- maxTokens: modelRequirements.maxTokens || 2000,
7585
- // <- TODO: [🌾] Make some global max cap for maxTokens
7586
- temperature: modelRequirements.temperature,
7587
- user: this.options.user,
7588
- // <- TODO: [🈁] Use `seed` here AND/OR use `isDeterministic` for the entire execution tools
7589
- // <- Note: [🧆]
7590
- };
7591
- start = getCurrentIsoDate();
7592
- complete = void 0;
7593
- if (this.options.isVerbose) {
7594
- console.info(colors__default["default"].bgWhite('content'), JSON.stringify(content, null, 4));
7595
- console.info(colors__default["default"].bgWhite('parameters'), JSON.stringify(parameters, null, 4));
7596
- }
7597
- rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
7598
- rawRequest = [
7599
- modelName,
7600
- [rawPromptContent],
7601
- modelSettings,
7602
- ];
7603
- return [4 /*yield*/, client.getCompletions.apply(client, __spreadArray([], __read(rawRequest), false))];
7604
- case 3:
7605
- rawResponse = _c.sent();
7606
- if (this.options.isVerbose) {
7607
- console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
7608
- }
7609
- if (!rawResponse.choices[0]) {
7610
- throw new PipelineExecutionError('No choices from Azure OpenAI');
7611
- }
7612
- if (rawResponse.choices.length > 1) {
7613
- // TODO: This should maybe be only a warning
7614
- throw new PipelineExecutionError('More than one choice from Azure OpenAI');
7615
- }
7616
- resultContent = rawResponse.choices[0].text;
7617
- // eslint-disable-next-line prefer-const
7618
- complete = getCurrentIsoDate();
7619
- usage = {
7620
- price: uncertainNumber() /* <- TODO: [🐞] Compute usage */,
7621
- input: __assign({ tokensCount: uncertainNumber((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.promptTokens) }, computeUsageCounts(prompt.content)),
7622
- output: __assign({ tokensCount: uncertainNumber((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completionTokens) }, computeUsageCounts(resultContent)),
7623
- };
7624
- return [2 /*return*/, {
7625
- content: resultContent,
7626
- modelName: modelName,
7627
- timing: {
7628
- start: start,
7629
- complete: complete,
7630
- },
7631
- usage: usage,
7632
- rawPromptContent: rawPromptContent,
7633
- rawRequest: rawRequest,
7634
- rawResponse: rawResponse,
7635
- // <- [🗯]
7636
- }];
7637
- case 4:
7638
- error_2 = _c.sent();
7639
- throw this.transformAzureError(error_2);
7640
- case 5: return [2 /*return*/];
7641
- }
7642
- });
7643
- });
7644
- };
7645
- // <- Note: [🤖] callXxxModel
7646
- /**
7647
- * Converts an Azure error (which is not a proper Error but a plain object) into a proper Error
7648
- */
7649
- AzureOpenAiExecutionTools.prototype.transformAzureError = function (azureError) {
7650
- if (typeof azureError !== 'object' || azureError === null) {
7651
- return new PipelineExecutionError("Unknown Azure OpenAI error");
7652
- }
7653
- var code = azureError.code, message = azureError.message;
7654
- return new PipelineExecutionError("".concat(code, ": ").concat(message));
7655
- };
7656
- return AzureOpenAiExecutionTools;
7657
- }());
7658
- /**
7659
- * TODO: Maybe create some common util for callChatModel and callCompletionModel
7660
- * TODO: Maybe make custom AzureOpenAiError
7661
- * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
7662
- * TODO: [🧠][🌰] Allow passing `title` for tracking purposes
7663
- */
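A usage sketch for `AzureOpenAiExecutionTools`, using only the options visible in this diff (`resourceName`, `deploymentName`, `apiKey`, `isVerbose`, `user`); `getClient` expands the resource name into `https://<resourceName>.openai.azure.com/`:

```ts
import { AzureOpenAiExecutionTools } from '@promptbook/azure-openai';

const azureTools = new AzureOpenAiExecutionTools({
    resourceName: 'my-resource', // <- becomes https://my-resource.openai.azure.com/
    deploymentName: 'gpt-4o', // <- fallback when prompt.modelRequirements.modelName is not set
    apiKey: process.env.AZURE_OPENAI_API_KEY!,
    isVerbose: true,
});

await azureTools.checkConfiguration(); // <- currently only instantiates the client
```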
7664
-
7665
- /**
7666
- * Computes the usage of the OpenAI API based on the response from OpenAI
7667
- *
7668
- * @param promptContent The content of the prompt
7669
- * @param resultContent The content of the result (for embedding or failed prompts, pass an empty string)
7670
- * @param rawResponse The raw response from OpenAI API
7671
- * @throws {PipelineExecutionError} If the usage is not defined in the response from OpenAI
7672
- * @private internal utility of `OpenAiExecutionTools`
7673
- */
7674
- function computeOpenAiUsage(promptContent, // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
7675
- resultContent, rawResponse) {
7676
- var _a, _b;
7677
- if (rawResponse.usage === undefined) {
7678
- throw new PipelineExecutionError('The usage is not defined in the response from OpenAI');
7679
- }
7680
- if (((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.prompt_tokens) === undefined) {
7681
- throw new PipelineExecutionError('In OpenAI response `usage.prompt_tokens` not defined');
7682
- }
7683
- var inputTokens = rawResponse.usage.prompt_tokens;
7684
- var outputTokens = ((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completion_tokens) || 0;
7685
- var modelInfo = OPENAI_MODELS.find(function (model) { return model.modelName === rawResponse.model; });
7686
- var price;
7687
- if (modelInfo === undefined || modelInfo.pricing === undefined) {
7688
- price = uncertainNumber();
7689
- }
7690
- else {
7691
- price = uncertainNumber(inputTokens * modelInfo.pricing.prompt + outputTokens * modelInfo.pricing.output);
7692
- }
7693
- return {
7694
- price: price,
7695
- input: __assign({ tokensCount: uncertainNumber(rawResponse.usage.prompt_tokens) }, computeUsageCounts(promptContent)),
7696
- output: __assign({ tokensCount: uncertainNumber(outputTokens) }, computeUsageCounts(resultContent)),
7697
- };
7698
- }
7699
- /**
7700
- * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
7701
- */
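To make the arithmetic in `computeOpenAiUsage` concrete, take the gpt-4o rates from `OPENAI_MODELS` above ($5.00 / 1M prompt tokens, $15.00 / 1M output tokens) and a response with 1,000 prompt tokens and 500 completion tokens:

```ts
const promptRate = 5 / 1_000_000; // <- modelInfo.pricing.prompt for gpt-4o
const outputRate = 15 / 1_000_000; // <- modelInfo.pricing.output for gpt-4o

const price = 1000 * promptRate + 500 * outputRate;
console.info(price); // -> 0.0125 (USD); unknown models fall back to uncertainNumber()
```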
7702
-
7703
- /**
7704
- * Execution Tools for calling OpenAI API
7705
- *
7706
- * @public exported from `@promptbook/openai`
7707
- */
7708
- var OpenAiExecutionTools = /** @class */ (function () {
7709
- /**
7710
- * Creates OpenAI Execution Tools.
7711
- *
7712
- * @param options Relevant options are passed directly to the OpenAI client
7713
- */
7714
- function OpenAiExecutionTools(options) {
7715
- if (options === void 0) { options = {}; }
7716
- this.options = options;
7717
- /**
7718
- * OpenAI API client.
7719
- */
7720
- this.client = null;
7721
- }
7722
- Object.defineProperty(OpenAiExecutionTools.prototype, "title", {
7723
- get: function () {
7724
- return 'OpenAI';
7725
- },
7726
- enumerable: false,
7727
- configurable: true
7728
- });
7729
- Object.defineProperty(OpenAiExecutionTools.prototype, "description", {
7730
- get: function () {
7731
- return 'Use all models provided by OpenAI';
7732
- },
7733
- enumerable: false,
7734
- configurable: true
7735
- });
7736
- OpenAiExecutionTools.prototype.getClient = function () {
7737
- return __awaiter(this, void 0, void 0, function () {
7738
- var openAiOptions;
7739
- return __generator(this, function (_a) {
7740
- if (this.client === null) {
7741
- openAiOptions = __assign({}, this.options);
7742
- delete openAiOptions.isVerbose;
7743
- delete openAiOptions.user;
7744
- this.client = new OpenAI__default["default"](__assign({}, openAiOptions));
7745
- }
7746
- return [2 /*return*/, this.client];
7747
- });
7748
- });
7749
- };
7750
- /**
7751
- * Check the `options` passed to `constructor`
7752
- */
7753
- OpenAiExecutionTools.prototype.checkConfiguration = function () {
7754
- return __awaiter(this, void 0, void 0, function () {
7755
- return __generator(this, function (_a) {
7756
- switch (_a.label) {
7757
- case 0: return [4 /*yield*/, this.getClient()];
7758
- case 1:
7759
- _a.sent();
7760
- return [2 /*return*/];
7761
- }
7762
- });
7763
- });
7764
- };
7765
- /**
7766
- * List all available OpenAI models that can be used
7767
- */
7768
- OpenAiExecutionTools.prototype.listModels = function () {
7769
- /*
7770
- Note: Dynamic listing of the models
7771
- const models = await this.openai.models.list({});
7772
-
7773
- console.log({ models });
7774
- console.log(models.data);
7775
- */
7776
- return OPENAI_MODELS;
7777
- };
7778
- /**
7779
- * Calls OpenAI API to use a chat model.
7780
- */
7781
- OpenAiExecutionTools.prototype.callChatModel = function (prompt) {
7782
- return __awaiter(this, void 0, void 0, function () {
7783
- var content, parameters, modelRequirements, expectFormat, client, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
7784
- return __generator(this, function (_a) {
7785
- switch (_a.label) {
7786
- case 0:
7787
- if (this.options.isVerbose) {
7788
- console.info('💬 OpenAI callChatModel call', { prompt: prompt });
7789
- }
7790
- content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements, expectFormat = prompt.expectFormat;
7791
- return [4 /*yield*/, this.getClient()];
7792
- case 1:
7793
- client = _a.sent();
7794
- // TODO: [☂] Use here more modelRequirements
7795
- if (modelRequirements.modelVariant !== 'CHAT') {
7796
- throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
7797
- }
7798
- modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
7799
- modelSettings = {
7800
- model: modelName,
7801
- max_tokens: modelRequirements.maxTokens,
7802
- // <- TODO: [🌾] Make some global max cap for maxTokens
7803
- temperature: modelRequirements.temperature,
7804
- // <- TODO: [🈁] Use `seed` here AND/OR use `isDeterministic` for the entire execution tools
7805
- // <- Note: [🧆]
7806
- };
7807
- if (expectFormat === 'JSON') {
7808
- modelSettings.response_format = {
7809
- type: 'json_object',
7810
- };
7811
- }
7812
- rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
7813
- rawRequest = __assign(__assign({}, modelSettings), { messages: __spreadArray(__spreadArray([], __read((modelRequirements.systemMessage === undefined
7814
- ? []
7815
- : [
7816
- {
7817
- role: 'system',
7818
- content: modelRequirements.systemMessage,
7819
- },
7820
- ])), false), [
7821
- {
7822
- role: 'user',
7823
- content: rawPromptContent,
7824
- },
7825
- ], false), user: this.options.user });
7826
- start = getCurrentIsoDate();
7827
- if (this.options.isVerbose) {
7828
- console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
7829
- }
7830
- return [4 /*yield*/, client.chat.completions.create(rawRequest)];
7831
- case 2:
7832
- rawResponse = _a.sent();
7833
- if (this.options.isVerbose) {
7834
- console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
7835
- }
7836
- if (!rawResponse.choices[0]) {
7837
- throw new PipelineExecutionError('No choices from OpenAI');
7838
- }
7839
- if (rawResponse.choices.length > 1) {
7840
- // TODO: This should maybe be only a warning
7841
- throw new PipelineExecutionError('More than one choice from OpenAI');
7842
- }
7843
- resultContent = rawResponse.choices[0].message.content;
7844
- // eslint-disable-next-line prefer-const
7845
- complete = getCurrentIsoDate();
7846
- usage = computeOpenAiUsage(content, resultContent || '', rawResponse);
7847
- if (resultContent === null) {
7848
- throw new PipelineExecutionError('No response message from OpenAI');
7849
- }
7850
- return [2 /*return*/, {
7851
- content: resultContent,
7852
- modelName: rawResponse.model || modelName,
7853
- timing: {
7854
- start: start,
7855
- complete: complete,
7856
- },
7857
- usage: usage,
7858
- rawPromptContent: rawPromptContent,
7859
- rawRequest: rawRequest,
7860
- rawResponse: rawResponse,
7861
- // <- [🗯]
7862
- }];
7863
- }
7864
- });
7865
- });
7866
- };
7867
- /**
7868
- * Calls OpenAI API to use a completion model.
7869
- */
7870
- OpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
7871
- return __awaiter(this, void 0, void 0, function () {
7872
- var content, parameters, modelRequirements, client, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
7873
- return __generator(this, function (_a) {
7874
- switch (_a.label) {
7875
- case 0:
7876
- if (this.options.isVerbose) {
7877
- console.info('🖋 OpenAI callCompletionModel call', { prompt: prompt });
7878
- }
7879
- content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
7880
- return [4 /*yield*/, this.getClient()];
7881
- case 1:
7882
- client = _a.sent();
7883
- // TODO: [☂] Use here more modelRequirements
7884
- if (modelRequirements.modelVariant !== 'COMPLETION') {
7885
- throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
7886
- }
7887
- modelName = modelRequirements.modelName || this.getDefaultCompletionModel().modelName;
7888
- modelSettings = {
7889
- model: modelName,
7890
- max_tokens: modelRequirements.maxTokens || 2000,
7891
- // <- TODO: [🌾] Make some global max cap for maxTokens
7892
- temperature: modelRequirements.temperature,
7893
- // <- TODO: [🈁] Use `seed` here AND/OR use `isDeterministic` for the entire execution tools
7894
- // <- Note: [🧆]
7895
- };
7896
- rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
7897
- rawRequest = __assign(__assign({}, modelSettings), { prompt: rawPromptContent, user: this.options.user });
7898
- start = getCurrentIsoDate();
7899
- if (this.options.isVerbose) {
7900
- console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
7901
- }
7902
- return [4 /*yield*/, client.completions.create(rawRequest)];
7903
- case 2:
7904
- rawResponse = _a.sent();
7905
- if (this.options.isVerbose) {
7906
- console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
7907
- }
7908
- if (!rawResponse.choices[0]) {
7909
- throw new PipelineExecutionError('No choices from OpenAI');
7910
- }
7911
- if (rawResponse.choices.length > 1) {
7912
- // TODO: This should maybe be only a warning
7913
- throw new PipelineExecutionError('More than one choice from OpenAI');
7914
- }
7915
- resultContent = rawResponse.choices[0].text;
7916
- // eslint-disable-next-line prefer-const
7917
- complete = getCurrentIsoDate();
7918
- usage = computeOpenAiUsage(content, resultContent || '', rawResponse);
7919
- return [2 /*return*/, {
7920
- content: resultContent,
7921
- modelName: rawResponse.model || modelName,
7922
- timing: {
7923
- start: start,
7924
- complete: complete,
7925
- },
7926
- usage: usage,
7927
- rawPromptContent: rawPromptContent,
7928
- rawRequest: rawRequest,
7929
- rawResponse: rawResponse,
7930
- // <- [🗯]
7931
- }];
7932
- }
7933
- });
7934
- });
7935
- };
7936
- /**
7937
- * Calls OpenAI API to use an embedding model
7938
- */
7939
- OpenAiExecutionTools.prototype.callEmbeddingModel = function (prompt) {
7940
- return __awaiter(this, void 0, void 0, function () {
7941
- var content, parameters, modelRequirements, client, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
7942
- return __generator(this, function (_a) {
7943
- switch (_a.label) {
7944
- case 0:
7945
- if (this.options.isVerbose) {
7946
- console.info('🖋 OpenAI embedding call', { prompt: prompt });
7947
- }
7948
- content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
7949
- return [4 /*yield*/, this.getClient()];
7950
- case 1:
7951
- client = _a.sent();
7952
- // TODO: [☂] Use here more modelRequirements
7953
- if (modelRequirements.modelVariant !== 'EMBEDDING') {
7954
- throw new PipelineExecutionError('Use callEmbeddingModel only for EMBEDDING variant');
7955
- }
7956
- modelName = modelRequirements.modelName || this.getDefaultEmbeddingModel().modelName;
7957
- rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
7958
- rawRequest = {
7959
- input: rawPromptContent,
7960
- model: modelName,
7961
- };
7962
- start = getCurrentIsoDate();
7963
- if (this.options.isVerbose) {
7964
- console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
7965
- }
7966
- return [4 /*yield*/, client.embeddings.create(rawRequest)];
7967
- case 2:
7968
- rawResponse = _a.sent();
7969
- if (this.options.isVerbose) {
7970
- console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
7971
- }
7972
- if (rawResponse.data.length !== 1) {
7973
- throw new PipelineExecutionError("Expected exactly 1 data item in response, got ".concat(rawResponse.data.length));
7974
- }
7975
- resultContent = rawResponse.data[0].embedding;
7976
- // eslint-disable-next-line prefer-const
7977
- complete = getCurrentIsoDate();
7978
- usage = computeOpenAiUsage(content, '', rawResponse);
7979
- return [2 /*return*/, {
7980
- content: resultContent,
7981
- modelName: rawResponse.model || modelName,
7982
- timing: {
7983
- start: start,
7984
- complete: complete,
7985
- },
7986
- usage: usage,
7987
- rawPromptContent: rawPromptContent,
7988
- rawRequest: rawRequest,
7989
- rawResponse: rawResponse,
7990
- // <- [🗯]
7991
- }];
7992
- }
7993
- });
7994
- });
7995
- };
7996
- // <- Note: [🤖] callXxxModel
7997
- /**
7998
- * Get the model that should be used as default
7999
- */
8000
- OpenAiExecutionTools.prototype.getDefaultModel = function (defaultModelName) {
8001
- var model = OPENAI_MODELS.find(function (_a) {
8002
- var modelName = _a.modelName;
8003
- return modelName === defaultModelName;
8004
- });
8005
- if (model === undefined) {
8006
- throw new UnexpectedError(spaceTrim__default["default"](function (block) {
8007
- return "\n Cannot find model in OpenAI models with name \"".concat(defaultModelName, "\" which should be used as default.\n\n Available models:\n ").concat(block(OPENAI_MODELS.map(function (_a) {
8008
- var modelName = _a.modelName;
8009
- return "- \"".concat(modelName, "\"");
8010
- }).join('\n')), "\n\n ");
8011
- }));
8012
- }
8013
- return model;
8014
- };
8015
- /**
8016
- * Default model for chat variant.
8017
- */
8018
- OpenAiExecutionTools.prototype.getDefaultChatModel = function () {
8019
- return this.getDefaultModel('gpt-4o');
8020
- };
8021
- /**
8022
- * Default model for completion variant.
8023
- */
8024
- OpenAiExecutionTools.prototype.getDefaultCompletionModel = function () {
8025
- return this.getDefaultModel('gpt-3.5-turbo-instruct');
8026
- };
8027
- /**
8028
- * Default model for embedding variant.
8029
- */
8030
- OpenAiExecutionTools.prototype.getDefaultEmbeddingModel = function () {
8031
- return this.getDefaultModel('text-embedding-3-large');
8032
- };
8033
- return OpenAiExecutionTools;
8034
- }());
8035
- /**
8036
- * TODO: [🧠][🧙‍♂️] Maybe there can be some wizard for those who want to use just OpenAI
8037
- * TODO: Maybe create some common util for callChatModel and callCompletionModel
8038
- * TODO: Maybe make custom OpenAiError
8039
- * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
8040
- * TODO: [🧠][🌰] Allow passing `title` for tracking purposes
8041
- */
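A hedged sketch of calling the chat model above. The prompt shape follows the destructuring in `callChatModel` (`content`, `parameters`, `modelRequirements`, `expectFormat`); the full `Prompt` type is not part of this diff, so no fields beyond those are assumed:

```ts
import { OpenAiExecutionTools } from '@promptbook/openai';

const openAiTools = new OpenAiExecutionTools({ apiKey: process.env.OPENAI_API_KEY });

const result = await openAiTools.callChatModel({
    content: 'Write a haiku about {topic}', // <- {topic} is filled in by replaceParameters
    parameters: { topic: 'the sea' },
    modelRequirements: {
        modelVariant: 'CHAT',
        modelName: 'gpt-4o', // <- omit to fall back to getDefaultChatModel()
        systemMessage: 'You are a poet.',
        temperature: 0.7,
    },
});

console.info(result.content, result.usage.price);
```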
8042
-
8043
- /**
8044
- * Execution Tools for calling OpenAI API
8045
- *
8046
- * @public exported from `@promptbook/openai`
8047
- */
8048
- var createOpenAiExecutionTools = Object.assign(function (options) {
8049
- // TODO: !!!!!! If running in a browser, auto-add `dangerouslyAllowBrowser`
8050
- return new OpenAiExecutionTools(options);
8051
- }, {
8052
- packageName: '@promptbook/openai',
8053
- className: 'OpenAiExecutionTools',
8054
- });
8055
- /**
8056
- * TODO: [🦺] Is there some way to put `packageName` and `className` on top and the function definition at the bottom?
8057
- * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
8058
- */
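The `Object.assign` pattern above attaches registry metadata directly to the factory function, so `packageName` and `className` can be read without invoking it. A minimal sketch of the pattern:

```ts
const factory = Object.assign(
    (options: { apiKey?: string }) => new OpenAiExecutionTools(options),
    {
        packageName: '@promptbook/openai',
        className: 'OpenAiExecutionTools',
    },
);

console.info(factory.packageName); // <- '@promptbook/openai', available without calling the factory
```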
8059
-
8060
- /**
8061
- * @@@
8062
- *
8063
- * TODO: !!!!!! Remove EXECUTION_TOOLS_CLASSES and use $llmToolsRegister instead
8064
- *
8065
- * @private internal type for `createLlmToolsFromConfiguration`
8066
- */
8067
- var EXECUTION_TOOLS_CLASSES = {
8068
- createOpenAiExecutionTools: createOpenAiExecutionTools,
8069
- createAnthropicClaudeExecutionTools: createAnthropicClaudeExecutionTools,
8070
- createAzureOpenAiExecutionTools: function (options) {
8071
- return new AzureOpenAiExecutionTools(
8072
- // <- TODO: [🧱] Implement in a functional (not new Class) way
8073
- options);
8074
- },
8075
- // <- Note: [🦑] Add here new LLM provider
8076
- };
8077
- /**
8078
- * TODO: !!!!!!! Make a global register for this
8079
- * TODO: [🧠][🎌] Adding this should be the responsibility of each provider package, NOT this one central place
8080
- */
8081
-
8082
- /**
8083
- * @@@
8084
- *
8085
- * Note: This function is not cached, every call creates a new instance of `MultipleLlmExecutionTools`
8086
- *
8087
- * @returns @@@
8088
- * @public exported from `@promptbook/core`
8089
- */
8090
- function createLlmToolsFromConfiguration(configuration, options) {
8091
- if (options === void 0) { options = {}; }
8092
- var _a = options.isVerbose, isVerbose = _a === void 0 ? false : _a;
8093
- var llmTools = configuration.map(function (llmConfiguration) {
8094
- var constructor = EXECUTION_TOOLS_CLASSES["create".concat(llmConfiguration.className)];
8095
- if (!constructor) {
8096
- throw new Error(spaceTrim__default["default"](function (block) { return "\n There is no constructor for LLM provider `".concat(llmConfiguration.className, "`\n\n\n @@@\n\n Available constructors are:\n ").concat(block('@@@'), "\n\n\n "); }));
6594
+ if (registeredItem === undefined) {
6595
+ throw new Error(spaceTrim__default["default"](function (block) { return "\n There is no constructor for LLM provider `".concat(llmConfiguration.className, "` from `").concat(llmConfiguration.packageName, "`\n\n You have probably forgotten to install and import the provider package.\n To fix this issue, you can:\n\n Install:\n\n > npm install ").concat(llmConfiguration.packageName, "\n\n And import:\n\n > import '").concat(llmConfiguration.packageName, "';\n\n\n ").concat(block($registeredLlmToolsMessage()), "\n "); }));
8097
6596
  }
8098
- return constructor(__assign({ isVerbose: isVerbose }, llmConfiguration.options));
6597
+ return registeredItem(__assign({ isVerbose: isVerbose }, llmConfiguration.options));
8099
6598
  });
8100
6599
  return joinLlmExecutionTools.apply(void 0, __spreadArray([], __read(llmTools), false));
8101
6600
  }
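A sketch of feeding a configuration into `createLlmToolsFromConfiguration`: each entry's `className` is resolved to a constructor (via `EXECUTION_TOOLS_CLASSES['create' + className]` in the old code, via the register in the new code), and the resulting tools are merged by `joinLlmExecutionTools`:

```ts
const llm = createLlmToolsFromConfiguration(
    [
        {
            title: 'OpenAI',
            packageName: '@promptbook/openai',
            className: 'OpenAiExecutionTools', // <- looked up as 'create' + className
            options: { apiKey: process.env.OPENAI_API_KEY },
        },
    ],
    { isVerbose: true },
);
```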
@@ -8130,7 +6629,7 @@
8130
6629
  var configuration = createLlmToolsFromConfigurationFromEnv();
8131
6630
  if (configuration.length === 0) {
8132
6631
  // TODO: [🥃]
8133
- throw new Error(spaceTrim__default["default"]("\n No LLM tools found in the environment\n\n !!!!!!!@@@@You have maybe forgotten to two things:\n !!!!!!! List all available LLM tools in your environment\n - Azure \n - OpenAI (not imported)\n\n Please set one of environment variables:\n - OPENAI_API_KEY\n - ANTHROPIC_CLAUDE_API_KEY\n "));
6632
+ throw new Error(spaceTrim__default["default"](function (block) { return "\n No LLM tools found in the environment\n\n Please set one of these environment variables:\n - OPENAI_API_KEY\n - ANTHROPIC_CLAUDE_API_KEY\n\n ".concat(block($registeredLlmToolsMessage()), "\n "); }));
8134
6633
  }
8135
6634
  return createLlmToolsFromConfiguration(configuration, options);
8136
6635
  }
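The final hunk above fails fast when the environment yields an empty configuration, so setting one supported key before the call is enough to pass it. A sketch; the enclosing function's name is outside this hunk, so the call below is taken from this package's typings and is an assumption:

```ts
process.env.OPENAI_API_KEY ??= 'sk-...'; // <- or ANTHROPIC_CLAUDE_API_KEY

const llm = getLlmToolsForTestingAndScriptsAndPlayground(); // <- name assumed from the typings
```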