@promptbook/core 0.66.0-7 → 0.66.0-9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. package/esm/index.es.js +247 -1736
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/anthropic-claude.index.d.ts +0 -2
  4. package/esm/typings/src/_packages/azure-openai.index.d.ts +4 -0
  5. package/esm/typings/src/_packages/cli.index.d.ts +8 -0
  6. package/esm/typings/src/_packages/core.index.d.ts +2 -0
  7. package/esm/typings/src/_packages/types.index.d.ts +22 -14
  8. package/esm/typings/src/_packages/utils.index.d.ts +7 -7
  9. package/esm/typings/src/execution/LlmExecutionToolsConstructor.d.ts +1 -1
  10. package/esm/typings/src/llm-providers/_common/$llmToolsMetadataRegister.d.ts +2 -2
  11. package/esm/typings/src/llm-providers/_common/$llmToolsRegister.d.ts +2 -2
  12. package/esm/typings/src/llm-providers/_common/$registeredLlmToolsMessage.d.ts +9 -0
  13. package/esm/typings/src/llm-providers/_common/LlmToolsConfiguration.d.ts +1 -1
  14. package/esm/typings/src/llm-providers/_common/LlmToolsMetadata.d.ts +1 -1
  15. package/esm/typings/src/llm-providers/_common/createLlmToolsFromConfigurationFromEnv.d.ts +1 -0
  16. package/esm/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts +1 -1
  17. package/esm/typings/src/llm-providers/anthropic-claude/register-constructor.d.ts +1 -0
  18. package/esm/typings/src/llm-providers/azure-openai/createAzureOpenAiExecutionTools.d.ts +15 -0
  19. package/esm/typings/src/llm-providers/azure-openai/register-configuration.d.ts +9 -0
  20. package/esm/typings/src/llm-providers/azure-openai/register-constructor.d.ts +12 -0
  21. package/esm/typings/src/llm-providers/openai/register-constructor.d.ts +1 -0
  22. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +1 -0
  23. package/esm/typings/src/llm-providers/remote/interfaces/{Promptbook_Server_Error.d.ts → PromptbookServer_Error.d.ts} +1 -1
  24. package/esm/typings/src/llm-providers/remote/interfaces/PromptbookServer_ListModels_Request.d.ts +34 -0
  25. package/esm/typings/src/llm-providers/remote/interfaces/PromptbookServer_ListModels_Response.d.ts +15 -0
  26. package/esm/typings/src/llm-providers/remote/interfaces/{Promptbook_Server_Progress.d.ts → PromptbookServer_Prompt_Progress.d.ts} +1 -1
  27. package/esm/typings/src/llm-providers/remote/interfaces/{Promptbook_Server_Request.d.ts → PromptbookServer_Prompt_Request.d.ts} +15 -3
  28. package/esm/typings/src/llm-providers/remote/interfaces/{Promptbook_Server_Response.d.ts → PromptbookServer_Prompt_Response.d.ts} +1 -1
  29. package/esm/typings/src/llm-providers/remote/interfaces/RemoteLlmExecutionToolsOptions.d.ts +1 -7
  30. package/esm/typings/src/llm-providers/remote/startRemoteServer.d.ts +1 -0
  31. package/esm/typings/src/utils/{Register.d.ts → $Register.d.ts} +6 -2
  32. package/esm/typings/src/utils/environment/{getGlobalScope.d.ts → $getGlobalScope.d.ts} +1 -1
  33. package/esm/typings/src/utils/organization/f.d.ts +6 -0
  34. package/package.json +1 -6
  35. package/umd/index.umd.js +251 -1737
  36. package/umd/index.umd.js.map +1 -1
  37. package/esm/typings/src/llm-providers/_common/config.d.ts +0 -14
  38. package/esm/typings/src/llm-providers/anthropic-claude/register1.d.ts +0 -4
  39. /package/esm/typings/src/llm-providers/mocked/{fakeTextToExpectations.d.ts → $fakeTextToExpectations.d.ts} +0 -0
  40. /package/esm/typings/src/utils/{currentDate.d.ts → $currentDate.d.ts} +0 -0
  41. /package/esm/typings/src/utils/environment/{isRunningInBrowser.d.ts → $isRunningInBrowser.d.ts} +0 -0
  42. /package/esm/typings/src/utils/environment/{isRunningInNode.d.ts → $isRunningInNode.d.ts} +0 -0
  43. /package/esm/typings/src/utils/environment/{isRunningInWebWorker.d.ts → $isRunningInWebWorker.d.ts} +0 -0
  44. /package/esm/typings/src/utils/files/{isDirectoryExisting.d.ts → $isDirectoryExisting.d.ts} +0 -0
  45. /package/esm/typings/src/utils/files/{isDirectoryExisting.test.d.ts → $isDirectoryExisting.test.d.ts} +0 -0
  46. /package/esm/typings/src/utils/files/{isFileExisting.d.ts → $isFileExisting.d.ts} +0 -0
  47. /package/esm/typings/src/utils/files/{isFileExisting.test.d.ts → $isFileExisting.test.d.ts} +0 -0
  48. /package/esm/typings/src/utils/files/{listAllFiles.d.ts → $listAllFiles.d.ts} +0 -0
  49. /package/esm/typings/src/utils/files/{listAllFiles.test.d.ts → $listAllFiles.test.d.ts} +0 -0
  50. /package/esm/typings/src/utils/random/{randomSeed.d.ts → $randomSeed.d.ts} +0 -0
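The headline change in this release: the LLM provider implementations (Anthropic Claude, OpenAI, Azure OpenAI, the socket.io remote client) are no longer compiled into the core UMD/ESM bundles and are instead looked up at runtime through the new global register (see the `$Register` hunks below). A hypothetical consumer-side sketch of what that implies, mirroring the install/import error message added in this version; the exact public API surface is not confirmed by this diff:

```ts
// Hypothetical sketch (based on the error message added in this release):
// providers must now be installed and imported explicitly, because
// `@promptbook/core` no longer bundles them.
//
//   npm install @promptbook/anthropic-claude
//
import '@promptbook/anthropic-claude'; // <- side-effect import registers the provider constructor
```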
package/umd/index.umd.js CHANGED
@@ -1,8 +1,8 @@
  (function (global, factory) {
- typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('spacetrim'), require('prettier'), require('prettier/parser-html'), require('crypto-js/enc-hex'), require('crypto-js/sha256'), require('socket.io-client'), require('@anthropic-ai/sdk'), require('colors'), require('@azure/openai'), require('openai'), require('moment')) :
- typeof define === 'function' && define.amd ? define(['exports', 'spacetrim', 'prettier', 'prettier/parser-html', 'crypto-js/enc-hex', 'crypto-js/sha256', 'socket.io-client', '@anthropic-ai/sdk', 'colors', '@azure/openai', 'openai', 'moment'], factory) :
- (global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global["promptbook-core"] = {}, global.spaceTrim, global.prettier, global.parserHtml, global.hexEncoder, global.sha256, global.socket_ioClient, global.Anthropic, global.colors, global.openai, global.OpenAI, global.moment));
- })(this, (function (exports, spaceTrim, prettier, parserHtml, hexEncoder, sha256, socket_ioClient, Anthropic, colors, openai, OpenAI, moment) { 'use strict';
+ typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('spacetrim'), require('prettier'), require('prettier/parser-html'), require('crypto-js/enc-hex'), require('crypto-js/sha256'), require('moment')) :
+ typeof define === 'function' && define.amd ? define(['exports', 'spacetrim', 'prettier', 'prettier/parser-html', 'crypto-js/enc-hex', 'crypto-js/sha256', 'moment'], factory) :
+ (global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global["promptbook-core"] = {}, global.spaceTrim, global.prettier, global.parserHtml, global.hexEncoder, global.sha256, global.moment));
+ })(this, (function (exports, spaceTrim, prettier, parserHtml, hexEncoder, sha256, moment) { 'use strict';
 
  function _interopDefaultLegacy (e) { return e && typeof e === 'object' && 'default' in e ? e : { 'default': e }; }
 
@@ -10,16 +10,13 @@
  var parserHtml__default = /*#__PURE__*/_interopDefaultLegacy(parserHtml);
  var hexEncoder__default = /*#__PURE__*/_interopDefaultLegacy(hexEncoder);
  var sha256__default = /*#__PURE__*/_interopDefaultLegacy(sha256);
- var Anthropic__default = /*#__PURE__*/_interopDefaultLegacy(Anthropic);
- var colors__default = /*#__PURE__*/_interopDefaultLegacy(colors);
- var OpenAI__default = /*#__PURE__*/_interopDefaultLegacy(OpenAI);
  var moment__default = /*#__PURE__*/_interopDefaultLegacy(moment);
 
  // ⚠️ WARNING: This code has been generated so that any manual changes will be overwritten
  /**
  * The version of the Promptbook library
  */
- var PROMPTBOOK_VERSION = '0.66.0-6';
+ var PROMPTBOOK_VERSION = '0.66.0-8';
  // TODO: !!!! List here all the versions and annotate + put into script
 
  /*! *****************************************************************************
@@ -1650,7 +1647,7 @@
  });
  }
 
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.66.0-6",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.66.0-6",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.66.0-6",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.66.0-6",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the 
persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.66.0-8",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.66.0-8",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.66.0-8",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.66.0-8",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the 
persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
 
  var defaultDiacriticsRemovalMap = [
  {
@@ -2248,8 +2245,37 @@
  */
  MultipleLlmExecutionTools.prototype.checkConfiguration = function () {
  return __awaiter(this, void 0, void 0, function () {
- return __generator(this, function (_a) {
- return [2 /*return*/];
+ var _a, _b, llmExecutionTools, e_1_1;
+ var e_1, _c;
+ return __generator(this, function (_d) {
+ switch (_d.label) {
+ case 0:
+ _d.trys.push([0, 5, 6, 7]);
+ _a = __values(this.llmExecutionTools), _b = _a.next();
+ _d.label = 1;
+ case 1:
+ if (!!_b.done) return [3 /*break*/, 4];
+ llmExecutionTools = _b.value;
+ return [4 /*yield*/, llmExecutionTools.checkConfiguration()];
+ case 2:
+ _d.sent();
+ _d.label = 3;
+ case 3:
+ _b = _a.next();
+ return [3 /*break*/, 1];
+ case 4: return [3 /*break*/, 7];
+ case 5:
+ e_1_1 = _d.sent();
+ e_1 = { error: e_1_1 };
+ return [3 /*break*/, 7];
+ case 6:
+ try {
+ if (_b && !_b.done && (_c = _a.return)) _c.call(_a);
+ }
+ finally { if (e_1) throw e_1.error; }
+ return [7 /*endfinally*/];
+ case 7: return [2 /*return*/];
+ }
  });
  });
  };
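The added state machine above is the ES5 downlevel of a simple sequential loop. A minimal reconstruction (not the shipped TypeScript source) of what `MultipleLlmExecutionTools.checkConfiguration` now does, where it previously returned immediately:

```ts
// Assumed minimal interface; the real LlmExecutionTools type has more members.
interface LlmExecutionTools {
    checkConfiguration(): Promise<void>;
}

// Reconstruction of the transpiled generator above: check every wrapped tool in
// order; the first rejection fails the whole configuration check.
async function checkConfiguration(tools: ReadonlyArray<LlmExecutionTools>): Promise<void> {
    for (const llmExecutionTools of tools) {
        await llmExecutionTools.checkConfiguration();
    }
}
```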
@@ -2259,8 +2285,8 @@
  */
  MultipleLlmExecutionTools.prototype.listModels = function () {
  return __awaiter(this, void 0, void 0, function () {
- var availableModels, _a, _b, llmExecutionTools, models, e_1_1;
- var e_1, _c;
+ var availableModels, _a, _b, llmExecutionTools, models, e_2_1;
+ var e_2, _c;
  return __generator(this, function (_d) {
  switch (_d.label) {
  case 0:
@@ -2283,14 +2309,14 @@
  return [3 /*break*/, 2];
  case 5: return [3 /*break*/, 8];
  case 6:
- e_1_1 = _d.sent();
- e_1 = { error: e_1_1 };
+ e_2_1 = _d.sent();
+ e_2 = { error: e_2_1 };
  return [3 /*break*/, 8];
  case 7:
  try {
  if (_b && !_b.done && (_c = _a.return)) _c.call(_a);
  }
- finally { if (e_1) throw e_1.error; }
+ finally { if (e_2) throw e_2.error; }
  return [7 /*endfinally*/];
  case 8: return [2 /*return*/, availableModels];
  }
@@ -2323,8 +2349,8 @@
  */
  MultipleLlmExecutionTools.prototype.callCommonModel = function (prompt) {
  return __awaiter(this, void 0, void 0, function () {
- var errors, _a, _b, llmExecutionTools, _c, error_1, e_2_1;
- var e_2, _d;
+ var errors, _a, _b, llmExecutionTools, _c, error_1, e_3_1;
+ var e_3, _d;
  var _this = this;
  return __generator(this, function (_e) {
  switch (_e.label) {
@@ -2380,14 +2406,14 @@
  return [3 /*break*/, 2];
  case 14: return [3 /*break*/, 17];
  case 15:
- e_2_1 = _e.sent();
- e_2 = { error: e_2_1 };
+ e_3_1 = _e.sent();
+ e_3 = { error: e_3_1 };
  return [3 /*break*/, 17];
  case 16:
  try {
  if (_b && !_b.done && (_d = _a.return)) _d.call(_a);
  }
- finally { if (e_2) throw e_2.error; }
+ finally { if (e_3) throw e_3.error; }
  return [7 /*endfinally*/];
  case 17:
  if (errors.length === 1) {
@@ -6522,7 +6548,7 @@
  /**
  * @@@
  *
- * Note: `$` is used to indicate that this function is not a pure function - it access global
+ * Note: `$` is used to indicate that this function is not a pure function - it access global scope
  *
  * @public exported from `@promptbook/utils`
  */
@@ -6536,41 +6562,45 @@
  /**
  * Register is @@@
  *
+ * Note: `$` is used to indicate that this function is not a pure function - it accesses and adds variables in global scope.
+ *
  * @private internal utility, exported are only signleton instances of this class
  */
- var Register = /** @class */ (function () {
- function Register(storage) {
- this.storage = storage;
+ var $Register = /** @class */ (function () {
+ function $Register(storageName) {
+ this.storageName = storageName;
+ storageName = "_promptbook_".concat(storageName);
+ var globalScope = $getGlobalScope();
+ if (globalScope[storageName] === undefined) {
+ globalScope[storageName] = [];
+ }
+ else if (!Array.isArray(globalScope[storageName])) {
+ throw new UnexpectedError("Expected (global) ".concat(storageName, " to be an array, but got ").concat(typeof globalScope[storageName]));
+ }
+ this.storage = globalScope[storageName];
  }
- Register.prototype.list = function () {
+ $Register.prototype.list = function () {
  // <- TODO: ReadonlyDeep<Array<TRegistered>>
  return this.storage;
  };
- Register.prototype.register = function (registered) {
+ $Register.prototype.register = function (registered) {
  // <- TODO: What to return here
  var packageName = registered.packageName, className = registered.className;
  var existingRegistrationIndex = this.storage.findIndex(function (item) { return item.packageName === packageName && item.className === className; });
  var existingRegistration = this.storage[existingRegistrationIndex];
- if (existingRegistration) {
- console.warn("!!!!!! Re-registering ".concat(packageName, ".").concat(className, " again"));
- this.storage[existingRegistrationIndex] = registered;
+ // TODO: !!!!!! Global IS_VERBOSE mode
+ if (!existingRegistration) {
+ console.warn("[\uD83D\uDCE6] Registering `".concat(packageName, ".").concat(className, "` to `").concat(this.storageName, "`"));
+ this.storage.push(registered);
  }
  else {
- this.storage.push(registered);
+ console.warn("[\uD83D\uDCE6] Re-registering `".concat(packageName, ".").concat(className, "` to `").concat(this.storageName, "`"));
+ this.storage[existingRegistrationIndex] = registered;
  }
  };
- return Register;
+ return $Register;
  }());
 
- // TODO: !!!!!! Move this logic to Register and rename to $Register
- var globalScope = $getGlobalScope();
- if (globalScope.$llmToolsMetadataRegister === undefined) {
- globalScope.$llmToolsMetadataRegister = [];
- }
- else if (!Array.isArray(globalScope.$llmToolsMetadataRegister)) {
- throw new UnexpectedError("Expected $llmToolsMetadataRegister to be an array, but got ".concat(typeof globalScope.$llmToolsMetadataRegister));
- }
- var _ = globalScope.$llmToolsMetadataRegister;
  /**
  * @@@
  *
@@ -6578,8 +6608,7 @@
  * @singleton Only one instance of each register is created per build, but thare can be more @@@
  * @public exported from `@promptbook/core`
  */
- var $llmToolsMetadataRegister = new Register(_);
- $getGlobalScope().$llmToolsMetadataRegister;
+ var $llmToolsMetadataRegister = new $Register('llm_tools_metadata');
 
  /**
  * @@@
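The `Register` → `$Register` change above moves the backing array from a per-bundle variable to a `_promptbook_*`-prefixed key on the global scope, so multiple bundled copies of the library in one process share a single register. A minimal TypeScript sketch reconstructed from the transpiled output (the generic parameter and error type are assumptions):

```ts
type Registered = { packageName: string; className: string };

class $Register<TRegistered extends Registered> {
    private readonly storage: Array<TRegistered>;

    public constructor(private readonly storageName: string) {
        // Storage lives on the global scope, keyed by `_promptbook_${storageName}`,
        // so every copy of the library sees the same array.
        const globalScope = globalThis as unknown as Record<string, unknown>;
        const key = `_promptbook_${storageName}`;
        if (globalScope[key] === undefined) {
            globalScope[key] = [];
        } else if (!Array.isArray(globalScope[key])) {
            throw new Error(`Expected (global) ${key} to be an array`);
        }
        this.storage = globalScope[key] as Array<TRegistered>;
    }

    public list(): ReadonlyArray<TRegistered> {
        return this.storage;
    }

    public register(registered: TRegistered): void {
        const existingIndex = this.storage.findIndex(
            ({ packageName, className }) =>
                packageName === registered.packageName && className === registered.className,
        );
        if (existingIndex === -1) {
            this.storage.push(registered);
        } else {
            this.storage[existingIndex] = registered; // <- re-registering replaces the entry
        }
    }
}
```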
@@ -6588,1738 +6617,176 @@
  * @singleton Only one instance of each register is created per build, but thare can be more @@@
  * @public exported from `@promptbook/core`
  */
- var $llmToolsRegister = new Register([
- // TODO: !!!!!! Take from global scope
- ]);
+ var $llmToolsRegister = new $Register('llm_execution_tools_constructors');
 
  /**
- * Remote server is a proxy server that uses its execution tools internally and exposes the executor interface externally.
+ * Creates a message with all registered LLM tools
  *
- * You can simply use `RemoteExecutionTools` on client-side javascript and connect to your remote server.
- * This is useful to make all logic on browser side but not expose your API keys or no need to use customer's GPU.
+ * Note: This function is used to create a (error) message when there is no constructor for some LLM provider
  *
- * @see https://github.com/webgptorg/promptbook#remote-server
- * @public exported from `@promptbook/remote-client`
+ * @private internal function of `createLlmToolsFromConfiguration` and `createLlmToolsFromEnv`
  */
- var RemoteLlmExecutionTools = /** @class */ (function () {
- function RemoteLlmExecutionTools(options) {
- this.options = options;
- }
- Object.defineProperty(RemoteLlmExecutionTools.prototype, "title", {
- get: function () {
- // TODO: [🧠] Maybe fetch title+description from the remote server (as well as if model methods are defined)
- return 'Remote server';
- },
- enumerable: false,
- configurable: true
- });
- Object.defineProperty(RemoteLlmExecutionTools.prototype, "description", {
- get: function () {
- return 'Use all models by your remote server';
- },
- enumerable: false,
- configurable: true
- });
- /**
- * Check the configuration of all execution tools
- */
- RemoteLlmExecutionTools.prototype.checkConfiguration = function () {
- return __awaiter(this, void 0, void 0, function () {
- return __generator(this, function (_a) {
- return [2 /*return*/];
- });
- });
- };
- /**
- * List all available models that can be used
- */
- RemoteLlmExecutionTools.prototype.listModels = function () {
- return __awaiter(this, void 0, void 0, function () {
- return __generator(this, function (_a) {
- return [2 /*return*/, (this.options.models ||
- [
- /* !!!!!! */
- ])];
- });
- });
- };
- /**
- * Creates a connection to the remote proxy server.
- */
- RemoteLlmExecutionTools.prototype.makeConnection = function () {
- var _this = this;
- return new Promise(
- // <- TODO: [🧱] Implement in a functional (not new Class) way
- function (resolve, reject) {
- var socket = socket_ioClient.io(_this.options.remoteUrl, {
- path: _this.options.path,
- // path: `${this.remoteUrl.pathname}/socket.io`,
- transports: [/*'websocket', <- TODO: [🌬] Make websocket transport work */ 'polling'],
- });
- // console.log('Connecting to', this.options.remoteUrl.href, { socket });
- socket.on('connect', function () {
- resolve(socket);
- });
- // TODO: !!!! Better timeout handling
- setTimeout(function () {
- reject(new Error("Timeout while connecting to ".concat(_this.options.remoteUrl)));
- }, 1000 /* <- TODO: Timeout to config */);
- });
- };
+ function $registeredLlmToolsMessage() {
+ var e_1, _a, e_2, _b;
  /**
- * Calls remote proxy server to use a chat model
+ * Mixes registered LLM tools from $llmToolsMetadataRegister and $llmToolsRegister
  */
- RemoteLlmExecutionTools.prototype.callChatModel = function (prompt) {
- if (this.options.isVerbose) {
- console.info("\uD83D\uDD8B Remote callChatModel call");
+ var all = [];
+ var _loop_1 = function (packageName, className) {
+ if (all.some(function (item) { return item.packageName === packageName && item.className === className; })) {
+ return "continue";
  }
- return /* not await */ this.callCommonModel(prompt);
+ all.push({ packageName: packageName, className: className });
  };
- /**
- * Calls remote proxy server to use a completion model
- */
- RemoteLlmExecutionTools.prototype.callCompletionModel = function (prompt) {
- if (this.options.isVerbose) {
- console.info("\uD83D\uDCAC Remote callCompletionModel call");
+ try {
+ for (var _c = __values($llmToolsMetadataRegister.list()), _d = _c.next(); !_d.done; _d = _c.next()) {
+ var _e = _d.value, packageName = _e.packageName, className = _e.className;
+ _loop_1(packageName, className);
  }
- return /* not await */ this.callCommonModel(prompt);
- };
- /**
- * Calls remote proxy server to use a embedding model
- */
- RemoteLlmExecutionTools.prototype.callEmbeddingModel = function (prompt) {
- if (this.options.isVerbose) {
- console.info("\uD83D\uDCAC Remote callEmbeddingModel call");
+ }
+ catch (e_1_1) { e_1 = { error: e_1_1 }; }
+ finally {
+ try {
+ if (_d && !_d.done && (_a = _c.return)) _a.call(_c);
+ }
+ finally { if (e_1) throw e_1.error; }
+ }
+ var _loop_2 = function (packageName, className) {
+ if (all.some(function (item) { return item.packageName === packageName && item.className === className; })) {
+ return "continue";
  }
- return /* not await */ this.callCommonModel(prompt);
+ all.push({ packageName: packageName, className: className });
  };
- // <- Note: [🤖] callXxxModel
- /**
- * Calls remote proxy server to use both completion or chat model
- */
- RemoteLlmExecutionTools.prototype.callCommonModel = function (prompt) {
- return __awaiter(this, void 0, void 0, function () {
- var socket, promptResult;
- return __generator(this, function (_a) {
- switch (_a.label) {
- case 0: return [4 /*yield*/, this.makeConnection()];
- case 1:
- socket = _a.sent();
- if (this.options.isAnonymous) {
- socket.emit('request', {
- llmToolsConfiguration: this.options.llmToolsConfiguration,
- prompt: prompt,
- // <- TODO: [🛫] `prompt` is NOT fully serializable as JSON, it contains functions which are not serializable
- });
- }
- else {
- socket.emit('request', {
- clientId: this.options.clientId,
- prompt: prompt,
- // <- TODO: [🛫] `prompt` is NOT fully serializable as JSON, it contains functions which are not serializable
- });
- }
- return [4 /*yield*/, new Promise(function (resolve, reject) {
- socket.on('response', function (response) {
- resolve(response.promptResult);
- socket.disconnect();
- });
- socket.on('error', function (error) {
- reject(new PipelineExecutionError(error.errorMessage));
- socket.disconnect();
- });
- })];
- case 2:
- promptResult = _a.sent();
- socket.disconnect();
- return [2 /*return*/, promptResult];
- }
- });
+ try {
+ for (var _f = __values($llmToolsRegister.list()), _g = _f.next(); !_g.done; _g = _f.next()) {
+ var _h = _g.value, packageName = _h.packageName, className = _h.className;
+ _loop_2(packageName, className);
+ }
+ }
+ catch (e_2_1) { e_2 = { error: e_2_1 }; }
+ finally {
+ try {
+ if (_g && !_g.done && (_b = _f.return)) _b.call(_f);
+ }
+ finally { if (e_2) throw e_2.error; }
+ }
+ var metadata = all.map(function (metadata) {
+ var isMetadataAviailable = $llmToolsMetadataRegister
+ .list()
+ .find(function (_a) {
+ var packageName = _a.packageName, className = _a.className;
+ return metadata.packageName === packageName && metadata.className === className;
  });
- };
- return RemoteLlmExecutionTools;
- }());
- /**
- * TODO: [🍓] Allow to list compatible models with each variant
- * TODO: [🗯] RemoteLlmExecutionTools should extend Destroyable and implement IDestroyable
- * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
- * TODO: [🧠] Maybe remove `@promptbook/remote-client` and just use `@promptbook/core`
- */
-
- /**
- * Function computeUsage will create price per one token based on the string value found on openai page
- *
- * @private within the repository, used only as internal helper for `OPENAI_MODELS`
- */
- function computeUsage(value) {
- var _a = __read(value.split(' / '), 2), price = _a[0], tokens = _a[1];
- return parseFloat(price.replace('$', '')) / parseFloat(tokens.replace('M tokens', '')) / 1000000;
- }
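`computeUsage`, removed from the core bundle above (presumably still shipped by the provider packages), turns a human-readable price string into USD per token. A self-contained copy with a worked example:

```ts
// Same logic as the helper above; note that `parseFloat('1M tokens')` parses as 1.
// "$3.00 / 1M tokens" -> 3.00 / 1 / 1_000_000 = 0.000003 USD per token.
function computeUsage(value: string): number {
    const [price, tokens] = value.split(' / ');
    return parseFloat(price.replace('$', '')) / parseFloat(tokens.replace('M tokens', '')) / 1000000;
}

console.log(computeUsage('$3.00 / 1M tokens')); // 0.000003
```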
-
- /**
- * List of available Anthropic Claude models with pricing
- *
- * Note: Done at 2024-08-16
- *
- * @see https://docs.anthropic.com/en/docs/models-overview
- * @public exported from `@promptbook/anthropic-claude`
- */
- var ANTHROPIC_CLAUDE_MODELS = [
- {
- modelVariant: 'CHAT',
- modelTitle: 'Claude 3.5 Sonnet',
- modelName: 'claude-3-5-sonnet-20240620',
- pricing: {
- prompt: computeUsage("$3.00 / 1M tokens"),
- output: computeUsage("$15.00 / 1M tokens"),
- },
- },
- {
- modelVariant: 'CHAT',
- modelTitle: 'Claude 3 Opus',
- modelName: 'claude-3-opus-20240229',
- pricing: {
- prompt: computeUsage("$15.00 / 1M tokens"),
- output: computeUsage("$75.00 / 1M tokens"),
- },
- },
- {
- modelVariant: 'CHAT',
- modelTitle: 'Claude 3 Sonnet',
- modelName: 'claude-3-sonnet-20240229',
- pricing: {
- prompt: computeUsage("$3.00 / 1M tokens"),
- output: computeUsage("$15.00 / 1M tokens"),
- },
- },
- {
- modelVariant: 'CHAT',
- modelTitle: 'Claude 3 Haiku',
- modelName: ' claude-3-haiku-20240307',
- pricing: {
- prompt: computeUsage("$0.25 / 1M tokens"),
- output: computeUsage("$1.25 / 1M tokens"),
- },
- },
- {
- modelVariant: 'CHAT',
- modelTitle: 'Claude 2.1',
- modelName: 'claude-2.1',
- pricing: {
- prompt: computeUsage("$8.00 / 1M tokens"),
- output: computeUsage("$24.00 / 1M tokens"),
- },
- },
- {
- modelVariant: 'CHAT',
- modelTitle: 'Claude 2',
- modelName: 'claude-2.0',
- pricing: {
- prompt: computeUsage("$8.00 / 1M tokens"),
- output: computeUsage("$24.00 / 1M tokens"),
- },
- },
- {
- modelVariant: 'CHAT',
- modelTitle: ' Claude Instant 1.2',
- modelName: 'claude-instant-1.2',
- pricing: {
- prompt: computeUsage("$0.80 / 1M tokens"),
- output: computeUsage("$2.40 / 1M tokens"),
- },
- },
- // TODO: !!! Claude 1 and 2 has also completion versions - ask Hoagy
- ];
- /**
- * Note: [🤖] Add models of new variant
- * TODO: [🧠] !!! Add embedding models OR Anthropic has only chat+completion models?
- * TODO: [🧠] Some mechanism to propagate unsureness
- * TODO: [🧠][👮‍♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...
- * TODO: [🎰] Some mechanism to auto-update available models
- */
-
- /**
- * Get current date in ISO 8601 format
- *
- * @private internal utility
- */
- function getCurrentIsoDate() {
- return new Date().toISOString();
+ var isInstalled = $llmToolsRegister
+ .list()
+ .find(function (_a) {
+ var packageName = _a.packageName, className = _a.className;
+ return metadata.packageName === packageName && metadata.className === className;
+ });
+ return __assign(__assign({}, metadata), { isMetadataAviailable: isMetadataAviailable, isInstalled: isInstalled });
+ });
+ return spaceTrim__default["default"](function (block) { return "\n Available LLM providers are:\n ".concat(block(metadata
+ .map(function (_a, i) {
+ var packageName = _a.packageName, className = _a.className, isMetadataAviailable = _a.isMetadataAviailable, isInstalled = _a.isInstalled;
+ var more;
+ if (just(false)) {
+ more = '';
+ }
+ else if (!isMetadataAviailable && !isInstalled) {
+ // TODO: [�][�] Maybe do allow to do auto-install if package not registered and not found
+ more = "(not installed and no metadata, looks like a unexpected behavior)";
+ }
+ else if (isMetadataAviailable && !isInstalled) {
+ // TODO: [�][�]
+ more = "(not installed)";
+ }
+ else if (!isMetadataAviailable && isInstalled) {
+ more = "(no metadata, looks like a unexpected behavior)";
+ }
+ else if (isMetadataAviailable && isInstalled) {
+ more = "(installed)";
+ }
+ else {
+ more = "(unknown state, looks like a unexpected behavior)";
+ }
+ return "".concat(i + 1, ") `").concat(className, "` from `").concat(packageName, "` ").concat(more);
+ })
+ .join('\n')), "\n "); });
  }
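`$registeredLlmToolsMessage` above cross-references the metadata register with the constructor register and annotates each provider. A readable TypeScript reconstruction (not the shipped source) of just the annotation branch:

```ts
type ProviderRef = { packageName: string; className: string };

// Mirrors the if/else chain above; the annotation strings are copied from the bundle.
function describeProvider(ref: ProviderRef, hasMetadata: boolean, isInstalled: boolean): string {
    let more: string;
    if (hasMetadata && isInstalled) {
        more = '(installed)';
    } else if (hasMetadata && !isInstalled) {
        more = '(not installed)';
    } else if (!hasMetadata && isInstalled) {
        more = '(no metadata, looks like a unexpected behavior)';
    } else {
        more = '(not installed and no metadata, looks like a unexpected behavior)';
    }
    return `\`${ref.className}\` from \`${ref.packageName}\` ${more}`;
}
```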
 
  /**
- * Helper of usage compute
+ * @@@
  *
- * @param content the content of prompt or response
- * @returns part of PromptResultUsageCounts
+ * Note: This function is not cached, every call creates new instance of `MultipleLlmExecutionTools`
  *
- * @private internal utility of LlmExecutionTools
+ * @returns @@@
+ * @public exported from `@promptbook/core`
  */
- function computeUsageCounts(content) {
- return {
- charactersCount: { value: countCharacters(content) },
- wordsCount: { value: countWords(content) },
- sentencesCount: { value: countSentences(content) },
- linesCount: { value: countLines(content) },
- paragraphsCount: { value: countParagraphs(content) },
- pagesCount: { value: countPages(content) },
- };
+ function createLlmToolsFromConfiguration(configuration, options) {
+ if (options === void 0) { options = {}; }
+ var _a = options.isVerbose, isVerbose = _a === void 0 ? false : _a;
+ var llmTools = configuration.map(function (llmConfiguration) {
+ var registeredItem = $llmToolsRegister
+ .list()
+ .find(function (_a) {
+ var packageName = _a.packageName, className = _a.className;
+ return llmConfiguration.packageName === packageName && llmConfiguration.className === className;
+ });
+ if (registeredItem === undefined) {
+ throw new Error(spaceTrim__default["default"](function (block) { return "\n There is no constructor for LLM provider `".concat(llmConfiguration.className, "` from `").concat(llmConfiguration.packageName, "`\n\n You have probably forgotten install and import the provider package.\n To fix this issue, you can:\n\n Install:\n\n > npm install ").concat(llmConfiguration.packageName, "\n\n And import:\n\n > import '").concat(llmConfiguration.packageName, "';\n\n\n ").concat(block($registeredLlmToolsMessage()), "\n "); }));
+ }
+ return registeredItem(__assign({ isVerbose: isVerbose }, llmConfiguration.options));
+ });
+ return joinLlmExecutionTools.apply(void 0, __spreadArray([], __read(llmTools), false));
  }
-
  /**
- * Make UncertainNumber
- *
- * @param value
- *
- * @private utility for initializating UncertainNumber
+ * TODO: [🎌] Togethere with `createLlmToolsFromConfiguration` + 'EXECUTION_TOOLS_CLASSES' gets to `@promptbook/core` ALL model providers, make this more efficient
+ * TODO: [🧠][🎌] Dynamically install required providers
+ * TODO: @@@ write discussion about this - wizzard
+ * TODO: [🧠][🍛] Which name is better `createLlmToolsFromConfig` or `createLlmToolsFromConfiguration`?
+ * TODO: [🧠] Is there some meaningfull way how to test this util
+ * TODO: This should be maybe not under `_common` but under `utils`
  */
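A hypothetical usage sketch of the new `createLlmToolsFromConfiguration`; the entry shape follows the `llmToolsConfiguration` literal visible elsewhere in this diff, and the `apiKey` option is an assumption:

```ts
import { createLlmToolsFromConfiguration } from '@promptbook/core';
import '@promptbook/anthropic-claude'; // <- must be imported, or the constructor lookup throws

const llmTools = createLlmToolsFromConfiguration(
    [
        {
            title: 'Anthropic Claude',
            packageName: '@promptbook/anthropic-claude',
            className: 'AnthropicClaudeExecutionTools',
            options: { apiKey: 'sk-ant-...' }, // <- assumed option shape
        },
    ],
    { isVerbose: true },
);
```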
- function uncertainNumber(value) {
- if (value === null || value === undefined || Number.isNaN(value)) {
- return { value: 0, isUncertain: true };
- }
- return { value: value };
- }
 
  /**
- * Computes the usage of the Anthropic Claude API based on the response from Anthropic Claude
+ * Stores
  *
- * @param promptContent The content of the prompt
- * @param resultContent The content of the result (for embedding prompts or failed prompts pass empty string)
- * @param rawResponse The raw response from Anthropic Claude API
- * @throws {PipelineExecutionError} If the usage is not defined in the response from Anthropic Claude
- * @private internal utility of `AnthropicClaudeExecutionTools`
+ * @public exported from `@promptbook/core`
  */
- function computeAnthropicClaudeUsage(promptContent, // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
- resultContent, rawResponse) {
- var _a, _b;
- if (rawResponse.usage === undefined) {
- throw new PipelineExecutionError('The usage is not defined in the response from Anthropic Claude');
- }
- if (((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.input_tokens) === undefined) {
- throw new PipelineExecutionError('In Anthropic Claude response `usage.prompt_tokens` not defined');
- }
- var inputTokens = rawResponse.usage.input_tokens;
- var outputTokens = ((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.output_tokens) || 0;
- var modelInfo = ANTHROPIC_CLAUDE_MODELS.find(function (model) { return model.modelName === rawResponse.model; });
- var price;
- if (modelInfo === undefined || modelInfo.pricing === undefined) {
- price = uncertainNumber();
- }
- else {
- price = uncertainNumber(inputTokens * modelInfo.pricing.prompt + outputTokens * modelInfo.pricing.output);
+ var MemoryStorage = /** @class */ (function () {
+ function MemoryStorage() {
+ this.storage = {};
  }
- return {
- price: price,
- input: __assign({ tokensCount: uncertainNumber(rawResponse.usage.input_tokens) }, computeUsageCounts(promptContent)),
- output: __assign({ tokensCount: uncertainNumber(outputTokens) }, computeUsageCounts(resultContent)),
- };
- }
- /**
- * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
- */
-
- /**
- * Execution Tools for calling Anthropic Claude API.
- *
- * @public exported from `@promptbook/anthropic-claude`
- * @deprecated use `createAnthropicClaudeExecutionTools` instead
- */
- var AnthropicClaudeExecutionTools = /** @class */ (function () {
- /**
- * Creates Anthropic Claude Execution Tools.
- *
- * @param options which are relevant are directly passed to the Anthropic Claude client
- */
- function AnthropicClaudeExecutionTools(options) {
- if (options === void 0) { options = { isProxied: false }; }
- this.options = options;
+ Object.defineProperty(MemoryStorage.prototype, "length", {
  /**
- * Anthropic Claude API client.
+ * Returns the number of key/value pairs currently present in the list associated with the object.
  */
- this.client = null;
- }
- Object.defineProperty(AnthropicClaudeExecutionTools.prototype, "title", {
- get: function () {
- return 'Anthropic Claude';
- },
- enumerable: false,
- configurable: true
- });
- Object.defineProperty(AnthropicClaudeExecutionTools.prototype, "description", {
  get: function () {
- return 'Use all models provided by Anthropic Claude';
+ return Object.keys(this.storage).length;
  },
  enumerable: false,
  configurable: true
  });
- AnthropicClaudeExecutionTools.prototype.getClient = function () {
- return __awaiter(this, void 0, void 0, function () {
- var anthropicOptions;
- return __generator(this, function (_a) {
- if (this.client === null) {
- anthropicOptions = __assign({}, this.options);
- delete anthropicOptions.isVerbose;
- delete anthropicOptions.isProxied;
- this.client = new Anthropic__default["default"](anthropicOptions);
- }
- return [2 /*return*/, this.client];
- });
- });
+ /**
+ * Empties the list associated with the object of all key/value pairs, if there are any.
+ */
+ MemoryStorage.prototype.clear = function () {
+ this.storage = {};
  };
  /**
- * Check the `options` passed to `constructor`
+ * Returns the current value associated with the given key, or null if the given key does not exist in the list associated with the object.
  */
- AnthropicClaudeExecutionTools.prototype.checkConfiguration = function () {
- return __awaiter(this, void 0, void 0, function () {
- return __generator(this, function (_a) {
- switch (_a.label) {
- case 0: return [4 /*yield*/, this.getClient()];
- case 1:
- _a.sent();
- return [2 /*return*/];
- }
- });
- });
+ MemoryStorage.prototype.getItem = function (key) {
+ return this.storage[key] || null;
  };
  /**
- * List all available Anthropic Claude models that can be used
+ * Returns the name of the nth key in the list, or null if n is greater than or equal to the number of key/value pairs in the object.
  */
- AnthropicClaudeExecutionTools.prototype.listModels = function () {
- return ANTHROPIC_CLAUDE_MODELS;
+ MemoryStorage.prototype.key = function (index) {
+ return Object.keys(this.storage)[index] || null;
  };
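The new `MemoryStorage` implements the DOM `Storage` surface in plain memory. A TypeScript sketch of the members visible in this excerpt (the class presumably also provides `setItem`/`removeItem`, which fall outside the excerpt):

```ts
class MemoryStorage {
    private storage: Record<string, string> = {};

    /** Number of key/value pairs currently stored. */
    public get length(): number {
        return Object.keys(this.storage).length;
    }

    /** Empties the store of all key/value pairs. */
    public clear(): void {
        this.storage = {};
    }

    /** Value for the given key, or null if absent. */
    public getItem(key: string): string | null {
        return this.storage[key] || null;
    }

    /** Name of the nth key, or null if the index is out of range. */
    public key(index: number): string | null {
        return Object.keys(this.storage)[index] || null;
    }
}
```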
6990
6788
  /**
6991
- * Calls Anthropic Claude API to use a chat model.
6992
- */
6993
- AnthropicClaudeExecutionTools.prototype.callChatModel = function (prompt) {
6994
- return __awaiter(this, void 0, void 0, function () {
6995
- var content, parameters, modelRequirements, client, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, contentBlock, resultContent, usage;
6996
- return __generator(this, function (_a) {
6997
- switch (_a.label) {
6998
- case 0:
6999
- if (this.options.isVerbose) {
7000
- console.info('💬 Anthropic Claude callChatModel call');
7001
- }
7002
- content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
7003
- return [4 /*yield*/, this.getClient()];
7004
- case 1:
7005
- client = _a.sent();
7006
- // TODO: [☂] Use here more modelRequirements
7007
- if (modelRequirements.modelVariant !== 'CHAT') {
7008
- throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
7009
- }
7010
- modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
7011
- rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
7012
- rawRequest = {
7013
- model: modelRequirements.modelName || this.getDefaultChatModel().modelName,
7014
- max_tokens: modelRequirements.maxTokens || 4096,
7015
- // <- TODO: [🌾] Make some global max cap for maxTokens
7016
- temperature: modelRequirements.temperature,
7017
- system: modelRequirements.systemMessage,
7018
- // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
7019
- // <- Note: [🧆]
7020
- messages: [
7021
- {
7022
- role: 'user',
7023
- content: rawPromptContent,
7024
- },
7025
- ],
7026
- // TODO: Is here some equivalent of user identification?> user: this.options.user,
7027
- };
7028
- start = getCurrentIsoDate();
7029
- if (this.options.isVerbose) {
7030
- console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
7031
- }
7032
- return [4 /*yield*/, client.messages.create(rawRequest)];
7033
- case 2:
7034
- rawResponse = _a.sent();
7035
- if (this.options.isVerbose) {
7036
- console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
7037
- }
7038
- if (!rawResponse.content[0]) {
7039
- throw new PipelineExecutionError('No content from Anthropic Claude');
7040
- }
7041
- if (rawResponse.content.length > 1) {
7042
- throw new PipelineExecutionError('More than one content blocks from Anthropic Claude');
7043
- }
7044
- contentBlock = rawResponse.content[0];
7045
- if (contentBlock.type !== 'text') {
7046
- throw new PipelineExecutionError("Returned content is not \"text\" type but \"".concat(contentBlock.type, "\""));
7047
- }
7048
- resultContent = contentBlock.text;
7049
- // eslint-disable-next-line prefer-const
7050
- complete = getCurrentIsoDate();
7051
- usage = computeAnthropicClaudeUsage(content, '', rawResponse);
7052
- return [2 /*return*/, {
7053
- content: resultContent,
7054
- modelName: rawResponse.model,
7055
- timing: {
7056
- start: start,
7057
- complete: complete,
7058
- },
7059
- usage: usage,
7060
- rawPromptContent: rawPromptContent,
7061
- rawRequest: rawRequest,
7062
- rawResponse: rawResponse,
7063
- // <- [🗯]
7064
- }];
7065
- }
7066
- });
7067
- });
7068
- };
7069
- /*
7070
- TODO: [👏]
7071
- public async callCompletionModel(
7072
- prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements'>,
7073
- ): Promise<PromptCompletionResult> {
7074
-
7075
- if (this.options.isVerbose) {
7076
- console.info('🖋 Anthropic Claude callCompletionModel call');
7077
- }
7078
-
7079
- const { content, parameters, modelRequirements } = prompt;
7080
-
7081
- // TODO: [☂] Use here more modelRequirements
- if (modelRequirements.modelVariant !== 'COMPLETION') {
- throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
- }
-
- const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
- const modelSettings = {
- model: modelName,
- max_tokens: modelRequirements.maxTokens || 2000, // <- Note: 2000 is for legacy reasons
- // <- TODO: [🌾] Make some global max cap for maxTokens
- // <- TODO: Use here `systemMessage`, `temperature` and `seed`
- };
-
- const rawRequest: xxxx.Completions.CompletionCreateParamsNonStreaming = {
- ...modelSettings,
- prompt: rawPromptContent,
- user: this.options.user,
- };
- const start: string_date_iso8601 = getCurrentIsoDate();
- let complete: string_date_iso8601;
-
- if (this.options.isVerbose) {
- console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
- }
- const rawResponse = await this.client.completions.create(rawRequest);
- if (this.options.isVerbose) {
- console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
- }
-
- if (!rawResponse.choices[0]) {
- throw new PipelineExecutionError('No choices from Anthropic Claude');
- }
-
- if (rawResponse.choices.length > 1) {
- // TODO: This should maybe be only a warning
- throw new PipelineExecutionError('More than one choice from Anthropic Claude');
- }
-
- const resultContent = rawResponse.choices[0].text;
- // eslint-disable-next-line prefer-const
- complete = getCurrentIsoDate();
- const usage = { price: 'UNKNOWN', inputTokens: 0, outputTokens: 0 /* <- TODO: [🐞] Compute usage * / } satisfies PromptResultUsage;
-
-
-
- return {
- content: resultContent,
- modelName: rawResponse.model || modelName,
- timing: {
- start,
- complete,
- },
- usage,
- rawResponse,
- // <- [🗯]
- };
- }
- */
- // <- Note: [🤖] callXxxModel
- /**
- * Get the model that should be used as default
- */
- AnthropicClaudeExecutionTools.prototype.getDefaultModel = function (defaultModelName) {
- var model = ANTHROPIC_CLAUDE_MODELS.find(function (_a) {
- var modelName = _a.modelName;
- return modelName.startsWith(defaultModelName);
- });
- if (model === undefined) {
- throw new UnexpectedError(spaceTrim__default["default"](function (block) {
- return "\n Cannot find model in Anthropic Claude models with name \"".concat(defaultModelName, "\" which should be used as default.\n\n Available models:\n ").concat(block(ANTHROPIC_CLAUDE_MODELS.map(function (_a) {
- var modelName = _a.modelName;
- return "- \"".concat(modelName, "\"");
- }).join('\n')), "\n\n ");
- }));
- }
- return model;
- };
- /**
- * Default model for chat variant.
- */
- AnthropicClaudeExecutionTools.prototype.getDefaultChatModel = function () {
- return this.getDefaultModel('claude-3-opus');
- };
- return AnthropicClaudeExecutionTools;
- }());
- /**
- * TODO: [🍆] JSON mode
- * TODO: [🧠] Maybe handle errors via transformAnthropicError (like transformAzureError)
- * TODO: Maybe create some common util for callChatModel and callCompletionModel
- * TODO: Maybe make custom AnthropicClaudeError
- * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
- * TODO: [🧠][🌰] Allow passing `title` for tracking purposes
- * TODO: [📅] Maybe instead of `RemoteLlmExecutionToolsOptions` use `proxyWithAnonymousRemoteServer` (if implemented)
- */
-
- /**
- * Execution Tools for calling Anthropic Claude API.
- *
- * @public exported from `@promptbook/anthropic-claude`
- */
- var createAnthropicClaudeExecutionTools = Object.assign(function (options) {
- if (options.isProxied) {
- return new RemoteLlmExecutionTools(__assign(__assign({}, options), { isAnonymous: true, llmToolsConfiguration: [
- {
- title: 'Anthropic Claude (proxied)',
- packageName: '@promptbook/anthropic-claude',
- className: 'AnthropicClaudeExecutionTools',
- options: __assign(__assign({}, options), { isProxied: false }),
- },
- ], models: ANTHROPIC_CLAUDE_MODELS }));
- }
- return new AnthropicClaudeExecutionTools(options);
- }, {
- packageName: '@promptbook/anthropic-claude',
- className: 'AnthropicClaudeExecutionTools',
- });
- /**
- * TODO: [🧠] !!!! Make this anonymous with all LLM providers
- * TODO: [🧠][🧱] !!!! Maybe change all `new AnthropicClaudeExecutionTools` -> `createAnthropicClaudeExecutionTools` in manual
- * TODO: [🧠] Maybe auto-detect usage in browser and determine default value of `isProxied`
- * TODO: [🦺] Is there some way to put `packageName` and `className` on top and the function definition on bottom?
- * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
- */
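For orientation, a minimal usage sketch of the factory above; the `apiKey` source, the `isVerbose` flag and the `isProxied` toggle come from the code itself, while the surrounding setup is an illustrative assumption, not part of either package version:

    // Sketch: creating Anthropic Claude tools directly (Node.js) or proxied (browser)
    const llmTools = createAnthropicClaudeExecutionTools({
        apiKey: process.env.ANTHROPIC_CLAUDE_API_KEY, // <- assumed to be set in the environment
        isVerbose: true,
        // isProxied: true, // <- when true, calls are routed through RemoteLlmExecutionTools instead
    });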
-
- /**
- * List of available OpenAI models with pricing
- *
- * Note: Done at 2024-05-20
- *
- * @see https://platform.openai.com/docs/models/
- * @see https://openai.com/api/pricing/
- * @public exported from `@promptbook/openai`
- */
- var OPENAI_MODELS = [
- /*/
- {
- modelTitle: 'dall-e-3',
- modelName: 'dall-e-3',
- },
- /**/
- /*/
- {
- modelTitle: 'whisper-1',
- modelName: 'whisper-1',
- },
- /**/
- /**/
- {
- modelVariant: 'COMPLETION',
- modelTitle: 'davinci-002',
- modelName: 'davinci-002',
- pricing: {
- prompt: computeUsage("$2.00 / 1M tokens"),
- output: computeUsage("$2.00 / 1M tokens"), // <- not sure
- },
- },
- /**/
- /*/
- {
- modelTitle: 'dall-e-2',
- modelName: 'dall-e-2',
- },
- /**/
- /**/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-3.5-turbo-16k',
- modelName: 'gpt-3.5-turbo-16k',
- pricing: {
- prompt: computeUsage("$3.00 / 1M tokens"),
- output: computeUsage("$4.00 / 1M tokens"),
- },
- },
- /**/
- /*/
- {
- modelTitle: 'tts-1-hd-1106',
- modelName: 'tts-1-hd-1106',
- },
- /**/
- /*/
- {
- modelTitle: 'tts-1-hd',
- modelName: 'tts-1-hd',
- },
- /**/
- /**/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-4',
- modelName: 'gpt-4',
- pricing: {
- prompt: computeUsage("$30.00 / 1M tokens"),
- output: computeUsage("$60.00 / 1M tokens"),
- },
- },
- /**/
- /**/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-4-32k',
- modelName: 'gpt-4-32k',
- pricing: {
- prompt: computeUsage("$60.00 / 1M tokens"),
- output: computeUsage("$120.00 / 1M tokens"),
- },
- },
- /**/
- /*/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-4-0613',
- modelName: 'gpt-4-0613',
- pricing: {
- prompt: computeUsage(` / 1M tokens`),
- output: computeUsage(` / 1M tokens`),
- },
- },
- /**/
- /**/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-4-turbo-2024-04-09',
- modelName: 'gpt-4-turbo-2024-04-09',
- pricing: {
- prompt: computeUsage("$10.00 / 1M tokens"),
- output: computeUsage("$30.00 / 1M tokens"),
- },
- },
- /**/
- /**/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-3.5-turbo-1106',
- modelName: 'gpt-3.5-turbo-1106',
- pricing: {
- prompt: computeUsage("$1.00 / 1M tokens"),
- output: computeUsage("$2.00 / 1M tokens"),
- },
- },
- /**/
- /**/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-4-turbo',
- modelName: 'gpt-4-turbo',
- pricing: {
- prompt: computeUsage("$10.00 / 1M tokens"),
- output: computeUsage("$30.00 / 1M tokens"),
- },
- },
- /**/
- /**/
- {
- modelVariant: 'COMPLETION',
- modelTitle: 'gpt-3.5-turbo-instruct-0914',
- modelName: 'gpt-3.5-turbo-instruct-0914',
- pricing: {
- prompt: computeUsage("$1.50 / 1M tokens"),
- output: computeUsage("$2.00 / 1M tokens"), // <- For gpt-3.5-turbo-instruct
- },
- },
- /**/
- /**/
- {
- modelVariant: 'COMPLETION',
- modelTitle: 'gpt-3.5-turbo-instruct',
- modelName: 'gpt-3.5-turbo-instruct',
- pricing: {
- prompt: computeUsage("$1.50 / 1M tokens"),
- output: computeUsage("$2.00 / 1M tokens"),
- },
- },
- /**/
- /*/
- {
- modelTitle: 'tts-1',
- modelName: 'tts-1',
- },
- /**/
- /**/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-3.5-turbo',
- modelName: 'gpt-3.5-turbo',
- pricing: {
- prompt: computeUsage("$3.00 / 1M tokens"),
- output: computeUsage("$6.00 / 1M tokens"), // <- Not sure, refer to gpt-3.5-turbo in Fine-tuning models
- },
- },
- /**/
- /**/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-3.5-turbo-0301',
- modelName: 'gpt-3.5-turbo-0301',
- pricing: {
- prompt: computeUsage("$1.50 / 1M tokens"),
- output: computeUsage("$2.00 / 1M tokens"),
- },
- },
- /**/
- /**/
- {
- modelVariant: 'COMPLETION',
- modelTitle: 'babbage-002',
- modelName: 'babbage-002',
- pricing: {
- prompt: computeUsage("$0.40 / 1M tokens"),
- output: computeUsage("$0.40 / 1M tokens"), // <- Not sure
- },
- },
- /**/
- /**/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-4-1106-preview',
- modelName: 'gpt-4-1106-preview',
- pricing: {
- prompt: computeUsage("$10.00 / 1M tokens"),
- output: computeUsage("$30.00 / 1M tokens"),
- },
- },
- /**/
- /**/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-4-0125-preview',
- modelName: 'gpt-4-0125-preview',
- pricing: {
- prompt: computeUsage("$10.00 / 1M tokens"),
- output: computeUsage("$30.00 / 1M tokens"),
- },
- },
- /**/
- /*/
- {
- modelTitle: 'tts-1-1106',
- modelName: 'tts-1-1106',
- },
- /**/
- /**/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-3.5-turbo-0125',
- modelName: 'gpt-3.5-turbo-0125',
- pricing: {
- prompt: computeUsage("$0.50 / 1M tokens"),
- output: computeUsage("$1.50 / 1M tokens"),
- },
- },
- /**/
- /**/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-4-turbo-preview',
- modelName: 'gpt-4-turbo-preview',
- pricing: {
- prompt: computeUsage("$10.00 / 1M tokens"),
- output: computeUsage("$30.00 / 1M tokens"), // <- Not sure, just for gpt-4-turbo
- },
- },
- /**/
- /**/
- {
- modelVariant: 'EMBEDDING',
- modelTitle: 'text-embedding-3-large',
- modelName: 'text-embedding-3-large',
- pricing: {
- prompt: computeUsage("$0.13 / 1M tokens"),
- // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
- output: 0, // <- Note: [🆖] In Embedding models you don't pay for output
- },
- },
- /**/
- /**/
- {
- modelVariant: 'EMBEDDING',
- modelTitle: 'text-embedding-3-small',
- modelName: 'text-embedding-3-small',
- pricing: {
- prompt: computeUsage("$0.02 / 1M tokens"),
- // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
- output: 0, // <- Note: [🆖] In Embedding models you don't pay for output
- },
- },
- /**/
- /**/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-3.5-turbo-0613',
- modelName: 'gpt-3.5-turbo-0613',
- pricing: {
- prompt: computeUsage("$1.50 / 1M tokens"),
- output: computeUsage("$2.00 / 1M tokens"),
- },
- },
- /**/
- /**/
- {
- modelVariant: 'EMBEDDING',
- modelTitle: 'text-embedding-ada-002',
- modelName: 'text-embedding-ada-002',
- pricing: {
- prompt: computeUsage("$0.1 / 1M tokens"),
- // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
- output: 0, // <- Note: [🆖] In Embedding models you don't pay for output
- },
- },
- /**/
- /*/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-4-1106-vision-preview',
- modelName: 'gpt-4-1106-vision-preview',
- },
- /**/
- /*/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-4-vision-preview',
- modelName: 'gpt-4-vision-preview',
- pricing: {
- prompt: computeUsage(`$10.00 / 1M tokens`),
- output: computeUsage(`$30.00 / 1M tokens`),
- },
- },
- /**/
- /**/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-4o-2024-05-13',
- modelName: 'gpt-4o-2024-05-13',
- pricing: {
- prompt: computeUsage("$5.00 / 1M tokens"),
- output: computeUsage("$15.00 / 1M tokens"),
- },
- },
- /**/
- /**/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-4o',
- modelName: 'gpt-4o',
- pricing: {
- prompt: computeUsage("$5.00 / 1M tokens"),
- output: computeUsage("$15.00 / 1M tokens"),
- },
- },
- /**/
- /**/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-3.5-turbo-16k-0613',
- modelName: 'gpt-3.5-turbo-16k-0613',
- pricing: {
- prompt: computeUsage("$3.00 / 1M tokens"),
- output: computeUsage("$4.00 / 1M tokens"),
- },
- },
- /**/
- ];
- /**
- * Note: [🤖] Add models of new variant
- * TODO: [🧠] Some mechanism to propagate uncertainty
- * TODO: [🎰] Some mechanism to auto-update available models
- * TODO: [🎰][👮‍♀️] Make this list dynamic - model names can be listed dynamically, but not modelVariant, legacy status, context length and pricing
- * TODO: [🧠][👮‍♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths (top-level performance, intelligence, fluency, and understanding), contextWindow,...
- * @see https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4
- * @see https://openai.com/api/pricing/
- * @see /other/playground/playground.ts
- * TODO: [🍓] Make better
- * TODO: Change model titles to human-readable ones, e.g.: "gpt-4-turbo-2024-04-09" -> "GPT-4 Turbo (2024-04-09)"
- * TODO: [🚸] Not all models are compatible with JSON mode, add this information here and use it
- */
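A note on the `/*/` and `/**/` markers in the list above: they are a one-character comment toggle, so individual model entries can be switched on and off without deleting them. A minimal sketch of the idiom (the variable names are illustrative only):

    /**/
    var entryIsActive = true; // <- sits between two empty comments, so it is live code
    /**/
    /*/
    var entryIsDisabled = true; // <- swallowed by the comment opened with `/*/` above
    /**/

Adding one `*` to an opening `/*/` (making it `/**/`) enables the entry; removing it disables the entry again.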
-
- /**
- * Execution Tools for calling Azure OpenAI API.
- *
- * @public exported from `@promptbook/azure-openai`
- */
- var AzureOpenAiExecutionTools = /** @class */ (function () {
- /**
- * Creates Azure OpenAI Execution Tools.
- *
- * @param options Relevant options are passed directly to the OpenAI client
- */
- function AzureOpenAiExecutionTools(options) {
- this.options = options;
- /**
- * Azure OpenAI API client.
- */
- this.client = null;
- }
- Object.defineProperty(AzureOpenAiExecutionTools.prototype, "title", {
- get: function () {
- return 'Azure OpenAI';
- },
- enumerable: false,
- configurable: true
- });
- Object.defineProperty(AzureOpenAiExecutionTools.prototype, "description", {
- get: function () {
- return 'Use all models trained by OpenAI, provided by Azure';
- },
- enumerable: false,
- configurable: true
- });
- AzureOpenAiExecutionTools.prototype.getClient = function () {
- return __awaiter(this, void 0, void 0, function () {
- return __generator(this, function (_a) {
- if (this.client === null) {
- this.client = new openai.OpenAIClient("https://".concat(this.options.resourceName, ".openai.azure.com/"), new openai.AzureKeyCredential(this.options.apiKey));
- }
- return [2 /*return*/, this.client];
- });
- });
- };
- /**
- * Check the `options` passed to `constructor`
- */
- AzureOpenAiExecutionTools.prototype.checkConfiguration = function () {
- return __awaiter(this, void 0, void 0, function () {
- return __generator(this, function (_a) {
- switch (_a.label) {
- case 0: return [4 /*yield*/, this.getClient()];
- case 1:
- _a.sent();
- return [2 /*return*/];
- }
- });
- });
- };
- /**
- * List all available Azure OpenAI models that can be used
- */
- AzureOpenAiExecutionTools.prototype.listModels = function () {
- return __awaiter(this, void 0, void 0, function () {
- return __generator(this, function (_a) {
- // TODO: !!! Filter here which models are actually available as deployments
- // @see https://management.azure.com/subscriptions/subscriptionId/resourceGroups/resourceGroupName/providers/Microsoft.CognitiveServices/accounts/accountName/deployments?api-version=2023-05-01
- return [2 /*return*/, OPENAI_MODELS.map(function (_a) {
- var modelTitle = _a.modelTitle, modelName = _a.modelName, modelVariant = _a.modelVariant;
- return ({
- modelTitle: "Azure ".concat(modelTitle),
- modelName: modelName,
- modelVariant: modelVariant,
- });
- })];
- });
- });
- };
- /**
- * Calls Azure OpenAI API to use a chat model.
- */
- AzureOpenAiExecutionTools.prototype.callChatModel = function (prompt) {
- var _a, _b;
- return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, client, modelName, modelSettings, rawPromptContent, messages, start, complete, rawRequest, rawResponse, resultContent, usage, error_1;
- return __generator(this, function (_c) {
- switch (_c.label) {
- case 0:
- if (this.options.isVerbose) {
- console.info('💬 Azure OpenAI callChatModel call');
- }
- content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
- return [4 /*yield*/, this.getClient()];
- case 1:
- client = _c.sent();
- // TODO: [☂] Use here more modelRequirements
- if (modelRequirements.modelVariant !== 'CHAT') {
- throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
- }
- _c.label = 2;
- case 2:
- _c.trys.push([2, 4, , 5]);
- modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
- modelSettings = {
- maxTokens: modelRequirements.maxTokens,
- // <- TODO: [🌾] Make some global max cap for maxTokens
- temperature: modelRequirements.temperature,
- user: this.options.user,
- // <- TODO: [🈁] Use `seed` here AND/OR use `isDeterministic` for entire execution tools
- // <- Note: [🧆]
- };
- rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
- messages = __spreadArray(__spreadArray([], __read((modelRequirements.systemMessage === undefined
- ? []
- : [
- {
- role: 'system',
- content: modelRequirements.systemMessage,
- },
- ])), false), [
- {
- role: 'user',
- content: rawPromptContent,
- },
- ], false);
- start = getCurrentIsoDate();
- complete = void 0;
- if (this.options.isVerbose) {
- console.info(colors__default["default"].bgWhite('messages'), JSON.stringify(messages, null, 4));
- }
- rawRequest = [modelName, messages, modelSettings];
- return [4 /*yield*/, client.getChatCompletions.apply(client, __spreadArray([], __read(rawRequest), false))];
- case 3:
- rawResponse = _c.sent();
- if (this.options.isVerbose) {
- console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
- }
- if (!rawResponse.choices[0]) {
- throw new PipelineExecutionError('No choices from Azure OpenAI');
- }
- if (rawResponse.choices.length > 1) {
- // TODO: This should maybe be only a warning
- throw new PipelineExecutionError('More than one choice from Azure OpenAI');
- }
- if (!rawResponse.choices[0].message || !rawResponse.choices[0].message.content) {
- throw new PipelineExecutionError('Empty response from Azure OpenAI');
- }
- resultContent = rawResponse.choices[0].message.content;
- // eslint-disable-next-line prefer-const
- complete = getCurrentIsoDate();
- usage = {
- price: uncertainNumber() /* <- TODO: [🐞] Compute usage */,
- input: __assign({ tokensCount: uncertainNumber((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.promptTokens) }, computeUsageCounts(prompt.content)),
- output: __assign({ tokensCount: uncertainNumber((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completionTokens) }, computeUsageCounts(prompt.content)),
- };
- return [2 /*return*/, {
- content: resultContent,
- modelName: modelName,
- timing: {
- start: start,
- complete: complete,
- },
- usage: usage,
- rawPromptContent: rawPromptContent,
- rawRequest: rawRequest,
- rawResponse: rawResponse,
- // <- [🗯]
- }];
- case 4:
- error_1 = _c.sent();
- throw this.transformAzureError(error_1);
- case 5: return [2 /*return*/];
- }
- });
- });
- };
- /**
- * Calls Azure OpenAI API to use a completion model.
- */
- AzureOpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
- var _a, _b;
- return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, client, modelName, modelSettings, start, complete, rawPromptContent, rawRequest, rawResponse, resultContent, usage, error_2;
- return __generator(this, function (_c) {
- switch (_c.label) {
- case 0:
- if (this.options.isVerbose) {
- console.info('🖋 Azure OpenAI callCompletionModel call');
- }
- content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
- return [4 /*yield*/, this.getClient()];
- case 1:
- client = _c.sent();
- // TODO: [☂] Use here more modelRequirements
- if (modelRequirements.modelVariant !== 'COMPLETION') {
- throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
- }
- _c.label = 2;
- case 2:
- _c.trys.push([2, 4, , 5]);
- modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
- modelSettings = {
- maxTokens: modelRequirements.maxTokens || 2000,
- // <- TODO: [🌾] Make some global max cap for maxTokens
- temperature: modelRequirements.temperature,
- user: this.options.user,
- // <- TODO: [🈁] Use `seed` here AND/OR use `isDeterministic` for entire execution tools
- // <- Note: [🧆]
- };
- start = getCurrentIsoDate();
- complete = void 0;
- if (this.options.isVerbose) {
- console.info(colors__default["default"].bgWhite('content'), JSON.stringify(content, null, 4));
- console.info(colors__default["default"].bgWhite('parameters'), JSON.stringify(parameters, null, 4));
- }
- rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
- rawRequest = [
- modelName,
- [rawPromptContent],
- modelSettings,
- ];
- return [4 /*yield*/, client.getCompletions.apply(client, __spreadArray([], __read(rawRequest), false))];
- case 3:
- rawResponse = _c.sent();
- if (this.options.isVerbose) {
- console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
- }
- if (!rawResponse.choices[0]) {
- throw new PipelineExecutionError('No choices from OpenAI');
- }
- if (rawResponse.choices.length > 1) {
- // TODO: This should maybe be only a warning
- throw new PipelineExecutionError('More than one choice from OpenAI');
- }
- resultContent = rawResponse.choices[0].text;
- // eslint-disable-next-line prefer-const
- complete = getCurrentIsoDate();
- usage = {
- price: uncertainNumber() /* <- TODO: [🐞] Compute usage */,
- input: __assign({ tokensCount: uncertainNumber((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.promptTokens) }, computeUsageCounts(prompt.content)),
- output: __assign({ tokensCount: uncertainNumber((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completionTokens) }, computeUsageCounts(prompt.content)),
- };
- return [2 /*return*/, {
- content: resultContent,
- modelName: modelName,
- timing: {
- start: start,
- complete: complete,
- },
- usage: usage,
- rawPromptContent: rawPromptContent,
- rawRequest: rawRequest,
- rawResponse: rawResponse,
- // <- [🗯]
- }];
- case 4:
- error_2 = _c.sent();
- throw this.transformAzureError(error_2);
- case 5: return [2 /*return*/];
- }
- });
- });
- };
- // <- Note: [🤖] callXxxModel
- /**
- * Converts an Azure error (which is a plain object, not a proper Error) into a proper Error
- */
- AzureOpenAiExecutionTools.prototype.transformAzureError = function (azureError) {
- if (typeof azureError !== 'object' || azureError === null) {
- return new PipelineExecutionError("Unknown Azure OpenAI error");
- }
- var code = azureError.code, message = azureError.message;
- return new PipelineExecutionError("".concat(code, ": ").concat(message));
- };
- return AzureOpenAiExecutionTools;
- }());
- /**
- * TODO: Maybe create some common util for callChatModel and callCompletionModel
- * TODO: Maybe make custom AzureOpenAiError
- * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
- * TODO: [🧠][🌰] Allow passing `title` for tracking purposes
- */
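A minimal sketch of wiring the class above to a concrete Azure resource; the option names come from the constructor and `getClient` above, while the concrete values are placeholders (real ones come from your Azure portal) and the surrounding `async` context is assumed:

    const azureTools = new AzureOpenAiExecutionTools({
        resourceName: 'my-resource', // <- becomes https://my-resource.openai.azure.com/
        deploymentName: 'gpt-4o', // <- used as the model name when the prompt does not pin one
        apiKey: process.env.AZUREOPENAI_API_KEY,
        isVerbose: true,
    });
    await azureTools.checkConfiguration(); // <- only instantiates the client, see above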
-
- /**
- * Computes the usage of the OpenAI API based on the response from OpenAI
- *
- * @param promptContent The content of the prompt
- * @param resultContent The content of the result (for embedding prompts or failed prompts pass an empty string)
- * @param rawResponse The raw response from the OpenAI API
- * @throws {PipelineExecutionError} If the usage is not defined in the response from OpenAI
- * @private internal utility of `OpenAiExecutionTools`
- */
- function computeOpenAiUsage(promptContent, // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
- resultContent, rawResponse) {
- var _a, _b;
- if (rawResponse.usage === undefined) {
- throw new PipelineExecutionError('The usage is not defined in the response from OpenAI');
- }
- if (((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.prompt_tokens) === undefined) {
- throw new PipelineExecutionError('In OpenAI response `usage.prompt_tokens` is not defined');
- }
- var inputTokens = rawResponse.usage.prompt_tokens;
- var outputTokens = ((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completion_tokens) || 0;
- var modelInfo = OPENAI_MODELS.find(function (model) { return model.modelName === rawResponse.model; });
- var price;
- if (modelInfo === undefined || modelInfo.pricing === undefined) {
- price = uncertainNumber();
- }
- else {
- price = uncertainNumber(inputTokens * modelInfo.pricing.prompt + outputTokens * modelInfo.pricing.output);
- }
- return {
- price: price,
- input: __assign({ tokensCount: uncertainNumber(rawResponse.usage.prompt_tokens) }, computeUsageCounts(promptContent)),
- output: __assign({ tokensCount: uncertainNumber(outputTokens) }, computeUsageCounts(resultContent)),
- };
- }
- /**
- * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
- */
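A worked example of the pricing branch above, using the `gpt-4o` entry from `OPENAI_MODELS` ($5.00 / 1M prompt tokens, $15.00 / 1M output tokens); the token counts are invented for illustration:

    // price = inputTokens * pricing.prompt + outputTokens * pricing.output
    const inputTokens = 2000;
    const outputTokens = 400;
    const price = inputTokens * (5.0 / 1000000) + outputTokens * (15.0 / 1000000);
    console.info(price); // -> 0.016 (USD); the function above wraps this in uncertainNumber()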
-
- /**
- * Execution Tools for calling OpenAI API
- *
- * @public exported from `@promptbook/openai`
- */
- var OpenAiExecutionTools = /** @class */ (function () {
- /**
- * Creates OpenAI Execution Tools.
- *
- * @param options Relevant options are passed directly to the OpenAI client
- */
- function OpenAiExecutionTools(options) {
- if (options === void 0) { options = {}; }
- this.options = options;
- /**
- * OpenAI API client.
- */
- this.client = null;
- }
- Object.defineProperty(OpenAiExecutionTools.prototype, "title", {
- get: function () {
- return 'OpenAI';
- },
- enumerable: false,
- configurable: true
- });
- Object.defineProperty(OpenAiExecutionTools.prototype, "description", {
- get: function () {
- return 'Use all models provided by OpenAI';
- },
- enumerable: false,
- configurable: true
- });
- OpenAiExecutionTools.prototype.getClient = function () {
- return __awaiter(this, void 0, void 0, function () {
- var openAiOptions;
- return __generator(this, function (_a) {
- if (this.client === null) {
- openAiOptions = __assign({}, this.options);
- delete openAiOptions.isVerbose;
- delete openAiOptions.user;
- this.client = new OpenAI__default["default"](__assign({}, openAiOptions));
- }
- return [2 /*return*/, this.client];
- });
- });
- };
- /**
- * Check the `options` passed to `constructor`
- */
- OpenAiExecutionTools.prototype.checkConfiguration = function () {
- return __awaiter(this, void 0, void 0, function () {
- return __generator(this, function (_a) {
- switch (_a.label) {
- case 0: return [4 /*yield*/, this.getClient()];
- case 1:
- _a.sent();
- return [2 /*return*/];
- }
- });
- });
- };
- /**
- * List all available OpenAI models that can be used
- */
- OpenAiExecutionTools.prototype.listModels = function () {
- /*
- Note: Dynamic listing of the models
- const models = await this.openai.models.list({});
-
- console.log({ models });
- console.log(models.data);
- */
- return OPENAI_MODELS;
- };
- /**
- * Calls OpenAI API to use a chat model.
- */
- OpenAiExecutionTools.prototype.callChatModel = function (prompt) {
- return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, expectFormat, client, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
- return __generator(this, function (_a) {
- switch (_a.label) {
- case 0:
- if (this.options.isVerbose) {
- console.info('💬 OpenAI callChatModel call', { prompt: prompt });
- }
- content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements, expectFormat = prompt.expectFormat;
- return [4 /*yield*/, this.getClient()];
- case 1:
- client = _a.sent();
- // TODO: [☂] Use here more modelRequirements
- if (modelRequirements.modelVariant !== 'CHAT') {
- throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
- }
- modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
- modelSettings = {
- model: modelName,
- max_tokens: modelRequirements.maxTokens,
- // <- TODO: [🌾] Make some global max cap for maxTokens
- temperature: modelRequirements.temperature,
- // <- TODO: [🈁] Use `seed` here AND/OR use `isDeterministic` for entire execution tools
- // <- Note: [🧆]
- };
- if (expectFormat === 'JSON') {
- modelSettings.response_format = {
- type: 'json_object',
- };
- }
- rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
- rawRequest = __assign(__assign({}, modelSettings), { messages: __spreadArray(__spreadArray([], __read((modelRequirements.systemMessage === undefined
- ? []
- : [
- {
- role: 'system',
- content: modelRequirements.systemMessage,
- },
- ])), false), [
- {
- role: 'user',
- content: rawPromptContent,
- },
- ], false), user: this.options.user });
- start = getCurrentIsoDate();
- if (this.options.isVerbose) {
- console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
- }
- return [4 /*yield*/, client.chat.completions.create(rawRequest)];
- case 2:
- rawResponse = _a.sent();
- if (this.options.isVerbose) {
- console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
- }
- if (!rawResponse.choices[0]) {
- throw new PipelineExecutionError('No choices from OpenAI');
- }
- if (rawResponse.choices.length > 1) {
- // TODO: This should maybe be only a warning
- throw new PipelineExecutionError('More than one choice from OpenAI');
- }
- resultContent = rawResponse.choices[0].message.content;
- // eslint-disable-next-line prefer-const
- complete = getCurrentIsoDate();
- usage = computeOpenAiUsage(content, resultContent || '', rawResponse);
- if (resultContent === null) {
- throw new PipelineExecutionError('No response message from OpenAI');
- }
- return [2 /*return*/, {
- content: resultContent,
- modelName: rawResponse.model || modelName,
- timing: {
- start: start,
- complete: complete,
- },
- usage: usage,
- rawPromptContent: rawPromptContent,
- rawRequest: rawRequest,
- rawResponse: rawResponse,
- // <- [🗯]
- }];
- }
- });
- });
- };
- /**
- * Calls OpenAI API to use a completion model.
- */
- OpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
- return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, client, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
- return __generator(this, function (_a) {
- switch (_a.label) {
- case 0:
- if (this.options.isVerbose) {
- console.info('🖋 OpenAI callCompletionModel call', { prompt: prompt });
- }
- content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
- return [4 /*yield*/, this.getClient()];
- case 1:
- client = _a.sent();
- // TODO: [☂] Use here more modelRequirements
- if (modelRequirements.modelVariant !== 'COMPLETION') {
- throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
- }
- modelName = modelRequirements.modelName || this.getDefaultCompletionModel().modelName;
- modelSettings = {
- model: modelName,
- max_tokens: modelRequirements.maxTokens || 2000,
- // <- TODO: [🌾] Make some global max cap for maxTokens
- temperature: modelRequirements.temperature,
- // <- TODO: [🈁] Use `seed` here AND/OR use `isDeterministic` for entire execution tools
- // <- Note: [🧆]
- };
- rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
- rawRequest = __assign(__assign({}, modelSettings), { prompt: rawPromptContent, user: this.options.user });
- start = getCurrentIsoDate();
- if (this.options.isVerbose) {
- console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
- }
- return [4 /*yield*/, client.completions.create(rawRequest)];
- case 2:
- rawResponse = _a.sent();
- if (this.options.isVerbose) {
- console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
- }
- if (!rawResponse.choices[0]) {
- throw new PipelineExecutionError('No choices from OpenAI');
- }
- if (rawResponse.choices.length > 1) {
- // TODO: This should maybe be only a warning
- throw new PipelineExecutionError('More than one choice from OpenAI');
- }
- resultContent = rawResponse.choices[0].text;
- // eslint-disable-next-line prefer-const
- complete = getCurrentIsoDate();
- usage = computeOpenAiUsage(content, resultContent || '', rawResponse);
- return [2 /*return*/, {
- content: resultContent,
- modelName: rawResponse.model || modelName,
- timing: {
- start: start,
- complete: complete,
- },
- usage: usage,
- rawPromptContent: rawPromptContent,
- rawRequest: rawRequest,
- rawResponse: rawResponse,
- // <- [🗯]
- }];
- }
- });
- });
- };
- /**
- * Calls OpenAI API to use an embedding model
- */
- OpenAiExecutionTools.prototype.callEmbeddingModel = function (prompt) {
- return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, client, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
- return __generator(this, function (_a) {
- switch (_a.label) {
- case 0:
- if (this.options.isVerbose) {
- console.info('🖋 OpenAI embedding call', { prompt: prompt });
- }
- content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
- return [4 /*yield*/, this.getClient()];
- case 1:
- client = _a.sent();
- // TODO: [☂] Use here more modelRequirements
- if (modelRequirements.modelVariant !== 'EMBEDDING') {
- throw new PipelineExecutionError('Use callEmbeddingModel only for EMBEDDING variant');
- }
- modelName = modelRequirements.modelName || this.getDefaultEmbeddingModel().modelName;
- rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
- rawRequest = {
- input: rawPromptContent,
- model: modelName,
- };
- start = getCurrentIsoDate();
- if (this.options.isVerbose) {
- console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
- }
- return [4 /*yield*/, client.embeddings.create(rawRequest)];
- case 2:
- rawResponse = _a.sent();
- if (this.options.isVerbose) {
- console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
- }
- if (rawResponse.data.length !== 1) {
- throw new PipelineExecutionError("Expected exactly 1 data item in response, got ".concat(rawResponse.data.length));
- }
- resultContent = rawResponse.data[0].embedding;
- // eslint-disable-next-line prefer-const
- complete = getCurrentIsoDate();
- usage = computeOpenAiUsage(content, '', rawResponse);
- return [2 /*return*/, {
- content: resultContent,
- modelName: rawResponse.model || modelName,
- timing: {
- start: start,
- complete: complete,
- },
- usage: usage,
- rawPromptContent: rawPromptContent,
- rawRequest: rawRequest,
- rawResponse: rawResponse,
- // <- [🗯]
- }];
- }
- });
- });
- };
- // <- Note: [🤖] callXxxModel
- /**
- * Get the model that should be used as default
- */
- OpenAiExecutionTools.prototype.getDefaultModel = function (defaultModelName) {
- var model = OPENAI_MODELS.find(function (_a) {
- var modelName = _a.modelName;
- return modelName === defaultModelName;
- });
- if (model === undefined) {
- throw new UnexpectedError(spaceTrim__default["default"](function (block) {
- return "\n Cannot find model in OpenAI models with name \"".concat(defaultModelName, "\" which should be used as default.\n\n Available models:\n ").concat(block(OPENAI_MODELS.map(function (_a) {
- var modelName = _a.modelName;
- return "- \"".concat(modelName, "\"");
- }).join('\n')), "\n\n ");
- }));
- }
- return model;
- };
- /**
- * Default model for chat variant.
- */
- OpenAiExecutionTools.prototype.getDefaultChatModel = function () {
- return this.getDefaultModel('gpt-4o');
- };
- /**
- * Default model for completion variant.
- */
- OpenAiExecutionTools.prototype.getDefaultCompletionModel = function () {
- return this.getDefaultModel('gpt-3.5-turbo-instruct');
- };
- /**
- * Default model for embedding variant.
- */
- OpenAiExecutionTools.prototype.getDefaultEmbeddingModel = function () {
- return this.getDefaultModel('text-embedding-3-large');
- };
- return OpenAiExecutionTools;
- }());
- /**
- * TODO: [🧠][🧙‍♂️] Maybe there can be some wizard for those who want to use just OpenAI
- * TODO: Maybe create some common util for callChatModel and callCompletionModel
- * TODO: Maybe make custom OpenAiError
- * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
- * TODO: [🧠][🌰] Allow passing `title` for tracking purposes
- */
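A sketch of the prompt object the methods above destructure; the field names come straight from the code, while the concrete values (and the `openAiTools` variable created via the factory below) are illustrative assumptions:

    const openAiTools = createOpenAiExecutionTools({ apiKey: process.env.OPENAI_API_KEY });
    const result = await openAiTools.callChatModel({
        content: 'Write a haiku about {topic}', // <- {parameters} are filled in by replaceParameters
        parameters: { topic: 'autumn' },
        modelRequirements: {
            modelVariant: 'CHAT', // <- anything else throws PipelineExecutionError
            modelName: 'gpt-4o', // <- optional, falls back to getDefaultChatModel()
            systemMessage: 'You are a poet.',
            temperature: 0.7,
        },
        // expectFormat: 'JSON', // <- optional, switches on OpenAI's json_object response format
    });
    console.info(result.content, result.usage);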
-
- /**
- * Execution Tools for calling OpenAI API
- *
- * @public exported from `@promptbook/openai`
- */
- var createOpenAiExecutionTools = Object.assign(function (options) {
- // TODO: !!!!!! If in browser, automatically add `dangerouslyAllowBrowser`
- return new OpenAiExecutionTools(options);
- }, {
- packageName: '@promptbook/openai',
- className: 'OpenAiExecutionTools',
- });
- /**
- * TODO: [🦺] Is there some way to put `packageName` and `className` on top and the function definition on bottom?
- * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
- */
-
- /**
- * @@@
- *
- * TODO: !!!!!! Remove EXECUTION_TOOLS_CLASSES and use $llmToolsRegister instead
- *
- * @private internal type for `createLlmToolsFromConfiguration`
- */
- var EXECUTION_TOOLS_CLASSES = {
- createOpenAiExecutionTools: createOpenAiExecutionTools,
- createAnthropicClaudeExecutionTools: createAnthropicClaudeExecutionTools,
- createAzureOpenAiExecutionTools: function (options) {
- return new AzureOpenAiExecutionTools(
- // <- TODO: [🧱] Implement in a functional (not new Class) way
- options);
- },
- // <- Note: [🦑] Add here new LLM provider
- };
- /**
- * TODO: !!!!!!! Make a global register for this
- * TODO: [🧠][🎌] Adding this should be the responsibility of each provider package, NOT this one central place
- */
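How `createLlmToolsFromConfiguration` below resolves an entry of this map; a sketch of the lookup, with a placeholder configuration:

    const llmConfiguration = { className: 'OpenAiExecutionTools', options: { apiKey: 'sk-...' } };
    const constructor = EXECUTION_TOOLS_CLASSES['create' + llmConfiguration.className];
    // -> createOpenAiExecutionTools; an unknown className yields undefined and an Error below
    const tools = constructor({ isVerbose: false, ...llmConfiguration.options });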
-
- /**
- * @@@
- *
- * Note: This function is not cached, every call creates a new instance of `MultipleLlmExecutionTools`
- *
- * @returns @@@
- * @public exported from `@promptbook/core`
- */
- function createLlmToolsFromConfiguration(configuration, options) {
- if (options === void 0) { options = {}; }
- var _a = options.isVerbose, isVerbose = _a === void 0 ? false : _a;
- var llmTools = configuration.map(function (llmConfiguration) {
- var constructor = EXECUTION_TOOLS_CLASSES["create".concat(llmConfiguration.className)];
- if (!constructor) {
- throw new Error(spaceTrim__default["default"](function (block) { return "\n There is no constructor for LLM provider `".concat(llmConfiguration.className, "`\n\n\n @@@\n\n Available constructors are:\n ").concat(block('@@@'), "\n\n\n "); }));
- }
- return constructor(__assign({ isVerbose: isVerbose }, llmConfiguration.options));
- });
- return joinLlmExecutionTools.apply(void 0, __spreadArray([], __read(llmTools), false));
- }
- /**
- * TODO: [🎌] Together with `createLlmToolsFromConfiguration` + `EXECUTION_TOOLS_CLASSES`, ALL model providers get into `@promptbook/core`; make this more efficient
- * TODO: [🧠][🎌] Dynamically install required providers
- * TODO: @@@ write a discussion about this - wizard
- * TODO: [🧠][🍛] Which name is better `createLlmToolsFromConfig` or `createLlmToolsFromConfiguration`?
- * TODO: [🧠] Is there some meaningful way to test this util
- * TODO: This should maybe live under `utils`, not `_common`
- */
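A sketch of a configuration array as consumed above; the shape mirrors the boilerplate configurations registered below, and the API key is a placeholder:

    const llmTools = createLlmToolsFromConfiguration(
        [
            {
                title: 'Open AI',
                packageName: '@promptbook/openai',
                className: 'OpenAiExecutionTools',
                options: { apiKey: 'sk-...' },
            },
        ],
        { isVerbose: true },
    );
    // -> a MultipleLlmExecutionTools joining one tool per configuration entry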
-
- /**
- * Stores key/value pairs in memory
- *
- * @public exported from `@promptbook/core`
- */
- var MemoryStorage = /** @class */ (function () {
- function MemoryStorage() {
- this.storage = {};
- }
- Object.defineProperty(MemoryStorage.prototype, "length", {
- /**
- * Returns the number of key/value pairs currently present in the list associated with the object.
- */
- get: function () {
- return Object.keys(this.storage).length;
- },
- enumerable: false,
- configurable: true
- });
- /**
- * Empties the list associated with the object of all key/value pairs, if there are any.
- */
- MemoryStorage.prototype.clear = function () {
- this.storage = {};
- };
- /**
- * Returns the current value associated with the given key, or null if the given key does not exist in the list associated with the object.
- */
- MemoryStorage.prototype.getItem = function (key) {
- return this.storage[key] || null;
- };
- /**
- * Returns the name of the nth key in the list, or null if n is greater than or equal to the number of key/value pairs in the object.
- */
- MemoryStorage.prototype.key = function (index) {
- return Object.keys(this.storage)[index] || null;
- };
- /**
- * Sets the value of the pair identified by key to value, creating a new key/value pair if none existed for key previously.
+ * Sets the value of the pair identified by key to value, creating a new key/value pair if none existed for key previously.
  */
  MemoryStorage.prototype.setItem = function (key, value) {
  this.storage[key] = value;
@@ -8539,7 +7006,7 @@
 if (typeof env.ANTHROPIC_CLAUDE_API_KEY === 'string') {
 return {
 title: 'Claude (from env)',
- packageName: '@promptbook/antrhopic-claude',
+ packageName: '@promptbook/anthropic-claude',
 className: 'AnthropicClaudeExecutionTools',
 options: {
 apiKey: process.env.ANTHROPIC_CLAUDE_API_KEY,
@@ -8550,6 +7017,52 @@
 },
 });
 
+ /**
+ * @@@ registration1 of default configuration for Azure Open AI
+ *
+ * Note: [🏐] Configuration registrations are done in @@@ BUT constructor @@@
+ *
+ * @public exported from `@promptbook/core`
+ * @public exported from `@promptbook/cli`
+ */
+ var _AzureOpenAiMetadataRegistration = $llmToolsMetadataRegister.register({
+ title: 'Azure Open AI',
+ packageName: '@promptbook/azure-openai',
+ className: 'AzureOpenAiExecutionTools',
+ getBoilerplateConfiguration: function () {
+ return {
+ title: 'Azure Open AI (boilerplate)',
+ packageName: '@promptbook/azure-openai',
+ className: 'AzureOpenAiExecutionTools',
+ options: {
+ apiKey: 'sk-',
+ },
+ };
+ },
+ createConfigurationFromEnv: function (env) {
+ if (typeof env.AZUREOPENAI_RESOURCE_NAME === 'string' &&
+ typeof env.AZUREOPENAI_DEPLOYMENT_NAME === 'string' &&
+ typeof env.AZUREOPENAI_API_KEY === 'string') {
+ return {
+ title: 'Azure Open AI (from env)',
+ packageName: '@promptbook/azure-openai',
+ className: 'AzureOpenAiExecutionTools',
+ options: {
+ resourceName: env.AZUREOPENAI_RESOURCE_NAME,
+ deploymentName: env.AZUREOPENAI_DEPLOYMENT_NAME,
+ apiKey: env.AZUREOPENAI_API_KEY,
+ },
+ };
+ }
+ else if (typeof env.AZUREOPENAI_RESOURCE_NAME === 'string' ||
+ typeof env.AZUREOPENAI_DEPLOYMENT_NAME === 'string' ||
+ typeof env.AZUREOPENAI_API_KEY === 'string') {
+ throw new Error(spaceTrim__default["default"]("\n You must provide all of the following environment variables:\n \n - AZUREOPENAI_RESOURCE_NAME (".concat(typeof env.AZUREOPENAI_RESOURCE_NAME === 'string' ? 'defined' : 'not defined', ")\n - AZUREOPENAI_DEPLOYMENT_NAME (").concat(typeof env.AZUREOPENAI_DEPLOYMENT_NAME === 'string' ? 'defined' : 'not defined', ")\n - AZUREOPENAI_API_KEY (").concat(typeof env.AZUREOPENAI_API_KEY === 'string' ? 'defined' : 'not defined', ") \n ")));
+ }
+ return null;
+ },
+ });
+
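Sketch of the environment the registration above inspects; all three variables must be defined together (a partial set throws), and the values are placeholders:

    const env = {
        AZUREOPENAI_RESOURCE_NAME: 'my-resource',
        AZUREOPENAI_DEPLOYMENT_NAME: 'gpt-4o',
        AZUREOPENAI_API_KEY: '...',
    };
    // createConfigurationFromEnv(env) then yields an 'Azure Open AI (from env)' configuration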
 /**
 * @@@ registration1 of default configuration for Open AI
 *
@@ -8559,9 +7072,9 @@
 * @public exported from `@promptbook/cli`
 */
 var _OpenAiMetadataRegistration = $llmToolsMetadataRegister.register({
- title: 'Anthropic Claude',
- packageName: '@promptbook/anthropic-claude',
- className: 'AnthropicClaudeExecutionTools',
+ title: 'Open AI',
+ packageName: '@promptbook/openai',
+ className: 'OpenAiExecutionTools',
 getBoilerplateConfiguration: function () {
 return {
 title: 'Open AI (boilerplate)',
@@ -8971,6 +7484,7 @@
 exports.VersionMismatchError = VersionMismatchError;
 exports.ZERO_USAGE = ZERO_USAGE;
 exports._AnthropicClaudeMetadataRegistration = _AnthropicClaudeMetadataRegistration;
+ exports._AzureOpenAiMetadataRegistration = _AzureOpenAiMetadataRegistration;
 exports._OpenAiMetadataRegistration = _OpenAiMetadataRegistration;
 exports.addUsage = addUsage;
 exports.assertsExecutionSuccessful = assertsExecutionSuccessful;