@promptbook/core 0.66.0-8 → 0.66.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. package/esm/index.es.js +174 -1706
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/anthropic-claude.index.d.ts +0 -2
  4. package/esm/typings/src/_packages/cli.index.d.ts +6 -0
  5. package/esm/typings/src/_packages/core.index.d.ts +2 -0
  6. package/esm/typings/src/_packages/types.index.d.ts +22 -14
  7. package/esm/typings/src/_packages/utils.index.d.ts +7 -7
  8. package/esm/typings/src/config.d.ts +6 -0
  9. package/esm/typings/src/execution/LlmExecutionToolsConstructor.d.ts +1 -1
  10. package/esm/typings/src/llm-providers/_common/$llmToolsMetadataRegister.d.ts +2 -2
  11. package/esm/typings/src/llm-providers/_common/$llmToolsRegister.d.ts +2 -2
  12. package/esm/typings/src/llm-providers/_common/$registeredLlmToolsMessage.d.ts +9 -0
  13. package/esm/typings/src/llm-providers/_common/LlmToolsConfiguration.d.ts +1 -1
  14. package/esm/typings/src/llm-providers/_common/LlmToolsMetadata.d.ts +1 -1
  15. package/esm/typings/src/llm-providers/_common/createLlmToolsFromConfigurationFromEnv.d.ts +1 -0
  16. package/esm/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts +1 -1
  17. package/esm/typings/src/llm-providers/anthropic-claude/register-constructor.d.ts +1 -0
  18. package/esm/typings/src/llm-providers/azure-openai/register-constructor.d.ts +1 -0
  19. package/esm/typings/src/llm-providers/openai/register-constructor.d.ts +1 -0
  20. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +1 -0
  21. package/esm/typings/src/llm-providers/remote/interfaces/{Promptbook_Server_Error.d.ts → PromptbookServer_Error.d.ts} +1 -1
  22. package/esm/typings/src/llm-providers/remote/interfaces/PromptbookServer_ListModels_Request.d.ts +34 -0
  23. package/esm/typings/src/llm-providers/remote/interfaces/PromptbookServer_ListModels_Response.d.ts +15 -0
  24. package/esm/typings/src/llm-providers/remote/interfaces/{Promptbook_Server_Progress.d.ts → PromptbookServer_Prompt_Progress.d.ts} +1 -1
  25. package/esm/typings/src/llm-providers/remote/interfaces/{Promptbook_Server_Request.d.ts → PromptbookServer_Prompt_Request.d.ts} +15 -3
  26. package/esm/typings/src/llm-providers/remote/interfaces/{Promptbook_Server_Response.d.ts → PromptbookServer_Prompt_Response.d.ts} +1 -1
  27. package/esm/typings/src/llm-providers/remote/interfaces/RemoteLlmExecutionToolsOptions.d.ts +1 -7
  28. package/esm/typings/src/llm-providers/remote/startRemoteServer.d.ts +1 -0
  29. package/esm/typings/src/utils/{Register.d.ts → $Register.d.ts} +6 -2
  30. package/esm/typings/src/utils/environment/{getGlobalScope.d.ts → $getGlobalScope.d.ts} +1 -1
  31. package/esm/typings/src/utils/organization/f.d.ts +6 -0
  32. package/package.json +1 -6
  33. package/umd/index.umd.js +178 -1707
  34. package/umd/index.umd.js.map +1 -1
  35. package/esm/typings/src/llm-providers/_common/config.d.ts +0 -14
  36. package/esm/typings/src/llm-providers/anthropic-claude/register1.d.ts +0 -4
  37. /package/esm/typings/src/llm-providers/mocked/{fakeTextToExpectations.d.ts → $fakeTextToExpectations.d.ts} +0 -0
  38. /package/esm/typings/src/utils/{currentDate.d.ts → $currentDate.d.ts} +0 -0
  39. /package/esm/typings/src/utils/environment/{isRunningInBrowser.d.ts → $isRunningInBrowser.d.ts} +0 -0
  40. /package/esm/typings/src/utils/environment/{isRunningInNode.d.ts → $isRunningInNode.d.ts} +0 -0
  41. /package/esm/typings/src/utils/environment/{isRunningInWebWorker.d.ts → $isRunningInWebWorker.d.ts} +0 -0
  42. /package/esm/typings/src/utils/files/{isDirectoryExisting.d.ts → $isDirectoryExisting.d.ts} +0 -0
  43. /package/esm/typings/src/utils/files/{isDirectoryExisting.test.d.ts → $isDirectoryExisting.test.d.ts} +0 -0
  44. /package/esm/typings/src/utils/files/{isFileExisting.d.ts → $isFileExisting.d.ts} +0 -0
  45. /package/esm/typings/src/utils/files/{isFileExisting.test.d.ts → $isFileExisting.test.d.ts} +0 -0
  46. /package/esm/typings/src/utils/files/{listAllFiles.d.ts → $listAllFiles.d.ts} +0 -0
  47. /package/esm/typings/src/utils/files/{listAllFiles.test.d.ts → $listAllFiles.test.d.ts} +0 -0
  48. /package/esm/typings/src/utils/random/{randomSeed.d.ts → $randomSeed.d.ts} +0 -0
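A recurring theme in the renames above is a new `$` prefix on modules whose exports touch global state (`Register.d.ts → $Register.d.ts`, `getGlobalScope.d.ts → $getGlobalScope.d.ts`, `currentDate.d.ts → $currentDate.d.ts`, ...). Below is a minimal TypeScript sketch of that pattern, reconstructed from the `$Register` code visible in the diff further down; the `Registered` type and the plain `Error` are simplified stand-ins for the package's actual declarations, and the usage at the end is illustrative only:

// Sketch: `$` marks impure code that reads/writes global scope (naming convention from this release)
type Registered = { packageName: string; className: string };

function $getGlobalScope(): Record<string, unknown> {
    return globalThis as unknown as Record<string, unknown>; // works in Node.js, browsers and workers
}

class $Register<TRegistered extends Registered> {
    private readonly storage: Array<TRegistered>;

    public constructor(storageName: string) {
        const key = `_promptbook_${storageName}`;
        const globalScope = $getGlobalScope();
        if (globalScope[key] === undefined) {
            globalScope[key] = [];
        } else if (!Array.isArray(globalScope[key])) {
            throw new Error(`Expected (global) ${key} to be an array`);
        }
        // All copies of the library bundled into one app share this same global array:
        this.storage = globalScope[key] as Array<TRegistered>;
    }

    public list(): ReadonlyArray<TRegistered> {
        return this.storage;
    }

    public register(registered: TRegistered): void {
        const index = this.storage.findIndex(
            (item) => item.packageName === registered.packageName && item.className === registered.className,
        );
        if (index === -1) {
            this.storage.push(registered);
        } else {
            this.storage[index] = registered; // re-registration replaces the previous entry
        }
    }
}

// Usage mirroring the diff: each register is keyed by a storage name in global scope
const $llmToolsMetadataRegister = new $Register<Registered>('llm_tools_metadata');
$llmToolsMetadataRegister.register({ packageName: '@promptbook/openai', className: 'OpenAiExecutionTools' });

Keying the storage in global scope (rather than in a module-level variable, as the removed `Register` did) is what lets `$llmToolsRegister` pick up constructors registered by separately bundled provider packages.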
package/umd/index.umd.js CHANGED
@@ -1,8 +1,8 @@
  (function (global, factory) {
- typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('spacetrim'), require('prettier'), require('prettier/parser-html'), require('crypto-js/enc-hex'), require('crypto-js/sha256'), require('socket.io-client'), require('@anthropic-ai/sdk'), require('colors'), require('@azure/openai'), require('openai'), require('moment')) :
- typeof define === 'function' && define.amd ? define(['exports', 'spacetrim', 'prettier', 'prettier/parser-html', 'crypto-js/enc-hex', 'crypto-js/sha256', 'socket.io-client', '@anthropic-ai/sdk', 'colors', '@azure/openai', 'openai', 'moment'], factory) :
- (global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global["promptbook-core"] = {}, global.spaceTrim, global.prettier, global.parserHtml, global.hexEncoder, global.sha256, global.socket_ioClient, global.Anthropic, global.colors, global.openai, global.OpenAI, global.moment));
- })(this, (function (exports, spaceTrim, prettier, parserHtml, hexEncoder, sha256, socket_ioClient, Anthropic, colors, openai, OpenAI, moment) { 'use strict';
+ typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('spacetrim'), require('prettier'), require('prettier/parser-html'), require('crypto-js/enc-hex'), require('crypto-js/sha256'), require('moment')) :
+ typeof define === 'function' && define.amd ? define(['exports', 'spacetrim', 'prettier', 'prettier/parser-html', 'crypto-js/enc-hex', 'crypto-js/sha256', 'moment'], factory) :
+ (global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global["promptbook-core"] = {}, global.spaceTrim, global.prettier, global.parserHtml, global.hexEncoder, global.sha256, global.moment));
+ })(this, (function (exports, spaceTrim, prettier, parserHtml, hexEncoder, sha256, moment) { 'use strict';

  function _interopDefaultLegacy (e) { return e && typeof e === 'object' && 'default' in e ? e : { 'default': e }; }

@@ -10,16 +10,13 @@
  var parserHtml__default = /*#__PURE__*/_interopDefaultLegacy(parserHtml);
  var hexEncoder__default = /*#__PURE__*/_interopDefaultLegacy(hexEncoder);
  var sha256__default = /*#__PURE__*/_interopDefaultLegacy(sha256);
- var Anthropic__default = /*#__PURE__*/_interopDefaultLegacy(Anthropic);
- var colors__default = /*#__PURE__*/_interopDefaultLegacy(colors);
- var OpenAI__default = /*#__PURE__*/_interopDefaultLegacy(OpenAI);
  var moment__default = /*#__PURE__*/_interopDefaultLegacy(moment);

  // ⚠️ WARNING: This code has been generated so that any manual changes will be overwritten
  /**
  * The version of the Promptbook library
  */
- var PROMPTBOOK_VERSION = '0.66.0-7';
+ var PROMPTBOOK_VERSION = '0.66.0-9';
  // TODO: !!!! List here all the versions and annotate + put into script

  /*! *****************************************************************************
@@ -600,6 +597,13 @@
  * @public exported from `@promptbook/core`
  */
  var DEFAULT_REMOTE_URL_PATH = '/promptbook/socket.io';
+ // <- TODO: [🧜‍♂️]
+ /**
+ * @@@
+ *
+ * @public exported from `@promptbook/core`
+ */
+ var IS_VERBOSE = false;
  /**
  * TODO: [🧠][🧜‍♂️] Maybe join remoteUrl and path into single value
  */
@@ -1333,7 +1337,7 @@
  return __generator(this, function (_d) {
  switch (_d.label) {
  case 0:
- _a = options || {}, _b = _a.isVerbose, isVerbose = _b === void 0 ? false : _b, _c = _a.isLazyLoaded, isLazyLoaded = _c === void 0 ? false : _c;
+ _a = options || {}, _b = _a.isVerbose, isVerbose = _b === void 0 ? IS_VERBOSE : _b, _c = _a.isLazyLoaded, isLazyLoaded = _c === void 0 ? false : _c;
  collection = createCollectionFromPromise(function () { return __awaiter(_this, void 0, void 0, function () {
  return __generator(this, function (_a) {
  if (isVerbose) {
@@ -1650,7 +1654,7 @@
  });
  }

- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.66.0-7",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.66.0-7",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.66.0-7",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.66.0-7",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the 
persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.66.0-9",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.66.0-9",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.66.0-9",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.66.0-9",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the 
persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
  var defaultDiacriticsRemovalMap = [
  {
@@ -2248,8 +2252,37 @@
  */
  MultipleLlmExecutionTools.prototype.checkConfiguration = function () {
  return __awaiter(this, void 0, void 0, function () {
- return __generator(this, function (_a) {
- return [2 /*return*/];
+ var _a, _b, llmExecutionTools, e_1_1;
+ var e_1, _c;
+ return __generator(this, function (_d) {
+ switch (_d.label) {
+ case 0:
+ _d.trys.push([0, 5, 6, 7]);
+ _a = __values(this.llmExecutionTools), _b = _a.next();
+ _d.label = 1;
+ case 1:
+ if (!!_b.done) return [3 /*break*/, 4];
+ llmExecutionTools = _b.value;
+ return [4 /*yield*/, llmExecutionTools.checkConfiguration()];
+ case 2:
+ _d.sent();
+ _d.label = 3;
+ case 3:
+ _b = _a.next();
+ return [3 /*break*/, 1];
+ case 4: return [3 /*break*/, 7];
+ case 5:
+ e_1_1 = _d.sent();
+ e_1 = { error: e_1_1 };
+ return [3 /*break*/, 7];
+ case 6:
+ try {
+ if (_b && !_b.done && (_c = _a.return)) _c.call(_a);
+ }
+ finally { if (e_1) throw e_1.error; }
+ return [7 /*endfinally*/];
+ case 7: return [2 /*return*/];
+ }
  });
  });
  };
@@ -2259,8 +2292,8 @@
  */
  MultipleLlmExecutionTools.prototype.listModels = function () {
  return __awaiter(this, void 0, void 0, function () {
- var availableModels, _a, _b, llmExecutionTools, models, e_1_1;
- var e_1, _c;
+ var availableModels, _a, _b, llmExecutionTools, models, e_2_1;
+ var e_2, _c;
  return __generator(this, function (_d) {
  switch (_d.label) {
  case 0:
@@ -2283,14 +2316,14 @@
  return [3 /*break*/, 2];
  case 5: return [3 /*break*/, 8];
  case 6:
- e_1_1 = _d.sent();
- e_1 = { error: e_1_1 };
+ e_2_1 = _d.sent();
+ e_2 = { error: e_2_1 };
  return [3 /*break*/, 8];
  case 7:
  try {
  if (_b && !_b.done && (_c = _a.return)) _c.call(_a);
  }
- finally { if (e_1) throw e_1.error; }
+ finally { if (e_2) throw e_2.error; }
  return [7 /*endfinally*/];
  case 8: return [2 /*return*/, availableModels];
  }
@@ -2323,8 +2356,8 @@
  */
  MultipleLlmExecutionTools.prototype.callCommonModel = function (prompt) {
  return __awaiter(this, void 0, void 0, function () {
- var errors, _a, _b, llmExecutionTools, _c, error_1, e_2_1;
- var e_2, _d;
+ var errors, _a, _b, llmExecutionTools, _c, error_1, e_3_1;
+ var e_3, _d;
  var _this = this;
  return __generator(this, function (_e) {
  switch (_e.label) {
@@ -2380,14 +2413,14 @@
  return [3 /*break*/, 2];
  case 14: return [3 /*break*/, 17];
  case 15:
- e_2_1 = _e.sent();
- e_2 = { error: e_2_1 };
+ e_3_1 = _e.sent();
+ e_3 = { error: e_3_1 };
  return [3 /*break*/, 17];
  case 16:
  try {
  if (_b && !_b.done && (_d = _a.return)) _d.call(_a);
  }
- finally { if (e_2) throw e_2.error; }
+ finally { if (e_3) throw e_3.error; }
  return [7 /*endfinally*/];
  case 17:
  if (errors.length === 1) {
@@ -2870,7 +2903,7 @@
  function createPipelineExecutor(options) {
  var _this = this;
  var pipeline = options.pipeline, tools = options.tools, _a = options.settings, settings = _a === void 0 ? {} : _a;
- var _b = settings.maxExecutionAttempts, maxExecutionAttempts = _b === void 0 ? MAX_EXECUTION_ATTEMPTS : _b, _c = settings.maxParallelCount, maxParallelCount = _c === void 0 ? MAX_PARALLEL_COUNT : _c, _d = settings.isVerbose, isVerbose = _d === void 0 ? false : _d, _e = settings.isNotPreparedWarningSupressed, isNotPreparedWarningSupressed = _e === void 0 ? false : _e;
+ var _b = settings.maxExecutionAttempts, maxExecutionAttempts = _b === void 0 ? MAX_EXECUTION_ATTEMPTS : _b, _c = settings.maxParallelCount, maxParallelCount = _c === void 0 ? MAX_PARALLEL_COUNT : _c, _d = settings.isVerbose, isVerbose = _d === void 0 ? IS_VERBOSE : _d, _e = settings.isNotPreparedWarningSupressed, isNotPreparedWarningSupressed = _e === void 0 ? false : _e;
  validatePipeline(pipeline);
  var llmTools = joinLlmExecutionTools.apply(void 0, __spreadArray([], __read(arrayableToArray(tools.llm)), false));
  var preparedPipeline;
@@ -3665,7 +3698,7 @@
  return __generator(this, function (_j) {
  switch (_j.label) {
  case 0:
- llmTools = options.llmTools, _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? MAX_PARALLEL_COUNT : _a, _b = options.isVerbose, isVerbose = _b === void 0 ? false : _b;
+ llmTools = options.llmTools, _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? MAX_PARALLEL_COUNT : _a, _b = options.isVerbose, isVerbose = _b === void 0 ? IS_VERBOSE : _b;
  TODO_USE(maxParallelCount); // <- [🪂]
  collection = createCollectionFromJson.apply(void 0, __spreadArray([], __read(PipelineCollection), false));
  _c = createPipelineExecutor;
@@ -3952,7 +3985,7 @@
  return __generator(this, function (_d) {
  switch (_d.label) {
  case 0:
- llmTools = options.llmTools, _a = options.isVerbose, isVerbose = _a === void 0 ? false : _a;
+ llmTools = options.llmTools, _a = options.isVerbose, isVerbose = _a === void 0 ? IS_VERBOSE : _a;
  collection = createCollectionFromJson.apply(void 0, __spreadArray([], __read(PipelineCollection), false));
  _b = createPipelineExecutor;
  _c = {};
@@ -4109,7 +4142,7 @@
  if (isPipelinePrepared(pipeline)) {
  return [2 /*return*/, pipeline];
  }
- llmTools = options.llmTools, _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? MAX_PARALLEL_COUNT : _a, _b = options.isVerbose, isVerbose = _b === void 0 ? false : _b;
+ llmTools = options.llmTools, _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? MAX_PARALLEL_COUNT : _a, _b = options.isVerbose, isVerbose = _b === void 0 ? IS_VERBOSE : _b;
  parameters = pipeline.parameters, promptTemplates = pipeline.promptTemplates, knowledgeSources = pipeline.knowledgeSources, personas = pipeline.personas;
  llmToolsWithUsage = countTotalUsage(llmTools);
  currentPreparation = {
@@ -6522,7 +6555,7 @@
  /**
  * @@@
  *
- * Note: `$` is used to indicate that this function is not a pure function - it access global
+ * Note: `$` is used to indicate that this function is not a pure function - it access global scope
  *
  * @public exported from `@promptbook/utils`
  */
@@ -6536,42 +6569,42 @@
  /**
  * Register is @@@
  *
+ * Note: `$` is used to indicate that this function is not a pure function - it accesses and adds variables in global scope.
+ *
  * @private internal utility, exported are only signleton instances of this class
  */
- var Register = /** @class */ (function () {
- function Register(storage) {
- this.storage = storage;
+ var $Register = /** @class */ (function () {
+ function $Register(storageName) {
+ this.storageName = storageName;
+ storageName = "_promptbook_".concat(storageName);
+ var globalScope = $getGlobalScope();
+ if (globalScope[storageName] === undefined) {
+ globalScope[storageName] = [];
+ }
+ else if (!Array.isArray(globalScope[storageName])) {
+ throw new UnexpectedError("Expected (global) ".concat(storageName, " to be an array, but got ").concat(typeof globalScope[storageName]));
+ }
+ this.storage = globalScope[storageName];
  }
- Register.prototype.list = function () {
+ $Register.prototype.list = function () {
  // <- TODO: ReadonlyDeep<Array<TRegistered>>
  return this.storage;
  };
- Register.prototype.register = function (registered) {
+ $Register.prototype.register = function (registered) {
  // <- TODO: What to return here
  var packageName = registered.packageName, className = registered.className;
  var existingRegistrationIndex = this.storage.findIndex(function (item) { return item.packageName === packageName && item.className === className; });
  var existingRegistration = this.storage[existingRegistrationIndex];
  if (!existingRegistration) {
- console.warn("[\uD83D\uDCE6] Registering ".concat(packageName, ".").concat(className, " again"));
  this.storage.push(registered);
  }
  else {
- console.warn("[\uD83D\uDCE6] Re-registering ".concat(packageName, ".").concat(className, " again"));
  this.storage[existingRegistrationIndex] = registered;
  }
  };
- return Register;
+ return $Register;
  }());

- // TODO: !!!!!! Move this logic to Register and rename to $Register
- var globalScope = $getGlobalScope();
- if (globalScope.$llmToolsMetadataRegister === undefined) {
- globalScope.$llmToolsMetadataRegister = [];
- }
- else if (!Array.isArray(globalScope.$llmToolsMetadataRegister)) {
- throw new UnexpectedError("Expected $llmToolsMetadataRegister to be an array, but got ".concat(typeof globalScope.$llmToolsMetadataRegister));
- }
- var _ = globalScope.$llmToolsMetadataRegister;
  /**
  * @@@
  *
@@ -6579,8 +6612,7 @@
  * @singleton Only one instance of each register is created per build, but thare can be more @@@
  * @public exported from `@promptbook/core`
  */
- var $llmToolsMetadataRegister = new Register(_);
- $getGlobalScope().$llmToolsMetadataRegister;
+ var $llmToolsMetadataRegister = new $Register('llm_tools_metadata');

  /**
  * @@@
@@ -6589,1687 +6621,125 @@
  *
  * @singleton Only one instance of each register is created per build, but thare can be more @@@
  * @public exported from `@promptbook/core`
- var $llmToolsRegister = new Register([
- // TODO: !!!!!! Take from global scope
- ]);
+ var $llmToolsRegister = new $Register('llm_execution_tools_constructors');

  /**
- * Remote server is a proxy server that uses its execution tools internally and exposes the executor interface externally.
+ * Creates a message with all registered LLM tools
  *
- * You can simply use `RemoteExecutionTools` on client-side javascript and connect to your remote server.
- * This is useful to make all logic on browser side but not expose your API keys or no need to use customer's GPU.
+ * Note: This function is used to create a (error) message when there is no constructor for some LLM provider
  *
- * @see https://github.com/webgptorg/promptbook#remote-server
- * @public exported from `@promptbook/remote-client`
+ * @private internal function of `createLlmToolsFromConfiguration` and `createLlmToolsFromEnv`
  */
- var RemoteLlmExecutionTools = /** @class */ (function () {
- function RemoteLlmExecutionTools(options) {
- this.options = options;
- }
- Object.defineProperty(RemoteLlmExecutionTools.prototype, "title", {
- get: function () {
- // TODO: [🧠] Maybe fetch title+description from the remote server (as well as if model methods are defined)
- return 'Remote server';
- },
- enumerable: false,
- configurable: true
- });
- Object.defineProperty(RemoteLlmExecutionTools.prototype, "description", {
- get: function () {
- return 'Use all models by your remote server';
- },
- enumerable: false,
- configurable: true
- });
- /**
- * Check the configuration of all execution tools
- */
- RemoteLlmExecutionTools.prototype.checkConfiguration = function () {
- return __awaiter(this, void 0, void 0, function () {
- return __generator(this, function (_a) {
- return [2 /*return*/];
- });
- });
- };
- /**
- * List all available models that can be used
- */
- RemoteLlmExecutionTools.prototype.listModels = function () {
- return __awaiter(this, void 0, void 0, function () {
- return __generator(this, function (_a) {
- return [2 /*return*/, (this.options.models ||
- [
- /* !!!!!! */
- ])];
- });
- });
- };
- /**
- * Creates a connection to the remote proxy server.
- */
- RemoteLlmExecutionTools.prototype.makeConnection = function () {
- var _this = this;
- return new Promise(
- // <- TODO: [🧱] Implement in a functional (not new Class) way
- function (resolve, reject) {
- var socket = socket_ioClient.io(_this.options.remoteUrl, {
- path: _this.options.path,
- // path: `${this.remoteUrl.pathname}/socket.io`,
- transports: [/*'websocket', <- TODO: [🌬] Make websocket transport work */ 'polling'],
- });
- // console.log('Connecting to', this.options.remoteUrl.href, { socket });
- socket.on('connect', function () {
- resolve(socket);
- });
- // TODO: !!!! Better timeout handling
- setTimeout(function () {
- reject(new Error("Timeout while connecting to ".concat(_this.options.remoteUrl)));
- }, 1000 /* <- TODO: Timeout to config */);
- });
- };
- /**
- * Calls remote proxy server to use a chat model
- */
- RemoteLlmExecutionTools.prototype.callChatModel = function (prompt) {
- if (this.options.isVerbose) {
- console.info("\uD83D\uDD8B Remote callChatModel call");
- }
- return /* not await */ this.callCommonModel(prompt);
- };
+ function $registeredLlmToolsMessage() {
+ var e_1, _a, e_2, _b;
  /**
- * Calls remote proxy server to use a completion model
+ * Mixes registered LLM tools from $llmToolsMetadataRegister and $llmToolsRegister
  */
- RemoteLlmExecutionTools.prototype.callCompletionModel = function (prompt) {
- if (this.options.isVerbose) {
- console.info("\uD83D\uDCAC Remote callCompletionModel call");
+ var all = [];
+ var _loop_1 = function (packageName, className) {
+ if (all.some(function (item) { return item.packageName === packageName && item.className === className; })) {
+ return "continue";
  }
- return /* not await */ this.callCommonModel(prompt);
+ all.push({ packageName: packageName, className: className });
  };
- /**
- * Calls remote proxy server to use a embedding model
- */
- RemoteLlmExecutionTools.prototype.callEmbeddingModel = function (prompt) {
- if (this.options.isVerbose) {
- console.info("\uD83D\uDCAC Remote callEmbeddingModel call");
+ try {
+ for (var _c = __values($llmToolsMetadataRegister.list()), _d = _c.next(); !_d.done; _d = _c.next()) {
+ var _e = _d.value, packageName = _e.packageName, className = _e.className;
+ _loop_1(packageName, className);
  }
- return /* not await */ this.callCommonModel(prompt);
- };
- // <- Note: [🤖] callXxxModel
- /**
- * Calls remote proxy server to use both completion or chat model
- */
- RemoteLlmExecutionTools.prototype.callCommonModel = function (prompt) {
- return __awaiter(this, void 0, void 0, function () {
- var socket, promptResult;
- return __generator(this, function (_a) {
- switch (_a.label) {
- case 0: return [4 /*yield*/, this.makeConnection()];
- case 1:
- socket = _a.sent();
- if (this.options.isAnonymous) {
- socket.emit('request', {
- llmToolsConfiguration: this.options.llmToolsConfiguration,
- prompt: prompt,
- // <- TODO: [🛫] `prompt` is NOT fully serializable as JSON, it contains functions which are not serializable
- });
- }
- else {
- socket.emit('request', {
- clientId: this.options.clientId,
- prompt: prompt,
- // <- TODO: [🛫] `prompt` is NOT fully serializable as JSON, it contains functions which are not serializable
- });
- }
- return [4 /*yield*/, new Promise(function (resolve, reject) {
- socket.on('response', function (response) {
- resolve(response.promptResult);
- socket.disconnect();
- });
- socket.on('error', function (error) {
- reject(new PipelineExecutionError(error.errorMessage));
- socket.disconnect();
- });
- })];
- case 2:
- promptResult = _a.sent();
- socket.disconnect();
- return [2 /*return*/, promptResult];
- }
- });
- });
- };
- return RemoteLlmExecutionTools;
- }());
- /**
- * TODO: [🍓] Allow to list compatible models with each variant
- * TODO: [🗯] RemoteLlmExecutionTools should extend Destroyable and implement IDestroyable
- * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
- * TODO: [🧠] Maybe remove `@promptbook/remote-client` and just use `@promptbook/core`
- */
-
- /**
- * Function computeUsage will create price per one token based on the string value found on openai page
- *
- * @private within the repository, used only as internal helper for `OPENAI_MODELS`
- */
- function computeUsage(value) {
- var _a = __read(value.split(' / '), 2), price = _a[0], tokens = _a[1];
- return parseFloat(price.replace('$', '')) / parseFloat(tokens.replace('M tokens', '')) / 1000000;
- }
-
- /**
- * List of available Anthropic Claude models with pricing
- *
- * Note: Done at 2024-08-16
- *
- * @see https://docs.anthropic.com/en/docs/models-overview
- * @public exported from `@promptbook/anthropic-claude`
- */
- var ANTHROPIC_CLAUDE_MODELS = [
- {
- modelVariant: 'CHAT',
- modelTitle: 'Claude 3.5 Sonnet',
- modelName: 'claude-3-5-sonnet-20240620',
- pricing: {
- prompt: computeUsage("$3.00 / 1M tokens"),
- output: computeUsage("$15.00 / 1M tokens"),
- },
- },
- {
- modelVariant: 'CHAT',
- modelTitle: 'Claude 3 Opus',
- modelName: 'claude-3-opus-20240229',
- pricing: {
- prompt: computeUsage("$15.00 / 1M tokens"),
- output: computeUsage("$75.00 / 1M tokens"),
- },
- },
- {
- modelVariant: 'CHAT',
- modelTitle: 'Claude 3 Sonnet',
- modelName: 'claude-3-sonnet-20240229',
- pricing: {
- prompt: computeUsage("$3.00 / 1M tokens"),
- output: computeUsage("$15.00 / 1M tokens"),
- },
- },
- {
- modelVariant: 'CHAT',
- modelTitle: 'Claude 3 Haiku',
- modelName: ' claude-3-haiku-20240307',
- pricing: {
- prompt: computeUsage("$0.25 / 1M tokens"),
- output: computeUsage("$1.25 / 1M tokens"),
- },
- },
- {
- modelVariant: 'CHAT',
- modelTitle: 'Claude 2.1',
- modelName: 'claude-2.1',
- pricing: {
- prompt: computeUsage("$8.00 / 1M tokens"),
- output: computeUsage("$24.00 / 1M tokens"),
- },
- },
- {
- modelVariant: 'CHAT',
- modelTitle: 'Claude 2',
- modelName: 'claude-2.0',
- pricing: {
- prompt: computeUsage("$8.00 / 1M tokens"),
- output: computeUsage("$24.00 / 1M tokens"),
- },
- },
- {
- modelVariant: 'CHAT',
- modelTitle: ' Claude Instant 1.2',
- modelName: 'claude-instant-1.2',
- pricing: {
- prompt: computeUsage("$0.80 / 1M tokens"),
- output: computeUsage("$2.40 / 1M tokens"),
- },
- },
- // TODO: !!! Claude 1 and 2 has also completion versions - ask Hoagy
- ];
- /**
- * Note: [🤖] Add models of new variant
- * TODO: [🧠] !!! Add embedding models OR Anthropic has only chat+completion models?
- * TODO: [🧠] Some mechanism to propagate unsureness
- * TODO: [🧠][👮‍♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...
- * TODO: [🎰] Some mechanism to auto-update available models
- */
-
- /**
- * Get current date in ISO 8601 format
- *
- * @private internal utility
- */
- function getCurrentIsoDate() {
- return new Date().toISOString();
- }
-
- /**
- * Helper of usage compute
- *
- * @param content the content of prompt or response
- * @returns part of PromptResultUsageCounts
- *
- * @private internal utility of LlmExecutionTools
- */
- function computeUsageCounts(content) {
- return {
- charactersCount: { value: countCharacters(content) },
- wordsCount: { value: countWords(content) },
- sentencesCount: { value: countSentences(content) },
- linesCount: { value: countLines(content) },
- paragraphsCount: { value: countParagraphs(content) },
- pagesCount: { value: countPages(content) },
- };
- }
-
- /**
- * Make UncertainNumber
- *
- * @param value
- *
- * @private utility for initializating UncertainNumber
- */
- function uncertainNumber(value) {
- if (value === null || value === undefined || Number.isNaN(value)) {
- return { value: 0, isUncertain: true };
- }
- return { value: value };
- }
-
- /**
- * Computes the usage of the Anthropic Claude API based on the response from Anthropic Claude
- *
- * @param promptContent The content of the prompt
- * @param resultContent The content of the result (for embedding prompts or failed prompts pass empty string)
- * @param rawResponse The raw response from Anthropic Claude API
- * @throws {PipelineExecutionError} If the usage is not defined in the response from Anthropic Claude
- * @private internal utility of `AnthropicClaudeExecutionTools`
- */
- function computeAnthropicClaudeUsage(promptContent, // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
- resultContent, rawResponse) {
- var _a, _b;
- if (rawResponse.usage === undefined) {
- throw new PipelineExecutionError('The usage is not defined in the response from Anthropic Claude');
  }
- if (((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.input_tokens) === undefined) {
- throw new PipelineExecutionError('In Anthropic Claude response `usage.prompt_tokens` not defined');
+ catch (e_1_1) { e_1 = { error: e_1_1 }; }
+ finally {
+ try {
+ if (_d && !_d.done && (_a = _c.return)) _a.call(_c);
+ }
+ finally { if (e_1) throw e_1.error; }
  }
- var inputTokens = rawResponse.usage.input_tokens;
- var outputTokens = ((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.output_tokens) || 0;
- var modelInfo = ANTHROPIC_CLAUDE_MODELS.find(function (model) { return model.modelName === rawResponse.model; });
- var price;
- if (modelInfo === undefined || modelInfo.pricing === undefined) {
- price = uncertainNumber();
+ var _loop_2 = function (packageName, className) {
+ if (all.some(function (item) { return item.packageName === packageName && item.className === className; })) {
+ return "continue";
+ }
+ all.push({ packageName: packageName, className: className });
+ };
+ try {
+ for (var _f = __values($llmToolsRegister.list()), _g = _f.next(); !_g.done; _g = _f.next()) {
+ var _h = _g.value, packageName = _h.packageName, className = _h.className;
+ _loop_2(packageName, className);
+ }
  }
- else {
- price = uncertainNumber(inputTokens * modelInfo.pricing.prompt + outputTokens * modelInfo.pricing.output);
+ catch (e_2_1) { e_2 = { error: e_2_1 }; }
+ finally {
+ try {
+ if (_g && !_g.done && (_b = _f.return)) _b.call(_f);
+ }
+ finally { if (e_2) throw e_2.error; }
  }
- return {
- price: price,
- input: __assign({ tokensCount: uncertainNumber(rawResponse.usage.input_tokens) }, computeUsageCounts(promptContent)),
- output: __assign({ tokensCount: uncertainNumber(outputTokens) }, computeUsageCounts(resultContent)),
- };
+ var metadata = all.map(function (metadata) {
+ var isMetadataAviailable = $llmToolsMetadataRegister
+ .list()
+ .find(function (_a) {
+ var packageName = _a.packageName, className = _a.className;
+ return metadata.packageName === packageName && metadata.className === className;
+ });
+ var isInstalled = $llmToolsRegister
+ .list()
+ .find(function (_a) {
+ var packageName = _a.packageName, className = _a.className;
+ return metadata.packageName === packageName && metadata.className === className;
+ });
+ return __assign(__assign({}, metadata), { isMetadataAviailable: isMetadataAviailable, isInstalled: isInstalled });
+ });
+ return spaceTrim__default["default"](function (block) { return "\n Available LLM providers are:\n ".concat(block(metadata
+ .map(function (_a, i) {
+ var packageName = _a.packageName, className = _a.className, isMetadataAviailable = _a.isMetadataAviailable, isInstalled = _a.isInstalled;
+ var more;
+ if (just(false)) {
+ more = '';
+ }
+ else if (!isMetadataAviailable && !isInstalled) {
+ // TODO: [�][�] Maybe do allow to do auto-install if package not registered and not found
+ more = "(not installed and no metadata, looks like a unexpected behavior)";
+ }
+ else if (isMetadataAviailable && !isInstalled) {
+ // TODO: [�][�]
+ more = "(not installed)";
+ }
+ else if (!isMetadataAviailable && isInstalled) {
+ more = "(no metadata, looks like a unexpected behavior)";
+ }
+ else if (isMetadataAviailable && isInstalled) {
+ more = "(installed)";
+ }
+ else {
+ more = "(unknown state, looks like a unexpected behavior)";
+ }
+ return "".concat(i + 1, ") `").concat(className, "` from `").concat(packageName, "` ").concat(more);
+ })
+ .join('\n')), "\n "); });
  }
- /**
- * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
- */

  /**
- * Execution Tools for calling Anthropic Claude API.
+ * @@@
+ *
+ * Note: This function is not cached, every call creates new instance of `MultipleLlmExecutionTools`
  *
- * @public exported from `@promptbook/anthropic-claude`
- * @deprecated use `createAnthropicClaudeExecutionTools` instead
+ * @returns @@@
+ * @public exported from `@promptbook/core`
  */
- var AnthropicClaudeExecutionTools = /** @class */ (function () {
- /**
- * Creates Anthropic Claude Execution Tools.
- *
- * @param options which are relevant are directly passed to the Anthropic Claude client
- */
- function AnthropicClaudeExecutionTools(options) {
- if (options === void 0) { options = { isProxied: false }; }
- this.options = options;
- /**
- * Anthropic Claude API client.
- */
- this.client = null;
- }
- Object.defineProperty(AnthropicClaudeExecutionTools.prototype, "title", {
- get: function () {
- return 'Anthropic Claude';
- },
- enumerable: false,
- configurable: true
- });
- Object.defineProperty(AnthropicClaudeExecutionTools.prototype, "description", {
- get: function () {
- return 'Use all models provided by Anthropic Claude';
- },
- enumerable: false,
- configurable: true
- });
- AnthropicClaudeExecutionTools.prototype.getClient = function () {
- return __awaiter(this, void 0, void 0, function () {
- var anthropicOptions;
- return __generator(this, function (_a) {
- if (this.client === null) {
- anthropicOptions = __assign({}, this.options);
- delete anthropicOptions.isVerbose;
- delete anthropicOptions.isProxied;
- this.client = new Anthropic__default["default"](anthropicOptions);
- }
- return [2 /*return*/, this.client];
- });
- });
- };
- /**
- * Check the `options` passed to `constructor`
- */
- AnthropicClaudeExecutionTools.prototype.checkConfiguration = function () {
- return __awaiter(this, void 0, void 0, function () {
- return __generator(this, function (_a) {
- switch (_a.label) {
- case 0: return [4 /*yield*/, this.getClient()];
- case 1:
- _a.sent();
- return [2 /*return*/];
- }
- });
+ function createLlmToolsFromConfiguration(configuration, options) {
+ if (options === void 0) { options = {}; }
+ var _a = options.isVerbose, isVerbose = _a === void 0 ? IS_VERBOSE : _a;
+ var llmTools = configuration.map(function (llmConfiguration) {
+ var registeredItem = $llmToolsRegister
+ .list()
+ .find(function (_a) {
+ var packageName = _a.packageName, className = _a.className;
+ return llmConfiguration.packageName === packageName && llmConfiguration.className === className;
  });
- };
- /**
- * List all available Anthropic Claude models that can be used
- */
- AnthropicClaudeExecutionTools.prototype.listModels = function () {
- return ANTHROPIC_CLAUDE_MODELS;
- };
- /**
- * Calls Anthropic Claude API to use a chat model.
- */
- AnthropicClaudeExecutionTools.prototype.callChatModel = function (prompt) {
- return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, client, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, contentBlock, resultContent, usage;
- return __generator(this, function (_a) {
- switch (_a.label) {
- case 0:
- if (this.options.isVerbose) {
- console.info('💬 Anthropic Claude callChatModel call');
- }
- content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
- return [4 /*yield*/, this.getClient()];
- case 1:
- client = _a.sent();
- // TODO: [☂] Use here more modelRequirements
- if (modelRequirements.modelVariant !== 'CHAT') {
- throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
- }
- modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
- rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
- rawRequest = {
- model: modelRequirements.modelName || this.getDefaultChatModel().modelName,
- max_tokens: modelRequirements.maxTokens || 4096,
- // <- TODO: [🌾] Make some global max cap for maxTokens
- temperature: modelRequirements.temperature,
- system: modelRequirements.systemMessage,
- // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
- // <- Note: [🧆]
- messages: [
- {
- role: 'user',
- content: rawPromptContent,
- },
- ],
- // TODO: Is here some equivalent of user identification?> user: this.options.user,
- };
- start = getCurrentIsoDate();
- if (this.options.isVerbose) {
- console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
- }
- return [4 /*yield*/, client.messages.create(rawRequest)];
- case 2:
- rawResponse = _a.sent();
- if (this.options.isVerbose) {
- console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
- }
- if (!rawResponse.content[0]) {
- throw new PipelineExecutionError('No content from Anthropic Claude');
- }
- if (rawResponse.content.length > 1) {
- throw new PipelineExecutionError('More than one content blocks from Anthropic Claude');
- }
- contentBlock = rawResponse.content[0];
- if (contentBlock.type !== 'text') {
- throw new PipelineExecutionError("Returned content is not \"text\" type but \"".concat(contentBlock.type, "\""));
- }
- resultContent = contentBlock.text;
- // eslint-disable-next-line prefer-const
- complete = getCurrentIsoDate();
- usage = computeAnthropicClaudeUsage(content, '', rawResponse);
- return [2 /*return*/, {
- content: resultContent,
- modelName: rawResponse.model,
- timing: {
- start: start,
- complete: complete,
- },
- usage: usage,
- rawPromptContent: rawPromptContent,
- rawRequest: rawRequest,
- rawResponse: rawResponse,
- // <- [🗯]
- }];
- }
- });
- });
- };
- /*
- TODO: [👏]
- public async callCompletionModel(
- prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements'>,
- ): Promise<PromptCompletionResult> {
-
- if (this.options.isVerbose) {
- console.info('🖋 Anthropic Claude callCompletionModel call');
- }
-
- const { content, parameters, modelRequirements } = prompt;
-
- // TODO: [☂] Use here more modelRequirements
- if (modelRequirements.modelVariant !== 'COMPLETION') {
- throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
- }
-
- const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
- const modelSettings = {
- model: modelName,
- max_tokens: modelRequirements.maxTokens || 2000, // <- Note: 2000 is for lagacy reasons
- // <- TODO: [🌾] Make some global max cap for maxTokens
- // <- TODO: Use here `systemMessage`, `temperature` and `seed`
- };
-
- const rawRequest: xxxx.Completions.CompletionCreateParamsNonStreaming = {
- ...modelSettings,
- prompt: rawPromptContent,
- user: this.options.user,
- };
- const start: string_date_iso8601 = getCurrentIsoDate();
- let complete: string_date_iso8601;
-
- if (this.options.isVerbose) {
- console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
- }
- const rawResponse = await this.client.completions.create(rawRequest);
- if (this.options.isVerbose) {
- console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
- }
-
- if (!rawResponse.choices[0]) {
- throw new PipelineExecutionError('No choises from Anthropic Claude');
- }
-
- if (rawResponse.choices.length > 1) {
- // TODO: This should be maybe only warning
- throw new PipelineExecutionError('More than one choise from Anthropic Claude');
- }
-
- const resultContent = rawResponse.choices[0].text;
- // eslint-disable-next-line prefer-const
- complete = getCurrentIsoDate();
- const usage = { price: 'UNKNOWN', inputTokens: 0, outputTokens: 0 /* <- TODO: [🐞] Compute usage * / } satisfies PromptResultUsage;
-
-
-
- return {
- content: resultContent,
- modelName: rawResponse.model || model,
- timing: {
- start,
- complete,
- },
- usage,
- rawResponse,
- // <- [🗯]
- };
- }
- */
- // <- Note: [🤖] callXxxModel
- /**
- * Get the model that should be used as default
- */
- AnthropicClaudeExecutionTools.prototype.getDefaultModel = function (defaultModelName) {
7145
- var model = ANTHROPIC_CLAUDE_MODELS.find(function (_a) {
7146
- var modelName = _a.modelName;
7147
- return modelName.startsWith(defaultModelName);
7148
- });
7149
- if (model === undefined) {
7150
- throw new UnexpectedError(spaceTrim__default["default"](function (block) {
7151
- return "\n Cannot find model in OpenAI models with name \"".concat(defaultModelName, "\" which should be used as default.\n\n Available models:\n ").concat(block(ANTHROPIC_CLAUDE_MODELS.map(function (_a) {
7152
- var modelName = _a.modelName;
7153
- return "- \"".concat(modelName, "\"");
7154
- }).join('\n')), "\n\n ");
7155
- }));
7156
- }
7157
- return model;
7158
- };
7159
- /**
7160
- * Default model for chat variant.
7161
- */
7162
- AnthropicClaudeExecutionTools.prototype.getDefaultChatModel = function () {
7163
- return this.getDefaultModel('claude-3-opus');
7164
- };
7165
- return AnthropicClaudeExecutionTools;
7166
- }());
7167
- /**
7168
- * TODO: [🍆] JSON mode
7169
- * TODO: [🧠] Maybe handle errors via transformAnthropicError (like transformAzureError)
7170
- * TODO: Maybe create some common util for callChatModel and callCompletionModel
7171
- * TODO: Maybe make a custom AnthropicClaudeError
7172
- * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
7173
- * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
7174
- * TODO: [📅] Maybe instead of `RemoteLlmExecutionToolsOptions` use `proxyWithAnonymousRemoteServer` (if implemented)
7175
- */
7176
-
7177
- /**
7178
- * Execution Tools for calling Anthropic Claude API.
7179
- *
7180
- * @public exported from `@promptbook/anthropic-claude`
7181
- */
7182
- var createAnthropicClaudeExecutionTools = Object.assign(function (options) {
7183
- if (options.isProxied) {
7184
- return new RemoteLlmExecutionTools(__assign(__assign({}, options), { isAnonymous: true, llmToolsConfiguration: [
7185
- {
7186
- title: 'Anthropic Claude (proxied)',
7187
- packageName: '@promptbook/anthropic-claude',
7188
- className: 'AnthropicClaudeExecutionTools',
7189
- options: __assign(__assign({}, options), { isProxied: false }),
7190
- },
7191
- ], models: ANTHROPIC_CLAUDE_MODELS }));
7192
- }
7193
- return new AnthropicClaudeExecutionTools(options);
7194
- }, {
7195
- packageName: '@promptbook/anthropic-claude',
7196
- className: 'AnthropicClaudeExecutionTools',
7197
- });
7198
- /**
7199
- * TODO: [🧠] !!!! Make this anonymous mode work with all LLM providers
7200
- * TODO: [🧠][🧱] !!!! Maybe change all `new AnthropicClaudeExecutionTools` -> `createAnthropicClaudeExecutionTools` in manual
7201
- * TODO: [🧠] Maybe auto-detect usage in browser and determine default value of `isProxied`
7202
- * TODO: [🦺] Is there some way how to put `packageName` and `className` on top and function definition on bottom?
7203
- * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
7204
- */
7205
-
7206
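
The factory above branches on `isProxied`: when set, it wraps the same options in a `RemoteLlmExecutionTools` marked `isAnonymous: true`; otherwise it constructs `AnthropicClaudeExecutionTools` directly. A minimal usage sketch, assuming only the names visible in this diff (whatever remote-server options `RemoteLlmExecutionToolsOptions` additionally requires are not shown in this hunk and are elided):

```ts
import { createAnthropicClaudeExecutionTools } from '@promptbook/anthropic-claude';

// Direct (server-side) construction - returns AnthropicClaudeExecutionTools:
const directTools = createAnthropicClaudeExecutionTools({
    apiKey: process.env.ANTHROPIC_CLAUDE_API_KEY!,
    isVerbose: true,
});

// Proxied construction - per the branch above, returns RemoteLlmExecutionTools
// with isAnonymous: true and this provider listed in llmToolsConfiguration:
const proxiedTools = createAnthropicClaudeExecutionTools({
    apiKey: 'sk-ant-...', // <- hypothetical placeholder
    isProxied: true,
    // ...plus the remote-server options required by RemoteLlmExecutionToolsOptions
});
```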
- /**
7207
- * List of available OpenAI models with pricing
7208
- *
7209
- * Note: Last updated 2024-05-20
7210
- *
7211
- * @see https://platform.openai.com/docs/models/
7212
- * @see https://openai.com/api/pricing/
7213
- * @public exported from `@promptbook/openai`
7214
- */
7215
- var OPENAI_MODELS = [
7216
- /*/
7217
- {
7218
- modelTitle: 'dall-e-3',
7219
- modelName: 'dall-e-3',
7220
- },
7221
- /**/
7222
- /*/
7223
- {
7224
- modelTitle: 'whisper-1',
7225
- modelName: 'whisper-1',
7226
- },
7227
- /**/
7228
- /**/
7229
- {
7230
- modelVariant: 'COMPLETION',
7231
- modelTitle: 'davinci-002',
7232
- modelName: 'davinci-002',
7233
- pricing: {
7234
- prompt: computeUsage("$2.00 / 1M tokens"),
7235
- output: computeUsage("$2.00 / 1M tokens"), // <- not sure
7236
- },
7237
- },
7238
- /**/
7239
- /*/
7240
- {
7241
- modelTitle: 'dall-e-2',
7242
- modelName: 'dall-e-2',
7243
- },
7244
- /**/
7245
- /**/
7246
- {
7247
- modelVariant: 'CHAT',
7248
- modelTitle: 'gpt-3.5-turbo-16k',
7249
- modelName: 'gpt-3.5-turbo-16k',
7250
- pricing: {
7251
- prompt: computeUsage("$3.00 / 1M tokens"),
7252
- output: computeUsage("$4.00 / 1M tokens"),
7253
- },
7254
- },
7255
- /**/
7256
- /*/
7257
- {
7258
- modelTitle: 'tts-1-hd-1106',
7259
- modelName: 'tts-1-hd-1106',
7260
- },
7261
- /**/
7262
- /*/
7263
- {
7264
- modelTitle: 'tts-1-hd',
7265
- modelName: 'tts-1-hd',
7266
- },
7267
- /**/
7268
- /**/
7269
- {
7270
- modelVariant: 'CHAT',
7271
- modelTitle: 'gpt-4',
7272
- modelName: 'gpt-4',
7273
- pricing: {
7274
- prompt: computeUsage("$30.00 / 1M tokens"),
7275
- output: computeUsage("$60.00 / 1M tokens"),
7276
- },
7277
- },
7278
- /**/
7279
- /**/
7280
- {
7281
- modelVariant: 'CHAT',
7282
- modelTitle: 'gpt-4-32k',
7283
- modelName: 'gpt-4-32k',
7284
- pricing: {
7285
- prompt: computeUsage("$60.00 / 1M tokens"),
7286
- output: computeUsage("$120.00 / 1M tokens"),
7287
- },
7288
- },
7289
- /**/
7290
- /*/
7291
- {
7292
- modelVariant: 'CHAT',
7293
- modelTitle: 'gpt-4-0613',
7294
- modelName: 'gpt-4-0613',
7295
- pricing: {
7296
- prompt: computeUsage(` / 1M tokens`),
7297
- output: computeUsage(` / 1M tokens`),
7298
- },
7299
- },
7300
- /**/
7301
- /**/
7302
- {
7303
- modelVariant: 'CHAT',
7304
- modelTitle: 'gpt-4-turbo-2024-04-09',
7305
- modelName: 'gpt-4-turbo-2024-04-09',
7306
- pricing: {
7307
- prompt: computeUsage("$10.00 / 1M tokens"),
7308
- output: computeUsage("$30.00 / 1M tokens"),
7309
- },
7310
- },
7311
- /**/
7312
- /**/
7313
- {
7314
- modelVariant: 'CHAT',
7315
- modelTitle: 'gpt-3.5-turbo-1106',
7316
- modelName: 'gpt-3.5-turbo-1106',
7317
- pricing: {
7318
- prompt: computeUsage("$1.00 / 1M tokens"),
7319
- output: computeUsage("$2.00 / 1M tokens"),
7320
- },
7321
- },
7322
- /**/
7323
- /**/
7324
- {
7325
- modelVariant: 'CHAT',
7326
- modelTitle: 'gpt-4-turbo',
7327
- modelName: 'gpt-4-turbo',
7328
- pricing: {
7329
- prompt: computeUsage("$10.00 / 1M tokens"),
7330
- output: computeUsage("$30.00 / 1M tokens"),
7331
- },
7332
- },
7333
- /**/
7334
- /**/
7335
- {
7336
- modelVariant: 'COMPLETION',
7337
- modelTitle: 'gpt-3.5-turbo-instruct-0914',
7338
- modelName: 'gpt-3.5-turbo-instruct-0914',
7339
- pricing: {
7340
- prompt: computeUsage("$1.50 / 1M tokens"),
7341
- output: computeUsage("$2.00 / 1M tokens"), // <- For gpt-3.5-turbo-instruct
7342
- },
7343
- },
7344
- /**/
7345
- /**/
7346
- {
7347
- modelVariant: 'COMPLETION',
7348
- modelTitle: 'gpt-3.5-turbo-instruct',
7349
- modelName: 'gpt-3.5-turbo-instruct',
7350
- pricing: {
7351
- prompt: computeUsage("$1.50 / 1M tokens"),
7352
- output: computeUsage("$2.00 / 1M tokens"),
7353
- },
7354
- },
7355
- /**/
7356
- /*/
7357
- {
7358
- modelTitle: 'tts-1',
7359
- modelName: 'tts-1',
7360
- },
7361
- /**/
7362
- /**/
7363
- {
7364
- modelVariant: 'CHAT',
7365
- modelTitle: 'gpt-3.5-turbo',
7366
- modelName: 'gpt-3.5-turbo',
7367
- pricing: {
7368
- prompt: computeUsage("$3.00 / 1M tokens"),
7369
- output: computeUsage("$6.00 / 1M tokens"), // <- Not sure, refer to gpt-3.5-turbo in Fine-tuning models
7370
- },
7371
- },
7372
- /**/
7373
- /**/
7374
- {
7375
- modelVariant: 'CHAT',
7376
- modelTitle: 'gpt-3.5-turbo-0301',
7377
- modelName: 'gpt-3.5-turbo-0301',
7378
- pricing: {
7379
- prompt: computeUsage("$1.50 / 1M tokens"),
7380
- output: computeUsage("$2.00 / 1M tokens"),
7381
- },
7382
- },
7383
- /**/
7384
- /**/
7385
- {
7386
- modelVariant: 'COMPLETION',
7387
- modelTitle: 'babbage-002',
7388
- modelName: 'babbage-002',
7389
- pricing: {
7390
- prompt: computeUsage("$0.40 / 1M tokens"),
7391
- output: computeUsage("$0.40 / 1M tokens"), // <- Not sure
7392
- },
7393
- },
7394
- /**/
7395
- /**/
7396
- {
7397
- modelVariant: 'CHAT',
7398
- modelTitle: 'gpt-4-1106-preview',
7399
- modelName: 'gpt-4-1106-preview',
7400
- pricing: {
7401
- prompt: computeUsage("$10.00 / 1M tokens"),
7402
- output: computeUsage("$30.00 / 1M tokens"),
7403
- },
7404
- },
7405
- /**/
7406
- /**/
7407
- {
7408
- modelVariant: 'CHAT',
7409
- modelTitle: 'gpt-4-0125-preview',
7410
- modelName: 'gpt-4-0125-preview',
7411
- pricing: {
7412
- prompt: computeUsage("$10.00 / 1M tokens"),
7413
- output: computeUsage("$30.00 / 1M tokens"),
7414
- },
7415
- },
7416
- /**/
7417
- /*/
7418
- {
7419
- modelTitle: 'tts-1-1106',
7420
- modelName: 'tts-1-1106',
7421
- },
7422
- /**/
7423
- /**/
7424
- {
7425
- modelVariant: 'CHAT',
7426
- modelTitle: 'gpt-3.5-turbo-0125',
7427
- modelName: 'gpt-3.5-turbo-0125',
7428
- pricing: {
7429
- prompt: computeUsage("$0.50 / 1M tokens"),
7430
- output: computeUsage("$1.50 / 1M tokens"),
7431
- },
7432
- },
7433
- /**/
7434
- /**/
7435
- {
7436
- modelVariant: 'CHAT',
7437
- modelTitle: 'gpt-4-turbo-preview',
7438
- modelName: 'gpt-4-turbo-preview',
7439
- pricing: {
7440
- prompt: computeUsage("$10.00 / 1M tokens"),
7441
- output: computeUsage("$30.00 / 1M tokens"), // <- Not sure, just for gpt-4-turbo
7442
- },
7443
- },
7444
- /**/
7445
- /**/
7446
- {
7447
- modelVariant: 'EMBEDDING',
7448
- modelTitle: 'text-embedding-3-large',
7449
- modelName: 'text-embedding-3-large',
7450
- pricing: {
7451
- prompt: computeUsage("$0.13 / 1M tokens"),
7452
- // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
7453
- output: 0, // <- Note: [🆖] In Embedding models you don't pay for output
7454
- },
7455
- },
7456
- /**/
7457
- /**/
7458
- {
7459
- modelVariant: 'EMBEDDING',
7460
- modelTitle: 'text-embedding-3-small',
7461
- modelName: 'text-embedding-3-small',
7462
- pricing: {
7463
- prompt: computeUsage("$0.02 / 1M tokens"),
7464
- // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
7465
- output: 0, // <- Note: [🆖] In Embedding models you don't pay for output
7466
- },
7467
- },
7468
- /**/
7469
- /**/
7470
- {
7471
- modelVariant: 'CHAT',
7472
- modelTitle: 'gpt-3.5-turbo-0613',
7473
- modelName: 'gpt-3.5-turbo-0613',
7474
- pricing: {
7475
- prompt: computeUsage("$1.50 / 1M tokens"),
7476
- output: computeUsage("$2.00 / 1M tokens"),
7477
- },
7478
- },
7479
- /**/
7480
- /**/
7481
- {
7482
- modelVariant: 'EMBEDDING',
7483
- modelTitle: 'text-embedding-ada-002',
7484
- modelName: 'text-embedding-ada-002',
7485
- pricing: {
7486
- prompt: computeUsage("$0.1 / 1M tokens"),
7487
- // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
7488
- output: 0, // <- Note: [🆖] In Embedding models you don't pay for output
7489
- },
7490
- },
7491
- /**/
7492
- /*/
7493
- {
7494
- modelVariant: 'CHAT',
7495
- modelTitle: 'gpt-4-1106-vision-preview',
7496
- modelName: 'gpt-4-1106-vision-preview',
7497
- },
7498
- /**/
7499
- /*/
7500
- {
7501
- modelVariant: 'CHAT',
7502
- modelTitle: 'gpt-4-vision-preview',
7503
- modelName: 'gpt-4-vision-preview',
7504
- pricing: {
7505
- prompt: computeUsage(`$10.00 / 1M tokens`),
7506
- output: computeUsage(`$30.00 / 1M tokens`),
7507
- },
7508
- },
7509
- /**/
7510
- /**/
7511
- {
7512
- modelVariant: 'CHAT',
7513
- modelTitle: 'gpt-4o-2024-05-13',
7514
- modelName: 'gpt-4o-2024-05-13',
7515
- pricing: {
7516
- prompt: computeUsage("$5.00 / 1M tokens"),
7517
- output: computeUsage("$15.00 / 1M tokens"),
7518
- },
7519
- },
7520
- /**/
7521
- /**/
7522
- {
7523
- modelVariant: 'CHAT',
7524
- modelTitle: 'gpt-4o',
7525
- modelName: 'gpt-4o',
7526
- pricing: {
7527
- prompt: computeUsage("$5.00 / 1M tokens"),
7528
- output: computeUsage("$15.00 / 1M tokens"),
7529
- },
7530
- },
7531
- /**/
7532
- /**/
7533
- {
7534
- modelVariant: 'CHAT',
7535
- modelTitle: 'gpt-3.5-turbo-16k-0613',
7536
- modelName: 'gpt-3.5-turbo-16k-0613',
7537
- pricing: {
7538
- prompt: computeUsage("$3.00 / 1M tokens"),
7539
- output: computeUsage("$4.00 / 1M tokens"),
7540
- },
7541
- },
7542
- /**/
7543
- ];
7544
- /**
7545
- * Note: [🤖] Add models of new variant
7546
- * TODO: [🧠] Some mechanism to propagate uncertainty
7547
- * TODO: [🎰] Some mechanism to auto-update available models
7548
- * TODO: [🎰][👮‍♀️] Make this list dynamic - modelNames can be listed dynamically, but not modelVariant, legacy status, context length, or pricing
7549
- * TODO: [🧠][👮‍♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths (top-level performance, intelligence, fluency, and understanding), contextWindow, ...
7550
- * @see https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4
7551
- * @see https://openai.com/api/pricing/
7552
- * @see /other/playground/playground.ts
7553
- * TODO: [🍓] Make better
7554
- * TODO: Change model titles to human-readable ones, e.g. "gpt-4-turbo-2024-04-09" -> "GPT-4 Turbo (2024-04-09)"
7555
- * TODO: [🚸] Not all models are compatible with JSON mode, add this information here and use it
7556
- */
7557
-
7558
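
Each `pricing` entry above is produced by `computeUsage("$X / 1M tokens")`. The function itself is not part of this hunk, but since `computeOpenAiUsage` below multiplies raw token counts by `pricing.prompt` and `pricing.output` directly, it presumably parses the price string into dollars per single token. A hypothetical re-implementation of that assumption:

```ts
// Illustration only - the real computeUsage is not shown in this diff.
// "$5.00 / 1M tokens" -> 0.000005 (dollars per token)
function computeUsage(priceString: string): number {
    const match = /^\$(?<dollars>[0-9.]+) \/ (?<amount>[0-9]+)(?<unit>[MK]?) tokens$/.exec(priceString.trim());
    if (match === null) {
        throw new Error(`Cannot parse price string "${priceString}"`);
    }
    const { dollars, amount, unit } = match.groups!;
    const tokens = Number(amount) * (unit === 'M' ? 1_000_000 : unit === 'K' ? 1_000 : 1);
    return Number(dollars) / tokens;
}

console.log(computeUsage('$15.00 / 1M tokens')); // -> 0.000015
```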
- /**
7559
- * Execution Tools for calling Azure OpenAI API.
7560
- *
7561
- * @public exported from `@promptbook/azure-openai`
7562
- */
7563
- var AzureOpenAiExecutionTools = /** @class */ (function () {
7564
- /**
7565
- * Creates Azure OpenAI Execution Tools.
7566
- *
7567
- * @param options Options; the relevant ones are passed directly to the Azure OpenAI client
7568
- */
7569
- function AzureOpenAiExecutionTools(options) {
7570
- this.options = options;
7571
- /**
7572
- * Azure OpenAI API client.
7573
- */
7574
- this.client = null;
7575
- }
7576
- Object.defineProperty(AzureOpenAiExecutionTools.prototype, "title", {
7577
- get: function () {
7578
- return 'Azure OpenAI';
7579
- },
7580
- enumerable: false,
7581
- configurable: true
7582
- });
7583
- Object.defineProperty(AzureOpenAiExecutionTools.prototype, "description", {
7584
- get: function () {
7585
- return 'Use all models trained by OpenAI, provided by Azure';
7586
- },
7587
- enumerable: false,
7588
- configurable: true
7589
- });
7590
- AzureOpenAiExecutionTools.prototype.getClient = function () {
7591
- return __awaiter(this, void 0, void 0, function () {
7592
- return __generator(this, function (_a) {
7593
- if (this.client === null) {
7594
- this.client = new openai.OpenAIClient("https://".concat(this.options.resourceName, ".openai.azure.com/"), new openai.AzureKeyCredential(this.options.apiKey));
7595
- }
7596
- return [2 /*return*/, this.client];
7597
- });
7598
- });
7599
- };
7600
- /**
7601
- * Check the `options` passed to `constructor`
7602
- */
7603
- AzureOpenAiExecutionTools.prototype.checkConfiguration = function () {
7604
- return __awaiter(this, void 0, void 0, function () {
7605
- return __generator(this, function (_a) {
7606
- switch (_a.label) {
7607
- case 0: return [4 /*yield*/, this.getClient()];
7608
- case 1:
7609
- _a.sent();
7610
- return [2 /*return*/];
7611
- }
7612
- });
7613
- });
7614
- };
7615
- /**
7616
- * List all available Azure OpenAI models that can be used
7617
- */
7618
- AzureOpenAiExecutionTools.prototype.listModels = function () {
7619
- return __awaiter(this, void 0, void 0, function () {
7620
- return __generator(this, function (_a) {
7621
- // TODO: !!! Do some filtering here for which models are really available as deployments
7622
- // @see https://management.azure.com/subscriptions/subscriptionId/resourceGroups/resourceGroupName/providers/Microsoft.CognitiveServices/accounts/accountName/deployments?api-version=2023-05-01
7623
- return [2 /*return*/, OPENAI_MODELS.map(function (_a) {
7624
- var modelTitle = _a.modelTitle, modelName = _a.modelName, modelVariant = _a.modelVariant;
7625
- return ({
7626
- modelTitle: "Azure ".concat(modelTitle),
7627
- modelName: modelName,
7628
- modelVariant: modelVariant,
7629
- });
7630
- })];
7631
- });
7632
- });
7633
- };
7634
- /**
7635
- * Calls OpenAI API to use a chat model.
7636
- */
7637
- AzureOpenAiExecutionTools.prototype.callChatModel = function (prompt) {
7638
- var _a, _b;
7639
- return __awaiter(this, void 0, void 0, function () {
7640
- var content, parameters, modelRequirements, client, modelName, modelSettings, rawPromptContent, messages, start, complete, rawRequest, rawResponse, resultContent, usage, error_1;
7641
- return __generator(this, function (_c) {
7642
- switch (_c.label) {
7643
- case 0:
7644
- if (this.options.isVerbose) {
7645
- console.info('💬 Azure OpenAI callChatModel call');
7646
- }
7647
- content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
7648
- return [4 /*yield*/, this.getClient()];
7649
- case 1:
7650
- client = _c.sent();
7651
- // TODO: [☂] Use here more modelRequirements
7652
- if (modelRequirements.modelVariant !== 'CHAT') {
7653
- throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
7654
- }
7655
- _c.label = 2;
7656
- case 2:
7657
- _c.trys.push([2, 4, , 5]);
7658
- modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
7659
- modelSettings = {
7660
- maxTokens: modelRequirements.maxTokens,
7661
- // <- TODO: [🌾] Make some global max cap for maxTokens
7662
- temperature: modelRequirements.temperature,
7663
- user: this.options.user,
7664
- // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
7665
- // <- Note: [🧆]
7666
- };
7667
- rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
7668
- messages = __spreadArray(__spreadArray([], __read((modelRequirements.systemMessage === undefined
7669
- ? []
7670
- : [
7671
- {
7672
- role: 'system',
7673
- content: modelRequirements.systemMessage,
7674
- },
7675
- ])), false), [
7676
- {
7677
- role: 'user',
7678
- content: rawPromptContent,
7679
- },
7680
- ], false);
7681
- start = getCurrentIsoDate();
7682
- complete = void 0;
7683
- if (this.options.isVerbose) {
7684
- console.info(colors__default["default"].bgWhite('messages'), JSON.stringify(messages, null, 4));
7685
- }
7686
- rawRequest = [modelName, messages, modelSettings];
7687
- return [4 /*yield*/, client.getChatCompletions.apply(client, __spreadArray([], __read(rawRequest), false))];
7688
- case 3:
7689
- rawResponse = _c.sent();
7690
- if (this.options.isVerbose) {
7691
- console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
7692
- }
7693
- if (!rawResponse.choices[0]) {
7694
- throw new PipelineExecutionError('No choices from Azure OpenAI');
7695
- }
7696
- if (rawResponse.choices.length > 1) {
7697
- // TODO: This should maybe be only a warning
7698
- throw new PipelineExecutionError('More than one choice from Azure OpenAI');
7699
- }
7700
- if (!rawResponse.choices[0].message || !rawResponse.choices[0].message.content) {
7701
- throw new PipelineExecutionError('Empty response from Azure OpenAI');
7702
- }
7703
- resultContent = rawResponse.choices[0].message.content;
7704
- // eslint-disable-next-line prefer-const
7705
- complete = getCurrentIsoDate();
7706
- usage = {
7707
- price: uncertainNumber() /* <- TODO: [🐞] Compute usage */,
7708
- input: __assign({ tokensCount: uncertainNumber((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.promptTokens) }, computeUsageCounts(prompt.content)),
7709
- output: __assign({ tokensCount: uncertainNumber((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completionTokens) }, computeUsageCounts(resultContent)),
7710
- };
7711
- return [2 /*return*/, {
7712
- content: resultContent,
7713
- modelName: modelName,
7714
- timing: {
7715
- start: start,
7716
- complete: complete,
7717
- },
7718
- usage: usage,
7719
- rawPromptContent: rawPromptContent,
7720
- rawRequest: rawRequest,
7721
- rawResponse: rawResponse,
7722
- // <- [🗯]
7723
- }];
7724
- case 4:
7725
- error_1 = _c.sent();
7726
- throw this.transformAzureError(error_1);
7727
- case 5: return [2 /*return*/];
7728
- }
7729
- });
7730
- });
7731
- };
7732
- /**
7733
- * Calls Azure OpenAI API to use a completion model.
7734
- */
7735
- AzureOpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
7736
- var _a, _b;
7737
- return __awaiter(this, void 0, void 0, function () {
7738
- var content, parameters, modelRequirements, client, modelName, modelSettings, start, complete, rawPromptContent, rawRequest, rawResponse, resultContent, usage, error_2;
7739
- return __generator(this, function (_c) {
7740
- switch (_c.label) {
7741
- case 0:
7742
- if (this.options.isVerbose) {
7743
- console.info('🖋 Azure OpenAI callCompletionModel call');
7744
- }
7745
- content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
7746
- return [4 /*yield*/, this.getClient()];
7747
- case 1:
7748
- client = _c.sent();
7749
- // TODO: [☂] Use here more modelRequirements
7750
- if (modelRequirements.modelVariant !== 'COMPLETION') {
7751
- throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
7752
- }
7753
- _c.label = 2;
7754
- case 2:
7755
- _c.trys.push([2, 4, , 5]);
7756
- modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
7757
- modelSettings = {
7758
- maxTokens: modelRequirements.maxTokens || 2000,
7759
- // <- TODO: [🌾] Make some global max cap for maxTokens
7760
- temperature: modelRequirements.temperature,
7761
- user: this.options.user,
7762
- // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
7763
- // <- Note: [🧆]
7764
- };
7765
- start = getCurrentIsoDate();
7766
- complete = void 0;
7767
- if (this.options.isVerbose) {
7768
- console.info(colors__default["default"].bgWhite('content'), JSON.stringify(content, null, 4));
7769
- console.info(colors__default["default"].bgWhite('parameters'), JSON.stringify(parameters, null, 4));
7770
- }
7771
- rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
7772
- rawRequest = [
7773
- modelName,
7774
- [rawPromptContent],
7775
- modelSettings,
7776
- ];
7777
- return [4 /*yield*/, client.getCompletions.apply(client, __spreadArray([], __read(rawRequest), false))];
7778
- case 3:
7779
- rawResponse = _c.sent();
7780
- if (this.options.isVerbose) {
7781
- console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
7782
- }
7783
- if (!rawResponse.choices[0]) {
7784
- throw new PipelineExecutionError('No choices from Azure OpenAI');
7785
- }
7786
- if (rawResponse.choices.length > 1) {
7787
- // TODO: This should maybe be only a warning
7788
- throw new PipelineExecutionError('More than one choice from Azure OpenAI');
7789
- }
7790
- resultContent = rawResponse.choices[0].text;
7791
- // eslint-disable-next-line prefer-const
7792
- complete = getCurrentIsoDate();
7793
- usage = {
7794
- price: uncertainNumber() /* <- TODO: [🐞] Compute usage */,
7795
- input: __assign({ tokensCount: uncertainNumber((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.promptTokens) }, computeUsageCounts(prompt.content)),
7796
- output: __assign({ tokensCount: uncertainNumber((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completionTokens) }, computeUsageCounts(resultContent)),
7797
- };
7798
- return [2 /*return*/, {
7799
- content: resultContent,
7800
- modelName: modelName,
7801
- timing: {
7802
- start: start,
7803
- complete: complete,
7804
- },
7805
- usage: usage,
7806
- rawPromptContent: rawPromptContent,
7807
- rawRequest: rawRequest,
7808
- rawResponse: rawResponse,
7809
- // <- [🗯]
7810
- }];
7811
- case 4:
7812
- error_2 = _c.sent();
7813
- throw this.transformAzureError(error_2);
7814
- case 5: return [2 /*return*/];
7815
- }
7816
- });
7817
- });
7818
- };
7819
- // <- Note: [🤖] callXxxModel
7820
- /**
7821
- * Converts an Azure error (which is not a proper Error but a plain object) into a proper Error
7822
- */
7823
- AzureOpenAiExecutionTools.prototype.transformAzureError = function (azureError) {
7824
- if (typeof azureError !== 'object' || azureError === null) {
7825
- return new PipelineExecutionError("Unknown Azure OpenAI error");
7826
- }
7827
- var code = azureError.code, message = azureError.message;
7828
- return new PipelineExecutionError("".concat(code, ": ").concat(message));
7829
- };
7830
- return AzureOpenAiExecutionTools;
7831
- }());
7832
- /**
7833
- * TODO: Maybe create some common util for callChatModel and callCompletionModel
7834
- * TODO: Maybe make a custom AzureOpenAiError
7835
- * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
7836
- * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
7837
- */
7838
-
7839
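
To make the wiring above concrete: `getClient()` derives the endpoint from `resourceName` as `https://<resourceName>.openai.azure.com/`, and both call methods fall back to `options.deploymentName` when the prompt does not pin a `modelName`. A construction sketch with hypothetical resource and deployment names:

```ts
import { AzureOpenAiExecutionTools } from '@promptbook/azure-openai';

const azureTools = new AzureOpenAiExecutionTools({
    resourceName: 'my-resource', // -> https://my-resource.openai.azure.com/
    deploymentName: 'gpt-4o', // <- fallback when prompt.modelRequirements.modelName is unset
    apiKey: process.env.AZURE_OPENAI_API_KEY!,
    isVerbose: true,
});

// Instantiates the underlying OpenAIClient lazily and verifies the options:
await azureTools.checkConfiguration();
```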
- /**
7840
- * Computes the usage of the OpenAI API based on the response from OpenAI
7841
- *
7842
- * @param promptContent The content of the prompt
7843
- * @param resultContent The content of the result (for embedding prompts or failed prompts pass empty string)
7844
- * @param rawResponse The raw response from OpenAI API
7845
- * @throws {PipelineExecutionError} If the usage is not defined in the response from OpenAI
7846
- * @private internal utility of `OpenAiExecutionTools`
7847
- */
7848
- function computeOpenAiUsage(promptContent, // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
7849
- resultContent, rawResponse) {
7850
- var _a, _b;
7851
- if (rawResponse.usage === undefined) {
7852
- throw new PipelineExecutionError('The usage is not defined in the response from OpenAI');
7853
- }
7854
- if (((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.prompt_tokens) === undefined) {
7855
- throw new PipelineExecutionError('In OpenAI response `usage.prompt_tokens` not defined');
7856
- }
7857
- var inputTokens = rawResponse.usage.prompt_tokens;
7858
- var outputTokens = ((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completion_tokens) || 0;
7859
- var modelInfo = OPENAI_MODELS.find(function (model) { return model.modelName === rawResponse.model; });
7860
- var price;
7861
- if (modelInfo === undefined || modelInfo.pricing === undefined) {
7862
- price = uncertainNumber();
7863
- }
7864
- else {
7865
- price = uncertainNumber(inputTokens * modelInfo.pricing.prompt + outputTokens * modelInfo.pricing.output);
7866
- }
7867
- return {
7868
- price: price,
7869
- input: __assign({ tokensCount: uncertainNumber(rawResponse.usage.prompt_tokens) }, computeUsageCounts(promptContent)),
7870
- output: __assign({ tokensCount: uncertainNumber(outputTokens) }, computeUsageCounts(resultContent)),
7871
- };
7872
- }
7873
- /**
7874
- * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
7875
- */
7876
-
7877
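
In numbers, the pricing branch above computes `inputTokens * pricing.prompt + outputTokens * pricing.output`. Taking the `gpt-4o` row from the model table ($5.00 prompt / $15.00 output per 1M tokens):

```ts
const promptRate = 5.0 / 1_000_000; // gpt-4o prompt price per token
const outputRate = 15.0 / 1_000_000; // gpt-4o output price per token

const inputTokens = 1_000;
const outputTokens = 500;

// 1000 * 0.000005 + 500 * 0.000015 = 0.005 + 0.0075
const price = inputTokens * promptRate + outputTokens * outputRate;
console.log(price); // -> 0.0125 dollars for this call
```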
- /**
7878
- * Execution Tools for calling OpenAI API
7879
- *
7880
- * @public exported from `@promptbook/openai`
7881
- */
7882
- var OpenAiExecutionTools = /** @class */ (function () {
7883
- /**
7884
- * Creates OpenAI Execution Tools.
7885
- *
7886
- * @param options Options; the relevant ones are passed directly to the OpenAI client
7887
- */
7888
- function OpenAiExecutionTools(options) {
7889
- if (options === void 0) { options = {}; }
7890
- this.options = options;
7891
- /**
7892
- * OpenAI API client.
7893
- */
7894
- this.client = null;
7895
- }
7896
- Object.defineProperty(OpenAiExecutionTools.prototype, "title", {
7897
- get: function () {
7898
- return 'OpenAI';
7899
- },
7900
- enumerable: false,
7901
- configurable: true
7902
- });
7903
- Object.defineProperty(OpenAiExecutionTools.prototype, "description", {
7904
- get: function () {
7905
- return 'Use all models provided by OpenAI';
7906
- },
7907
- enumerable: false,
7908
- configurable: true
7909
- });
7910
- OpenAiExecutionTools.prototype.getClient = function () {
7911
- return __awaiter(this, void 0, void 0, function () {
7912
- var openAiOptions;
7913
- return __generator(this, function (_a) {
7914
- if (this.client === null) {
7915
- openAiOptions = __assign({}, this.options);
7916
- delete openAiOptions.isVerbose;
7917
- delete openAiOptions.user;
7918
- this.client = new OpenAI__default["default"](__assign({}, openAiOptions));
7919
- }
7920
- return [2 /*return*/, this.client];
7921
- });
7922
- });
7923
- };
7924
- /**
7925
- * Check the `options` passed to `constructor`
7926
- */
7927
- OpenAiExecutionTools.prototype.checkConfiguration = function () {
7928
- return __awaiter(this, void 0, void 0, function () {
7929
- return __generator(this, function (_a) {
7930
- switch (_a.label) {
7931
- case 0: return [4 /*yield*/, this.getClient()];
7932
- case 1:
7933
- _a.sent();
7934
- return [2 /*return*/];
7935
- }
7936
- });
7937
- });
7938
- };
7939
- /**
7940
- * List all available OpenAI models that can be used
7941
- */
7942
- OpenAiExecutionTools.prototype.listModels = function () {
7943
- /*
7944
- Note: Dynamic listing of the models
7945
- const models = await this.openai.models.list({});
7946
-
7947
- console.log({ models });
7948
- console.log(models.data);
7949
- */
7950
- return OPENAI_MODELS;
7951
- };
7952
- /**
7953
- * Calls OpenAI API to use a chat model.
7954
- */
7955
- OpenAiExecutionTools.prototype.callChatModel = function (prompt) {
7956
- return __awaiter(this, void 0, void 0, function () {
7957
- var content, parameters, modelRequirements, expectFormat, client, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
7958
- return __generator(this, function (_a) {
7959
- switch (_a.label) {
7960
- case 0:
7961
- if (this.options.isVerbose) {
7962
- console.info('💬 OpenAI callChatModel call', { prompt: prompt });
7963
- }
7964
- content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements, expectFormat = prompt.expectFormat;
7965
- return [4 /*yield*/, this.getClient()];
7966
- case 1:
7967
- client = _a.sent();
7968
- // TODO: [☂] Use here more modelRequirements
7969
- if (modelRequirements.modelVariant !== 'CHAT') {
7970
- throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
7971
- }
7972
- modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
7973
- modelSettings = {
7974
- model: modelName,
7975
- max_tokens: modelRequirements.maxTokens,
7976
- // <- TODO: [🌾] Make some global max cap for maxTokens
7977
- temperature: modelRequirements.temperature,
7978
- // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
7979
- // <- Note: [🧆]
7980
- };
7981
- if (expectFormat === 'JSON') {
7982
- modelSettings.response_format = {
7983
- type: 'json_object',
7984
- };
7985
- }
7986
- rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
7987
- rawRequest = __assign(__assign({}, modelSettings), { messages: __spreadArray(__spreadArray([], __read((modelRequirements.systemMessage === undefined
7988
- ? []
7989
- : [
7990
- {
7991
- role: 'system',
7992
- content: modelRequirements.systemMessage,
7993
- },
7994
- ])), false), [
7995
- {
7996
- role: 'user',
7997
- content: rawPromptContent,
7998
- },
7999
- ], false), user: this.options.user });
8000
- start = getCurrentIsoDate();
8001
- if (this.options.isVerbose) {
8002
- console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
8003
- }
8004
- return [4 /*yield*/, client.chat.completions.create(rawRequest)];
8005
- case 2:
8006
- rawResponse = _a.sent();
8007
- if (this.options.isVerbose) {
8008
- console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
8009
- }
8010
- if (!rawResponse.choices[0]) {
8011
- throw new PipelineExecutionError('No choices from OpenAI');
8012
- }
8013
- if (rawResponse.choices.length > 1) {
8014
- // TODO: This should maybe be only a warning
8015
- throw new PipelineExecutionError('More than one choice from OpenAI');
8016
- }
8017
- resultContent = rawResponse.choices[0].message.content;
8018
- // eslint-disable-next-line prefer-const
8019
- complete = getCurrentIsoDate();
8020
- usage = computeOpenAiUsage(content, resultContent || '', rawResponse);
8021
- if (resultContent === null) {
8022
- throw new PipelineExecutionError('No response message from OpenAI');
8023
- }
8024
- return [2 /*return*/, {
8025
- content: resultContent,
8026
- modelName: rawResponse.model || modelName,
8027
- timing: {
8028
- start: start,
8029
- complete: complete,
8030
- },
8031
- usage: usage,
8032
- rawPromptContent: rawPromptContent,
8033
- rawRequest: rawRequest,
8034
- rawResponse: rawResponse,
8035
- // <- [🗯]
8036
- }];
8037
- }
8038
- });
8039
- });
8040
- };
8041
- /**
8042
- * Calls OpenAI API to use a completion model.
8043
- */
8044
- OpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
8045
- return __awaiter(this, void 0, void 0, function () {
8046
- var content, parameters, modelRequirements, client, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
8047
- return __generator(this, function (_a) {
8048
- switch (_a.label) {
8049
- case 0:
8050
- if (this.options.isVerbose) {
8051
- console.info('🖋 OpenAI callCompletionModel call', { prompt: prompt });
8052
- }
8053
- content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
8054
- return [4 /*yield*/, this.getClient()];
8055
- case 1:
8056
- client = _a.sent();
8057
- // TODO: [☂] Use here more modelRequirements
8058
- if (modelRequirements.modelVariant !== 'COMPLETION') {
8059
- throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
8060
- }
8061
- modelName = modelRequirements.modelName || this.getDefaultCompletionModel().modelName;
8062
- modelSettings = {
8063
- model: modelName,
8064
- max_tokens: modelRequirements.maxTokens || 2000,
8065
- // <- TODO: [🌾] Make some global max cap for maxTokens
8066
- temperature: modelRequirements.temperature,
8067
- // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
8068
- // <- Note: [🧆]
8069
- };
8070
- rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
8071
- rawRequest = __assign(__assign({}, modelSettings), { prompt: rawPromptContent, user: this.options.user });
8072
- start = getCurrentIsoDate();
8073
- if (this.options.isVerbose) {
8074
- console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
8075
- }
8076
- return [4 /*yield*/, client.completions.create(rawRequest)];
8077
- case 2:
8078
- rawResponse = _a.sent();
8079
- if (this.options.isVerbose) {
8080
- console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
8081
- }
8082
- if (!rawResponse.choices[0]) {
8083
- throw new PipelineExecutionError('No choices from OpenAI');
8084
- }
8085
- if (rawResponse.choices.length > 1) {
8086
- // TODO: This should maybe be only a warning
8087
- throw new PipelineExecutionError('More than one choice from OpenAI');
8088
- }
8089
- resultContent = rawResponse.choices[0].text;
8090
- // eslint-disable-next-line prefer-const
8091
- complete = getCurrentIsoDate();
8092
- usage = computeOpenAiUsage(content, resultContent || '', rawResponse);
8093
- return [2 /*return*/, {
8094
- content: resultContent,
8095
- modelName: rawResponse.model || modelName,
8096
- timing: {
8097
- start: start,
8098
- complete: complete,
8099
- },
8100
- usage: usage,
8101
- rawPromptContent: rawPromptContent,
8102
- rawRequest: rawRequest,
8103
- rawResponse: rawResponse,
8104
- // <- [🗯]
8105
- }];
8106
- }
8107
- });
8108
- });
8109
- };
8110
- /**
8111
- * Calls OpenAI API to use an embedding model
8112
- */
8113
- OpenAiExecutionTools.prototype.callEmbeddingModel = function (prompt) {
8114
- return __awaiter(this, void 0, void 0, function () {
8115
- var content, parameters, modelRequirements, client, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
8116
- return __generator(this, function (_a) {
8117
- switch (_a.label) {
8118
- case 0:
8119
- if (this.options.isVerbose) {
8120
- console.info('🖋 OpenAI embedding call', { prompt: prompt });
8121
- }
8122
- content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
8123
- return [4 /*yield*/, this.getClient()];
8124
- case 1:
8125
- client = _a.sent();
8126
- // TODO: [☂] Use here more modelRequirements
8127
- if (modelRequirements.modelVariant !== 'EMBEDDING') {
8128
- throw new PipelineExecutionError('Use callEmbeddingModel only for EMBEDDING variant');
8129
- }
8130
- modelName = modelRequirements.modelName || this.getDefaultEmbeddingModel().modelName;
8131
- rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
8132
- rawRequest = {
8133
- input: rawPromptContent,
8134
- model: modelName,
8135
- };
8136
- start = getCurrentIsoDate();
8137
- if (this.options.isVerbose) {
8138
- console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
8139
- }
8140
- return [4 /*yield*/, client.embeddings.create(rawRequest)];
8141
- case 2:
8142
- rawResponse = _a.sent();
8143
- if (this.options.isVerbose) {
8144
- console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
8145
- }
8146
- if (rawResponse.data.length !== 1) {
8147
- throw new PipelineExecutionError("Expected exactly 1 data item in response, got ".concat(rawResponse.data.length));
8148
- }
8149
- resultContent = rawResponse.data[0].embedding;
8150
- // eslint-disable-next-line prefer-const
8151
- complete = getCurrentIsoDate();
8152
- usage = computeOpenAiUsage(content, '', rawResponse);
8153
- return [2 /*return*/, {
8154
- content: resultContent,
8155
- modelName: rawResponse.model || modelName,
8156
- timing: {
8157
- start: start,
8158
- complete: complete,
8159
- },
8160
- usage: usage,
8161
- rawPromptContent: rawPromptContent,
8162
- rawRequest: rawRequest,
8163
- rawResponse: rawResponse,
8164
- // <- [🗯]
8165
- }];
8166
- }
8167
- });
8168
- });
8169
- };
8170
- // <- Note: [🤖] callXxxModel
8171
- /**
8172
- * Get the model that should be used as default
8173
- */
8174
- OpenAiExecutionTools.prototype.getDefaultModel = function (defaultModelName) {
8175
- var model = OPENAI_MODELS.find(function (_a) {
8176
- var modelName = _a.modelName;
8177
- return modelName === defaultModelName;
8178
- });
8179
- if (model === undefined) {
8180
- throw new UnexpectedError(spaceTrim__default["default"](function (block) {
8181
- return "\n Cannot find model in OpenAI models with name \"".concat(defaultModelName, "\" which should be used as default.\n\n Available models:\n ").concat(block(OPENAI_MODELS.map(function (_a) {
8182
- var modelName = _a.modelName;
8183
- return "- \"".concat(modelName, "\"");
8184
- }).join('\n')), "\n\n ");
8185
- }));
8186
- }
8187
- return model;
8188
- };
8189
- /**
8190
- * Default model for chat variant.
8191
- */
8192
- OpenAiExecutionTools.prototype.getDefaultChatModel = function () {
8193
- return this.getDefaultModel('gpt-4o');
8194
- };
8195
- /**
8196
- * Default model for completion variant.
8197
- */
8198
- OpenAiExecutionTools.prototype.getDefaultCompletionModel = function () {
8199
- return this.getDefaultModel('gpt-3.5-turbo-instruct');
8200
- };
8201
- /**
8202
- * Default model for embedding variant.
8203
- */
8204
- OpenAiExecutionTools.prototype.getDefaultEmbeddingModel = function () {
8205
- return this.getDefaultModel('text-embedding-3-large');
8206
- };
8207
- return OpenAiExecutionTools;
8208
- }());
8209
- /**
8210
- * TODO: [🧠][🧙‍♂️] Maybe there can be some wizard for those who want to use just OpenAI
8211
- * TODO: Maybe create some common util for callChatModel and callCompletionModel
8212
- * TODO: Maybe make a custom OpenAiError
8213
- * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
8214
- * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
8215
- */
8216
-
8217
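
A call sketch for the class above. The prompt shape follows the destructuring in `callChatModel` (`content`, `parameters`, `modelRequirements`, `expectFormat`); any further fields of the full `Prompt` type are omitted here, and the `{topic}` syntax assumes `replaceParameters` uses curly-brace placeholders:

```ts
import { OpenAiExecutionTools } from '@promptbook/openai';

const tools = new OpenAiExecutionTools({
    apiKey: process.env.OPENAI_API_KEY!,
    isVerbose: true,
});

const result = await tools.callChatModel({
    content: 'Write a haiku about {topic}',
    parameters: { topic: 'autumn' }, // <- substituted by replaceParameters
    modelRequirements: {
        modelVariant: 'CHAT',
        systemMessage: 'You are a terse poet.',
        temperature: 0.7,
        // modelName omitted -> getDefaultChatModel() resolves to 'gpt-4o'
    },
});

console.log(result.content);
console.log(result.usage); // <- computed by computeOpenAiUsage above
```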
- /**
8218
- * Execution Tools for calling OpenAI API
8219
- *
8220
- * @public exported from `@promptbook/openai`
8221
- */
8222
- var createOpenAiExecutionTools = Object.assign(function (options) {
8223
- // TODO: !!!!!! If in browser, auto-add `dangerouslyAllowBrowser`
8224
- return new OpenAiExecutionTools(options);
8225
- }, {
8226
- packageName: '@promptbook/openai',
8227
- className: 'OpenAiExecutionTools',
8228
- });
8229
- /**
8230
- * TODO: [🦺] Is there some way how to put `packageName` and `className` on top and function definition on bottom?
8231
- * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
8232
- */
8233
-
8234
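
The `!!!!!!` TODO above concerns the official `openai` client, which refuses to run in browsers unless `dangerouslyAllowBrowser: true` is passed. Since `OpenAiExecutionTools` forwards its options to that client (minus `isVerbose` and `user`), a browser caller would presumably pass the flag through until the factory auto-detects the environment:

```ts
// Browser sketch - dangerouslyAllowBrowser is a flag of the `openai` package;
// exposing an API key in the browser is, of course, its own risk.
const browserTools = createOpenAiExecutionTools({
    apiKey: 'sk-...', // <- hypothetical placeholder
    dangerouslyAllowBrowser: true,
});
```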
- /**
8235
- * @@@
8236
- *
8237
- * TODO: !!!!!! Remove EXECUTION_TOOLS_CLASSES and use $llmToolsRegister instead
8238
- *
8239
- * @private internal type for `createLlmToolsFromConfiguration`
8240
- */
8241
- var EXECUTION_TOOLS_CLASSES = {
8242
- createOpenAiExecutionTools: createOpenAiExecutionTools,
8243
- createAnthropicClaudeExecutionTools: createAnthropicClaudeExecutionTools,
8244
- createAzureOpenAiExecutionTools: function (options) {
8245
- return new AzureOpenAiExecutionTools(
8246
- // <- TODO: [🧱] Implement in a functional (not new Class) way
8247
- options);
8248
- },
8249
- // <- Note: [🦑] Add here new LLM provider
8250
- };
8251
- /**
8252
- * TODO: !!!!!!! Make a global register for this
8253
- * TODO: [🧠][🎌] Adding this should be the responsibility of each provider package, NOT this one central place
8254
- */
8255
-
8256
- /**
8257
- * @@@
8258
- *
8259
- * Note: This function is not cached; every call creates a new instance of `MultipleLlmExecutionTools`
8260
- *
8261
- * @returns @@@
8262
- * @public exported from `@promptbook/core`
8263
- */
8264
- function createLlmToolsFromConfiguration(configuration, options) {
8265
- if (options === void 0) { options = {}; }
8266
- var _a = options.isVerbose, isVerbose = _a === void 0 ? false : _a;
8267
- var llmTools = configuration.map(function (llmConfiguration) {
8268
- var constructor = EXECUTION_TOOLS_CLASSES["create".concat(llmConfiguration.className)];
8269
- if (!constructor) {
8270
- throw new Error(spaceTrim__default["default"](function (block) { return "\n There is no constructor for LLM provider `".concat(llmConfiguration.className, "`\n\n\n @@@\n\n Available constructors are:\n ").concat(block('@@@'), "\n\n\n "); }));
6739
+ if (registeredItem === undefined) {
6740
+ throw new Error(spaceTrim__default["default"](function (block) { return "\n There is no constructor for LLM provider `".concat(llmConfiguration.className, "` from `").concat(llmConfiguration.packageName, "`\n\n You have probably forgotten to install and import the provider package.\n To fix this issue, you can:\n\n Install:\n\n > npm install ").concat(llmConfiguration.packageName, "\n\n And import:\n\n > import '").concat(llmConfiguration.packageName, "';\n\n\n ").concat(block($registeredLlmToolsMessage()), "\n "); }));
8271
6741
  }
8272
- return constructor(__assign({ isVerbose: isVerbose }, llmConfiguration.options));
6742
+ return registeredItem(__assign({ isVerbose: isVerbose }, llmConfiguration.options));
8273
6743
  });
8274
6744
  return joinLlmExecutionTools.apply(void 0, __spreadArray([], __read(llmTools), false));
8275
6745
  }
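
A configuration sketch for the function above. The entry shape (`title`, `packageName`, `className`, `options`) mirrors the env-derived configuration visible in the next hunk, and per the new error message the provider package must be installed and imported so it lands in the register:

```ts
import { createLlmToolsFromConfiguration } from '@promptbook/core';
import '@promptbook/openai'; // <- side-effect import registers OpenAiExecutionTools

const llmTools = createLlmToolsFromConfiguration(
    [
        {
            title: 'OpenAI (main)',
            packageName: '@promptbook/openai',
            className: 'OpenAiExecutionTools',
            options: {
                apiKey: process.env.OPENAI_API_KEY!,
            },
        },
    ],
    { isVerbose: true },
);
```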
@@ -8540,7 +7010,7 @@
8540
7010
  if (typeof env.ANTHROPIC_CLAUDE_API_KEY === 'string') {
8541
7011
  return {
8542
7012
  title: 'Claude (from env)',
8543
- packageName: '@promptbook/antrhopic-claude',
7013
+ packageName: '@promptbook/anthropic-claude',
8544
7014
  className: 'AnthropicClaudeExecutionTools',
8545
7015
  options: {
8546
7016
  apiKey: process.env.ANTHROPIC_CLAUDE_API_KEY,
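
The corrected `packageName` above is what the env-driven path emits: with `ANTHROPIC_CLAUDE_API_KEY` set, a `Claude (from env)` entry is generated automatically. A sketch of that flow; the package exporting `createLlmToolsFromConfigurationFromEnv` is an assumption here, since only its typings path appears in this release:

```ts
// Environment: ANTHROPIC_CLAUDE_API_KEY=sk-ant-...

import { createLlmToolsFromConfigurationFromEnv } from '@promptbook/core'; // <- exporting package assumed
import '@promptbook/anthropic-claude'; // <- registers the provider the entry refers to

const configuration = createLlmToolsFromConfigurationFromEnv();
// -> [{ title: 'Claude (from env)',
//       packageName: '@promptbook/anthropic-claude',
//       className: 'AnthropicClaudeExecutionTools',
//       options: { apiKey: '...' } }]
```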
@@ -8996,6 +7466,7 @@
8996
7466
  exports.EXPECTATION_UNITS = EXPECTATION_UNITS;
8997
7467
  exports.EnvironmentMismatchError = EnvironmentMismatchError;
8998
7468
  exports.ExecutionReportStringOptionsDefaults = ExecutionReportStringOptionsDefaults;
7469
+ exports.IS_VERBOSE = IS_VERBOSE;
8999
7470
  exports.LimitReachedError = LimitReachedError;
9000
7471
  exports.MAX_EXECUTION_ATTEMPTS = MAX_EXECUTION_ATTEMPTS;
9001
7472
  exports.MAX_FILENAME_LENGTH = MAX_FILENAME_LENGTH;