@promptbook/remote-server 0.66.0-7 → 0.66.0-9

This diff compares the content of publicly available package versions as released to their respective public registries. It is provided for informational purposes only.
Files changed (50)
  1. package/esm/index.es.js +266 -2167
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/anthropic-claude.index.d.ts +0 -2
  4. package/esm/typings/src/_packages/azure-openai.index.d.ts +4 -0
  5. package/esm/typings/src/_packages/cli.index.d.ts +8 -0
  6. package/esm/typings/src/_packages/core.index.d.ts +2 -0
  7. package/esm/typings/src/_packages/types.index.d.ts +22 -14
  8. package/esm/typings/src/_packages/utils.index.d.ts +7 -7
  9. package/esm/typings/src/execution/LlmExecutionToolsConstructor.d.ts +1 -1
  10. package/esm/typings/src/llm-providers/_common/$llmToolsMetadataRegister.d.ts +2 -2
  11. package/esm/typings/src/llm-providers/_common/$llmToolsRegister.d.ts +2 -2
  12. package/esm/typings/src/llm-providers/_common/$registeredLlmToolsMessage.d.ts +9 -0
  13. package/esm/typings/src/llm-providers/_common/LlmToolsConfiguration.d.ts +1 -1
  14. package/esm/typings/src/llm-providers/_common/LlmToolsMetadata.d.ts +1 -1
  15. package/esm/typings/src/llm-providers/_common/createLlmToolsFromConfigurationFromEnv.d.ts +1 -0
  16. package/esm/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts +1 -1
  17. package/esm/typings/src/llm-providers/anthropic-claude/register-constructor.d.ts +1 -0
  18. package/esm/typings/src/llm-providers/azure-openai/createAzureOpenAiExecutionTools.d.ts +15 -0
  19. package/esm/typings/src/llm-providers/azure-openai/register-configuration.d.ts +9 -0
  20. package/esm/typings/src/llm-providers/azure-openai/register-constructor.d.ts +12 -0
  21. package/esm/typings/src/llm-providers/openai/register-constructor.d.ts +1 -0
  22. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +1 -0
  23. package/esm/typings/src/llm-providers/remote/interfaces/{Promptbook_Server_Error.d.ts → PromptbookServer_Error.d.ts} +1 -1
  24. package/esm/typings/src/llm-providers/remote/interfaces/PromptbookServer_ListModels_Request.d.ts +34 -0
  25. package/esm/typings/src/llm-providers/remote/interfaces/PromptbookServer_ListModels_Response.d.ts +15 -0
  26. package/esm/typings/src/llm-providers/remote/interfaces/{Promptbook_Server_Progress.d.ts → PromptbookServer_Prompt_Progress.d.ts} +1 -1
  27. package/esm/typings/src/llm-providers/remote/interfaces/{Promptbook_Server_Request.d.ts → PromptbookServer_Prompt_Request.d.ts} +15 -3
  28. package/esm/typings/src/llm-providers/remote/interfaces/{Promptbook_Server_Response.d.ts → PromptbookServer_Prompt_Response.d.ts} +1 -1
  29. package/esm/typings/src/llm-providers/remote/interfaces/RemoteLlmExecutionToolsOptions.d.ts +1 -7
  30. package/esm/typings/src/llm-providers/remote/startRemoteServer.d.ts +1 -0
  31. package/esm/typings/src/utils/{Register.d.ts → $Register.d.ts} +6 -2
  32. package/esm/typings/src/utils/environment/{getGlobalScope.d.ts → $getGlobalScope.d.ts} +1 -1
  33. package/esm/typings/src/utils/organization/f.d.ts +6 -0
  34. package/package.json +2 -6
  35. package/umd/index.umd.js +270 -2169
  36. package/umd/index.umd.js.map +1 -1
  37. package/esm/typings/src/llm-providers/_common/config.d.ts +0 -14
  38. package/esm/typings/src/llm-providers/anthropic-claude/register1.d.ts +0 -4
  39. package/esm/typings/src/llm-providers/mocked/{fakeTextToExpectations.d.ts → $fakeTextToExpectations.d.ts} +0 -0
  40. package/esm/typings/src/utils/{currentDate.d.ts → $currentDate.d.ts} +0 -0
  41. package/esm/typings/src/utils/environment/{isRunningInBrowser.d.ts → $isRunningInBrowser.d.ts} +0 -0
  42. package/esm/typings/src/utils/environment/{isRunningInNode.d.ts → $isRunningInNode.d.ts} +0 -0
  43. package/esm/typings/src/utils/environment/{isRunningInWebWorker.d.ts → $isRunningInWebWorker.d.ts} +0 -0
  44. package/esm/typings/src/utils/files/{isDirectoryExisting.d.ts → $isDirectoryExisting.d.ts} +0 -0
  45. package/esm/typings/src/utils/files/{isDirectoryExisting.test.d.ts → $isDirectoryExisting.test.d.ts} +0 -0
  46. package/esm/typings/src/utils/files/{isFileExisting.d.ts → $isFileExisting.d.ts} +0 -0
  47. package/esm/typings/src/utils/files/{isFileExisting.test.d.ts → $isFileExisting.test.d.ts} +0 -0
  48. package/esm/typings/src/utils/files/{listAllFiles.d.ts → $listAllFiles.d.ts} +0 -0
  49. package/esm/typings/src/utils/files/{listAllFiles.test.d.ts → $listAllFiles.test.d.ts} +0 -0
  50. package/esm/typings/src/utils/random/{randomSeed.d.ts → $randomSeed.d.ts} +0 -0
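
The most consequential change in this prerelease is visible in the UMD bundle diff below: the remote server no longer bundles the Anthropic, Azure OpenAI, and OpenAI SDKs directly; instead, provider constructors are looked up in global registers (`$llmToolsRegister`, `$llmToolsMetadataRegister`) that provider packages populate when imported. As a rough TypeScript sketch of the registry mechanics, reconstructed from the transpiled `$Register` code in the diff below (names simplified; this is not the package's public API surface):

```ts
// Sketch only — reconstructed from the transpiled `$Register` in this diff;
// names are simplified and this is not the package's public API surface.
type Registration = { packageName: string; className: string };

class Register<T extends Registration> {
    private readonly storage: Array<T>;

    public constructor(private readonly storageName: string) {
        // The storage array lives on the global scope under a `_promptbook_*` key,
        // so every copy of the library loaded into one process shares a single list.
        const key = `_promptbook_${storageName}`;
        const globalScope = Function('return this')() as Record<string, Array<T>>;
        if (globalScope[key] === undefined) {
            globalScope[key] = [];
        } else if (!Array.isArray(globalScope[key])) {
            throw new Error(`Expected (global) ${key} to be an array`);
        }
        this.storage = globalScope[key];
    }

    public list(): ReadonlyArray<T> {
        return this.storage;
    }

    public register(registered: T): void {
        // Registering an already-known packageName + className pair replaces the old entry
        const index = this.storage.findIndex(
            (item) => item.packageName === registered.packageName && item.className === registered.className,
        );
        if (index === -1) {
            this.storage.push(registered);
        } else {
            this.storage[index] = registered;
        }
    }
}
```

`createLlmToolsFromConfiguration` then resolves each configured provider against `$llmToolsRegister.list()` and fails with the `$registeredLlmToolsMessage()` listing when a provider is configured but its package was never imported — which is why the UMD wrapper below drops its `require('@anthropic-ai/sdk')`, `require('@azure/openai')`, and `require('openai')` calls.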
package/umd/index.umd.js CHANGED
@@ -1,22 +1,20 @@
  (function (global, factory) {
- typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('colors'), require('http'), require('socket.io'), require('spacetrim'), require('socket.io-client'), require('@anthropic-ai/sdk'), require('@azure/openai'), require('openai')) :
- typeof define === 'function' && define.amd ? define(['exports', 'colors', 'http', 'socket.io', 'spacetrim', 'socket.io-client', '@anthropic-ai/sdk', '@azure/openai', 'openai'], factory) :
- (global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global["promptbook-remote-server"] = {}, global.colors, global.http, global.socket_io, global.spaceTrim, global.socket_ioClient, global.Anthropic, global.openai, global.OpenAI));
- })(this, (function (exports, colors, http, socket_io, spaceTrim, socket_ioClient, Anthropic, openai, OpenAI) { 'use strict';
+ typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('colors'), require('http'), require('socket.io'), require('spacetrim')) :
+ typeof define === 'function' && define.amd ? define(['exports', 'colors', 'http', 'socket.io', 'spacetrim'], factory) :
+ (global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global["promptbook-remote-server"] = {}, global.colors, global.http, global.socket_io, global.spaceTrim));
+ })(this, (function (exports, colors, http, socket_io, spaceTrim) { 'use strict';

  function _interopDefaultLegacy (e) { return e && typeof e === 'object' && 'default' in e ? e : { 'default': e }; }

  var colors__default = /*#__PURE__*/_interopDefaultLegacy(colors);
  var http__default = /*#__PURE__*/_interopDefaultLegacy(http);
  var spaceTrim__default = /*#__PURE__*/_interopDefaultLegacy(spaceTrim);
- var Anthropic__default = /*#__PURE__*/_interopDefaultLegacy(Anthropic);
- var OpenAI__default = /*#__PURE__*/_interopDefaultLegacy(OpenAI);

  // ⚠️ WARNING: This code has been generated so that any manual changes will be overwritten
  /**
  * The version of the Promptbook library
  */
- var PROMPTBOOK_VERSION = '0.66.0-6';
+ var PROMPTBOOK_VERSION = '0.66.0-8';
  // TODO: !!!! List here all the versions and annotate + put into script

  /*! *****************************************************************************
@@ -208,8 +206,37 @@
  */
  MultipleLlmExecutionTools.prototype.checkConfiguration = function () {
  return __awaiter(this, void 0, void 0, function () {
- return __generator(this, function (_a) {
- return [2 /*return*/];
+ var _a, _b, llmExecutionTools, e_1_1;
+ var e_1, _c;
+ return __generator(this, function (_d) {
+ switch (_d.label) {
+ case 0:
+ _d.trys.push([0, 5, 6, 7]);
+ _a = __values(this.llmExecutionTools), _b = _a.next();
+ _d.label = 1;
+ case 1:
+ if (!!_b.done) return [3 /*break*/, 4];
+ llmExecutionTools = _b.value;
+ return [4 /*yield*/, llmExecutionTools.checkConfiguration()];
+ case 2:
+ _d.sent();
+ _d.label = 3;
+ case 3:
+ _b = _a.next();
+ return [3 /*break*/, 1];
+ case 4: return [3 /*break*/, 7];
+ case 5:
+ e_1_1 = _d.sent();
+ e_1 = { error: e_1_1 };
+ return [3 /*break*/, 7];
+ case 6:
+ try {
+ if (_b && !_b.done && (_c = _a.return)) _c.call(_a);
+ }
+ finally { if (e_1) throw e_1.error; }
+ return [7 /*endfinally*/];
+ case 7: return [2 /*return*/];
+ }
  });
  });
  };
@@ -219,8 +246,8 @@
  */
  MultipleLlmExecutionTools.prototype.listModels = function () {
  return __awaiter(this, void 0, void 0, function () {
- var availableModels, _a, _b, llmExecutionTools, models, e_1_1;
- var e_1, _c;
+ var availableModels, _a, _b, llmExecutionTools, models, e_2_1;
+ var e_2, _c;
  return __generator(this, function (_d) {
  switch (_d.label) {
  case 0:
@@ -243,14 +270,14 @@
  return [3 /*break*/, 2];
  case 5: return [3 /*break*/, 8];
  case 6:
- e_1_1 = _d.sent();
- e_1 = { error: e_1_1 };
+ e_2_1 = _d.sent();
+ e_2 = { error: e_2_1 };
  return [3 /*break*/, 8];
  case 7:
  try {
  if (_b && !_b.done && (_c = _a.return)) _c.call(_a);
  }
- finally { if (e_1) throw e_1.error; }
+ finally { if (e_2) throw e_2.error; }
  return [7 /*endfinally*/];
  case 8: return [2 /*return*/, availableModels];
  }
@@ -283,8 +310,8 @@
  */
  MultipleLlmExecutionTools.prototype.callCommonModel = function (prompt) {
  return __awaiter(this, void 0, void 0, function () {
- var errors, _a, _b, llmExecutionTools, _c, error_1, e_2_1;
- var e_2, _d;
+ var errors, _a, _b, llmExecutionTools, _c, error_1, e_3_1;
+ var e_3, _d;
  var _this = this;
  return __generator(this, function (_e) {
  switch (_e.label) {
@@ -340,14 +367,14 @@
  return [3 /*break*/, 2];
  case 14: return [3 /*break*/, 17];
  case 15:
- e_2_1 = _e.sent();
- e_2 = { error: e_2_1 };
+ e_3_1 = _e.sent();
+ e_3 = { error: e_3_1 };
  return [3 /*break*/, 17];
  case 16:
  try {
  if (_b && !_b.done && (_d = _a.return)) _d.call(_a);
  }
- finally { if (e_2) throw e_2.error; }
+ finally { if (e_3) throw e_3.error; }
  return [7 /*endfinally*/];
  case 17:
  if (errors.length === 1) {
@@ -435,2209 +462,233 @@
  */

  /**
- * Remote server is a proxy server that uses its execution tools internally and exposes the executor interface externally.
+ * @@@
  *
- * You can simply use `RemoteExecutionTools` on client-side javascript and connect to your remote server.
- * This is useful to make all logic on browser side but not expose your API keys or no need to use customer's GPU.
+ * Note: `$` is used to indicate that this function is not a pure function - it access global scope
  *
- * @see https://github.com/webgptorg/promptbook#remote-server
- * @public exported from `@promptbook/remote-client`
+ * @public exported from `@promptbook/utils`
  */
- var RemoteLlmExecutionTools = /** @class */ (function () {
- function RemoteLlmExecutionTools(options) {
- this.options = options;
- }
- Object.defineProperty(RemoteLlmExecutionTools.prototype, "title", {
- get: function () {
- // TODO: [🧠] Maybe fetch title+description from the remote server (as well as if model methods are defined)
- return 'Remote server';
- },
- enumerable: false,
- configurable: true
- });
- Object.defineProperty(RemoteLlmExecutionTools.prototype, "description", {
- get: function () {
- return 'Use all models by your remote server';
- },
- enumerable: false,
- configurable: true
- });
- /**
- * Check the configuration of all execution tools
- */
- RemoteLlmExecutionTools.prototype.checkConfiguration = function () {
- return __awaiter(this, void 0, void 0, function () {
- return __generator(this, function (_a) {
- return [2 /*return*/];
- });
- });
- };
- /**
- * List all available models that can be used
- */
- RemoteLlmExecutionTools.prototype.listModels = function () {
- return __awaiter(this, void 0, void 0, function () {
- return __generator(this, function (_a) {
- return [2 /*return*/, (this.options.models ||
- [
- /* !!!!!! */
- ])];
- });
- });
- };
- /**
- * Creates a connection to the remote proxy server.
- */
- RemoteLlmExecutionTools.prototype.makeConnection = function () {
- var _this = this;
- return new Promise(
- // <- TODO: [🧱] Implement in a functional (not new Class) way
- function (resolve, reject) {
- var socket = socket_ioClient.io(_this.options.remoteUrl, {
- path: _this.options.path,
- // path: `${this.remoteUrl.pathname}/socket.io`,
- transports: [/*'websocket', <- TODO: [🌬] Make websocket transport work */ 'polling'],
- });
- // console.log('Connecting to', this.options.remoteUrl.href, { socket });
- socket.on('connect', function () {
- resolve(socket);
- });
- // TODO: !!!! Better timeout handling
- setTimeout(function () {
- reject(new Error("Timeout while connecting to ".concat(_this.options.remoteUrl)));
- }, 1000 /* <- TODO: Timeout to config */);
- });
- };
- /**
- * Calls remote proxy server to use a chat model
- */
- RemoteLlmExecutionTools.prototype.callChatModel = function (prompt) {
- if (this.options.isVerbose) {
- console.info("\uD83D\uDD8B Remote callChatModel call");
+ function $getGlobalScope() {
+ return Function('return this')();
+ }
+ /***
+ * TODO: !!!!! Make private and promptbook registry from this
+ */
+
+ /**
+ * Register is @@@
+ *
+ * Note: `$` is used to indicate that this function is not a pure function - it accesses and adds variables in global scope.
+ *
+ * @private internal utility, exported are only signleton instances of this class
+ */
+ var $Register = /** @class */ (function () {
+ function $Register(storageName) {
+ this.storageName = storageName;
+ storageName = "_promptbook_".concat(storageName);
+ var globalScope = $getGlobalScope();
+ if (globalScope[storageName] === undefined) {
+ globalScope[storageName] = [];
  }
- return /* not await */ this.callCommonModel(prompt);
- };
- /**
- * Calls remote proxy server to use a completion model
- */
- RemoteLlmExecutionTools.prototype.callCompletionModel = function (prompt) {
- if (this.options.isVerbose) {
- console.info("\uD83D\uDCAC Remote callCompletionModel call");
+ else if (!Array.isArray(globalScope[storageName])) {
+ throw new UnexpectedError("Expected (global) ".concat(storageName, " to be an array, but got ").concat(typeof globalScope[storageName]));
  }
- return /* not await */ this.callCommonModel(prompt);
- };
- /**
- * Calls remote proxy server to use a embedding model
- */
- RemoteLlmExecutionTools.prototype.callEmbeddingModel = function (prompt) {
- if (this.options.isVerbose) {
- console.info("\uD83D\uDCAC Remote callEmbeddingModel call");
+ this.storage = globalScope[storageName];
+ }
+ $Register.prototype.list = function () {
+ // <- TODO: ReadonlyDeep<Array<TRegistered>>
+ return this.storage;
+ };
+ $Register.prototype.register = function (registered) {
+ // <- TODO: What to return here
+ var packageName = registered.packageName, className = registered.className;
+ var existingRegistrationIndex = this.storage.findIndex(function (item) { return item.packageName === packageName && item.className === className; });
+ var existingRegistration = this.storage[existingRegistrationIndex];
+ // TODO: !!!!!! Global IS_VERBOSE mode
+ if (!existingRegistration) {
+ console.warn("[\uD83D\uDCE6] Registering `".concat(packageName, ".").concat(className, "` to `").concat(this.storageName, "`"));
+ this.storage.push(registered);
+ }
+ else {
+ console.warn("[\uD83D\uDCE6] Re-registering `".concat(packageName, ".").concat(className, "` to `").concat(this.storageName, "`"));
+ this.storage[existingRegistrationIndex] = registered;
  }
- return /* not await */ this.callCommonModel(prompt);
- };
- // <- Note: [🤖] callXxxModel
- /**
- * Calls remote proxy server to use both completion or chat model
- */
- RemoteLlmExecutionTools.prototype.callCommonModel = function (prompt) {
- return __awaiter(this, void 0, void 0, function () {
- var socket, promptResult;
- return __generator(this, function (_a) {
- switch (_a.label) {
- case 0: return [4 /*yield*/, this.makeConnection()];
- case 1:
- socket = _a.sent();
- if (this.options.isAnonymous) {
- socket.emit('request', {
- llmToolsConfiguration: this.options.llmToolsConfiguration,
- prompt: prompt,
- // <- TODO: [🛫] `prompt` is NOT fully serializable as JSON, it contains functions which are not serializable
- });
- }
- else {
- socket.emit('request', {
- clientId: this.options.clientId,
- prompt: prompt,
- // <- TODO: [🛫] `prompt` is NOT fully serializable as JSON, it contains functions which are not serializable
- });
- }
- return [4 /*yield*/, new Promise(function (resolve, reject) {
- socket.on('response', function (response) {
- resolve(response.promptResult);
- socket.disconnect();
- });
- socket.on('error', function (error) {
- reject(new PipelineExecutionError(error.errorMessage));
- socket.disconnect();
- });
- })];
- case 2:
- promptResult = _a.sent();
- socket.disconnect();
- return [2 /*return*/, promptResult];
- }
- });
- });
  };
- return RemoteLlmExecutionTools;
+ return $Register;
  }());
- /**
- * TODO: [🍓] Allow to list compatible models with each variant
- * TODO: [🗯] RemoteLlmExecutionTools should extend Destroyable and implement IDestroyable
- * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
- * TODO: [🧠] Maybe remove `@promptbook/remote-client` and just use `@promptbook/core`
- */

  /**
- * Function computeUsage will create price per one token based on the string value found on openai page
+ * @@@
  *
- * @private within the repository, used only as internal helper for `OPENAI_MODELS`
+ * Note: `$` is used to indicate that this interacts with the global scope
+ * @singleton Only one instance of each register is created per build, but thare can be more @@@
+ * @public exported from `@promptbook/core`
  */
- function computeUsage(value) {
- var _a = __read(value.split(' / '), 2), price = _a[0], tokens = _a[1];
- return parseFloat(price.replace('$', '')) / parseFloat(tokens.replace('M tokens', '')) / 1000000;
- }
+ var $llmToolsRegister = new $Register('llm_execution_tools_constructors');

  /**
- * List of available Anthropic Claude models with pricing
+ * Returns the same value that is passed as argument.
+ * No side effects.
  *
- * Note: Done at 2024-08-16
+ * Note: It can be usefull for:
  *
- * @see https://docs.anthropic.com/en/docs/models-overview
- * @public exported from `@promptbook/anthropic-claude`
- */
- var ANTHROPIC_CLAUDE_MODELS = [
- {
- modelVariant: 'CHAT',
- modelTitle: 'Claude 3.5 Sonnet',
- modelName: 'claude-3-5-sonnet-20240620',
- pricing: {
- prompt: computeUsage("$3.00 / 1M tokens"),
- output: computeUsage("$15.00 / 1M tokens"),
- },
- },
- {
- modelVariant: 'CHAT',
- modelTitle: 'Claude 3 Opus',
- modelName: 'claude-3-opus-20240229',
- pricing: {
- prompt: computeUsage("$15.00 / 1M tokens"),
- output: computeUsage("$75.00 / 1M tokens"),
- },
- },
- {
- modelVariant: 'CHAT',
- modelTitle: 'Claude 3 Sonnet',
- modelName: 'claude-3-sonnet-20240229',
- pricing: {
- prompt: computeUsage("$3.00 / 1M tokens"),
- output: computeUsage("$15.00 / 1M tokens"),
- },
- },
- {
- modelVariant: 'CHAT',
- modelTitle: 'Claude 3 Haiku',
- modelName: ' claude-3-haiku-20240307',
- pricing: {
- prompt: computeUsage("$0.25 / 1M tokens"),
- output: computeUsage("$1.25 / 1M tokens"),
- },
- },
- {
- modelVariant: 'CHAT',
- modelTitle: 'Claude 2.1',
- modelName: 'claude-2.1',
- pricing: {
- prompt: computeUsage("$8.00 / 1M tokens"),
- output: computeUsage("$24.00 / 1M tokens"),
- },
- },
- {
- modelVariant: 'CHAT',
- modelTitle: 'Claude 2',
- modelName: 'claude-2.0',
- pricing: {
- prompt: computeUsage("$8.00 / 1M tokens"),
- output: computeUsage("$24.00 / 1M tokens"),
- },
- },
- {
- modelVariant: 'CHAT',
- modelTitle: ' Claude Instant 1.2',
- modelName: 'claude-instant-1.2',
- pricing: {
- prompt: computeUsage("$0.80 / 1M tokens"),
- output: computeUsage("$2.40 / 1M tokens"),
- },
- },
- // TODO: !!! Claude 1 and 2 has also completion versions - ask Hoagy
- ];
- /**
- * Note: [🤖] Add models of new variant
- * TODO: [🧠] !!! Add embedding models OR Anthropic has only chat+completion models?
- * TODO: [🧠] Some mechanism to propagate unsureness
- * TODO: [🧠][👮‍♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...
- * TODO: [🎰] Some mechanism to auto-update available models
+ * 1) Leveling indentation
+ * 2) Putting always-true or always-false conditions without getting eslint errors
+ *
+ * @param value any values
+ * @returns the same values
+ * @private within the repository
  */
+ function just(value) {
+ if (value === undefined) {
+ return undefined;
+ }
+ return value;
+ }

  /**
- * Get current date in ISO 8601 format
+ * @@@
  *
- * @private internal utility
+ * Note: `$` is used to indicate that this interacts with the global scope
+ * @singleton Only one instance of each register is created per build, but thare can be more @@@
+ * @public exported from `@promptbook/core`
  */
- function getCurrentIsoDate() {
- return new Date().toISOString();
- }
+ var $llmToolsMetadataRegister = new $Register('llm_tools_metadata');

  /**
- * @@@
+ * Creates a message with all registered LLM tools
  *
- * Note: `$` is used to indicate that this function is not a pure function - it mutates given object
- * Note: This function mutates the object and returns the original (but mutated-deep-freezed) object
+ * Note: This function is used to create a (error) message when there is no constructor for some LLM provider
  *
- * @returns The same object as the input, but deeply frozen
- * @public exported from `@promptbook/utils`
+ * @private internal function of `createLlmToolsFromConfiguration` and `createLlmToolsFromEnv`
  */
- function $deepFreeze(objectValue) {
- var e_1, _a;
- var propertyNames = Object.getOwnPropertyNames(objectValue);
+ function $registeredLlmToolsMessage() {
+ var e_1, _a, e_2, _b;
+ /**
+ * Mixes registered LLM tools from $llmToolsMetadataRegister and $llmToolsRegister
+ */
+ var all = [];
+ var _loop_1 = function (packageName, className) {
+ if (all.some(function (item) { return item.packageName === packageName && item.className === className; })) {
+ return "continue";
+ }
+ all.push({ packageName: packageName, className: className });
+ };
  try {
- for (var propertyNames_1 = __values(propertyNames), propertyNames_1_1 = propertyNames_1.next(); !propertyNames_1_1.done; propertyNames_1_1 = propertyNames_1.next()) {
- var propertyName = propertyNames_1_1.value;
- var value = objectValue[propertyName];
- if (value && typeof value === 'object') {
- $deepFreeze(value);
- }
+ for (var _c = __values($llmToolsMetadataRegister.list()), _d = _c.next(); !_d.done; _d = _c.next()) {
+ var _e = _d.value, packageName = _e.packageName, className = _e.className;
+ _loop_1(packageName, className);
  }
  }
  catch (e_1_1) { e_1 = { error: e_1_1 }; }
  finally {
  try {
- if (propertyNames_1_1 && !propertyNames_1_1.done && (_a = propertyNames_1.return)) _a.call(propertyNames_1);
+ if (_d && !_d.done && (_a = _c.return)) _a.call(_c);
  }
  finally { if (e_1) throw e_1.error; }
  }
- return Object.freeze(objectValue);
- }
- /**
- * TODO: [🧠] Is there a way how to meaningfully test this utility
- */
-
- // <- TODO: [🧠] Better system for generator warnings - not always "code" and "by `@promptbook/cli`"
- /**
- * The maximum number of iterations for a loops
- *
- * @private within the repository - too low-level in comparison with other `MAX_...`
- */
- var LOOP_LIMIT = 1000;
- /**
- * Nonce which is used for replacing things in strings
- *
- * @private within the repository
- */
- var REPLACING_NONCE = 'u$k42k%!V2zo34w7Fu#@QUHYPW';
- /**
- * The names of the parameters that are reserved for special purposes
- *
- * @public exported from `@promptbook/core`
- */
- $deepFreeze([
- 'content',
- 'context',
- 'knowledge',
- 'samples',
- 'modelName',
- 'currentDate',
- // <- TODO: Add more like 'date', 'modelName',...
- // <- TODO: Add [emoji] + instructions ACRY when adding new reserved parameter
- ]);
- /**
- * @@@
- *
- * @private within the repository
- */
- var RESERVED_PARAMETER_MISSING_VALUE = 'MISSING-' + REPLACING_NONCE;
- /**
- * @@@
- *
- * @private within the repository
- */
- var RESERVED_PARAMETER_RESTRICTED = 'RESTRICTED-' + REPLACING_NONCE;
- /**
- * TODO: [🧠][🧜‍♂️] Maybe join remoteUrl and path into single value
- */
-
- /**
- * This error type indicates that some limit was reached
- *
- * @public exported from `@promptbook/core`
- */
- var LimitReachedError = /** @class */ (function (_super) {
- __extends(LimitReachedError, _super);
- function LimitReachedError(message) {
- var _this = _super.call(this, message) || this;
- _this.name = 'LimitReachedError';
- Object.setPrototypeOf(_this, LimitReachedError.prototype);
- return _this;
- }
- return LimitReachedError;
- }(Error));
-
- /**
- * Replaces parameters in template with values from parameters object
- *
- * @param template the template with parameters in {curly} braces
- * @param parameters the object with parameters
- * @returns the template with replaced parameters
- * @throws {PipelineExecutionError} if parameter is not defined, not closed, or not opened
- * @public exported from `@promptbook/utils`
- */
- function replaceParameters(template, parameters) {
- var e_1, _a;
+ var _loop_2 = function (packageName, className) {
+ if (all.some(function (item) { return item.packageName === packageName && item.className === className; })) {
+ return "continue";
+ }
+ all.push({ packageName: packageName, className: className });
+ };
  try {
- for (var _b = __values(Object.entries(parameters)), _c = _b.next(); !_c.done; _c = _b.next()) {
- var _d = __read(_c.value, 2), parameterName = _d[0], parameterValue = _d[1];
- if (parameterValue === RESERVED_PARAMETER_MISSING_VALUE) {
- throw new UnexpectedError("Parameter {".concat(parameterName, "} has missing value"));
- }
- else if (parameterValue === RESERVED_PARAMETER_RESTRICTED) {
- // TODO: [🍵]
- throw new UnexpectedError("Parameter {".concat(parameterName, "} is restricted to use"));
- }
+ for (var _f = __values($llmToolsRegister.list()), _g = _f.next(); !_g.done; _g = _f.next()) {
+ var _h = _g.value, packageName = _h.packageName, className = _h.className;
+ _loop_2(packageName, className);
  }
  }
- catch (e_1_1) { e_1 = { error: e_1_1 }; }
+ catch (e_2_1) { e_2 = { error: e_2_1 }; }
  finally {
  try {
- if (_c && !_c.done && (_a = _b.return)) _a.call(_b);
+ if (_g && !_g.done && (_b = _f.return)) _b.call(_f);
  }
- finally { if (e_1) throw e_1.error; }
+ finally { if (e_2) throw e_2.error; }
  }
- var replacedTemplate = template;
- var match;
- var loopLimit = LOOP_LIMIT;
- var _loop_1 = function () {
- if (loopLimit-- < 0) {
- throw new LimitReachedError('Loop limit reached during parameters replacement in `replaceParameters`');
+ var metadata = all.map(function (metadata) {
+ var isMetadataAviailable = $llmToolsMetadataRegister
+ .list()
+ .find(function (_a) {
+ var packageName = _a.packageName, className = _a.className;
+ return metadata.packageName === packageName && metadata.className === className;
+ });
+ var isInstalled = $llmToolsRegister
+ .list()
+ .find(function (_a) {
+ var packageName = _a.packageName, className = _a.className;
+ return metadata.packageName === packageName && metadata.className === className;
+ });
+ return __assign(__assign({}, metadata), { isMetadataAviailable: isMetadataAviailable, isInstalled: isInstalled });
+ });
+ return spaceTrim__default["default"](function (block) { return "\n Available LLM providers are:\n ".concat(block(metadata
+ .map(function (_a, i) {
+ var packageName = _a.packageName, className = _a.className, isMetadataAviailable = _a.isMetadataAviailable, isInstalled = _a.isInstalled;
+ var more;
+ if (just(false)) {
+ more = '';
  }
- var precol = match.groups.precol;
- var parameterName = match.groups.parameterName;
- if (parameterName === '') {
- return "continue";
+ else if (!isMetadataAviailable && !isInstalled) {
+ // TODO: [�][�] Maybe do allow to do auto-install if package not registered and not found
+ more = "(not installed and no metadata, looks like a unexpected behavior)";
  }
- if (parameterName.indexOf('{') !== -1 || parameterName.indexOf('}') !== -1) {
- throw new PipelineExecutionError('Parameter is already opened or not closed');
+ else if (isMetadataAviailable && !isInstalled) {
+ // TODO: [�][�]
+ more = "(not installed)";
  }
- if (parameters[parameterName] === undefined) {
- throw new PipelineExecutionError("Parameter {".concat(parameterName, "} is not defined"));
+ else if (!isMetadataAviailable && isInstalled) {
+ more = "(no metadata, looks like a unexpected behavior)";
  }
- var parameterValue = parameters[parameterName];
- if (parameterValue === undefined) {
- throw new PipelineExecutionError("Parameter {".concat(parameterName, "} is not defined"));
+ else if (isMetadataAviailable && isInstalled) {
+ more = "(installed)";
  }
- parameterValue = parameterValue.toString();
- if (parameterValue.includes('\n') && /^\s*\W{0,3}\s*$/.test(precol)) {
- parameterValue = parameterValue
- .split('\n')
- .map(function (line, index) { return (index === 0 ? line : "".concat(precol).concat(line)); })
- .join('\n');
+ else {
+ more = "(unknown state, looks like a unexpected behavior)";
  }
- replacedTemplate =
- replacedTemplate.substring(0, match.index + precol.length) +
- parameterValue +
- replacedTemplate.substring(match.index + precol.length + parameterName.length + 2);
- };
- while ((match = /^(?<precol>.*){(?<parameterName>\w+)}(.*)/m /* <- Not global */
- .exec(replacedTemplate))) {
- _loop_1();
- }
- // [💫] Check if there are parameters that are not closed properly
- if (/{\w+$/.test(replacedTemplate)) {
- throw new PipelineExecutionError('Parameter is not closed');
- }
- // [💫] Check if there are parameters that are not opened properly
- if (/^\w+}/.test(replacedTemplate)) {
- throw new PipelineExecutionError('Parameter is not opened');
- }
- return replacedTemplate;
- }
-
- /**
- * Counts number of characters in the text
- *
- * @public exported from `@promptbook/utils`
- */
- function countCharacters(text) {
- // Remove null characters
- text = text.replace(/\0/g, '');
- // Replace emojis (and also ZWJ sequence) with hyphens
- text = text.replace(/(\p{Extended_Pictographic})\p{Modifier_Symbol}/gu, '$1');
- text = text.replace(/(\p{Extended_Pictographic})[\u{FE00}-\u{FE0F}]/gu, '$1');
- text = text.replace(/\p{Extended_Pictographic}(\u{200D}\p{Extended_Pictographic})*/gu, '-');
- return text.length;
- }
-
- /**
- * Counts number of lines in the text
- *
- * @public exported from `@promptbook/utils`
- */
- function countLines(text) {
- if (text === '') {
- return 0;
- }
- return text.split('\n').length;
- }
-
- /**
- * Counts number of pages in the text
- *
- * @public exported from `@promptbook/utils`
- */
- function countPages(text) {
- var sentencesPerPage = 5; // Assuming each page has 5 sentences
- var sentences = text.split(/[.!?]+/).filter(function (sentence) { return sentence.trim() !== ''; });
- var pageCount = Math.ceil(sentences.length / sentencesPerPage);
- return pageCount;
- }
-
- /**
- * Counts number of paragraphs in the text
- *
- * @public exported from `@promptbook/utils`
- */
- function countParagraphs(text) {
- return text.split(/\n\s*\n/).filter(function (paragraph) { return paragraph.trim() !== ''; }).length;
- }
-
- /**
- * Split text into sentences
- *
- * @public exported from `@promptbook/utils`
- */
- function splitIntoSentences(text) {
- return text.split(/[.!?]+/).filter(function (sentence) { return sentence.trim() !== ''; });
- }
- /**
- * Counts number of sentences in the text
- *
- * @public exported from `@promptbook/utils`
- */
- function countSentences(text) {
- return splitIntoSentences(text).length;
- }
-
- var defaultDiacriticsRemovalMap = [
- {
- base: 'A',
- letters: '\u0041\u24B6\uFF21\u00C0\u00C1\u00C2\u1EA6\u1EA4\u1EAA\u1EA8\u00C3\u0100\u0102\u1EB0\u1EAE\u1EB4\u1EB2\u0226\u01E0\u00C4\u01DE\u1EA2\u00C5\u01FA\u01CD\u0200\u0202\u1EA0\u1EAC\u1EB6\u1E00\u0104\u023A\u2C6F',
- },
- { base: 'AA', letters: '\uA732' },
- { base: 'AE', letters: '\u00C6\u01FC\u01E2' },
- { base: 'AO', letters: '\uA734' },
- { base: 'AU', letters: '\uA736' },
- { base: 'AV', letters: '\uA738\uA73A' },
- { base: 'AY', letters: '\uA73C' },
- {
- base: 'B',
- letters: '\u0042\u24B7\uFF22\u1E02\u1E04\u1E06\u0243\u0182\u0181',
- },
- {
- base: 'C',
- letters: '\u0043\u24B8\uFF23\u0106\u0108\u010A\u010C\u00C7\u1E08\u0187\u023B\uA73E',
- },
- {
- base: 'D',
- letters: '\u0044\u24B9\uFF24\u1E0A\u010E\u1E0C\u1E10\u1E12\u1E0E\u0110\u018B\u018A\u0189\uA779\u00D0',
- },
- { base: 'DZ', letters: '\u01F1\u01C4' },
- { base: 'Dz', letters: '\u01F2\u01C5' },
- {
- base: 'E',
- letters: '\u0045\u24BA\uFF25\u00C8\u00C9\u00CA\u1EC0\u1EBE\u1EC4\u1EC2\u1EBC\u0112\u1E14\u1E16\u0114\u0116\u00CB\u1EBA\u011A\u0204\u0206\u1EB8\u1EC6\u0228\u1E1C\u0118\u1E18\u1E1A\u0190\u018E',
- },
- { base: 'F', letters: '\u0046\u24BB\uFF26\u1E1E\u0191\uA77B' },
- {
- base: 'G',
- letters: '\u0047\u24BC\uFF27\u01F4\u011C\u1E20\u011E\u0120\u01E6\u0122\u01E4\u0193\uA7A0\uA77D\uA77E',
- },
- {
- base: 'H',
- letters: '\u0048\u24BD\uFF28\u0124\u1E22\u1E26\u021E\u1E24\u1E28\u1E2A\u0126\u2C67\u2C75\uA78D',
- },
- {
- base: 'I',
- letters: '\u0049\u24BE\uFF29\u00CC\u00CD\u00CE\u0128\u012A\u012C\u0130\u00CF\u1E2E\u1EC8\u01CF\u0208\u020A\u1ECA\u012E\u1E2C\u0197',
- },
- { base: 'J', letters: '\u004A\u24BF\uFF2A\u0134\u0248' },
- {
- base: 'K',
- letters: '\u004B\u24C0\uFF2B\u1E30\u01E8\u1E32\u0136\u1E34\u0198\u2C69\uA740\uA742\uA744\uA7A2',
- },
- {
- base: 'L',
- letters: '\u004C\u24C1\uFF2C\u013F\u0139\u013D\u1E36\u1E38\u013B\u1E3C\u1E3A\u0141\u023D\u2C62\u2C60\uA748\uA746\uA780',
- },
- { base: 'LJ', letters: '\u01C7' },
- { base: 'Lj', letters: '\u01C8' },
- { base: 'M', letters: '\u004D\u24C2\uFF2D\u1E3E\u1E40\u1E42\u2C6E\u019C' },
- {
- base: 'N',
- letters: '\u004E\u24C3\uFF2E\u01F8\u0143\u00D1\u1E44\u0147\u1E46\u0145\u1E4A\u1E48\u0220\u019D\uA790\uA7A4',
- },
- { base: 'NJ', letters: '\u01CA' },
- { base: 'Nj', letters: '\u01CB' },
- {
- base: 'O',
- letters: '\u004F\u24C4\uFF2F\u00D2\u00D3\u00D4\u1ED2\u1ED0\u1ED6\u1ED4\u00D5\u1E4C\u022C\u1E4E\u014C\u1E50\u1E52\u014E\u022E\u0230\u00D6\u022A\u1ECE\u0150\u01D1\u020C\u020E\u01A0\u1EDC\u1EDA\u1EE0\u1EDE\u1EE2\u1ECC\u1ED8\u01EA\u01EC\u00D8\u01FE\u0186\u019F\uA74A\uA74C',
- },
- { base: 'OI', letters: '\u01A2' },
- { base: 'OO', letters: '\uA74E' },
- { base: 'OU', letters: '\u0222' },
- { base: 'OE', letters: '\u008C\u0152' },
- { base: 'oe', letters: '\u009C\u0153' },
- {
- base: 'P',
- letters: '\u0050\u24C5\uFF30\u1E54\u1E56\u01A4\u2C63\uA750\uA752\uA754',
- },
- { base: 'Q', letters: '\u0051\u24C6\uFF31\uA756\uA758\u024A' },
- {
- base: 'R',
- letters: '\u0052\u24C7\uFF32\u0154\u1E58\u0158\u0210\u0212\u1E5A\u1E5C\u0156\u1E5E\u024C\u2C64\uA75A\uA7A6\uA782',
- },
- {
- base: 'S',
- letters: '\u0053\u24C8\uFF33\u1E9E\u015A\u1E64\u015C\u1E60\u0160\u1E66\u1E62\u1E68\u0218\u015E\u2C7E\uA7A8\uA784',
- },
- {
- base: 'T',
- letters: '\u0054\u24C9\uFF34\u1E6A\u0164\u1E6C\u021A\u0162\u1E70\u1E6E\u0166\u01AC\u01AE\u023E\uA786',
- },
- { base: 'TZ', letters: '\uA728' },
- {
- base: 'U',
- letters: '\u0055\u24CA\uFF35\u00D9\u00DA\u00DB\u0168\u1E78\u016A\u1E7A\u016C\u00DC\u01DB\u01D7\u01D5\u01D9\u1EE6\u016E\u0170\u01D3\u0214\u0216\u01AF\u1EEA\u1EE8\u1EEE\u1EEC\u1EF0\u1EE4\u1E72\u0172\u1E76\u1E74\u0244',
- },
- { base: 'V', letters: '\u0056\u24CB\uFF36\u1E7C\u1E7E\u01B2\uA75E\u0245' },
- { base: 'VY', letters: '\uA760' },
- {
- base: 'W',
- letters: '\u0057\u24CC\uFF37\u1E80\u1E82\u0174\u1E86\u1E84\u1E88\u2C72',
- },
- { base: 'X', letters: '\u0058\u24CD\uFF38\u1E8A\u1E8C' },
- {
- base: 'Y',
- letters: '\u0059\u24CE\uFF39\u1EF2\u00DD\u0176\u1EF8\u0232\u1E8E\u0178\u1EF6\u1EF4\u01B3\u024E\u1EFE',
- },
- {
- base: 'Z',
- letters: '\u005A\u24CF\uFF3A\u0179\u1E90\u017B\u017D\u1E92\u1E94\u01B5\u0224\u2C7F\u2C6B\uA762',
- },
- {
- base: 'a',
- letters: '\u0061\u24D0\uFF41\u1E9A\u00E0\u00E1\u00E2\u1EA7\u1EA5\u1EAB\u1EA9\u00E3\u0101\u0103\u1EB1\u1EAF\u1EB5\u1EB3\u0227\u01E1\u00E4\u01DF\u1EA3\u00E5\u01FB\u01CE\u0201\u0203\u1EA1\u1EAD\u1EB7\u1E01\u0105\u2C65\u0250',
- },
- { base: 'aa', letters: '\uA733' },
- { base: 'ae', letters: '\u00E6\u01FD\u01E3' },
- { base: 'ao', letters: '\uA735' },
- { base: 'au', letters: '\uA737' },
- { base: 'av', letters: '\uA739\uA73B' },
- { base: 'ay', letters: '\uA73D' },
- {
- base: 'b',
- letters: '\u0062\u24D1\uFF42\u1E03\u1E05\u1E07\u0180\u0183\u0253',
- },
- {
- base: 'c',
- letters: '\u0063\u24D2\uFF43\u0107\u0109\u010B\u010D\u00E7\u1E09\u0188\u023C\uA73F\u2184',
- },
- {
- base: 'd',
- letters: '\u0064\u24D3\uFF44\u1E0B\u010F\u1E0D\u1E11\u1E13\u1E0F\u0111\u018C\u0256\u0257\uA77A',
- },
- { base: 'dz', letters: '\u01F3\u01C6' },
- {
- base: 'e',
- letters: '\u0065\u24D4\uFF45\u00E8\u00E9\u00EA\u1EC1\u1EBF\u1EC5\u1EC3\u1EBD\u0113\u1E15\u1E17\u0115\u0117\u00EB\u1EBB\u011B\u0205\u0207\u1EB9\u1EC7\u0229\u1E1D\u0119\u1E19\u1E1B\u0247\u025B\u01DD',
- },
- { base: 'f', letters: '\u0066\u24D5\uFF46\u1E1F\u0192\uA77C' },
- {
- base: 'g',
- letters: '\u0067\u24D6\uFF47\u01F5\u011D\u1E21\u011F\u0121\u01E7\u0123\u01E5\u0260\uA7A1\u1D79\uA77F',
- },
- {
- base: 'h',
- letters: '\u0068\u24D7\uFF48\u0125\u1E23\u1E27\u021F\u1E25\u1E29\u1E2B\u1E96\u0127\u2C68\u2C76\u0265',
- },
- { base: 'hv', letters: '\u0195' },
- {
- base: 'i',
- letters: '\u0069\u24D8\uFF49\u00EC\u00ED\u00EE\u0129\u012B\u012D\u00EF\u1E2F\u1EC9\u01D0\u0209\u020B\u1ECB\u012F\u1E2D\u0268\u0131',
- },
- { base: 'j', letters: '\u006A\u24D9\uFF4A\u0135\u01F0\u0249' },
- {
- base: 'k',
- letters: '\u006B\u24DA\uFF4B\u1E31\u01E9\u1E33\u0137\u1E35\u0199\u2C6A\uA741\uA743\uA745\uA7A3',
- },
- {
- base: 'l',
- letters: '\u006C\u24DB\uFF4C\u0140\u013A\u013E\u1E37\u1E39\u013C\u1E3D\u1E3B\u017F\u0142\u019A\u026B\u2C61\uA749\uA781\uA747',
- },
- { base: 'lj', letters: '\u01C9' },
- { base: 'm', letters: '\u006D\u24DC\uFF4D\u1E3F\u1E41\u1E43\u0271\u026F' },
- {
- base: 'n',
- letters: '\u006E\u24DD\uFF4E\u01F9\u0144\u00F1\u1E45\u0148\u1E47\u0146\u1E4B\u1E49\u019E\u0272\u0149\uA791\uA7A5',
- },
- { base: 'nj', letters: '\u01CC' },
- {
- base: 'o',
- letters: '\u006F\u24DE\uFF4F\u00F2\u00F3\u00F4\u1ED3\u1ED1\u1ED7\u1ED5\u00F5\u1E4D\u022D\u1E4F\u014D\u1E51\u1E53\u014F\u022F\u0231\u00F6\u022B\u1ECF\u0151\u01D2\u020D\u020F\u01A1\u1EDD\u1EDB\u1EE1\u1EDF\u1EE3\u1ECD\u1ED9\u01EB\u01ED\u00F8\u01FF\u0254\uA74B\uA74D\u0275',
- },
- { base: 'oi', letters: '\u01A3' },
- { base: 'ou', letters: '\u0223' },
- { base: 'oo', letters: '\uA74F' },
- {
- base: 'p',
- letters: '\u0070\u24DF\uFF50\u1E55\u1E57\u01A5\u1D7D\uA751\uA753\uA755',
- },
- { base: 'q', letters: '\u0071\u24E0\uFF51\u024B\uA757\uA759' },
- {
- base: 'r',
- letters: '\u0072\u24E1\uFF52\u0155\u1E59\u0159\u0211\u0213\u1E5B\u1E5D\u0157\u1E5F\u024D\u027D\uA75B\uA7A7\uA783',
- },
- {
- base: 's',
- letters: '\u0073\u24E2\uFF53\u00DF\u015B\u1E65\u015D\u1E61\u0161\u1E67\u1E63\u1E69\u0219\u015F\u023F\uA7A9\uA785\u1E9B',
- },
- {
- base: 't',
- letters: '\u0074\u24E3\uFF54\u1E6B\u1E97\u0165\u1E6D\u021B\u0163\u1E71\u1E6F\u0167\u01AD\u0288\u2C66\uA787',
- },
- { base: 'tz', letters: '\uA729' },
- {
- base: 'u',
- letters: '\u0075\u24E4\uFF55\u00F9\u00FA\u00FB\u0169\u1E79\u016B\u1E7B\u016D\u00FC\u01DC\u01D8\u01D6\u01DA\u1EE7\u016F\u0171\u01D4\u0215\u0217\u01B0\u1EEB\u1EE9\u1EEF\u1EED\u1EF1\u1EE5\u1E73\u0173\u1E77\u1E75\u0289',
- },
- { base: 'v', letters: '\u0076\u24E5\uFF56\u1E7D\u1E7F\u028B\uA75F\u028C' },
- { base: 'vy', letters: '\uA761' },
- {
- base: 'w',
- letters: '\u0077\u24E6\uFF57\u1E81\u1E83\u0175\u1E87\u1E85\u1E98\u1E89\u2C73',
- },
- { base: 'x', letters: '\u0078\u24E7\uFF58\u1E8B\u1E8D' },
- {
- base: 'y',
- letters: '\u0079\u24E8\uFF59\u1EF3\u00FD\u0177\u1EF9\u0233\u1E8F\u00FF\u1EF7\u1E99\u1EF5\u01B4\u024F\u1EFF',
- },
- {
- base: 'z',
- letters: '\u007A\u24E9\uFF5A\u017A\u1E91\u017C\u017E\u1E93\u1E95\u01B6\u0225\u0240\u2C6C\uA763',
- },
- ];
- /**
- * Map of letters from diacritic variant to diacritless variant
- * Contains lowercase and uppercase separatelly
- *
- * > "á" => "a"
- * > "ě" => "e"
- * > "Ă" => "A"
- * > ...
- *
- * @public exported from `@promptbook/utils`
- */
- var DIACRITIC_VARIANTS_LETTERS = {};
- // tslint:disable-next-line: prefer-for-of
- for (var i = 0; i < defaultDiacriticsRemovalMap.length; i++) {
- var letters = defaultDiacriticsRemovalMap[i].letters;
- // tslint:disable-next-line: prefer-for-of
- for (var j = 0; j < letters.length; j++) {
- DIACRITIC_VARIANTS_LETTERS[letters[j]] = defaultDiacriticsRemovalMap[i].base;
- }
+ return "".concat(i + 1, ") `").concat(className, "` from `").concat(packageName, "` ").concat(more);
649
+ })
650
+ .join('\n')), "\n "); });
1157
651
  }
1158
- // <- TODO: [🍓] Put to maker function to save execution time if not needed
1159
- /*
1160
- @see https://stackoverflow.com/questions/990904/remove-accents-diacritics-in-a-string-in-javascript
1161
- Licensed under the Apache License, Version 2.0 (the "License");
1162
- you may not use this file except in compliance with the License.
1163
- You may obtain a copy of the License at
1164
-
1165
- http://www.apache.org/licenses/LICENSE-2.0
1166
-
1167
- Unless required by applicable law or agreed to in writing, software
1168
- distributed under the License is distributed on an "AS IS" BASIS,
1169
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1170
- See the License for the specific language governing permissions and
1171
- limitations under the License.
1172
- */
1173
652
 
1174
653
  /**
1175
654
  * @@@
1176
655
  *
1177
- * @param input @@@
656
+ * Note: This function is not cached, every call creates new instance of `MultipleLlmExecutionTools`
657
+ *
1178
658
  * @returns @@@
1179
- * @public exported from `@promptbook/utils`
659
+ * @public exported from `@promptbook/core`
1180
660
  */
1181
- function removeDiacritics(input) {
1182
- /*eslint no-control-regex: "off"*/
1183
- return input.replace(/[^\u0000-\u007E]/g, function (a) {
1184
- return DIACRITIC_VARIANTS_LETTERS[a] || a;
661
+ function createLlmToolsFromConfiguration(configuration, options) {
662
+ if (options === void 0) { options = {}; }
663
+ var _a = options.isVerbose, isVerbose = _a === void 0 ? false : _a;
664
+ var llmTools = configuration.map(function (llmConfiguration) {
665
+ var registeredItem = $llmToolsRegister
666
+ .list()
667
+ .find(function (_a) {
668
+ var packageName = _a.packageName, className = _a.className;
669
+ return llmConfiguration.packageName === packageName && llmConfiguration.className === className;
670
+ });
671
+ if (registeredItem === undefined) {
672
+ throw new Error(spaceTrim__default["default"](function (block) { return "\n There is no constructor for LLM provider `".concat(llmConfiguration.className, "` from `").concat(llmConfiguration.packageName, "`\n\n You have probably forgotten install and import the provider package.\n To fix this issue, you can:\n\n Install:\n\n > npm install ").concat(llmConfiguration.packageName, "\n\n And import:\n\n > import '").concat(llmConfiguration.packageName, "';\n\n\n ").concat(block($registeredLlmToolsMessage()), "\n "); }));
673
+ }
674
+ return registeredItem(__assign({ isVerbose: isVerbose }, llmConfiguration.options));
1185
675
  });
676
+ return joinLlmExecutionTools.apply(void 0, __spreadArray([], __read(llmTools), false));
1186
677
  }
1187
678
  /**
1188
- * TODO: [Ж] Variant for cyrillic (and in general non-latin) letters
679
+ * TODO: [🎌] Togethere with `createLlmToolsFromConfiguration` + 'EXECUTION_TOOLS_CLASSES' gets to `@promptbook/core` ALL model providers, make this more efficient
680
+ * TODO: [🧠][🎌] Dynamically install required providers
681
+ * TODO: @@@ write discussion about this - wizzard
682
+ * TODO: [🧠][🍛] Which name is better `createLlmToolsFromConfig` or `createLlmToolsFromConfiguration`?
683
+ * TODO: [🧠] Is there some meaningfull way how to test this util
684
+ * TODO: This should be maybe not under `_common` but under `utils`
1189
685
  */
1190
686
 
1191
687
  /**
1192
- * Counts number of words in the text
688
+ * Remote server is a proxy server that uses its execution tools internally and exposes the executor interface externally.
1193
689
  *
1194
- * @public exported from `@promptbook/utils`
1195
- */
1196
- function countWords(text) {
1197
- text = text.replace(/[\p{Extended_Pictographic}]/gu, 'a');
1198
- text = removeDiacritics(text);
1199
- return text.split(/[^a-zа-я0-9]+/i).filter(function (word) { return word.length > 0; }).length;
1200
- }
1201
-
1202
- /**
1203
- * Helper of usage compute
1204
- *
1205
- * @param content the content of prompt or response
1206
- * @returns part of PromptResultUsageCounts
1207
- *
1208
- * @private internal utility of LlmExecutionTools
1209
- */
1210
- function computeUsageCounts(content) {
1211
- return {
1212
- charactersCount: { value: countCharacters(content) },
1213
- wordsCount: { value: countWords(content) },
1214
- sentencesCount: { value: countSentences(content) },
1215
- linesCount: { value: countLines(content) },
1216
- paragraphsCount: { value: countParagraphs(content) },
1217
- pagesCount: { value: countPages(content) },
1218
- };
1219
- }
1220
-
1221
- /**
1222
- * Make UncertainNumber
1223
- *
1224
- * @param value
1225
- *
1226
- * @private utility for initializating UncertainNumber
1227
- */
1228
- function uncertainNumber(value) {
1229
- if (value === null || value === undefined || Number.isNaN(value)) {
1230
- return { value: 0, isUncertain: true };
1231
- }
1232
- return { value: value };
1233
- }
1234
-
1235
- /**
1236
- * Computes the usage of the Anthropic Claude API based on the response from Anthropic Claude
1237
- *
1238
- * @param promptContent The content of the prompt
1239
- * @param resultContent The content of the result (for embedding prompts or failed prompts pass empty string)
1240
- * @param rawResponse The raw response from Anthropic Claude API
1241
- * @throws {PipelineExecutionError} If the usage is not defined in the response from Anthropic Claude
1242
- * @private internal utility of `AnthropicClaudeExecutionTools`
1243
- */
1244
- function computeAnthropicClaudeUsage(promptContent, // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
1245
- resultContent, rawResponse) {
1246
- var _a, _b;
1247
- if (rawResponse.usage === undefined) {
1248
- throw new PipelineExecutionError('The usage is not defined in the response from Anthropic Claude');
1249
- }
1250
- if (((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.input_tokens) === undefined) {
1251
- throw new PipelineExecutionError('In Anthropic Claude response `usage.prompt_tokens` not defined');
1252
- }
1253
- var inputTokens = rawResponse.usage.input_tokens;
1254
- var outputTokens = ((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.output_tokens) || 0;
1255
- var modelInfo = ANTHROPIC_CLAUDE_MODELS.find(function (model) { return model.modelName === rawResponse.model; });
1256
- var price;
1257
- if (modelInfo === undefined || modelInfo.pricing === undefined) {
1258
- price = uncertainNumber();
1259
- }
1260
- else {
1261
- price = uncertainNumber(inputTokens * modelInfo.pricing.prompt + outputTokens * modelInfo.pricing.output);
1262
- }
1263
- return {
1264
- price: price,
1265
- input: __assign({ tokensCount: uncertainNumber(rawResponse.usage.input_tokens) }, computeUsageCounts(promptContent)),
1266
- output: __assign({ tokensCount: uncertainNumber(outputTokens) }, computeUsageCounts(resultContent)),
1267
- };
1268
- }
1269
- /**
1270
- * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
1271
- */
1272
-
1273
- /**
1274
- * Execution Tools for calling Anthropic Claude API.
1275
- *
1276
- * @public exported from `@promptbook/anthropic-claude`
1277
- * @deprecated use `createAnthropicClaudeExecutionTools` instead
1278
- */
1279
- var AnthropicClaudeExecutionTools = /** @class */ (function () {
- /**
- * Creates Anthropic Claude Execution Tools.
- *
- * @param options which are relevant are directly passed to the Anthropic Claude client
- */
- function AnthropicClaudeExecutionTools(options) {
- if (options === void 0) { options = { isProxied: false }; }
- this.options = options;
- /**
- * Anthropic Claude API client.
- */
- this.client = null;
- }
- Object.defineProperty(AnthropicClaudeExecutionTools.prototype, "title", {
- get: function () {
- return 'Anthropic Claude';
- },
- enumerable: false,
- configurable: true
- });
- Object.defineProperty(AnthropicClaudeExecutionTools.prototype, "description", {
- get: function () {
- return 'Use all models provided by Anthropic Claude';
- },
- enumerable: false,
- configurable: true
- });
- AnthropicClaudeExecutionTools.prototype.getClient = function () {
- return __awaiter(this, void 0, void 0, function () {
- var anthropicOptions;
- return __generator(this, function (_a) {
- if (this.client === null) {
- anthropicOptions = __assign({}, this.options);
- delete anthropicOptions.isVerbose;
- delete anthropicOptions.isProxied;
- this.client = new Anthropic__default["default"](anthropicOptions);
- }
- return [2 /*return*/, this.client];
- });
- });
- };
- /**
- * Check the `options` passed to `constructor`
- */
- AnthropicClaudeExecutionTools.prototype.checkConfiguration = function () {
- return __awaiter(this, void 0, void 0, function () {
- return __generator(this, function (_a) {
- switch (_a.label) {
- case 0: return [4 /*yield*/, this.getClient()];
- case 1:
- _a.sent();
- return [2 /*return*/];
- }
- });
- });
- };
- /**
- * List all available Anthropic Claude models that can be used
- */
- AnthropicClaudeExecutionTools.prototype.listModels = function () {
- return ANTHROPIC_CLAUDE_MODELS;
- };
- /**
- * Calls Anthropic Claude API to use a chat model.
- */
- AnthropicClaudeExecutionTools.prototype.callChatModel = function (prompt) {
- return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, client, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, contentBlock, resultContent, usage;
- return __generator(this, function (_a) {
- switch (_a.label) {
- case 0:
- if (this.options.isVerbose) {
- console.info('💬 Anthropic Claude callChatModel call');
- }
- content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
- return [4 /*yield*/, this.getClient()];
- case 1:
- client = _a.sent();
- // TODO: [☂] Use here more modelRequirements
- if (modelRequirements.modelVariant !== 'CHAT') {
- throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
- }
- modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
- rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
- rawRequest = {
- model: modelRequirements.modelName || this.getDefaultChatModel().modelName,
- max_tokens: modelRequirements.maxTokens || 4096,
- // <- TODO: [🌾] Make some global max cap for maxTokens
- temperature: modelRequirements.temperature,
- system: modelRequirements.systemMessage,
- // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
- // <- Note: [🧆]
- messages: [
- {
- role: 'user',
- content: rawPromptContent,
- },
- ],
- // TODO: Is here some equivalent of user identification?> user: this.options.user,
- };
- start = getCurrentIsoDate();
- if (this.options.isVerbose) {
- console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
- }
- return [4 /*yield*/, client.messages.create(rawRequest)];
- case 2:
- rawResponse = _a.sent();
- if (this.options.isVerbose) {
- console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
- }
- if (!rawResponse.content[0]) {
- throw new PipelineExecutionError('No content from Anthropic Claude');
- }
- if (rawResponse.content.length > 1) {
- throw new PipelineExecutionError('More than one content blocks from Anthropic Claude');
- }
- contentBlock = rawResponse.content[0];
- if (contentBlock.type !== 'text') {
- throw new PipelineExecutionError("Returned content is not \"text\" type but \"".concat(contentBlock.type, "\""));
- }
- resultContent = contentBlock.text;
- // eslint-disable-next-line prefer-const
- complete = getCurrentIsoDate();
- usage = computeAnthropicClaudeUsage(content, '', rawResponse);
- return [2 /*return*/, {
- content: resultContent,
- modelName: rawResponse.model,
- timing: {
- start: start,
- complete: complete,
- },
- usage: usage,
- rawPromptContent: rawPromptContent,
- rawRequest: rawRequest,
- rawResponse: rawResponse,
- // <- [🗯]
- }];
- }
- });
- });
- };
- /*
- TODO: [👏]
- public async callCompletionModel(
- prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements'>,
- ): Promise<PromptCompletionResult> {
-
- if (this.options.isVerbose) {
- console.info('🖋 Anthropic Claude callCompletionModel call');
- }
-
- const { content, parameters, modelRequirements } = prompt;
-
- // TODO: [☂] Use here more modelRequirements
- if (modelRequirements.modelVariant !== 'COMPLETION') {
- throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
- }
-
- const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
- const modelSettings = {
- model: modelName,
- max_tokens: modelRequirements.maxTokens || 2000, // <- Note: 2000 is for lagacy reasons
- // <- TODO: [🌾] Make some global max cap for maxTokens
- // <- TODO: Use here `systemMessage`, `temperature` and `seed`
- };
-
- const rawRequest: xxxx.Completions.CompletionCreateParamsNonStreaming = {
- ...modelSettings,
- prompt: rawPromptContent,
- user: this.options.user,
- };
- const start: string_date_iso8601 = getCurrentIsoDate();
- let complete: string_date_iso8601;
-
- if (this.options.isVerbose) {
- console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
- }
- const rawResponse = await this.client.completions.create(rawRequest);
- if (this.options.isVerbose) {
- console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
- }
-
- if (!rawResponse.choices[0]) {
- throw new PipelineExecutionError('No choises from Anthropic Claude');
- }
-
- if (rawResponse.choices.length > 1) {
- // TODO: This should be maybe only warning
- throw new PipelineExecutionError('More than one choise from Anthropic Claude');
- }
-
- const resultContent = rawResponse.choices[0].text;
- // eslint-disable-next-line prefer-const
- complete = getCurrentIsoDate();
- const usage = { price: 'UNKNOWN', inputTokens: 0, outputTokens: 0 /* <- TODO: [🐞] Compute usage * / } satisfies PromptResultUsage;
-
-
-
- return {
- content: resultContent,
- modelName: rawResponse.model || model,
- timing: {
- start,
- complete,
- },
- usage,
- rawResponse,
- // <- [🗯]
- };
- }
- */
- // <- Note: [🤖] callXxxModel
- /**
- * Get the model that should be used as default
- */
- AnthropicClaudeExecutionTools.prototype.getDefaultModel = function (defaultModelName) {
- var model = ANTHROPIC_CLAUDE_MODELS.find(function (_a) {
1497
- var modelName = _a.modelName;
1498
- return modelName.startsWith(defaultModelName);
1499
- });
1500
- if (model === undefined) {
1501
- throw new UnexpectedError(spaceTrim__default["default"](function (block) {
1502
- return "\n Cannot find model in OpenAI models with name \"".concat(defaultModelName, "\" which should be used as default.\n\n Available models:\n ").concat(block(ANTHROPIC_CLAUDE_MODELS.map(function (_a) {
1503
- var modelName = _a.modelName;
1504
- return "- \"".concat(modelName, "\"");
1505
- }).join('\n')), "\n\n ");
1506
- }));
1507
- }
1508
- return model;
1509
- };
1510
- /**
1511
- * Default model for chat variant.
1512
- */
1513
- AnthropicClaudeExecutionTools.prototype.getDefaultChatModel = function () {
1514
- return this.getDefaultModel('claude-3-opus');
1515
- };
1516
- return AnthropicClaudeExecutionTools;
1517
- }());
1518
- /**
1519
- * TODO: [🍆] JSON mode
1520
- * TODO: [🧠] Maybe handle errors via transformAnthropicError (like transformAzureError)
1521
- * TODO: Maybe Create some common util for callChatModel and callCompletionModel
1522
- * TODO: Maybe make custom OpenAiError
1523
- * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
1524
- * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
1525
- * TODO: [📅] Maybe instead of `RemoteLlmExecutionToolsOptions` use `proxyWithAnonymousRemoteServer` (if implemented)
1526
- */
1527
-
1528
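For orientation, a minimal sketch of driving the class above from TypeScript, based only on the deleted code (the `callChatModel` signature, the `CHAT` variant check, and the `claude-3-opus` default). The `apiKey` option name is an assumption; the options object is forwarded as-is to the Anthropic SDK client:

    import { AnthropicClaudeExecutionTools } from '@promptbook/anthropic-claude';

    // A sketch under assumed option names; options are passed through to the Anthropic SDK
    const tools = new AnthropicClaudeExecutionTools({
        apiKey: process.env.ANTHROPIC_CLAUDE_API_KEY, // <- Assumed: same key option as the Anthropic SDK
        isVerbose: true,
    });

    const result = await tools.callChatModel({
        content: 'Write one sentence about {topic}',
        parameters: { topic: 'remote servers' }, // <- Substituted via replaceParameters
        modelRequirements: { modelVariant: 'CHAT' }, // <- Falls back to the default 'claude-3-opus' model
    });
    console.info(result.content, result.usage);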
- /**
- * Execution Tools for calling Anthropic Claude API.
- *
- * @public exported from `@promptbook/anthropic-claude`
- */
- var createAnthropicClaudeExecutionTools = Object.assign(function (options) {
- if (options.isProxied) {
- return new RemoteLlmExecutionTools(__assign(__assign({}, options), { isAnonymous: true, llmToolsConfiguration: [
- {
- title: 'Anthropic Claude (proxied)',
- packageName: '@promptbook/anthropic-claude',
- className: 'AnthropicClaudeExecutionTools',
- options: __assign(__assign({}, options), { isProxied: false }),
- },
- ], models: ANTHROPIC_CLAUDE_MODELS }));
- }
- return new AnthropicClaudeExecutionTools(options);
- }, {
- packageName: '@promptbook/anthropic-claude',
- className: 'AnthropicClaudeExecutionTools',
- });
- /**
- * TODO: [🧠] !!!! Make this anonymous mode work with all LLM providers
- * TODO: [🧠][🧱] !!!! Maybe change all `new AnthropicClaudeExecutionTools` -> `createAnthropicClaudeExecutionTools` in manual
- * TODO: [🧠] Maybe auto-detect usage in browser and determine default value of `isProxied`
- * TODO: [🦺] Is there some way to put `packageName` and `className` on top and the function definition on the bottom?
- * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
- */
-
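The `isProxied` branch above means the same factory can run either directly against the Anthropic API or tunnelled through a Promptbook remote server in anonymous mode. A hedged sketch of the proxied variant; the `remoteUrl` option name is an assumption taken from the remote-tools options, not confirmed by this diff:

    import { createAnthropicClaudeExecutionTools } from '@promptbook/anthropic-claude';

    // When isProxied is true, calls go through RemoteLlmExecutionTools with isAnonymous: true
    const tools = createAnthropicClaudeExecutionTools({
        isProxied: true,
        remoteUrl: 'https://promptbook-server.example.com', // <- Assumed option name for the server address
    });

Note a design consequence visible in the code: the factory forwards the remaining options (including any API key) to the server as `llmToolsConfiguration`, so anonymous mode still requires the caller to hold the key; it relocates execution, not credentials.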
- /**
- * List of available OpenAI models with pricing
- *
- * Note: Done at 2024-05-20
- *
- * @see https://platform.openai.com/docs/models/
- * @see https://openai.com/api/pricing/
- * @public exported from `@promptbook/openai`
- */
- var OPENAI_MODELS = [
- /*/
- {
- modelTitle: 'dall-e-3',
- modelName: 'dall-e-3',
- },
- /**/
- /*/
- {
- modelTitle: 'whisper-1',
- modelName: 'whisper-1',
- },
- /**/
- /**/
- {
- modelVariant: 'COMPLETION',
- modelTitle: 'davinci-002',
- modelName: 'davinci-002',
- pricing: {
- prompt: computeUsage("$2.00 / 1M tokens"),
- output: computeUsage("$2.00 / 1M tokens"), // <- not sure
- },
- },
- /**/
- /*/
- {
- modelTitle: 'dall-e-2',
- modelName: 'dall-e-2',
- },
- /**/
- /**/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-3.5-turbo-16k',
- modelName: 'gpt-3.5-turbo-16k',
- pricing: {
- prompt: computeUsage("$3.00 / 1M tokens"),
- output: computeUsage("$4.00 / 1M tokens"),
- },
- },
- /**/
- /*/
- {
- modelTitle: 'tts-1-hd-1106',
- modelName: 'tts-1-hd-1106',
- },
- /**/
- /*/
- {
- modelTitle: 'tts-1-hd',
- modelName: 'tts-1-hd',
- },
- /**/
- /**/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-4',
- modelName: 'gpt-4',
- pricing: {
- prompt: computeUsage("$30.00 / 1M tokens"),
- output: computeUsage("$60.00 / 1M tokens"),
- },
- },
- /**/
- /**/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-4-32k',
- modelName: 'gpt-4-32k',
- pricing: {
- prompt: computeUsage("$60.00 / 1M tokens"),
- output: computeUsage("$120.00 / 1M tokens"),
- },
- },
- /**/
- /*/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-4-0613',
- modelName: 'gpt-4-0613',
- pricing: {
- prompt: computeUsage(` / 1M tokens`),
- output: computeUsage(` / 1M tokens`),
- },
- },
- /**/
- /**/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-4-turbo-2024-04-09',
- modelName: 'gpt-4-turbo-2024-04-09',
- pricing: {
- prompt: computeUsage("$10.00 / 1M tokens"),
- output: computeUsage("$30.00 / 1M tokens"),
- },
- },
- /**/
- /**/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-3.5-turbo-1106',
- modelName: 'gpt-3.5-turbo-1106',
- pricing: {
- prompt: computeUsage("$1.00 / 1M tokens"),
- output: computeUsage("$2.00 / 1M tokens"),
- },
- },
- /**/
- /**/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-4-turbo',
- modelName: 'gpt-4-turbo',
- pricing: {
- prompt: computeUsage("$10.00 / 1M tokens"),
- output: computeUsage("$30.00 / 1M tokens"),
- },
- },
- /**/
- /**/
- {
- modelVariant: 'COMPLETION',
- modelTitle: 'gpt-3.5-turbo-instruct-0914',
- modelName: 'gpt-3.5-turbo-instruct-0914',
- pricing: {
- prompt: computeUsage("$1.50 / 1M tokens"),
- output: computeUsage("$2.00 / 1M tokens"), // <- For gpt-3.5-turbo-instruct
- },
- },
- /**/
- /**/
- {
- modelVariant: 'COMPLETION',
- modelTitle: 'gpt-3.5-turbo-instruct',
- modelName: 'gpt-3.5-turbo-instruct',
- pricing: {
- prompt: computeUsage("$1.50 / 1M tokens"),
- output: computeUsage("$2.00 / 1M tokens"),
- },
- },
- /**/
- /*/
- {
- modelTitle: 'tts-1',
- modelName: 'tts-1',
- },
- /**/
- /**/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-3.5-turbo',
- modelName: 'gpt-3.5-turbo',
- pricing: {
- prompt: computeUsage("$3.00 / 1M tokens"),
- output: computeUsage("$6.00 / 1M tokens"), // <- Not sure, refer to gpt-3.5-turbo in Fine-tuning models
- },
- },
- /**/
- /**/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-3.5-turbo-0301',
- modelName: 'gpt-3.5-turbo-0301',
- pricing: {
- prompt: computeUsage("$1.50 / 1M tokens"),
- output: computeUsage("$2.00 / 1M tokens"),
- },
- },
- /**/
- /**/
- {
- modelVariant: 'COMPLETION',
- modelTitle: 'babbage-002',
- modelName: 'babbage-002',
- pricing: {
- prompt: computeUsage("$0.40 / 1M tokens"),
- output: computeUsage("$0.40 / 1M tokens"), // <- Not sure
- },
- },
- /**/
- /**/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-4-1106-preview',
- modelName: 'gpt-4-1106-preview',
- pricing: {
- prompt: computeUsage("$10.00 / 1M tokens"),
- output: computeUsage("$30.00 / 1M tokens"),
- },
- },
- /**/
- /**/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-4-0125-preview',
- modelName: 'gpt-4-0125-preview',
- pricing: {
- prompt: computeUsage("$10.00 / 1M tokens"),
- output: computeUsage("$30.00 / 1M tokens"),
- },
- },
- /**/
- /*/
- {
- modelTitle: 'tts-1-1106',
- modelName: 'tts-1-1106',
- },
- /**/
- /**/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-3.5-turbo-0125',
- modelName: 'gpt-3.5-turbo-0125',
- pricing: {
- prompt: computeUsage("$0.50 / 1M tokens"),
- output: computeUsage("$1.50 / 1M tokens"),
- },
- },
- /**/
- /**/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-4-turbo-preview',
- modelName: 'gpt-4-turbo-preview',
- pricing: {
- prompt: computeUsage("$10.00 / 1M tokens"),
- output: computeUsage("$30.00 / 1M tokens"), // <- Not sure, just for gpt-4-turbo
- },
- },
- /**/
- /**/
- {
- modelVariant: 'EMBEDDING',
- modelTitle: 'text-embedding-3-large',
- modelName: 'text-embedding-3-large',
- pricing: {
- prompt: computeUsage("$0.13 / 1M tokens"),
- // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
- output: 0, // <- Note: [🆖] In embedding models you don't pay for output
- },
- },
- /**/
- /**/
- {
- modelVariant: 'EMBEDDING',
- modelTitle: 'text-embedding-3-small',
- modelName: 'text-embedding-3-small',
- pricing: {
- prompt: computeUsage("$0.02 / 1M tokens"),
- // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
- output: 0, // <- Note: [🆖] In embedding models you don't pay for output
- },
- },
- /**/
- /**/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-3.5-turbo-0613',
- modelName: 'gpt-3.5-turbo-0613',
- pricing: {
- prompt: computeUsage("$1.50 / 1M tokens"),
- output: computeUsage("$2.00 / 1M tokens"),
- },
- },
- /**/
- /**/
- {
- modelVariant: 'EMBEDDING',
- modelTitle: 'text-embedding-ada-002',
- modelName: 'text-embedding-ada-002',
- pricing: {
- prompt: computeUsage("$0.10 / 1M tokens"),
- // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
- output: 0, // <- Note: [🆖] In embedding models you don't pay for output
- },
- },
- /**/
- /*/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-4-1106-vision-preview',
- modelName: 'gpt-4-1106-vision-preview',
- },
- /**/
- /*/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-4-vision-preview',
- modelName: 'gpt-4-vision-preview',
- pricing: {
- prompt: computeUsage(`$10.00 / 1M tokens`),
- output: computeUsage(`$30.00 / 1M tokens`),
- },
- },
- /**/
- /**/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-4o-2024-05-13',
- modelName: 'gpt-4o-2024-05-13',
- pricing: {
- prompt: computeUsage("$5.00 / 1M tokens"),
- output: computeUsage("$15.00 / 1M tokens"),
- },
- },
- /**/
- /**/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-4o',
- modelName: 'gpt-4o',
- pricing: {
- prompt: computeUsage("$5.00 / 1M tokens"),
- output: computeUsage("$15.00 / 1M tokens"),
- },
- },
- /**/
- /**/
- {
- modelVariant: 'CHAT',
- modelTitle: 'gpt-3.5-turbo-16k-0613',
- modelName: 'gpt-3.5-turbo-16k-0613',
- pricing: {
- prompt: computeUsage("$3.00 / 1M tokens"),
- output: computeUsage("$4.00 / 1M tokens"),
- },
- },
- /**/
- ];
- /**
- * Note: [🤖] Add models of new variant
- * TODO: [🧠] Some mechanism to propagate unsureness
- * TODO: [🎰] Some mechanism to auto-update available models
- * TODO: [🎰][👮‍♀️] Make this list dynamic - model names can be listed dynamically, but not modelVariant, legacy status, context length, and pricing
- * TODO: [🧠][👮‍♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths (top-level performance, intelligence, fluency, and understanding), contextWindow, ...
- * @see https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4
- * @see https://openai.com/api/pricing/
- * @see /other/playground/playground.ts
- * TODO: [🍓] Make better
- * TODO: Change model titles to human-readable ones, e.g. "gpt-4-turbo-2024-04-09" -> "GPT-4 Turbo (2024-04-09)"
- * TODO: [🚸] Not all models are compatible with JSON mode, add this information here and use it
- */
-
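Each entry's `pricing` fields are produced by `computeUsage`, which, judging from the call sites above, turns a human-readable price string into a per-token rate. A hedged re-implementation sketch of that parsing, for illustration only (the real `computeUsage` is not shown in this diff):

    // Illustrative only - not the actual computeUsage implementation
    function computeUsageSketch(priceString: string): number {
        // Example input: "$5.00 / 1M tokens" -> 0.000005 USD per token
        const match = /^\$(?<price>[0-9.]+)\s*\/\s*(?<amount>[0-9.]+)(?<unit>[kM])?\s*tokens$/.exec(priceString.trim());
        if (match === null) {
            throw new Error(`Unparsable price string "${priceString}"`);
        }
        const { price, amount, unit } = match.groups!;
        const multiplier = unit === 'M' ? 1_000_000 : unit === 'k' ? 1_000 : 1;
        return Number(price) / (Number(amount) * multiplier);
    }

    console.info(computeUsageSketch('$5.00 / 1M tokens')); // 0.000005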
- /**
- * Execution Tools for calling Azure OpenAI API.
- *
- * @public exported from `@promptbook/azure-openai`
- */
- var AzureOpenAiExecutionTools = /** @class */ (function () {
- /**
- * Creates Azure OpenAI Execution Tools.
- *
- * @param options Options that are relevant are passed directly to the Azure OpenAI client
- */
- function AzureOpenAiExecutionTools(options) {
- this.options = options;
- /**
- * Azure OpenAI API client.
- */
- this.client = null;
- }
- Object.defineProperty(AzureOpenAiExecutionTools.prototype, "title", {
- get: function () {
- return 'Azure OpenAI';
- },
- enumerable: false,
- configurable: true
- });
- Object.defineProperty(AzureOpenAiExecutionTools.prototype, "description", {
- get: function () {
- return 'Use all models trained by OpenAI, provided by Azure';
- },
- enumerable: false,
- configurable: true
- });
- AzureOpenAiExecutionTools.prototype.getClient = function () {
- return __awaiter(this, void 0, void 0, function () {
- return __generator(this, function (_a) {
- if (this.client === null) {
- this.client = new openai.OpenAIClient("https://".concat(this.options.resourceName, ".openai.azure.com/"), new openai.AzureKeyCredential(this.options.apiKey));
- }
- return [2 /*return*/, this.client];
- });
- });
- };
- /**
- * Check the `options` passed to `constructor`
- */
- AzureOpenAiExecutionTools.prototype.checkConfiguration = function () {
- return __awaiter(this, void 0, void 0, function () {
- return __generator(this, function (_a) {
- switch (_a.label) {
- case 0: return [4 /*yield*/, this.getClient()];
- case 1:
- _a.sent();
- return [2 /*return*/];
- }
- });
- });
- };
- /**
- * List all available Azure OpenAI models that can be used
- */
- AzureOpenAiExecutionTools.prototype.listModels = function () {
- return __awaiter(this, void 0, void 0, function () {
- return __generator(this, function (_a) {
- // TODO: !!! Do some filtering here for which models are really available as deployments
- // @see https://management.azure.com/subscriptions/subscriptionId/resourceGroups/resourceGroupName/providers/Microsoft.CognitiveServices/accounts/accountName/deployments?api-version=2023-05-01
- return [2 /*return*/, OPENAI_MODELS.map(function (_a) {
- var modelTitle = _a.modelTitle, modelName = _a.modelName, modelVariant = _a.modelVariant;
- return ({
- modelTitle: "Azure ".concat(modelTitle),
- modelName: modelName,
- modelVariant: modelVariant,
- });
- })];
- });
- });
- };
- /**
- * Calls Azure OpenAI API to use a chat model.
- */
- AzureOpenAiExecutionTools.prototype.callChatModel = function (prompt) {
- var _a, _b;
- return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, client, modelName, modelSettings, rawPromptContent, messages, start, complete, rawRequest, rawResponse, resultContent, usage, error_1;
- return __generator(this, function (_c) {
- switch (_c.label) {
- case 0:
- if (this.options.isVerbose) {
- console.info('💬 Azure OpenAI callChatModel call');
- }
- content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
- return [4 /*yield*/, this.getClient()];
- case 1:
- client = _c.sent();
- // TODO: [☂] Use here more modelRequirements
- if (modelRequirements.modelVariant !== 'CHAT') {
- throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
- }
- _c.label = 2;
- case 2:
- _c.trys.push([2, 4, , 5]);
- modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
- modelSettings = {
- maxTokens: modelRequirements.maxTokens,
- // <- TODO: [🌾] Make some global max cap for maxTokens
- temperature: modelRequirements.temperature,
- user: this.options.user,
- // <- TODO: [🈁] Use `seed` here AND/OR use `isDeterministic` for the entire execution tools
- // <- Note: [🧆]
- };
- rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
- messages = __spreadArray(__spreadArray([], __read((modelRequirements.systemMessage === undefined
- ? []
- : [
- {
- role: 'system',
- content: modelRequirements.systemMessage,
- },
- ])), false), [
- {
- role: 'user',
- content: rawPromptContent,
- },
- ], false);
- start = getCurrentIsoDate();
- complete = void 0;
- if (this.options.isVerbose) {
- console.info(colors__default["default"].bgWhite('messages'), JSON.stringify(messages, null, 4));
- }
- rawRequest = [modelName, messages, modelSettings];
- return [4 /*yield*/, client.getChatCompletions.apply(client, __spreadArray([], __read(rawRequest), false))];
- case 3:
- rawResponse = _c.sent();
- if (this.options.isVerbose) {
- console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
- }
- if (!rawResponse.choices[0]) {
- throw new PipelineExecutionError('No choices from Azure OpenAI');
- }
- if (rawResponse.choices.length > 1) {
- // TODO: This should maybe be only a warning
- throw new PipelineExecutionError('More than one choice from Azure OpenAI');
- }
- if (!rawResponse.choices[0].message || !rawResponse.choices[0].message.content) {
- throw new PipelineExecutionError('Empty response from Azure OpenAI');
- }
- resultContent = rawResponse.choices[0].message.content;
- // eslint-disable-next-line prefer-const
- complete = getCurrentIsoDate();
- usage = {
- price: uncertainNumber() /* <- TODO: [🐞] Compute usage */,
- input: __assign({ tokensCount: uncertainNumber((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.promptTokens) }, computeUsageCounts(prompt.content)),
- output: __assign({ tokensCount: uncertainNumber((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completionTokens) }, computeUsageCounts(resultContent)),
- };
- return [2 /*return*/, {
- content: resultContent,
- modelName: modelName,
- timing: {
- start: start,
- complete: complete,
- },
- usage: usage,
- rawPromptContent: rawPromptContent,
- rawRequest: rawRequest,
- rawResponse: rawResponse,
- // <- [🗯]
- }];
- case 4:
- error_1 = _c.sent();
- throw this.transformAzureError(error_1);
- case 5: return [2 /*return*/];
- }
- });
- });
- };
- /**
- * Calls Azure OpenAI API to use a completion model.
- */
- AzureOpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
- var _a, _b;
- return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, client, modelName, modelSettings, start, complete, rawPromptContent, rawRequest, rawResponse, resultContent, usage, error_2;
- return __generator(this, function (_c) {
- switch (_c.label) {
- case 0:
- if (this.options.isVerbose) {
- console.info('🖋 Azure OpenAI callCompletionModel call');
- }
- content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
- return [4 /*yield*/, this.getClient()];
- case 1:
- client = _c.sent();
- // TODO: [☂] Use here more modelRequirements
- if (modelRequirements.modelVariant !== 'COMPLETION') {
- throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
- }
- _c.label = 2;
- case 2:
- _c.trys.push([2, 4, , 5]);
- modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
- modelSettings = {
- maxTokens: modelRequirements.maxTokens || 2000,
- // <- TODO: [🌾] Make some global max cap for maxTokens
- temperature: modelRequirements.temperature,
- user: this.options.user,
- // <- TODO: [🈁] Use `seed` here AND/OR use `isDeterministic` for the entire execution tools
- // <- Note: [🧆]
- };
- start = getCurrentIsoDate();
- complete = void 0;
- if (this.options.isVerbose) {
- console.info(colors__default["default"].bgWhite('content'), JSON.stringify(content, null, 4));
- console.info(colors__default["default"].bgWhite('parameters'), JSON.stringify(parameters, null, 4));
- }
- rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
- rawRequest = [
- modelName,
- [rawPromptContent],
- modelSettings,
- ];
- return [4 /*yield*/, client.getCompletions.apply(client, __spreadArray([], __read(rawRequest), false))];
- case 3:
- rawResponse = _c.sent();
- if (this.options.isVerbose) {
- console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
- }
- if (!rawResponse.choices[0]) {
- throw new PipelineExecutionError('No choices from Azure OpenAI');
- }
- if (rawResponse.choices.length > 1) {
- // TODO: This should maybe be only a warning
- throw new PipelineExecutionError('More than one choice from Azure OpenAI');
- }
- resultContent = rawResponse.choices[0].text;
- // eslint-disable-next-line prefer-const
- complete = getCurrentIsoDate();
- usage = {
- price: uncertainNumber() /* <- TODO: [🐞] Compute usage */,
- input: __assign({ tokensCount: uncertainNumber((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.promptTokens) }, computeUsageCounts(prompt.content)),
- output: __assign({ tokensCount: uncertainNumber((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completionTokens) }, computeUsageCounts(resultContent)),
- };
- return [2 /*return*/, {
- content: resultContent,
- modelName: modelName,
- timing: {
- start: start,
- complete: complete,
- },
- usage: usage,
- rawPromptContent: rawPromptContent,
- rawRequest: rawRequest,
- rawResponse: rawResponse,
- // <- [🗯]
- }];
- case 4:
- error_2 = _c.sent();
- throw this.transformAzureError(error_2);
- case 5: return [2 /*return*/];
- }
- });
- });
- };
- // <- Note: [🤖] callXxxModel
- /**
- * Converts an Azure error (which is a plain object, not a proper Error) into a proper Error
- */
- AzureOpenAiExecutionTools.prototype.transformAzureError = function (azureError) {
- if (typeof azureError !== 'object' || azureError === null) {
- return new PipelineExecutionError("Unknown Azure OpenAI error");
- }
- var code = azureError.code, message = azureError.message;
- return new PipelineExecutionError("".concat(code, ": ").concat(message));
- };
- return AzureOpenAiExecutionTools;
- }());
- /**
- * TODO: Maybe create some common util for callChatModel and callCompletionModel
- * TODO: Maybe make custom AzureOpenAiError
- * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
- * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
- */
-
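Grounded in the `getClient` call above, which builds the endpoint as `https://<resourceName>.openai.azure.com/` with an `AzureKeyCredential`, a minimal construction sketch; `resourceName`, `deploymentName`, and `apiKey` are the option names the class actually reads:

    import { AzureOpenAiExecutionTools } from '@promptbook/azure-openai';

    const tools = new AzureOpenAiExecutionTools({
        resourceName: 'my-resource', // <- Becomes https://my-resource.openai.azure.com/
        deploymentName: 'gpt-4o-deployment', // <- Used as model name when modelRequirements.modelName is not set
        apiKey: process.env.AZURE_OPENAI_API_KEY!,
        isVerbose: true,
    });

    await tools.checkConfiguration(); // <- Currently this only instantiates the client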
- /**
- * Computes the usage of the OpenAI API based on the response from OpenAI
- *
- * @param promptContent The content of the prompt
- * @param resultContent The content of the result (for embedding prompts or failed prompts pass empty string)
- * @param rawResponse The raw response from OpenAI API
- * @throws {PipelineExecutionError} If the usage is not defined in the response from OpenAI
- * @private internal utility of `OpenAiExecutionTools`
- */
- function computeOpenAiUsage(promptContent, // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
- resultContent, rawResponse) {
- var _a, _b;
- if (rawResponse.usage === undefined) {
- throw new PipelineExecutionError('The usage is not defined in the response from OpenAI');
- }
- if (((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.prompt_tokens) === undefined) {
- throw new PipelineExecutionError('In OpenAI response `usage.prompt_tokens` is not defined');
- }
- var inputTokens = rawResponse.usage.prompt_tokens;
- var outputTokens = ((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completion_tokens) || 0;
- var modelInfo = OPENAI_MODELS.find(function (model) { return model.modelName === rawResponse.model; });
- var price;
- if (modelInfo === undefined || modelInfo.pricing === undefined) {
- price = uncertainNumber();
- }
- else {
- price = uncertainNumber(inputTokens * modelInfo.pricing.prompt + outputTokens * modelInfo.pricing.output);
- }
- return {
- price: price,
- input: __assign({ tokensCount: uncertainNumber(rawResponse.usage.prompt_tokens) }, computeUsageCounts(promptContent)),
- output: __assign({ tokensCount: uncertainNumber(outputTokens) }, computeUsageCounts(resultContent)),
- };
- }
- /**
- * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
- */
-
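A worked example of the price formula above (`inputTokens * pricing.prompt + outputTokens * pricing.output`), using the gpt-4o rates from `OPENAI_MODELS`:

    // gpt-4o: $5.00 / 1M prompt tokens, $15.00 / 1M output tokens
    const promptRate = 5.0 / 1_000_000; // 0.000005 USD per input token
    const outputRate = 15.0 / 1_000_000; // 0.000015 USD per output token

    const inputTokens = 1_000;
    const outputTokens = 500;

    const price = inputTokens * promptRate + outputTokens * outputRate;
    console.info(price); // 0.005 + 0.0075 = 0.0125 USD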
- /**
- * Execution Tools for calling OpenAI API
- *
- * @public exported from `@promptbook/openai`
- */
- var OpenAiExecutionTools = /** @class */ (function () {
- /**
- * Creates OpenAI Execution Tools.
- *
- * @param options Options that are relevant are passed directly to the OpenAI client
- */
- function OpenAiExecutionTools(options) {
- if (options === void 0) { options = {}; }
- this.options = options;
- /**
- * OpenAI API client.
- */
- this.client = null;
- }
- Object.defineProperty(OpenAiExecutionTools.prototype, "title", {
- get: function () {
- return 'OpenAI';
- },
- enumerable: false,
- configurable: true
- });
- Object.defineProperty(OpenAiExecutionTools.prototype, "description", {
- get: function () {
- return 'Use all models provided by OpenAI';
- },
- enumerable: false,
- configurable: true
- });
- OpenAiExecutionTools.prototype.getClient = function () {
- return __awaiter(this, void 0, void 0, function () {
- var openAiOptions;
- return __generator(this, function (_a) {
- if (this.client === null) {
- openAiOptions = __assign({}, this.options);
- delete openAiOptions.isVerbose;
- delete openAiOptions.user;
- this.client = new OpenAI__default["default"](__assign({}, openAiOptions));
- }
- return [2 /*return*/, this.client];
- });
- });
- };
- /**
- * Check the `options` passed to `constructor`
- */
- OpenAiExecutionTools.prototype.checkConfiguration = function () {
- return __awaiter(this, void 0, void 0, function () {
- return __generator(this, function (_a) {
- switch (_a.label) {
- case 0: return [4 /*yield*/, this.getClient()];
- case 1:
- _a.sent();
- return [2 /*return*/];
- }
- });
- });
- };
- /**
- * List all available OpenAI models that can be used
- */
- OpenAiExecutionTools.prototype.listModels = function () {
- /*
- Note: Dynamic listing of the models
- const models = await this.openai.models.list({});
-
- console.log({ models });
- console.log(models.data);
- */
- return OPENAI_MODELS;
- };
- /**
- * Calls OpenAI API to use a chat model.
- */
- OpenAiExecutionTools.prototype.callChatModel = function (prompt) {
- return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, expectFormat, client, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
- return __generator(this, function (_a) {
- switch (_a.label) {
- case 0:
- if (this.options.isVerbose) {
- console.info('💬 OpenAI callChatModel call', { prompt: prompt });
- }
- content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements, expectFormat = prompt.expectFormat;
- return [4 /*yield*/, this.getClient()];
- case 1:
- client = _a.sent();
- // TODO: [☂] Use here more modelRequirements
- if (modelRequirements.modelVariant !== 'CHAT') {
- throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
- }
- modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
- modelSettings = {
- model: modelName,
- max_tokens: modelRequirements.maxTokens,
- // <- TODO: [🌾] Make some global max cap for maxTokens
- temperature: modelRequirements.temperature,
- // <- TODO: [🈁] Use `seed` here AND/OR use `isDeterministic` for the entire execution tools
- // <- Note: [🧆]
- };
- if (expectFormat === 'JSON') {
- modelSettings.response_format = {
- type: 'json_object',
- };
- }
- rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
- rawRequest = __assign(__assign({}, modelSettings), { messages: __spreadArray(__spreadArray([], __read((modelRequirements.systemMessage === undefined
- ? []
- : [
- {
- role: 'system',
- content: modelRequirements.systemMessage,
- },
- ])), false), [
- {
- role: 'user',
- content: rawPromptContent,
- },
- ], false), user: this.options.user });
- start = getCurrentIsoDate();
- if (this.options.isVerbose) {
- console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
- }
- return [4 /*yield*/, client.chat.completions.create(rawRequest)];
- case 2:
- rawResponse = _a.sent();
- if (this.options.isVerbose) {
- console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
- }
- if (!rawResponse.choices[0]) {
- throw new PipelineExecutionError('No choices from OpenAI');
- }
- if (rawResponse.choices.length > 1) {
- // TODO: This should maybe be only a warning
- throw new PipelineExecutionError('More than one choice from OpenAI');
- }
- resultContent = rawResponse.choices[0].message.content;
- // eslint-disable-next-line prefer-const
- complete = getCurrentIsoDate();
- usage = computeOpenAiUsage(content, resultContent || '', rawResponse);
- if (resultContent === null) {
- throw new PipelineExecutionError('No response message from OpenAI');
- }
- return [2 /*return*/, {
- content: resultContent,
- modelName: rawResponse.model || modelName,
- timing: {
- start: start,
- complete: complete,
- },
- usage: usage,
- rawPromptContent: rawPromptContent,
- rawRequest: rawRequest,
- rawResponse: rawResponse,
- // <- [🗯]
- }];
- }
- });
- });
- };
- /**
- * Calls OpenAI API to use a completion model.
- */
- OpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
- return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, client, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
- return __generator(this, function (_a) {
- switch (_a.label) {
- case 0:
- if (this.options.isVerbose) {
- console.info('🖋 OpenAI callCompletionModel call', { prompt: prompt });
- }
- content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
- return [4 /*yield*/, this.getClient()];
- case 1:
- client = _a.sent();
- // TODO: [☂] Use here more modelRequirements
- if (modelRequirements.modelVariant !== 'COMPLETION') {
- throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
- }
- modelName = modelRequirements.modelName || this.getDefaultCompletionModel().modelName;
- modelSettings = {
- model: modelName,
- max_tokens: modelRequirements.maxTokens || 2000,
- // <- TODO: [🌾] Make some global max cap for maxTokens
- temperature: modelRequirements.temperature,
- // <- TODO: [🈁] Use `seed` here AND/OR use `isDeterministic` for the entire execution tools
- // <- Note: [🧆]
- };
- rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
- rawRequest = __assign(__assign({}, modelSettings), { prompt: rawPromptContent, user: this.options.user });
- start = getCurrentIsoDate();
- if (this.options.isVerbose) {
- console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
- }
- return [4 /*yield*/, client.completions.create(rawRequest)];
- case 2:
- rawResponse = _a.sent();
- if (this.options.isVerbose) {
- console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
- }
- if (!rawResponse.choices[0]) {
- throw new PipelineExecutionError('No choices from OpenAI');
- }
- if (rawResponse.choices.length > 1) {
- // TODO: This should maybe be only a warning
- throw new PipelineExecutionError('More than one choice from OpenAI');
- }
- resultContent = rawResponse.choices[0].text;
- // eslint-disable-next-line prefer-const
- complete = getCurrentIsoDate();
- usage = computeOpenAiUsage(content, resultContent || '', rawResponse);
- return [2 /*return*/, {
- content: resultContent,
- modelName: rawResponse.model || modelName,
- timing: {
- start: start,
- complete: complete,
- },
- usage: usage,
- rawPromptContent: rawPromptContent,
- rawRequest: rawRequest,
- rawResponse: rawResponse,
- // <- [🗯]
- }];
- }
- });
- });
- };
- /**
- * Calls OpenAI API to use an embedding model
- */
- OpenAiExecutionTools.prototype.callEmbeddingModel = function (prompt) {
- return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, client, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
- return __generator(this, function (_a) {
- switch (_a.label) {
- case 0:
- if (this.options.isVerbose) {
- console.info('🖋 OpenAI embedding call', { prompt: prompt });
- }
- content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
- return [4 /*yield*/, this.getClient()];
- case 1:
- client = _a.sent();
- // TODO: [☂] Use here more modelRequirements
- if (modelRequirements.modelVariant !== 'EMBEDDING') {
- throw new PipelineExecutionError('Use callEmbeddingModel only for EMBEDDING variant');
- }
- modelName = modelRequirements.modelName || this.getDefaultEmbeddingModel().modelName;
- rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
- rawRequest = {
- input: rawPromptContent,
- model: modelName,
- };
- start = getCurrentIsoDate();
- if (this.options.isVerbose) {
- console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
- }
- return [4 /*yield*/, client.embeddings.create(rawRequest)];
- case 2:
- rawResponse = _a.sent();
- if (this.options.isVerbose) {
- console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
- }
- if (rawResponse.data.length !== 1) {
- throw new PipelineExecutionError("Expected exactly 1 data item in response, got ".concat(rawResponse.data.length));
- }
- resultContent = rawResponse.data[0].embedding;
- // eslint-disable-next-line prefer-const
- complete = getCurrentIsoDate();
- usage = computeOpenAiUsage(content, '', rawResponse);
- return [2 /*return*/, {
- content: resultContent,
- modelName: rawResponse.model || modelName,
- timing: {
- start: start,
- complete: complete,
- },
- usage: usage,
- rawPromptContent: rawPromptContent,
- rawRequest: rawRequest,
- rawResponse: rawResponse,
- // <- [🗯]
- }];
- }
- });
- });
- };
- // <- Note: [🤖] callXxxModel
- /**
- * Get the model that should be used as default
- */
- OpenAiExecutionTools.prototype.getDefaultModel = function (defaultModelName) {
- var model = OPENAI_MODELS.find(function (_a) {
- var modelName = _a.modelName;
- return modelName === defaultModelName;
- });
- if (model === undefined) {
- throw new UnexpectedError(spaceTrim__default["default"](function (block) {
- return "\n Cannot find model in OpenAI models with name \"".concat(defaultModelName, "\" which should be used as default.\n\n Available models:\n ").concat(block(OPENAI_MODELS.map(function (_a) {
- var modelName = _a.modelName;
- return "- \"".concat(modelName, "\"");
- }).join('\n')), "\n\n ");
- }));
- }
- return model;
- };
- /**
- * Default model for chat variant.
- */
- OpenAiExecutionTools.prototype.getDefaultChatModel = function () {
- return this.getDefaultModel('gpt-4o');
- };
- /**
- * Default model for completion variant.
- */
- OpenAiExecutionTools.prototype.getDefaultCompletionModel = function () {
- return this.getDefaultModel('gpt-3.5-turbo-instruct');
- };
- /**
- * Default model for embedding variant.
- */
- OpenAiExecutionTools.prototype.getDefaultEmbeddingModel = function () {
- return this.getDefaultModel('text-embedding-3-large');
- };
- return OpenAiExecutionTools;
- }());
- /**
- * TODO: [🧠][🧙‍♂️] Maybe there can be some wizard for those who want to use just OpenAI
- * TODO: Maybe create some common util for callChatModel and callCompletionModel
- * TODO: Maybe make custom OpenAiError
- * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
- * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
- */
-
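One detail worth calling out from `callChatModel` above: when `prompt.expectFormat === 'JSON'`, the request gains `response_format: { type: 'json_object' }`. A hedged usage sketch (the `apiKey` option name mirrors the OpenAI SDK, to which options are forwarded):

    import { OpenAiExecutionTools } from '@promptbook/openai';

    const tools = new OpenAiExecutionTools({
        apiKey: process.env.OPENAI_API_KEY, // <- Forwarded to the OpenAI SDK client
    });

    const result = await tools.callChatModel({
        content: 'List three colors as JSON',
        parameters: {},
        modelRequirements: { modelVariant: 'CHAT' }, // <- Falls back to the default 'gpt-4o'
        expectFormat: 'JSON', // <- Maps to response_format: { type: 'json_object' }
    });
    console.info(JSON.parse(result.content));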
- /**
- * Execution Tools for calling OpenAI API
- *
- * @public exported from `@promptbook/openai`
- */
- var createOpenAiExecutionTools = Object.assign(function (options) {
- // TODO: !!!!!! If browser, auto add `dangerouslyAllowBrowser`
- return new OpenAiExecutionTools(options);
- }, {
- packageName: '@promptbook/openai',
- className: 'OpenAiExecutionTools',
- });
- /**
- * TODO: [🦺] Is there some way to put `packageName` and `className` on top and the function definition on the bottom?
- * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
- */
-
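The `!!!!!!` TODO above could be resolved with a simple environment check before constructing the tools. A sketch of one possible shape, purely hypothetical and not what the package currently does:

    // Hypothetical resolution of the TODO above - illustrative only
    function withBrowserDefaults<TOptions extends { dangerouslyAllowBrowser?: boolean }>(options: TOptions): TOptions {
        // Detect a browser environment by the presence of window.document
        const isBrowser = typeof window !== 'undefined' && typeof window.document !== 'undefined';
        return isBrowser ? { dangerouslyAllowBrowser: true, ...options } : options;
    }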
- /**
- * @@@
- *
- * TODO: !!!!!! Remove EXECUTION_TOOLS_CLASSES and use $llmToolsRegister instead
- *
- * @private internal type for `createLlmToolsFromConfiguration`
- */
- var EXECUTION_TOOLS_CLASSES = {
- createOpenAiExecutionTools: createOpenAiExecutionTools,
- createAnthropicClaudeExecutionTools: createAnthropicClaudeExecutionTools,
- createAzureOpenAiExecutionTools: function (options) {
- return new AzureOpenAiExecutionTools(
- // <- TODO: [🧱] Implement in a functional (not new Class) way
- options);
- },
- // <- Note: [🦑] Add here new LLM provider
- };
- /**
- * TODO: !!!!!!! Make a global register for this
- * TODO: [🧠][🎌] Adding this should be the responsibility of each provider package, NOT this one central place
- */
-
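The TODO above wants each provider package to register itself instead of maintaining this central map. A generic sketch of such a register; the real `$llmToolsRegister` shape is not shown in this diff, so treat this as an illustration of the pattern only:

    // Illustrative register - not the actual $llmToolsRegister API
    class Register<TItem extends { packageName: string; className: string }> {
        private readonly items: Array<TItem> = [];

        public register(item: TItem): void {
            this.items.push(item);
        }

        public get(packageName: string, className: string): TItem | undefined {
            return this.items.find(
                (item) => item.packageName === packageName && item.className === className,
            );
        }
    }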
- /**
- * @@@
- *
- * Note: This function is not cached, every call creates a new instance of `MultipleLlmExecutionTools`
- *
- * @returns @@@
- * @public exported from `@promptbook/core`
- */
- function createLlmToolsFromConfiguration(configuration, options) {
- if (options === void 0) { options = {}; }
- var _a = options.isVerbose, isVerbose = _a === void 0 ? false : _a;
- var llmTools = configuration.map(function (llmConfiguration) {
- var constructor = EXECUTION_TOOLS_CLASSES["create".concat(llmConfiguration.className)];
- if (!constructor) {
- throw new Error(spaceTrim__default["default"](function (block) { return "\n There is no constructor for LLM provider `".concat(llmConfiguration.className, "`\n\n\n @@@\n\n Available constructors are:\n ").concat(block('@@@'), "\n\n\n "); }));
- }
- return constructor(__assign({ isVerbose: isVerbose }, llmConfiguration.options));
- });
- return joinLlmExecutionTools.apply(void 0, __spreadArray([], __read(llmTools), false));
- }
- /**
- * TODO: [🎌] Together with `createLlmToolsFromConfiguration` + 'EXECUTION_TOOLS_CLASSES', ALL model providers get into `@promptbook/core`; make this more efficient
- * TODO: [🧠][🎌] Dynamically install required providers
- * TODO: @@@ write discussion about this - wizard
- * TODO: [🧠][🍛] Which name is better `createLlmToolsFromConfig` or `createLlmToolsFromConfiguration`?
- * TODO: [🧠] Is there some meaningful way to test this util
- * TODO: This should maybe live under `utils` rather than `_common`
- */
-
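Grounded in the lookup above (the constructor is resolved as `EXECUTION_TOOLS_CLASSES['create' + className]` and `llmConfiguration.options` is spread into the call), a sketch of a matching configuration array. The `apiKey` option name is an assumption; `title` and `packageName` follow the shape used by the proxied Anthropic factory earlier in this file:

    import { createLlmToolsFromConfiguration } from '@promptbook/core';

    const llmTools = createLlmToolsFromConfiguration(
        [
            {
                title: 'OpenAI',
                packageName: '@promptbook/openai',
                className: 'OpenAiExecutionTools', // <- Resolved to createOpenAiExecutionTools
                options: { apiKey: process.env.OPENAI_API_KEY }, // <- Assumed option name
            },
        ],
        { isVerbose: true },
    );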
- /**
- * Remote server is a proxy server that uses its execution tools internally and exposes the executor interface externally.
- *
- * You can simply use `RemoteExecutionTools` on client-side javascript and connect to your remote server.
- * This is useful when you want to keep all logic on the browser side without exposing your API keys, or when you don't want to use the customer's GPU.
+ * You can simply use `RemoteExecutionTools` on client-side javascript and connect to your remote server.
+ * This is useful when you want to keep all logic on the browser side without exposing your API keys, or when you don't want to use the customer's GPU.
  *
  * @see https://github.com/webgptorg/promptbook#remote-server
  * @public exported from `@promptbook/remote-server`
@@ -2701,12 +752,12 @@
  });
  server.on('connection', function (socket) {
  console.info(colors__default["default"].gray("Client connected"), socket.id);
- socket.on('request', function (request) { return __awaiter(_this, void 0, void 0, function () {
- var _a, prompt, clientId, llmToolsConfiguration, llmExecutionTools, promptResult, _b, error_1;
+ socket.on('prompt-request', function (request) { return __awaiter(_this, void 0, void 0, function () {
+ var _a, isAnonymous, prompt, clientId, llmToolsConfiguration, llmExecutionTools, promptResult, _b, error_1;
  return __generator(this, function (_c) {
  switch (_c.label) {
  case 0:
- _a = __assign({ clientId: null, llmToolsConfiguration: null }, request), prompt = _a.prompt, clientId = _a.clientId, llmToolsConfiguration = _a.llmToolsConfiguration;
+ _a = __assign({ clientId: null, llmToolsConfiguration: null }, request), isAnonymous = _a.isAnonymous, prompt = _a.prompt, clientId = _a.clientId, llmToolsConfiguration = _a.llmToolsConfiguration;
  // <- TODO: [🦪] Some helper type to be able to use discriminant union types with destructuring
  if (isVerbose) {
  console.info(colors__default["default"].bgWhite("Prompt:"), colors__default["default"].gray(JSON.stringify(request, null, 4)));
@@ -2714,20 +765,20 @@
  _c.label = 1;
  case 1:
  _c.trys.push([1, 14, 15, 16]);
- if (llmToolsConfiguration !== null && !isAnonymousModeAllowed) {
+ if (isAnonymous === true && !isAnonymousModeAllowed) {
  throw new PipelineExecutionError("Anonymous mode is not allowed"); // <- TODO: !!! Test
  }
- if (clientId !== null && !isCollectionModeAllowed) {
+ if (isAnonymous === false && !isCollectionModeAllowed) {
  throw new PipelineExecutionError("Collection mode is not allowed"); // <- TODO: !!! Test
  }
  llmExecutionTools = void 0;
- if (!(llmToolsConfiguration !== null)) return [3 /*break*/, 2];
+ if (!(isAnonymous === true && llmToolsConfiguration !== null)) return [3 /*break*/, 2];
  // Note: Anonymous mode
  // TODO: Maybe check that configuration is not empty
  llmExecutionTools = createLlmToolsFromConfiguration(llmToolsConfiguration, { isVerbose: isVerbose });
  return [3 /*break*/, 5];
  case 2:
- if (!(createLlmExecutionTools !== null)) return [3 /*break*/, 4];
+ if (!(isAnonymous === false && createLlmExecutionTools !== null)) return [3 /*break*/, 4];
  // Note: Collection mode
  llmExecutionTools = createLlmExecutionTools(clientId);
  return [4 /*yield*/, collection.isResponsibleForPrompt(prompt)];
@@ -2736,7 +787,7 @@
  throw new PipelineExecutionError("Pipeline is not in the collection of this server");
  }
  return [3 /*break*/, 5];
- case 4: throw new PipelineExecutionError("You must provide either llmToolsConfiguration or createLlmExecutionTools");
+ case 4: throw new PipelineExecutionError("You must provide either llmToolsConfiguration or non-anonymous mode must be properly configured");
  case 5:
  promptResult = void 0;
  _b = prompt.modelRequirements.modelVariant;
@@ -2778,7 +829,7 @@
  if (isVerbose) {
  console.info(colors__default["default"].bgGreen("PromptResult:"), colors__default["default"].green(JSON.stringify(promptResult, null, 4)));
  }
- socket.emit('response', { promptResult: promptResult });
+ socket.emit('prompt-response', { promptResult: promptResult });
  return [3 /*break*/, 16];
  case 14:
  error_1 = _c.sent();
@@ -2794,6 +845,55 @@
  }
  });
  }); });
+ // TODO: [👒] Listing models (and checking configuration) probably should go through REST API, not Socket.io
+ socket.on('listModels-request', function (request) { return __awaiter(_this, void 0, void 0, function () {
+ var _a, isAnonymous, clientId, llmToolsConfiguration, llmExecutionTools, models, error_2;
+ return __generator(this, function (_b) {
+ switch (_b.label) {
+ case 0:
+ _a = __assign({ clientId: null, llmToolsConfiguration: null }, request), isAnonymous = _a.isAnonymous, clientId = _a.clientId, llmToolsConfiguration = _a.llmToolsConfiguration;
+ // <- TODO: [🦪] Some helper type to be able to use discriminant union types with destructuring
+ if (isVerbose) {
+ console.info(colors__default["default"].bgWhite("Listing models"));
+ }
+ _b.label = 1;
+ case 1:
+ _b.trys.push([1, 3, 4, 5]);
+ if (isAnonymous === true && !isAnonymousModeAllowed) {
+ throw new PipelineExecutionError("Anonymous mode is not allowed"); // <- TODO: !!! Test
+ }
+ if (isAnonymous === false && !isCollectionModeAllowed) {
+ throw new PipelineExecutionError("Collection mode is not allowed"); // <- TODO: !!! Test
+ }
+ llmExecutionTools = void 0;
+ if (isAnonymous === true) {
+ // Note: Anonymous mode
+ // TODO: Maybe check that configuration is not empty
+ llmExecutionTools = createLlmToolsFromConfiguration(llmToolsConfiguration, { isVerbose: isVerbose });
+ }
+ else {
+ // Note: Collection mode
+ llmExecutionTools = createLlmExecutionTools(clientId);
+ }
+ return [4 /*yield*/, llmExecutionTools.listModels()];
+ case 2:
+ models = _b.sent();
+ socket.emit('listModels-response', { models: models });
+ return [3 /*break*/, 5];
+ case 3:
+ error_2 = _b.sent();
+ if (!(error_2 instanceof Error)) {
+ throw error_2;
+ }
+ socket.emit('error', { errorMessage: error_2.message });
+ return [3 /*break*/, 5];
+ case 4:
+ socket.disconnect();
+ return [7 /*endfinally*/];
+ case 5: return [2 /*return*/];
+ }
+ });
+ }); });
  socket.on('disconnect', function () {
  // TODO: Destroy here executionToolsForClient
  if (isVerbose) {
@@ -2823,6 +923,7 @@
  };
  }
  /**
+ * TODO: [🧠][🛍] Maybe not `isAnonymous: boolean` BUT `mode: 'ANONYMOUS'|'COLLECTION'`
  * TODO: [⚖] Expose the collection to be able to connect to the same collection via createCollectionFromUrl
  * TODO: Handle progress - support streaming
  * TODO: [🗯] Do not hang up immediately but wait until client closes OR timeout
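The renamed socket events (`prompt-request`/`prompt-response`, plus the new `listModels-request`) imply a client exchange like the following sketch, using socket.io-client directly; in practice `RemoteLlmExecutionTools` wraps this, and the server URL is hypothetical:

    import { io } from 'socket.io-client';

    // A sketch of the wire protocol implied by the handlers above
    const socket = io('https://promptbook-server.example.com'); // <- Hypothetical server URL

    socket.emit('listModels-request', {
        isAnonymous: true,
        llmToolsConfiguration: [], // <- Same shape as accepted by createLlmToolsFromConfiguration
    });

    socket.on('listModels-response', ({ models }) => {
        console.info(models);
        socket.disconnect();
    });

    socket.on('error', ({ errorMessage }) => {
        console.error(errorMessage);
    });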