@promptbook/remote-server 0.66.0-7 → 0.66.0-9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. package/esm/index.es.js +266 -2167
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/anthropic-claude.index.d.ts +0 -2
  4. package/esm/typings/src/_packages/azure-openai.index.d.ts +4 -0
  5. package/esm/typings/src/_packages/cli.index.d.ts +8 -0
  6. package/esm/typings/src/_packages/core.index.d.ts +2 -0
  7. package/esm/typings/src/_packages/types.index.d.ts +22 -14
  8. package/esm/typings/src/_packages/utils.index.d.ts +7 -7
  9. package/esm/typings/src/execution/LlmExecutionToolsConstructor.d.ts +1 -1
  10. package/esm/typings/src/llm-providers/_common/$llmToolsMetadataRegister.d.ts +2 -2
  11. package/esm/typings/src/llm-providers/_common/$llmToolsRegister.d.ts +2 -2
  12. package/esm/typings/src/llm-providers/_common/$registeredLlmToolsMessage.d.ts +9 -0
  13. package/esm/typings/src/llm-providers/_common/LlmToolsConfiguration.d.ts +1 -1
  14. package/esm/typings/src/llm-providers/_common/LlmToolsMetadata.d.ts +1 -1
  15. package/esm/typings/src/llm-providers/_common/createLlmToolsFromConfigurationFromEnv.d.ts +1 -0
  16. package/esm/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts +1 -1
  17. package/esm/typings/src/llm-providers/anthropic-claude/register-constructor.d.ts +1 -0
  18. package/esm/typings/src/llm-providers/azure-openai/createAzureOpenAiExecutionTools.d.ts +15 -0
  19. package/esm/typings/src/llm-providers/azure-openai/register-configuration.d.ts +9 -0
  20. package/esm/typings/src/llm-providers/azure-openai/register-constructor.d.ts +12 -0
  21. package/esm/typings/src/llm-providers/openai/register-constructor.d.ts +1 -0
  22. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +1 -0
  23. package/esm/typings/src/llm-providers/remote/interfaces/{Promptbook_Server_Error.d.ts → PromptbookServer_Error.d.ts} +1 -1
  24. package/esm/typings/src/llm-providers/remote/interfaces/PromptbookServer_ListModels_Request.d.ts +34 -0
  25. package/esm/typings/src/llm-providers/remote/interfaces/PromptbookServer_ListModels_Response.d.ts +15 -0
  26. package/esm/typings/src/llm-providers/remote/interfaces/{Promptbook_Server_Progress.d.ts → PromptbookServer_Prompt_Progress.d.ts} +1 -1
  27. package/esm/typings/src/llm-providers/remote/interfaces/{Promptbook_Server_Request.d.ts → PromptbookServer_Prompt_Request.d.ts} +15 -3
  28. package/esm/typings/src/llm-providers/remote/interfaces/{Promptbook_Server_Response.d.ts → PromptbookServer_Prompt_Response.d.ts} +1 -1
  29. package/esm/typings/src/llm-providers/remote/interfaces/RemoteLlmExecutionToolsOptions.d.ts +1 -7
  30. package/esm/typings/src/llm-providers/remote/startRemoteServer.d.ts +1 -0
  31. package/esm/typings/src/utils/{Register.d.ts → $Register.d.ts} +6 -2
  32. package/esm/typings/src/utils/environment/{getGlobalScope.d.ts → $getGlobalScope.d.ts} +1 -1
  33. package/esm/typings/src/utils/organization/f.d.ts +6 -0
  34. package/package.json +2 -6
  35. package/umd/index.umd.js +270 -2169
  36. package/umd/index.umd.js.map +1 -1
  37. package/esm/typings/src/llm-providers/_common/config.d.ts +0 -14
  38. package/esm/typings/src/llm-providers/anthropic-claude/register1.d.ts +0 -4
  39. /package/esm/typings/src/llm-providers/mocked/{fakeTextToExpectations.d.ts → $fakeTextToExpectations.d.ts} +0 -0
  40. /package/esm/typings/src/utils/{currentDate.d.ts → $currentDate.d.ts} +0 -0
  41. /package/esm/typings/src/utils/environment/{isRunningInBrowser.d.ts → $isRunningInBrowser.d.ts} +0 -0
  42. /package/esm/typings/src/utils/environment/{isRunningInNode.d.ts → $isRunningInNode.d.ts} +0 -0
  43. /package/esm/typings/src/utils/environment/{isRunningInWebWorker.d.ts → $isRunningInWebWorker.d.ts} +0 -0
  44. /package/esm/typings/src/utils/files/{isDirectoryExisting.d.ts → $isDirectoryExisting.d.ts} +0 -0
  45. /package/esm/typings/src/utils/files/{isDirectoryExisting.test.d.ts → $isDirectoryExisting.test.d.ts} +0 -0
  46. /package/esm/typings/src/utils/files/{isFileExisting.d.ts → $isFileExisting.d.ts} +0 -0
  47. /package/esm/typings/src/utils/files/{isFileExisting.test.d.ts → $isFileExisting.test.d.ts} +0 -0
  48. /package/esm/typings/src/utils/files/{listAllFiles.d.ts → $listAllFiles.d.ts} +0 -0
  49. /package/esm/typings/src/utils/files/{listAllFiles.test.d.ts → $listAllFiles.test.d.ts} +0 -0
  50. /package/esm/typings/src/utils/random/{randomSeed.d.ts → $randomSeed.d.ts} +0 -0
package/esm/index.es.js CHANGED
@@ -2,16 +2,12 @@ import colors from 'colors';
  import http from 'http';
  import { Server } from 'socket.io';
  import spaceTrim$1, { spaceTrim } from 'spacetrim';
- import { io } from 'socket.io-client';
- import Anthropic from '@anthropic-ai/sdk';
- import { OpenAIClient, AzureKeyCredential } from '@azure/openai';
- import OpenAI from 'openai';

  // ⚠️ WARNING: This code has been generated so that any manual changes will be overwritten
  /**
  * The version of the Promptbook library
  */
- var PROMPTBOOK_VERSION = '0.66.0-6';
+ var PROMPTBOOK_VERSION = '0.66.0-8';
  // TODO: !!!! List here all the versions and annotate + put into script

  /*! *****************************************************************************
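The removed SDK imports above are the heart of this release: the remote-server bundle no longer ships the Anthropic, Azure OpenAI and OpenAI clients directly, and providers are instead resolved through the global register introduced further down in this diff. A minimal consumer-side sketch, assuming the install-and-import convention spelled out by the error message added later in this diff (entry points shown are the package roots and should be treated as illustrative):

```ts
// Hedged sketch: each provider package is installed separately and imported
// for its side effect of registering its constructor into the global register.
import '@promptbook/anthropic-claude';
import '@promptbook/azure-openai'; // <- new provider package in this release (see the file list)
```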
@@ -203,8 +199,37 @@ var MultipleLlmExecutionTools = /** @class */ (function () {
  */
  MultipleLlmExecutionTools.prototype.checkConfiguration = function () {
  return __awaiter(this, void 0, void 0, function () {
- return __generator(this, function (_a) {
- return [2 /*return*/];
+ var _a, _b, llmExecutionTools, e_1_1;
+ var e_1, _c;
+ return __generator(this, function (_d) {
+ switch (_d.label) {
+ case 0:
+ _d.trys.push([0, 5, 6, 7]);
+ _a = __values(this.llmExecutionTools), _b = _a.next();
+ _d.label = 1;
+ case 1:
+ if (!!_b.done) return [3 /*break*/, 4];
+ llmExecutionTools = _b.value;
+ return [4 /*yield*/, llmExecutionTools.checkConfiguration()];
+ case 2:
+ _d.sent();
+ _d.label = 3;
+ case 3:
+ _b = _a.next();
+ return [3 /*break*/, 1];
+ case 4: return [3 /*break*/, 7];
+ case 5:
+ e_1_1 = _d.sent();
+ e_1 = { error: e_1_1 };
+ return [3 /*break*/, 7];
+ case 6:
+ try {
+ if (_b && !_b.done && (_c = _a.return)) _c.call(_a);
+ }
+ finally { if (e_1) throw e_1.error; }
+ return [7 /*endfinally*/];
+ case 7: return [2 /*return*/];
+ }
  });
  });
  };
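The `__generator` switch added above is the transpiled ES5 form of a plain sequential loop. A sketch of what it corresponds to in modern TypeScript (names taken from the code above):

```ts
// Equivalent of the state machine above: check each inner tool in order,
// awaiting one configuration check before starting the next.
async function checkConfiguration(
    llmExecutionToolsList: ReadonlyArray<{ checkConfiguration(): Promise<void> }>,
): Promise<void> {
    for (const llmExecutionTools of llmExecutionToolsList) {
        await llmExecutionTools.checkConfiguration();
    }
}
```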
@@ -214,8 +239,8 @@ var MultipleLlmExecutionTools = /** @class */ (function () {
  */
  MultipleLlmExecutionTools.prototype.listModels = function () {
  return __awaiter(this, void 0, void 0, function () {
- var availableModels, _a, _b, llmExecutionTools, models, e_1_1;
- var e_1, _c;
+ var availableModels, _a, _b, llmExecutionTools, models, e_2_1;
+ var e_2, _c;
  return __generator(this, function (_d) {
  switch (_d.label) {
  case 0:
@@ -238,14 +263,14 @@ var MultipleLlmExecutionTools = /** @class */ (function () {
  return [3 /*break*/, 2];
  case 5: return [3 /*break*/, 8];
  case 6:
- e_1_1 = _d.sent();
- e_1 = { error: e_1_1 };
+ e_2_1 = _d.sent();
+ e_2 = { error: e_2_1 };
  return [3 /*break*/, 8];
  case 7:
  try {
  if (_b && !_b.done && (_c = _a.return)) _c.call(_a);
  }
- finally { if (e_1) throw e_1.error; }
+ finally { if (e_2) throw e_2.error; }
  return [7 /*endfinally*/];
  case 8: return [2 /*return*/, availableModels];
  }
@@ -278,8 +303,8 @@ var MultipleLlmExecutionTools = /** @class */ (function () {
  */
  MultipleLlmExecutionTools.prototype.callCommonModel = function (prompt) {
  return __awaiter(this, void 0, void 0, function () {
- var errors, _a, _b, llmExecutionTools, _c, error_1, e_2_1;
- var e_2, _d;
+ var errors, _a, _b, llmExecutionTools, _c, error_1, e_3_1;
+ var e_3, _d;
  var _this = this;
  return __generator(this, function (_e) {
  switch (_e.label) {
@@ -335,14 +360,14 @@ var MultipleLlmExecutionTools = /** @class */ (function () {
  return [3 /*break*/, 2];
  case 14: return [3 /*break*/, 17];
  case 15:
- e_2_1 = _e.sent();
- e_2 = { error: e_2_1 };
+ e_3_1 = _e.sent();
+ e_3 = { error: e_3_1 };
  return [3 /*break*/, 17];
  case 16:
  try {
  if (_b && !_b.done && (_d = _a.return)) _d.call(_a);
  }
- finally { if (e_2) throw e_2.error; }
+ finally { if (e_3) throw e_3.error; }
  return [7 /*endfinally*/];
  case 17:
  if (errors.length === 1) {
@@ -430,2209 +455,233 @@ function joinLlmExecutionTools() {
  */

  /**
- * Remote server is a proxy server that uses its execution tools internally and exposes the executor interface externally.
+ * @@@
  *
- * You can simply use `RemoteExecutionTools` on client-side javascript and connect to your remote server.
- * This is useful to make all logic on browser side but not expose your API keys or no need to use customer's GPU.
+ * Note: `$` is used to indicate that this function is not a pure function - it access global scope
  *
- * @see https://github.com/webgptorg/promptbook#remote-server
- * @public exported from `@promptbook/remote-client`
+ * @public exported from `@promptbook/utils`
  */
- var RemoteLlmExecutionTools = /** @class */ (function () {
- function RemoteLlmExecutionTools(options) {
- this.options = options;
- }
- Object.defineProperty(RemoteLlmExecutionTools.prototype, "title", {
- get: function () {
- // TODO: [🧠] Maybe fetch title+description from the remote server (as well as if model methods are defined)
- return 'Remote server';
- },
- enumerable: false,
- configurable: true
- });
- Object.defineProperty(RemoteLlmExecutionTools.prototype, "description", {
- get: function () {
- return 'Use all models by your remote server';
- },
- enumerable: false,
- configurable: true
- });
- /**
- * Check the configuration of all execution tools
- */
- RemoteLlmExecutionTools.prototype.checkConfiguration = function () {
- return __awaiter(this, void 0, void 0, function () {
- return __generator(this, function (_a) {
- return [2 /*return*/];
- });
- });
- };
- /**
- * List all available models that can be used
- */
- RemoteLlmExecutionTools.prototype.listModels = function () {
- return __awaiter(this, void 0, void 0, function () {
- return __generator(this, function (_a) {
- return [2 /*return*/, (this.options.models ||
- [
- /* !!!!!! */
- ])];
- });
- });
- };
- /**
- * Creates a connection to the remote proxy server.
- */
- RemoteLlmExecutionTools.prototype.makeConnection = function () {
- var _this = this;
- return new Promise(
- // <- TODO: [🧱] Implement in a functional (not new Class) way
- function (resolve, reject) {
- var socket = io(_this.options.remoteUrl, {
- path: _this.options.path,
- // path: `${this.remoteUrl.pathname}/socket.io`,
- transports: [/*'websocket', <- TODO: [🌬] Make websocket transport work */ 'polling'],
- });
- // console.log('Connecting to', this.options.remoteUrl.href, { socket });
- socket.on('connect', function () {
- resolve(socket);
- });
- // TODO: !!!! Better timeout handling
- setTimeout(function () {
- reject(new Error("Timeout while connecting to ".concat(_this.options.remoteUrl)));
- }, 1000 /* <- TODO: Timeout to config */);
- });
- };
- /**
- * Calls remote proxy server to use a chat model
- */
- RemoteLlmExecutionTools.prototype.callChatModel = function (prompt) {
- if (this.options.isVerbose) {
- console.info("\uD83D\uDD8B Remote callChatModel call");
+ function $getGlobalScope() {
+ return Function('return this')();
+ }
+ /***
+ * TODO: !!!!! Make private and promptbook registry from this
+ */
+
+ /**
+ * Register is @@@
+ *
+ * Note: `$` is used to indicate that this function is not a pure function - it accesses and adds variables in global scope.
+ *
+ * @private internal utility, exported are only signleton instances of this class
+ */
+ var $Register = /** @class */ (function () {
+ function $Register(storageName) {
+ this.storageName = storageName;
+ storageName = "_promptbook_".concat(storageName);
+ var globalScope = $getGlobalScope();
+ if (globalScope[storageName] === undefined) {
+ globalScope[storageName] = [];
  }
- return /* not await */ this.callCommonModel(prompt);
- };
- /**
- * Calls remote proxy server to use a completion model
- */
- RemoteLlmExecutionTools.prototype.callCompletionModel = function (prompt) {
- if (this.options.isVerbose) {
- console.info("\uD83D\uDCAC Remote callCompletionModel call");
+ else if (!Array.isArray(globalScope[storageName])) {
+ throw new UnexpectedError("Expected (global) ".concat(storageName, " to be an array, but got ").concat(typeof globalScope[storageName]));
  }
- return /* not await */ this.callCommonModel(prompt);
- };
- /**
- * Calls remote proxy server to use a embedding model
- */
- RemoteLlmExecutionTools.prototype.callEmbeddingModel = function (prompt) {
- if (this.options.isVerbose) {
- console.info("\uD83D\uDCAC Remote callEmbeddingModel call");
+ this.storage = globalScope[storageName];
+ }
+ $Register.prototype.list = function () {
+ // <- TODO: ReadonlyDeep<Array<TRegistered>>
+ return this.storage;
+ };
+ $Register.prototype.register = function (registered) {
+ // <- TODO: What to return here
+ var packageName = registered.packageName, className = registered.className;
+ var existingRegistrationIndex = this.storage.findIndex(function (item) { return item.packageName === packageName && item.className === className; });
+ var existingRegistration = this.storage[existingRegistrationIndex];
+ // TODO: !!!!!! Global IS_VERBOSE mode
+ if (!existingRegistration) {
+ console.warn("[\uD83D\uDCE6] Registering `".concat(packageName, ".").concat(className, "` to `").concat(this.storageName, "`"));
+ this.storage.push(registered);
+ }
+ else {
+ console.warn("[\uD83D\uDCE6] Re-registering `".concat(packageName, ".").concat(className, "` to `").concat(this.storageName, "`"));
+ this.storage[existingRegistrationIndex] = registered;
  }
- return /* not await */ this.callCommonModel(prompt);
- };
- // <- Note: [🤖] callXxxModel
- /**
- * Calls remote proxy server to use both completion or chat model
- */
- RemoteLlmExecutionTools.prototype.callCommonModel = function (prompt) {
- return __awaiter(this, void 0, void 0, function () {
- var socket, promptResult;
- return __generator(this, function (_a) {
- switch (_a.label) {
- case 0: return [4 /*yield*/, this.makeConnection()];
- case 1:
- socket = _a.sent();
- if (this.options.isAnonymous) {
- socket.emit('request', {
- llmToolsConfiguration: this.options.llmToolsConfiguration,
- prompt: prompt,
- // <- TODO: [🛫] `prompt` is NOT fully serializable as JSON, it contains functions which are not serializable
- });
- }
- else {
- socket.emit('request', {
- clientId: this.options.clientId,
- prompt: prompt,
- // <- TODO: [🛫] `prompt` is NOT fully serializable as JSON, it contains functions which are not serializable
- });
- }
- return [4 /*yield*/, new Promise(function (resolve, reject) {
- socket.on('response', function (response) {
- resolve(response.promptResult);
- socket.disconnect();
- });
- socket.on('error', function (error) {
- reject(new PipelineExecutionError(error.errorMessage));
- socket.disconnect();
- });
- })];
- case 2:
- promptResult = _a.sent();
- socket.disconnect();
- return [2 /*return*/, promptResult];
- }
- });
- });
  };
- return RemoteLlmExecutionTools;
+ return $Register;
  }());
- /**
- * TODO: [🍓] Allow to list compatible models with each variant
- * TODO: [🗯] RemoteLlmExecutionTools should extend Destroyable and implement IDestroyable
- * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
- * TODO: [🧠] Maybe remove `@promptbook/remote-client` and just use `@promptbook/core`
- */
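The `$Register` class that replaces `RemoteLlmExecutionTools` in this bundle keeps one array per storage name on the global object, so registrations survive even when several copies of the library are loaded into one process (the `e_1` → `e_2` → `e_3` renames in the hunks above are fallout of this insertion). A usage sketch, with items simplified to plain objects carrying `packageName` and `className` (in the real code they are constructor functions annotated with those fields):

```ts
// Hedged sketch of the $Register contract introduced above:
const register = new $Register('llm_execution_tools_constructors');
register.register({ packageName: '@promptbook/openai', className: 'OpenAiExecutionTools' });
// Registering the same packageName + className again replaces the existing
// entry ("Re-registering") instead of appending a duplicate:
register.register({ packageName: '@promptbook/openai', className: 'OpenAiExecutionTools' });
console.log(register.list().length); // -> 1
```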
 
  /**
- * Function computeUsage will create price per one token based on the string value found on openai page
+ * @@@
  *
- * @private within the repository, used only as internal helper for `OPENAI_MODELS`
+ * Note: `$` is used to indicate that this interacts with the global scope
+ * @singleton Only one instance of each register is created per build, but thare can be more @@@
+ * @public exported from `@promptbook/core`
  */
- function computeUsage(value) {
- var _a = __read(value.split(' / '), 2), price = _a[0], tokens = _a[1];
- return parseFloat(price.replace('$', '')) / parseFloat(tokens.replace('M tokens', '')) / 1000000;
- }
+ var $llmToolsRegister = new $Register('llm_execution_tools_constructors');

  /**
- * List of available Anthropic Claude models with pricing
+ * Returns the same value that is passed as argument.
+ * No side effects.
  *
- * Note: Done at 2024-08-16
+ * Note: It can be usefull for:
  *
- * @see https://docs.anthropic.com/en/docs/models-overview
- * @public exported from `@promptbook/anthropic-claude`
- */
- var ANTHROPIC_CLAUDE_MODELS = [
- {
- modelVariant: 'CHAT',
- modelTitle: 'Claude 3.5 Sonnet',
- modelName: 'claude-3-5-sonnet-20240620',
- pricing: {
- prompt: computeUsage("$3.00 / 1M tokens"),
- output: computeUsage("$15.00 / 1M tokens"),
- },
- },
- {
- modelVariant: 'CHAT',
- modelTitle: 'Claude 3 Opus',
- modelName: 'claude-3-opus-20240229',
- pricing: {
- prompt: computeUsage("$15.00 / 1M tokens"),
- output: computeUsage("$75.00 / 1M tokens"),
- },
- },
- {
- modelVariant: 'CHAT',
- modelTitle: 'Claude 3 Sonnet',
- modelName: 'claude-3-sonnet-20240229',
- pricing: {
- prompt: computeUsage("$3.00 / 1M tokens"),
- output: computeUsage("$15.00 / 1M tokens"),
- },
- },
- {
- modelVariant: 'CHAT',
- modelTitle: 'Claude 3 Haiku',
- modelName: ' claude-3-haiku-20240307',
- pricing: {
- prompt: computeUsage("$0.25 / 1M tokens"),
- output: computeUsage("$1.25 / 1M tokens"),
- },
- },
- {
- modelVariant: 'CHAT',
- modelTitle: 'Claude 2.1',
- modelName: 'claude-2.1',
- pricing: {
- prompt: computeUsage("$8.00 / 1M tokens"),
- output: computeUsage("$24.00 / 1M tokens"),
- },
- },
- {
- modelVariant: 'CHAT',
- modelTitle: 'Claude 2',
- modelName: 'claude-2.0',
- pricing: {
- prompt: computeUsage("$8.00 / 1M tokens"),
- output: computeUsage("$24.00 / 1M tokens"),
- },
- },
- {
- modelVariant: 'CHAT',
- modelTitle: ' Claude Instant 1.2',
- modelName: 'claude-instant-1.2',
- pricing: {
- prompt: computeUsage("$0.80 / 1M tokens"),
- output: computeUsage("$2.40 / 1M tokens"),
- },
- },
- // TODO: !!! Claude 1 and 2 has also completion versions - ask Hoagy
- ];
- /**
- * Note: [🤖] Add models of new variant
- * TODO: [🧠] !!! Add embedding models OR Anthropic has only chat+completion models?
- * TODO: [🧠] Some mechanism to propagate unsureness
- * TODO: [🧠][👮‍♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...
- * TODO: [🎰] Some mechanism to auto-update available models
+ * 1) Leveling indentation
+ * 2) Putting always-true or always-false conditions without getting eslint errors
+ *
+ * @param value any values
+ * @returns the same values
+ * @private within the repository
  */
+ function just(value) {
+ if (value === undefined) {
+ return undefined;
+ }
+ return value;
+ }

  /**
- * Get current date in ISO 8601 format
+ * @@@
  *
- * @private internal utility
+ * Note: `$` is used to indicate that this interacts with the global scope
+ * @singleton Only one instance of each register is created per build, but thare can be more @@@
+ * @public exported from `@promptbook/core`
  */
- function getCurrentIsoDate() {
- return new Date().toISOString();
- }
+ var $llmToolsMetadataRegister = new $Register('llm_tools_metadata');
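The removed `computeUsage` helper turns a human-readable price string into a price per single token; worked through for one of the model entries above:

```ts
// computeUsage('$3.00 / 1M tokens')
// value.split(' / ')  -> ['$3.00', '1M tokens']
// parseFloat('3.00')  -> 3      (after stripping '$')
// parseFloat('1')     -> 1      (after stripping 'M tokens')
// 3 / 1 / 1000000     -> 0.000003 USD per token
```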
 
  /**
- * @@@
+ * Creates a message with all registered LLM tools
  *
- * Note: `$` is used to indicate that this function is not a pure function - it mutates given object
- * Note: This function mutates the object and returns the original (but mutated-deep-freezed) object
+ * Note: This function is used to create a (error) message when there is no constructor for some LLM provider
  *
- * @returns The same object as the input, but deeply frozen
- * @public exported from `@promptbook/utils`
+ * @private internal function of `createLlmToolsFromConfiguration` and `createLlmToolsFromEnv`
  */
- function $deepFreeze(objectValue) {
- var e_1, _a;
- var propertyNames = Object.getOwnPropertyNames(objectValue);
+ function $registeredLlmToolsMessage() {
+ var e_1, _a, e_2, _b;
+ /**
+ * Mixes registered LLM tools from $llmToolsMetadataRegister and $llmToolsRegister
+ */
+ var all = [];
+ var _loop_1 = function (packageName, className) {
+ if (all.some(function (item) { return item.packageName === packageName && item.className === className; })) {
+ return "continue";
+ }
+ all.push({ packageName: packageName, className: className });
+ };
  try {
- for (var propertyNames_1 = __values(propertyNames), propertyNames_1_1 = propertyNames_1.next(); !propertyNames_1_1.done; propertyNames_1_1 = propertyNames_1.next()) {
- var propertyName = propertyNames_1_1.value;
- var value = objectValue[propertyName];
- if (value && typeof value === 'object') {
- $deepFreeze(value);
- }
+ for (var _c = __values($llmToolsMetadataRegister.list()), _d = _c.next(); !_d.done; _d = _c.next()) {
+ var _e = _d.value, packageName = _e.packageName, className = _e.className;
+ _loop_1(packageName, className);
  }
  }
  catch (e_1_1) { e_1 = { error: e_1_1 }; }
  finally {
  try {
- if (propertyNames_1_1 && !propertyNames_1_1.done && (_a = propertyNames_1.return)) _a.call(propertyNames_1);
+ if (_d && !_d.done && (_a = _c.return)) _a.call(_c);
  }
  finally { if (e_1) throw e_1.error; }
  }
- return Object.freeze(objectValue);
- }
- /**
- * TODO: [🧠] Is there a way how to meaningfully test this utility
- */
-
- // <- TODO: [🧠] Better system for generator warnings - not always "code" and "by `@promptbook/cli`"
- /**
- * The maximum number of iterations for a loops
- *
- * @private within the repository - too low-level in comparison with other `MAX_...`
- */
- var LOOP_LIMIT = 1000;
- /**
- * Nonce which is used for replacing things in strings
- *
- * @private within the repository
- */
- var REPLACING_NONCE = 'u$k42k%!V2zo34w7Fu#@QUHYPW';
- /**
- * The names of the parameters that are reserved for special purposes
- *
- * @public exported from `@promptbook/core`
- */
- $deepFreeze([
- 'content',
- 'context',
- 'knowledge',
- 'samples',
- 'modelName',
- 'currentDate',
- // <- TODO: Add more like 'date', 'modelName',...
- // <- TODO: Add [emoji] + instructions ACRY when adding new reserved parameter
- ]);
- /**
- * @@@
- *
- * @private within the repository
- */
- var RESERVED_PARAMETER_MISSING_VALUE = 'MISSING-' + REPLACING_NONCE;
- /**
- * @@@
- *
- * @private within the repository
- */
- var RESERVED_PARAMETER_RESTRICTED = 'RESTRICTED-' + REPLACING_NONCE;
- /**
- * TODO: [🧠][🧜‍♂️] Maybe join remoteUrl and path into single value
- */
-
- /**
- * This error type indicates that some limit was reached
- *
- * @public exported from `@promptbook/core`
- */
- var LimitReachedError = /** @class */ (function (_super) {
- __extends(LimitReachedError, _super);
- function LimitReachedError(message) {
- var _this = _super.call(this, message) || this;
- _this.name = 'LimitReachedError';
- Object.setPrototypeOf(_this, LimitReachedError.prototype);
- return _this;
- }
- return LimitReachedError;
- }(Error));
-
- /**
- * Replaces parameters in template with values from parameters object
- *
- * @param template the template with parameters in {curly} braces
- * @param parameters the object with parameters
- * @returns the template with replaced parameters
- * @throws {PipelineExecutionError} if parameter is not defined, not closed, or not opened
- * @public exported from `@promptbook/utils`
- */
- function replaceParameters(template, parameters) {
- var e_1, _a;
+ var _loop_2 = function (packageName, className) {
+ if (all.some(function (item) { return item.packageName === packageName && item.className === className; })) {
+ return "continue";
+ }
+ all.push({ packageName: packageName, className: className });
+ };
  try {
- for (var _b = __values(Object.entries(parameters)), _c = _b.next(); !_c.done; _c = _b.next()) {
- var _d = __read(_c.value, 2), parameterName = _d[0], parameterValue = _d[1];
- if (parameterValue === RESERVED_PARAMETER_MISSING_VALUE) {
- throw new UnexpectedError("Parameter {".concat(parameterName, "} has missing value"));
- }
- else if (parameterValue === RESERVED_PARAMETER_RESTRICTED) {
- // TODO: [🍵]
- throw new UnexpectedError("Parameter {".concat(parameterName, "} is restricted to use"));
- }
+ for (var _f = __values($llmToolsRegister.list()), _g = _f.next(); !_g.done; _g = _f.next()) {
+ var _h = _g.value, packageName = _h.packageName, className = _h.className;
+ _loop_2(packageName, className);
  }
  }
- catch (e_1_1) { e_1 = { error: e_1_1 }; }
+ catch (e_2_1) { e_2 = { error: e_2_1 }; }
  finally {
  try {
- if (_c && !_c.done && (_a = _b.return)) _a.call(_b);
+ if (_g && !_g.done && (_b = _f.return)) _b.call(_f);
  }
- finally { if (e_1) throw e_1.error; }
+ finally { if (e_2) throw e_2.error; }
  }
- var replacedTemplate = template;
- var match;
- var loopLimit = LOOP_LIMIT;
- var _loop_1 = function () {
- if (loopLimit-- < 0) {
- throw new LimitReachedError('Loop limit reached during parameters replacement in `replaceParameters`');
+ var metadata = all.map(function (metadata) {
+ var isMetadataAviailable = $llmToolsMetadataRegister
+ .list()
+ .find(function (_a) {
+ var packageName = _a.packageName, className = _a.className;
+ return metadata.packageName === packageName && metadata.className === className;
+ });
+ var isInstalled = $llmToolsRegister
+ .list()
+ .find(function (_a) {
+ var packageName = _a.packageName, className = _a.className;
+ return metadata.packageName === packageName && metadata.className === className;
+ });
+ return __assign(__assign({}, metadata), { isMetadataAviailable: isMetadataAviailable, isInstalled: isInstalled });
+ });
+ return spaceTrim$1(function (block) { return "\n Available LLM providers are:\n ".concat(block(metadata
+ .map(function (_a, i) {
+ var packageName = _a.packageName, className = _a.className, isMetadataAviailable = _a.isMetadataAviailable, isInstalled = _a.isInstalled;
+ var more;
+ if (just(false)) {
+ more = '';
  }
- var precol = match.groups.precol;
- var parameterName = match.groups.parameterName;
- if (parameterName === '') {
- return "continue";
+ else if (!isMetadataAviailable && !isInstalled) {
+ // TODO: [�][�] Maybe do allow to do auto-install if package not registered and not found
+ more = "(not installed and no metadata, looks like a unexpected behavior)";
  }
- if (parameterName.indexOf('{') !== -1 || parameterName.indexOf('}') !== -1) {
- throw new PipelineExecutionError('Parameter is already opened or not closed');
+ else if (isMetadataAviailable && !isInstalled) {
+ // TODO: [�][�]
+ more = "(not installed)";
  }
- if (parameters[parameterName] === undefined) {
- throw new PipelineExecutionError("Parameter {".concat(parameterName, "} is not defined"));
+ else if (!isMetadataAviailable && isInstalled) {
+ more = "(no metadata, looks like a unexpected behavior)";
  }
- var parameterValue = parameters[parameterName];
- if (parameterValue === undefined) {
- throw new PipelineExecutionError("Parameter {".concat(parameterName, "} is not defined"));
+ else if (isMetadataAviailable && isInstalled) {
+ more = "(installed)";
  }
- parameterValue = parameterValue.toString();
- if (parameterValue.includes('\n') && /^\s*\W{0,3}\s*$/.test(precol)) {
- parameterValue = parameterValue
- .split('\n')
- .map(function (line, index) { return (index === 0 ? line : "".concat(precol).concat(line)); })
- .join('\n');
+ else {
+ more = "(unknown state, looks like a unexpected behavior)";
  }
- replacedTemplate =
- replacedTemplate.substring(0, match.index + precol.length) +
- parameterValue +
- replacedTemplate.substring(match.index + precol.length + parameterName.length + 2);
- };
- while ((match = /^(?<precol>.*){(?<parameterName>\w+)}(.*)/m /* <- Not global */
- .exec(replacedTemplate))) {
- _loop_1();
- }
- // [💫] Check if there are parameters that are not closed properly
- if (/{\w+$/.test(replacedTemplate)) {
- throw new PipelineExecutionError('Parameter is not closed');
- }
- // [💫] Check if there are parameters that are not opened properly
- if (/^\w+}/.test(replacedTemplate)) {
- throw new PipelineExecutionError('Parameter is not opened');
- }
- return replacedTemplate;
- }
-
- /**
- * Counts number of characters in the text
- *
- * @public exported from `@promptbook/utils`
- */
- function countCharacters(text) {
- // Remove null characters
- text = text.replace(/\0/g, '');
- // Replace emojis (and also ZWJ sequence) with hyphens
- text = text.replace(/(\p{Extended_Pictographic})\p{Modifier_Symbol}/gu, '$1');
- text = text.replace(/(\p{Extended_Pictographic})[\u{FE00}-\u{FE0F}]/gu, '$1');
- text = text.replace(/\p{Extended_Pictographic}(\u{200D}\p{Extended_Pictographic})*/gu, '-');
- return text.length;
- }
-
- /**
- * Counts number of lines in the text
- *
- * @public exported from `@promptbook/utils`
- */
- function countLines(text) {
- if (text === '') {
- return 0;
- }
- return text.split('\n').length;
- }
-
- /**
- * Counts number of pages in the text
- *
- * @public exported from `@promptbook/utils`
- */
- function countPages(text) {
- var sentencesPerPage = 5; // Assuming each page has 5 sentences
- var sentences = text.split(/[.!?]+/).filter(function (sentence) { return sentence.trim() !== ''; });
- var pageCount = Math.ceil(sentences.length / sentencesPerPage);
- return pageCount;
- }
-
- /**
- * Counts number of paragraphs in the text
- *
- * @public exported from `@promptbook/utils`
- */
- function countParagraphs(text) {
- return text.split(/\n\s*\n/).filter(function (paragraph) { return paragraph.trim() !== ''; }).length;
- }
-
- /**
- * Split text into sentences
- *
- * @public exported from `@promptbook/utils`
- */
- function splitIntoSentences(text) {
- return text.split(/[.!?]+/).filter(function (sentence) { return sentence.trim() !== ''; });
- }
- /**
- * Counts number of sentences in the text
- *
- * @public exported from `@promptbook/utils`
- */
- function countSentences(text) {
- return splitIntoSentences(text).length;
- }
-
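The counting helpers removed above are deliberately rough heuristics (a "page" is defined as five sentences). Their behaviour, read directly off the implementations:

```ts
// Behaviour of the removed counting helpers (read off the code above):
countLines('a\nb');         // -> 2
countSentences('Hi. Bye.'); // -> 2 (split on . ! ?)
countPages('Hi. Bye.');     // -> 1 (Math.ceil(2 sentences / 5 per page))
countCharacters('ab👩‍👩‍👧‍👧');  // -> 3 (an emoji ZWJ sequence collapses to one hyphen)
```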
- var defaultDiacriticsRemovalMap = [
- {
- base: 'A',
- letters: '\u0041\u24B6\uFF21\u00C0\u00C1\u00C2\u1EA6\u1EA4\u1EAA\u1EA8\u00C3\u0100\u0102\u1EB0\u1EAE\u1EB4\u1EB2\u0226\u01E0\u00C4\u01DE\u1EA2\u00C5\u01FA\u01CD\u0200\u0202\u1EA0\u1EAC\u1EB6\u1E00\u0104\u023A\u2C6F',
- },
- { base: 'AA', letters: '\uA732' },
- { base: 'AE', letters: '\u00C6\u01FC\u01E2' },
- { base: 'AO', letters: '\uA734' },
- { base: 'AU', letters: '\uA736' },
- { base: 'AV', letters: '\uA738\uA73A' },
- { base: 'AY', letters: '\uA73C' },
- {
- base: 'B',
- letters: '\u0042\u24B7\uFF22\u1E02\u1E04\u1E06\u0243\u0182\u0181',
- },
- {
- base: 'C',
- letters: '\u0043\u24B8\uFF23\u0106\u0108\u010A\u010C\u00C7\u1E08\u0187\u023B\uA73E',
- },
- {
- base: 'D',
- letters: '\u0044\u24B9\uFF24\u1E0A\u010E\u1E0C\u1E10\u1E12\u1E0E\u0110\u018B\u018A\u0189\uA779\u00D0',
- },
- { base: 'DZ', letters: '\u01F1\u01C4' },
- { base: 'Dz', letters: '\u01F2\u01C5' },
- {
- base: 'E',
- letters: '\u0045\u24BA\uFF25\u00C8\u00C9\u00CA\u1EC0\u1EBE\u1EC4\u1EC2\u1EBC\u0112\u1E14\u1E16\u0114\u0116\u00CB\u1EBA\u011A\u0204\u0206\u1EB8\u1EC6\u0228\u1E1C\u0118\u1E18\u1E1A\u0190\u018E',
- },
- { base: 'F', letters: '\u0046\u24BB\uFF26\u1E1E\u0191\uA77B' },
- {
- base: 'G',
- letters: '\u0047\u24BC\uFF27\u01F4\u011C\u1E20\u011E\u0120\u01E6\u0122\u01E4\u0193\uA7A0\uA77D\uA77E',
- },
- {
- base: 'H',
- letters: '\u0048\u24BD\uFF28\u0124\u1E22\u1E26\u021E\u1E24\u1E28\u1E2A\u0126\u2C67\u2C75\uA78D',
- },
- {
- base: 'I',
- letters: '\u0049\u24BE\uFF29\u00CC\u00CD\u00CE\u0128\u012A\u012C\u0130\u00CF\u1E2E\u1EC8\u01CF\u0208\u020A\u1ECA\u012E\u1E2C\u0197',
- },
- { base: 'J', letters: '\u004A\u24BF\uFF2A\u0134\u0248' },
- {
- base: 'K',
- letters: '\u004B\u24C0\uFF2B\u1E30\u01E8\u1E32\u0136\u1E34\u0198\u2C69\uA740\uA742\uA744\uA7A2',
- },
- {
- base: 'L',
- letters: '\u004C\u24C1\uFF2C\u013F\u0139\u013D\u1E36\u1E38\u013B\u1E3C\u1E3A\u0141\u023D\u2C62\u2C60\uA748\uA746\uA780',
- },
- { base: 'LJ', letters: '\u01C7' },
- { base: 'Lj', letters: '\u01C8' },
- { base: 'M', letters: '\u004D\u24C2\uFF2D\u1E3E\u1E40\u1E42\u2C6E\u019C' },
- {
- base: 'N',
- letters: '\u004E\u24C3\uFF2E\u01F8\u0143\u00D1\u1E44\u0147\u1E46\u0145\u1E4A\u1E48\u0220\u019D\uA790\uA7A4',
- },
- { base: 'NJ', letters: '\u01CA' },
- { base: 'Nj', letters: '\u01CB' },
- {
- base: 'O',
- letters: '\u004F\u24C4\uFF2F\u00D2\u00D3\u00D4\u1ED2\u1ED0\u1ED6\u1ED4\u00D5\u1E4C\u022C\u1E4E\u014C\u1E50\u1E52\u014E\u022E\u0230\u00D6\u022A\u1ECE\u0150\u01D1\u020C\u020E\u01A0\u1EDC\u1EDA\u1EE0\u1EDE\u1EE2\u1ECC\u1ED8\u01EA\u01EC\u00D8\u01FE\u0186\u019F\uA74A\uA74C',
- },
- { base: 'OI', letters: '\u01A2' },
- { base: 'OO', letters: '\uA74E' },
- { base: 'OU', letters: '\u0222' },
- { base: 'OE', letters: '\u008C\u0152' },
- { base: 'oe', letters: '\u009C\u0153' },
- {
- base: 'P',
- letters: '\u0050\u24C5\uFF30\u1E54\u1E56\u01A4\u2C63\uA750\uA752\uA754',
- },
- { base: 'Q', letters: '\u0051\u24C6\uFF31\uA756\uA758\u024A' },
- {
- base: 'R',
- letters: '\u0052\u24C7\uFF32\u0154\u1E58\u0158\u0210\u0212\u1E5A\u1E5C\u0156\u1E5E\u024C\u2C64\uA75A\uA7A6\uA782',
- },
- {
- base: 'S',
- letters: '\u0053\u24C8\uFF33\u1E9E\u015A\u1E64\u015C\u1E60\u0160\u1E66\u1E62\u1E68\u0218\u015E\u2C7E\uA7A8\uA784',
- },
- {
- base: 'T',
- letters: '\u0054\u24C9\uFF34\u1E6A\u0164\u1E6C\u021A\u0162\u1E70\u1E6E\u0166\u01AC\u01AE\u023E\uA786',
- },
- { base: 'TZ', letters: '\uA728' },
- {
- base: 'U',
- letters: '\u0055\u24CA\uFF35\u00D9\u00DA\u00DB\u0168\u1E78\u016A\u1E7A\u016C\u00DC\u01DB\u01D7\u01D5\u01D9\u1EE6\u016E\u0170\u01D3\u0214\u0216\u01AF\u1EEA\u1EE8\u1EEE\u1EEC\u1EF0\u1EE4\u1E72\u0172\u1E76\u1E74\u0244',
- },
- { base: 'V', letters: '\u0056\u24CB\uFF36\u1E7C\u1E7E\u01B2\uA75E\u0245' },
- { base: 'VY', letters: '\uA760' },
- {
- base: 'W',
- letters: '\u0057\u24CC\uFF37\u1E80\u1E82\u0174\u1E86\u1E84\u1E88\u2C72',
- },
- { base: 'X', letters: '\u0058\u24CD\uFF38\u1E8A\u1E8C' },
- {
- base: 'Y',
- letters: '\u0059\u24CE\uFF39\u1EF2\u00DD\u0176\u1EF8\u0232\u1E8E\u0178\u1EF6\u1EF4\u01B3\u024E\u1EFE',
- },
- {
- base: 'Z',
- letters: '\u005A\u24CF\uFF3A\u0179\u1E90\u017B\u017D\u1E92\u1E94\u01B5\u0224\u2C7F\u2C6B\uA762',
- },
- {
- base: 'a',
- letters: '\u0061\u24D0\uFF41\u1E9A\u00E0\u00E1\u00E2\u1EA7\u1EA5\u1EAB\u1EA9\u00E3\u0101\u0103\u1EB1\u1EAF\u1EB5\u1EB3\u0227\u01E1\u00E4\u01DF\u1EA3\u00E5\u01FB\u01CE\u0201\u0203\u1EA1\u1EAD\u1EB7\u1E01\u0105\u2C65\u0250',
- },
- { base: 'aa', letters: '\uA733' },
- { base: 'ae', letters: '\u00E6\u01FD\u01E3' },
- { base: 'ao', letters: '\uA735' },
- { base: 'au', letters: '\uA737' },
- { base: 'av', letters: '\uA739\uA73B' },
- { base: 'ay', letters: '\uA73D' },
- {
- base: 'b',
- letters: '\u0062\u24D1\uFF42\u1E03\u1E05\u1E07\u0180\u0183\u0253',
- },
- {
- base: 'c',
- letters: '\u0063\u24D2\uFF43\u0107\u0109\u010B\u010D\u00E7\u1E09\u0188\u023C\uA73F\u2184',
- },
- {
- base: 'd',
- letters: '\u0064\u24D3\uFF44\u1E0B\u010F\u1E0D\u1E11\u1E13\u1E0F\u0111\u018C\u0256\u0257\uA77A',
- },
- { base: 'dz', letters: '\u01F3\u01C6' },
- {
- base: 'e',
- letters: '\u0065\u24D4\uFF45\u00E8\u00E9\u00EA\u1EC1\u1EBF\u1EC5\u1EC3\u1EBD\u0113\u1E15\u1E17\u0115\u0117\u00EB\u1EBB\u011B\u0205\u0207\u1EB9\u1EC7\u0229\u1E1D\u0119\u1E19\u1E1B\u0247\u025B\u01DD',
- },
- { base: 'f', letters: '\u0066\u24D5\uFF46\u1E1F\u0192\uA77C' },
- {
- base: 'g',
- letters: '\u0067\u24D6\uFF47\u01F5\u011D\u1E21\u011F\u0121\u01E7\u0123\u01E5\u0260\uA7A1\u1D79\uA77F',
- },
- {
- base: 'h',
- letters: '\u0068\u24D7\uFF48\u0125\u1E23\u1E27\u021F\u1E25\u1E29\u1E2B\u1E96\u0127\u2C68\u2C76\u0265',
- },
- { base: 'hv', letters: '\u0195' },
- {
- base: 'i',
- letters: '\u0069\u24D8\uFF49\u00EC\u00ED\u00EE\u0129\u012B\u012D\u00EF\u1E2F\u1EC9\u01D0\u0209\u020B\u1ECB\u012F\u1E2D\u0268\u0131',
- },
- { base: 'j', letters: '\u006A\u24D9\uFF4A\u0135\u01F0\u0249' },
- {
- base: 'k',
- letters: '\u006B\u24DA\uFF4B\u1E31\u01E9\u1E33\u0137\u1E35\u0199\u2C6A\uA741\uA743\uA745\uA7A3',
- },
- {
- base: 'l',
- letters: '\u006C\u24DB\uFF4C\u0140\u013A\u013E\u1E37\u1E39\u013C\u1E3D\u1E3B\u017F\u0142\u019A\u026B\u2C61\uA749\uA781\uA747',
- },
- { base: 'lj', letters: '\u01C9' },
- { base: 'm', letters: '\u006D\u24DC\uFF4D\u1E3F\u1E41\u1E43\u0271\u026F' },
- {
- base: 'n',
- letters: '\u006E\u24DD\uFF4E\u01F9\u0144\u00F1\u1E45\u0148\u1E47\u0146\u1E4B\u1E49\u019E\u0272\u0149\uA791\uA7A5',
- },
- { base: 'nj', letters: '\u01CC' },
- {
- base: 'o',
- letters: '\u006F\u24DE\uFF4F\u00F2\u00F3\u00F4\u1ED3\u1ED1\u1ED7\u1ED5\u00F5\u1E4D\u022D\u1E4F\u014D\u1E51\u1E53\u014F\u022F\u0231\u00F6\u022B\u1ECF\u0151\u01D2\u020D\u020F\u01A1\u1EDD\u1EDB\u1EE1\u1EDF\u1EE3\u1ECD\u1ED9\u01EB\u01ED\u00F8\u01FF\u0254\uA74B\uA74D\u0275',
- },
- { base: 'oi', letters: '\u01A3' },
- { base: 'ou', letters: '\u0223' },
- { base: 'oo', letters: '\uA74F' },
- {
- base: 'p',
- letters: '\u0070\u24DF\uFF50\u1E55\u1E57\u01A5\u1D7D\uA751\uA753\uA755',
- },
- { base: 'q', letters: '\u0071\u24E0\uFF51\u024B\uA757\uA759' },
- {
- base: 'r',
- letters: '\u0072\u24E1\uFF52\u0155\u1E59\u0159\u0211\u0213\u1E5B\u1E5D\u0157\u1E5F\u024D\u027D\uA75B\uA7A7\uA783',
- },
- {
- base: 's',
- letters: '\u0073\u24E2\uFF53\u00DF\u015B\u1E65\u015D\u1E61\u0161\u1E67\u1E63\u1E69\u0219\u015F\u023F\uA7A9\uA785\u1E9B',
- },
- {
- base: 't',
- letters: '\u0074\u24E3\uFF54\u1E6B\u1E97\u0165\u1E6D\u021B\u0163\u1E71\u1E6F\u0167\u01AD\u0288\u2C66\uA787',
- },
- { base: 'tz', letters: '\uA729' },
- {
- base: 'u',
- letters: '\u0075\u24E4\uFF55\u00F9\u00FA\u00FB\u0169\u1E79\u016B\u1E7B\u016D\u00FC\u01DC\u01D8\u01D6\u01DA\u1EE7\u016F\u0171\u01D4\u0215\u0217\u01B0\u1EEB\u1EE9\u1EEF\u1EED\u1EF1\u1EE5\u1E73\u0173\u1E77\u1E75\u0289',
- },
- { base: 'v', letters: '\u0076\u24E5\uFF56\u1E7D\u1E7F\u028B\uA75F\u028C' },
- { base: 'vy', letters: '\uA761' },
- {
- base: 'w',
- letters: '\u0077\u24E6\uFF57\u1E81\u1E83\u0175\u1E87\u1E85\u1E98\u1E89\u2C73',
- },
- { base: 'x', letters: '\u0078\u24E7\uFF58\u1E8B\u1E8D' },
- {
- base: 'y',
- letters: '\u0079\u24E8\uFF59\u1EF3\u00FD\u0177\u1EF9\u0233\u1E8F\u00FF\u1EF7\u1E99\u1EF5\u01B4\u024F\u1EFF',
- },
- {
- base: 'z',
- letters: '\u007A\u24E9\uFF5A\u017A\u1E91\u017C\u017E\u1E93\u1E95\u01B6\u0225\u0240\u2C6C\uA763',
- },
- ];
- /**
- * Map of letters from diacritic variant to diacritless variant
- * Contains lowercase and uppercase separatelly
- *
- * > "á" => "a"
- * > "ě" => "e"
- * > "Ă" => "A"
- * > ...
- *
- * @public exported from `@promptbook/utils`
- */
- var DIACRITIC_VARIANTS_LETTERS = {};
- // tslint:disable-next-line: prefer-for-of
- for (var i = 0; i < defaultDiacriticsRemovalMap.length; i++) {
- var letters = defaultDiacriticsRemovalMap[i].letters;
- // tslint:disable-next-line: prefer-for-of
- for (var j = 0; j < letters.length; j++) {
- DIACRITIC_VARIANTS_LETTERS[letters[j]] = defaultDiacriticsRemovalMap[i].base;
- }
+ return "".concat(i + 1, ") `").concat(className, "` from `").concat(packageName, "` ").concat(more);
+ })
+ .join('\n')), "\n "); });
  }
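Interleaved with the removals above, the added lines complete `$registeredLlmToolsMessage`, which renders a numbered provider list. Roughly what it produces (the provider rows are illustrative, not taken from a real run):

```ts
console.info($registeredLlmToolsMessage());
// Available LLM providers are:
// 1) `OpenAiExecutionTools` from `@promptbook/openai` (installed)
// 2) `AzureOpenAiExecutionTools` from `@promptbook/azure-openai` (not installed)
```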
- // <- TODO: [🍓] Put to maker function to save execution time if not needed
- /*
- @see https://stackoverflow.com/questions/990904/remove-accents-diacritics-in-a-string-in-javascript
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- */

  /**
  * @@@
  *
- * @param input @@@
+ * Note: This function is not cached, every call creates new instance of `MultipleLlmExecutionTools`
+ *
  * @returns @@@
- * @public exported from `@promptbook/utils`
+ * @public exported from `@promptbook/core`
  */
- function removeDiacritics(input) {
- /*eslint no-control-regex: "off"*/
- return input.replace(/[^\u0000-\u007E]/g, function (a) {
- return DIACRITIC_VARIANTS_LETTERS[a] || a;
+ function createLlmToolsFromConfiguration(configuration, options) {
+ if (options === void 0) { options = {}; }
+ var _a = options.isVerbose, isVerbose = _a === void 0 ? false : _a;
+ var llmTools = configuration.map(function (llmConfiguration) {
+ var registeredItem = $llmToolsRegister
+ .list()
+ .find(function (_a) {
+ var packageName = _a.packageName, className = _a.className;
+ return llmConfiguration.packageName === packageName && llmConfiguration.className === className;
+ });
+ if (registeredItem === undefined) {
+ throw new Error(spaceTrim$1(function (block) { return "\n There is no constructor for LLM provider `".concat(llmConfiguration.className, "` from `").concat(llmConfiguration.packageName, "`\n\n You have probably forgotten install and import the provider package.\n To fix this issue, you can:\n\n Install:\n\n > npm install ").concat(llmConfiguration.packageName, "\n\n And import:\n\n > import '").concat(llmConfiguration.packageName, "';\n\n\n ").concat(block($registeredLlmToolsMessage()), "\n "); }));
+ }
+ return registeredItem(__assign({ isVerbose: isVerbose }, llmConfiguration.options));
  });
+ return joinLlmExecutionTools.apply(void 0, __spreadArray([], __read(llmTools), false));
  }
  /**
- * TODO: [Ж] Variant for cyrillic (and in general non-latin) letters
+ * TODO: [🎌] Togethere with `createLlmToolsFromConfiguration` + 'EXECUTION_TOOLS_CLASSES' gets to `@promptbook/core` ALL model providers, make this more efficient
+ * TODO: [🧠][🎌] Dynamically install required providers
+ * TODO: @@@ write discussion about this - wizzard
+ * TODO: [🧠][🍛] Which name is better `createLlmToolsFromConfig` or `createLlmToolsFromConfiguration`?
+ * TODO: [🧠] Is there some meaningfull way how to test this util
+ * TODO: This should be maybe not under `_common` but under `utils`
  */
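A minimal usage sketch for the new `createLlmToolsFromConfiguration` (the configuration shape follows the code above; the concrete `options` keys depend on the provider and are an assumption here):

```ts
import '@promptbook/openai'; // <- side-effect import registers the constructor
const llmTools = createLlmToolsFromConfiguration(
    [
        {
            packageName: '@promptbook/openai',
            className: 'OpenAiExecutionTools',
            options: { apiKey: 'sk-...' }, // <- assumed option name
        },
    ],
    { isVerbose: true },
);
// -> a MultipleLlmExecutionTools joining the constructed providers
```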
 
1186
680
  /**
1187
- * Counts number of words in the text
681
+ * Remote server is a proxy server that uses its execution tools internally and exposes the executor interface externally.
1188
682
  *
1189
- * @public exported from `@promptbook/utils`
1190
- */
1191
- function countWords(text) {
1192
- text = text.replace(/[\p{Extended_Pictographic}]/gu, 'a');
1193
- text = removeDiacritics(text);
1194
- return text.split(/[^a-zа-я0-9]+/i).filter(function (word) { return word.length > 0; }).length;
1195
- }
1196
-
1197
- /**
1198
- * Helper of usage compute
1199
- *
1200
- * @param content the content of prompt or response
1201
- * @returns part of PromptResultUsageCounts
1202
- *
1203
- * @private internal utility of LlmExecutionTools
1204
- */
1205
- function computeUsageCounts(content) {
1206
- return {
1207
- charactersCount: { value: countCharacters(content) },
1208
- wordsCount: { value: countWords(content) },
1209
- sentencesCount: { value: countSentences(content) },
1210
- linesCount: { value: countLines(content) },
1211
- paragraphsCount: { value: countParagraphs(content) },
1212
- pagesCount: { value: countPages(content) },
1213
- };
1214
- }
1215
-
1216
- /**
1217
- * Make UncertainNumber
1218
- *
1219
- * @param value
1220
- *
1221
- * @private utility for initializating UncertainNumber
1222
- */
1223
- function uncertainNumber(value) {
1224
- if (value === null || value === undefined || Number.isNaN(value)) {
1225
- return { value: 0, isUncertain: true };
1226
- }
1227
- return { value: value };
1228
- }
1229
-
1230
- /**
1231
- * Computes the usage of the Anthropic Claude API based on the response from Anthropic Claude
1232
- *
1233
- * @param promptContent The content of the prompt
1234
- * @param resultContent The content of the result (for embedding prompts or failed prompts pass empty string)
1235
- * @param rawResponse The raw response from Anthropic Claude API
1236
- * @throws {PipelineExecutionError} If the usage is not defined in the response from Anthropic Claude
1237
- * @private internal utility of `AnthropicClaudeExecutionTools`
1238
- */
1239
- function computeAnthropicClaudeUsage(promptContent, // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
1240
- resultContent, rawResponse) {
1241
- var _a, _b;
1242
- if (rawResponse.usage === undefined) {
1243
- throw new PipelineExecutionError('The usage is not defined in the response from Anthropic Claude');
1244
- }
1245
- if (((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.input_tokens) === undefined) {
1246
- throw new PipelineExecutionError('In Anthropic Claude response `usage.prompt_tokens` not defined');
1247
- }
1248
- var inputTokens = rawResponse.usage.input_tokens;
1249
- var outputTokens = ((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.output_tokens) || 0;
1250
- var modelInfo = ANTHROPIC_CLAUDE_MODELS.find(function (model) { return model.modelName === rawResponse.model; });
1251
- var price;
1252
- if (modelInfo === undefined || modelInfo.pricing === undefined) {
1253
- price = uncertainNumber();
1254
- }
1255
- else {
1256
- price = uncertainNumber(inputTokens * modelInfo.pricing.prompt + outputTokens * modelInfo.pricing.output);
1257
- }
1258
- return {
1259
- price: price,
1260
- input: __assign({ tokensCount: uncertainNumber(rawResponse.usage.input_tokens) }, computeUsageCounts(promptContent)),
1261
- output: __assign({ tokensCount: uncertainNumber(outputTokens) }, computeUsageCounts(resultContent)),
1262
- };
1263
- }
1264
- /**
1265
- * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
1266
- */
1267
-
1268
- /**
1269
- * Execution Tools for calling Anthropic Claude API.
1270
- *
1271
- * @public exported from `@promptbook/anthropic-claude`
1272
- * @deprecated use `createAnthropicClaudeExecutionTools` instead
1273
- */
1274
- var AnthropicClaudeExecutionTools = /** @class */ (function () {
1275
- /**
1276
- * Creates Anthropic Claude Execution Tools.
1277
- *
1278
- * @param options which are relevant are directly passed to the Anthropic Claude client
1279
- */
1280
- function AnthropicClaudeExecutionTools(options) {
1281
- if (options === void 0) { options = { isProxied: false }; }
1282
- this.options = options;
1283
- /**
1284
- * Anthropic Claude API client.
1285
- */
1286
- this.client = null;
1287
- }
1288
- Object.defineProperty(AnthropicClaudeExecutionTools.prototype, "title", {
1289
- get: function () {
1290
- return 'Anthropic Claude';
1291
- },
1292
- enumerable: false,
1293
- configurable: true
1294
- });
1295
- Object.defineProperty(AnthropicClaudeExecutionTools.prototype, "description", {
1296
- get: function () {
1297
- return 'Use all models provided by Anthropic Claude';
1298
- },
1299
- enumerable: false,
1300
- configurable: true
1301
- });
1302
- AnthropicClaudeExecutionTools.prototype.getClient = function () {
1303
- return __awaiter(this, void 0, void 0, function () {
1304
- var anthropicOptions;
1305
- return __generator(this, function (_a) {
1306
- if (this.client === null) {
1307
- anthropicOptions = __assign({}, this.options);
1308
- delete anthropicOptions.isVerbose;
1309
- delete anthropicOptions.isProxied;
1310
- this.client = new Anthropic(anthropicOptions);
1311
- }
1312
- return [2 /*return*/, this.client];
1313
- });
1314
- });
1315
- };
1316
- /**
1317
- * Check the `options` passed to `constructor`
1318
- */
1319
- AnthropicClaudeExecutionTools.prototype.checkConfiguration = function () {
1320
- return __awaiter(this, void 0, void 0, function () {
1321
- return __generator(this, function (_a) {
1322
- switch (_a.label) {
1323
- case 0: return [4 /*yield*/, this.getClient()];
1324
- case 1:
1325
- _a.sent();
1326
- return [2 /*return*/];
1327
- }
1328
- });
1329
- });
1330
- };
1331
- /**
1332
- * List all available Anthropic Claude models that can be used
1333
- */
1334
- AnthropicClaudeExecutionTools.prototype.listModels = function () {
1335
- return ANTHROPIC_CLAUDE_MODELS;
1336
- };
1337
- /**
1338
- * Calls Anthropic Claude API to use a chat model.
1339
- */
1340
- AnthropicClaudeExecutionTools.prototype.callChatModel = function (prompt) {
1341
- return __awaiter(this, void 0, void 0, function () {
1342
- var content, parameters, modelRequirements, client, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, contentBlock, resultContent, usage;
1343
- return __generator(this, function (_a) {
1344
- switch (_a.label) {
1345
- case 0:
1346
- if (this.options.isVerbose) {
1347
- console.info('💬 Anthropic Claude callChatModel call');
1348
- }
1349
- content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
1350
- return [4 /*yield*/, this.getClient()];
1351
- case 1:
1352
- client = _a.sent();
1353
- // TODO: [☂] Use here more modelRequirements
1354
- if (modelRequirements.modelVariant !== 'CHAT') {
1355
- throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
1356
- }
1357
- modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
1358
- rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
1359
- rawRequest = {
1360
- model: modelRequirements.modelName || this.getDefaultChatModel().modelName,
1361
- max_tokens: modelRequirements.maxTokens || 4096,
1362
- // <- TODO: [🌾] Make some global max cap for maxTokens
1363
- temperature: modelRequirements.temperature,
1364
- system: modelRequirements.systemMessage,
1365
- // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
1366
- // <- Note: [🧆]
1367
- messages: [
1368
- {
1369
- role: 'user',
1370
- content: rawPromptContent,
1371
- },
1372
- ],
1373
- // TODO: Is here some equivalent of user identification?> user: this.options.user,
1374
- };
1375
- start = getCurrentIsoDate();
1376
- if (this.options.isVerbose) {
1377
- console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
1378
- }
1379
- return [4 /*yield*/, client.messages.create(rawRequest)];
1380
- case 2:
1381
- rawResponse = _a.sent();
1382
- if (this.options.isVerbose) {
1383
- console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
1384
- }
1385
- if (!rawResponse.content[0]) {
1386
- throw new PipelineExecutionError('No content from Anthropic Claude');
1387
- }
1388
- if (rawResponse.content.length > 1) {
1389
- throw new PipelineExecutionError('More than one content blocks from Anthropic Claude');
1390
- }
1391
- contentBlock = rawResponse.content[0];
1392
- if (contentBlock.type !== 'text') {
1393
- throw new PipelineExecutionError("Returned content is not \"text\" type but \"".concat(contentBlock.type, "\""));
1394
- }
1395
- resultContent = contentBlock.text;
1396
- // eslint-disable-next-line prefer-const
1397
- complete = getCurrentIsoDate();
1398
- usage = computeAnthropicClaudeUsage(content, '', rawResponse);
1399
- return [2 /*return*/, {
1400
- content: resultContent,
1401
- modelName: rawResponse.model,
1402
- timing: {
1403
- start: start,
1404
- complete: complete,
1405
- },
1406
- usage: usage,
1407
- rawPromptContent: rawPromptContent,
1408
- rawRequest: rawRequest,
1409
- rawResponse: rawResponse,
1410
- // <- [🗯]
1411
- }];
1412
- }
1413
- });
1414
- });
1415
- };
1416
- /*
1417
- TODO: [👏]
1418
- public async callCompletionModel(
1419
- prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements'>,
1420
- ): Promise<PromptCompletionResult> {
1421
-
1422
- if (this.options.isVerbose) {
1423
- console.info('🖋 Anthropic Claude callCompletionModel call');
1424
- }
1425
-
1426
- const { content, parameters, modelRequirements } = prompt;
1427
-
1428
- // TODO: [☂] Use here more modelRequirements
1429
- if (modelRequirements.modelVariant !== 'COMPLETION') {
1430
- throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
1431
- }
1432
-
1433
- const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
1434
- const modelSettings = {
1435
- model: modelName,
1436
- max_tokens: modelRequirements.maxTokens || 2000, // <- Note: 2000 is for legacy reasons
1437
- // <- TODO: [🌾] Make some global max cap for maxTokens
1438
- // <- TODO: Use here `systemMessage`, `temperature` and `seed`
1439
- };
1440
-
1441
- const rawRequest: xxxx.Completions.CompletionCreateParamsNonStreaming = {
1442
- ...modelSettings,
1443
- prompt: rawPromptContent,
1444
- user: this.options.user,
1445
- };
1446
- const start: string_date_iso8601 = getCurrentIsoDate();
1447
- let complete: string_date_iso8601;
1448
-
1449
- if (this.options.isVerbose) {
1450
- console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
1451
- }
1452
- const rawResponse = await this.client.completions.create(rawRequest);
1453
- if (this.options.isVerbose) {
1454
- console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
1455
- }
1456
-
1457
- if (!rawResponse.choices[0]) {
1458
- throw new PipelineExecutionError('No choices from Anthropic Claude');
1459
- }
1460
-
1461
- if (rawResponse.choices.length > 1) {
1462
- // TODO: This should be maybe only warning
1463
- throw new PipelineExecutionError('More than one choice from Anthropic Claude');
1464
- }
1465
-
1466
- const resultContent = rawResponse.choices[0].text;
1467
- // eslint-disable-next-line prefer-const
1468
- complete = getCurrentIsoDate();
1469
- const usage = { price: 'UNKNOWN', inputTokens: 0, outputTokens: 0 /* <- TODO: [🐞] Compute usage * / } satisfies PromptResultUsage;
1470
-
1471
-
1472
-
1473
- return {
1474
- content: resultContent,
1475
- modelName: rawResponse.model || model,
1476
- timing: {
1477
- start,
1478
- complete,
1479
- },
1480
- usage,
1481
- rawResponse,
1482
- // <- [🗯]
1483
- };
1484
- }
1485
- */
1486
- // <- Note: [🤖] callXxxModel
1487
- /**
1488
- * Get the model that should be used as default
1489
- */
1490
- AnthropicClaudeExecutionTools.prototype.getDefaultModel = function (defaultModelName) {
1491
- var model = ANTHROPIC_CLAUDE_MODELS.find(function (_a) {
1492
- var modelName = _a.modelName;
1493
- return modelName.startsWith(defaultModelName);
1494
- });
1495
- if (model === undefined) {
1496
- throw new UnexpectedError(spaceTrim$1(function (block) {
1497
- return "\n Cannot find model in OpenAI models with name \"".concat(defaultModelName, "\" which should be used as default.\n\n Available models:\n ").concat(block(ANTHROPIC_CLAUDE_MODELS.map(function (_a) {
1498
- var modelName = _a.modelName;
1499
- return "- \"".concat(modelName, "\"");
1500
- }).join('\n')), "\n\n ");
1501
- }));
1502
- }
1503
- return model;
1504
- };
1505
- /**
1506
- * Default model for chat variant.
1507
- */
1508
- AnthropicClaudeExecutionTools.prototype.getDefaultChatModel = function () {
1509
- return this.getDefaultModel('claude-3-opus');
1510
- };
1511
- return AnthropicClaudeExecutionTools;
1512
- }());
1513
- /**
1514
- * TODO: [🍆] JSON mode
1515
- * TODO: [🧠] Maybe handle errors via transformAnthropicError (like transformAzureError)
1516
- * TODO: Maybe Create some common util for callChatModel and callCompletionModel
1517
- * TODO: Maybe make custom OpenAiError
1518
- * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
1519
- * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
1520
- * TODO: [📅] Maybe instead of `RemoteLlmExecutionToolsOptions` use `proxyWithAnonymousRemoteServer` (if implemented)
1521
- */
1522
-
1523
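For context, the removed `AnthropicClaudeExecutionTools` above was consumed roughly like this. A minimal sketch; the `apiKey` option name is an assumption (the constructor options are passed through to the Anthropic client), everything else mirrors the destructuring in `callChatModel`:

```ts
import { AnthropicClaudeExecutionTools } from '@promptbook/anthropic-claude';

const tools = new AnthropicClaudeExecutionTools({
    apiKey: process.env.ANTHROPIC_CLAUDE_API_KEY, // <- assumption: forwarded to the Anthropic SDK client
    isVerbose: true,
});

const result = await tools.callChatModel({
    content: 'Write a haiku about {topic}', // <- assuming the {curly} placeholder syntax of replaceParameters
    parameters: { topic: 'the sea' },
    modelRequirements: {
        modelVariant: 'CHAT', // <- any other variant throws PipelineExecutionError
        systemMessage: 'You are a poet.',
        temperature: 0.7,
        // modelName omitted -> getDefaultChatModel() resolves to 'claude-3-opus'
    },
});

console.info(result.content, result.usage);
```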
- /**
1524
- * Execution Tools for calling Anthropic Claude API.
1525
- *
1526
- * @public exported from `@promptbook/anthropic-claude`
1527
- */
1528
- var createAnthropicClaudeExecutionTools = Object.assign(function (options) {
1529
- if (options.isProxied) {
1530
- return new RemoteLlmExecutionTools(__assign(__assign({}, options), { isAnonymous: true, llmToolsConfiguration: [
1531
- {
1532
- title: 'Anthropic Claude (proxied)',
1533
- packageName: '@promptbook/anthropic-claude',
1534
- className: 'AnthropicClaudeExecutionTools',
1535
- options: __assign(__assign({}, options), { isProxied: false }),
1536
- },
1537
- ], models: ANTHROPIC_CLAUDE_MODELS }));
1538
- }
1539
- return new AnthropicClaudeExecutionTools(options);
1540
- }, {
1541
- packageName: '@promptbook/anthropic-claude',
1542
- className: 'AnthropicClaudeExecutionTools',
1543
- });
1544
- /**
1545
- * TODO: [🧠] !!!! Make this anonymous mode work with all LLM providers
1546
- * TODO: [🧠][🧱] !!!! Maybe change all `new AnthropicClaudeExecutionTools` -> `createAnthropicClaudeExecutionTools` in manual
1547
- * TODO: [🧠] Maybe auto-detect usage in browser and determine default value of `isProxied`
1548
- * TODO: [🦺] Is there some way how to put `packageName` and `className` on top and function definition on bottom?
1549
- * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
1550
- */
1551
-
1552
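The `isProxied` branch above reroutes the same options through a Promptbook remote server instead of calling Anthropic directly. A sketch of both modes; `remoteUrl` is an assumed option consumed by `RemoteLlmExecutionTools`:

```ts
// Direct mode: the Anthropic API is called from the current process
const direct = createAnthropicClaudeExecutionTools({ apiKey: 'sk-ant-...' });

// Proxied mode: the options are wrapped into an anonymous `llmToolsConfiguration`
// and executed by the remote server, so the key is never used in the browser itself
const proxied = createAnthropicClaudeExecutionTools({
    isProxied: true,
    remoteUrl: 'https://promptbook.example.com', // <- assumption: option of RemoteLlmExecutionTools
    apiKey: 'sk-ant-...',
});
```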
- /**
1553
- * List of available OpenAI models with pricing
1554
- *
1555
- * Note: Done at 2024-05-20
1556
- *
1557
- * @see https://platform.openai.com/docs/models/
1558
- * @see https://openai.com/api/pricing/
1559
- * @public exported from `@promptbook/openai`
1560
- */
1561
- var OPENAI_MODELS = [
1562
- /*/
1563
- {
1564
- modelTitle: 'dall-e-3',
1565
- modelName: 'dall-e-3',
1566
- },
1567
- /**/
1568
- /*/
1569
- {
1570
- modelTitle: 'whisper-1',
1571
- modelName: 'whisper-1',
1572
- },
1573
- /**/
1574
- /**/
1575
- {
1576
- modelVariant: 'COMPLETION',
1577
- modelTitle: 'davinci-002',
1578
- modelName: 'davinci-002',
1579
- pricing: {
1580
- prompt: computeUsage("$2.00 / 1M tokens"),
1581
- output: computeUsage("$2.00 / 1M tokens"), // <- not sure
1582
- },
1583
- },
1584
- /**/
1585
- /*/
1586
- {
1587
- modelTitle: 'dall-e-2',
1588
- modelName: 'dall-e-2',
1589
- },
1590
- /**/
1591
- /**/
1592
- {
1593
- modelVariant: 'CHAT',
1594
- modelTitle: 'gpt-3.5-turbo-16k',
1595
- modelName: 'gpt-3.5-turbo-16k',
1596
- pricing: {
1597
- prompt: computeUsage("$3.00 / 1M tokens"),
1598
- output: computeUsage("$4.00 / 1M tokens"),
1599
- },
1600
- },
1601
- /**/
1602
- /*/
1603
- {
1604
- modelTitle: 'tts-1-hd-1106',
1605
- modelName: 'tts-1-hd-1106',
1606
- },
1607
- /**/
1608
- /*/
1609
- {
1610
- modelTitle: 'tts-1-hd',
1611
- modelName: 'tts-1-hd',
1612
- },
1613
- /**/
1614
- /**/
1615
- {
1616
- modelVariant: 'CHAT',
1617
- modelTitle: 'gpt-4',
1618
- modelName: 'gpt-4',
1619
- pricing: {
1620
- prompt: computeUsage("$30.00 / 1M tokens"),
1621
- output: computeUsage("$60.00 / 1M tokens"),
1622
- },
1623
- },
1624
- /**/
1625
- /**/
1626
- {
1627
- modelVariant: 'CHAT',
1628
- modelTitle: 'gpt-4-32k',
1629
- modelName: 'gpt-4-32k',
1630
- pricing: {
1631
- prompt: computeUsage("$60.00 / 1M tokens"),
1632
- output: computeUsage("$120.00 / 1M tokens"),
1633
- },
1634
- },
1635
- /**/
1636
- /*/
1637
- {
1638
- modelVariant: 'CHAT',
1639
- modelTitle: 'gpt-4-0613',
1640
- modelName: 'gpt-4-0613',
1641
- pricing: {
1642
- prompt: computeUsage(` / 1M tokens`),
1643
- output: computeUsage(` / 1M tokens`),
1644
- },
1645
- },
1646
- /**/
1647
- /**/
1648
- {
1649
- modelVariant: 'CHAT',
1650
- modelTitle: 'gpt-4-turbo-2024-04-09',
1651
- modelName: 'gpt-4-turbo-2024-04-09',
1652
- pricing: {
1653
- prompt: computeUsage("$10.00 / 1M tokens"),
1654
- output: computeUsage("$30.00 / 1M tokens"),
1655
- },
1656
- },
1657
- /**/
1658
- /**/
1659
- {
1660
- modelVariant: 'CHAT',
1661
- modelTitle: 'gpt-3.5-turbo-1106',
1662
- modelName: 'gpt-3.5-turbo-1106',
1663
- pricing: {
1664
- prompt: computeUsage("$1.00 / 1M tokens"),
1665
- output: computeUsage("$2.00 / 1M tokens"),
1666
- },
1667
- },
1668
- /**/
1669
- /**/
1670
- {
1671
- modelVariant: 'CHAT',
1672
- modelTitle: 'gpt-4-turbo',
1673
- modelName: 'gpt-4-turbo',
1674
- pricing: {
1675
- prompt: computeUsage("$10.00 / 1M tokens"),
1676
- output: computeUsage("$30.00 / 1M tokens"),
1677
- },
1678
- },
1679
- /**/
1680
- /**/
1681
- {
1682
- modelVariant: 'COMPLETION',
1683
- modelTitle: 'gpt-3.5-turbo-instruct-0914',
1684
- modelName: 'gpt-3.5-turbo-instruct-0914',
1685
- pricing: {
1686
- prompt: computeUsage("$1.50 / 1M tokens"),
1687
- output: computeUsage("$2.00 / 1M tokens"), // <- For gpt-3.5-turbo-instruct
1688
- },
1689
- },
1690
- /**/
1691
- /**/
1692
- {
1693
- modelVariant: 'COMPLETION',
1694
- modelTitle: 'gpt-3.5-turbo-instruct',
1695
- modelName: 'gpt-3.5-turbo-instruct',
1696
- pricing: {
1697
- prompt: computeUsage("$1.50 / 1M tokens"),
1698
- output: computeUsage("$2.00 / 1M tokens"),
1699
- },
1700
- },
1701
- /**/
1702
- /*/
1703
- {
1704
- modelTitle: 'tts-1',
1705
- modelName: 'tts-1',
1706
- },
1707
- /**/
1708
- /**/
1709
- {
1710
- modelVariant: 'CHAT',
1711
- modelTitle: 'gpt-3.5-turbo',
1712
- modelName: 'gpt-3.5-turbo',
1713
- pricing: {
1714
- prompt: computeUsage("$3.00 / 1M tokens"),
1715
- output: computeUsage("$6.00 / 1M tokens"), // <- Not sure, refer to gpt-3.5-turbo in Fine-tuning models
1716
- },
1717
- },
1718
- /**/
1719
- /**/
1720
- {
1721
- modelVariant: 'CHAT',
1722
- modelTitle: 'gpt-3.5-turbo-0301',
1723
- modelName: 'gpt-3.5-turbo-0301',
1724
- pricing: {
1725
- prompt: computeUsage("$1.50 / 1M tokens"),
1726
- output: computeUsage("$2.00 / 1M tokens"),
1727
- },
1728
- },
1729
- /**/
1730
- /**/
1731
- {
1732
- modelVariant: 'COMPLETION',
1733
- modelTitle: 'babbage-002',
1734
- modelName: 'babbage-002',
1735
- pricing: {
1736
- prompt: computeUsage("$0.40 / 1M tokens"),
1737
- output: computeUsage("$0.40 / 1M tokens"), // <- Not sure
1738
- },
1739
- },
1740
- /**/
1741
- /**/
1742
- {
1743
- modelVariant: 'CHAT',
1744
- modelTitle: 'gpt-4-1106-preview',
1745
- modelName: 'gpt-4-1106-preview',
1746
- pricing: {
1747
- prompt: computeUsage("$10.00 / 1M tokens"),
1748
- output: computeUsage("$30.00 / 1M tokens"),
1749
- },
1750
- },
1751
- /**/
1752
- /**/
1753
- {
1754
- modelVariant: 'CHAT',
1755
- modelTitle: 'gpt-4-0125-preview',
1756
- modelName: 'gpt-4-0125-preview',
1757
- pricing: {
1758
- prompt: computeUsage("$10.00 / 1M tokens"),
1759
- output: computeUsage("$30.00 / 1M tokens"),
1760
- },
1761
- },
1762
- /**/
1763
- /*/
1764
- {
1765
- modelTitle: 'tts-1-1106',
1766
- modelName: 'tts-1-1106',
1767
- },
1768
- /**/
1769
- /**/
1770
- {
1771
- modelVariant: 'CHAT',
1772
- modelTitle: 'gpt-3.5-turbo-0125',
1773
- modelName: 'gpt-3.5-turbo-0125',
1774
- pricing: {
1775
- prompt: computeUsage("$0.50 / 1M tokens"),
1776
- output: computeUsage("$1.50 / 1M tokens"),
1777
- },
1778
- },
1779
- /**/
1780
- /**/
1781
- {
1782
- modelVariant: 'CHAT',
1783
- modelTitle: 'gpt-4-turbo-preview',
1784
- modelName: 'gpt-4-turbo-preview',
1785
- pricing: {
1786
- prompt: computeUsage("$10.00 / 1M tokens"),
1787
- output: computeUsage("$30.00 / 1M tokens"), // <- Not sure, just for gpt-4-turbo
1788
- },
1789
- },
1790
- /**/
1791
- /**/
1792
- {
1793
- modelVariant: 'EMBEDDING',
1794
- modelTitle: 'text-embedding-3-large',
1795
- modelName: 'text-embedding-3-large',
1796
- pricing: {
1797
- prompt: computeUsage("$0.13 / 1M tokens"),
1798
- // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
1799
- output: 0, // <- Note: [🆖] In Embedding models you don't pay for output
1800
- },
1801
- },
1802
- /**/
1803
- /**/
1804
- {
1805
- modelVariant: 'EMBEDDING',
1806
- modelTitle: 'text-embedding-3-small',
1807
- modelName: 'text-embedding-3-small',
1808
- pricing: {
1809
- prompt: computeUsage("$0.02 / 1M tokens"),
1810
- // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
1811
- output: 0, // <- Note: [🆖] In Embedding models you don't pay for output
1812
- },
1813
- },
1814
- /**/
1815
- /**/
1816
- {
1817
- modelVariant: 'CHAT',
1818
- modelTitle: 'gpt-3.5-turbo-0613',
1819
- modelName: 'gpt-3.5-turbo-0613',
1820
- pricing: {
1821
- prompt: computeUsage("$1.50 / 1M tokens"),
1822
- output: computeUsage("$2.00 / 1M tokens"),
1823
- },
1824
- },
1825
- /**/
1826
- /**/
1827
- {
1828
- modelVariant: 'EMBEDDING',
1829
- modelTitle: 'text-embedding-ada-002',
1830
- modelName: 'text-embedding-ada-002',
1831
- pricing: {
1832
- prompt: computeUsage("$0.1 / 1M tokens"),
1833
- // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
1834
- output: 0, // <- Note: [🆖] In Embedding models you don't pay for output
1835
- },
1836
- },
1837
- /**/
1838
- /*/
1839
- {
1840
- modelVariant: 'CHAT',
1841
- modelTitle: 'gpt-4-1106-vision-preview',
1842
- modelName: 'gpt-4-1106-vision-preview',
1843
- },
1844
- /**/
1845
- /*/
1846
- {
1847
- modelVariant: 'CHAT',
1848
- modelTitle: 'gpt-4-vision-preview',
1849
- modelName: 'gpt-4-vision-preview',
1850
- pricing: {
1851
- prompt: computeUsage(`$10.00 / 1M tokens`),
1852
- output: computeUsage(`$30.00 / 1M tokens`),
1853
- },
1854
- },
1855
- /**/
1856
- /**/
1857
- {
1858
- modelVariant: 'CHAT',
1859
- modelTitle: 'gpt-4o-2024-05-13',
1860
- modelName: 'gpt-4o-2024-05-13',
1861
- pricing: {
1862
- prompt: computeUsage("$5.00 / 1M tokens"),
1863
- output: computeUsage("$15.00 / 1M tokens"),
1864
- },
1865
- },
1866
- /**/
1867
- /**/
1868
- {
1869
- modelVariant: 'CHAT',
1870
- modelTitle: 'gpt-4o',
1871
- modelName: 'gpt-4o',
1872
- pricing: {
1873
- prompt: computeUsage("$5.00 / 1M tokens"),
1874
- output: computeUsage("$15.00 / 1M tokens"),
1875
- },
1876
- },
1877
- /**/
1878
- /**/
1879
- {
1880
- modelVariant: 'CHAT',
1881
- modelTitle: 'gpt-3.5-turbo-16k-0613',
1882
- modelName: 'gpt-3.5-turbo-16k-0613',
1883
- pricing: {
1884
- prompt: computeUsage("$3.00 / 1M tokens"),
1885
- output: computeUsage("$4.00 / 1M tokens"),
1886
- },
1887
- },
1888
- /**/
1889
- ];
1890
- /**
1891
- * Note: [🤖] Add models of new variant
1892
- * TODO: [🧠] Some mechanism to propagate unsureness
1893
- * TODO: [🎰] Some mechanism to auto-update available models
1894
- * TODO: [🎰][👮‍♀️] Make this list dynamic - modelNames can be listed dynamically, but not modelVariant, legacy status, context length and pricing
1895
- * TODO: [🧠][👮‍♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths (top-level performance, intelligence, fluency and understanding), contextWindow,...
1896
- * @see https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4
1897
- * @see https://openai.com/api/pricing/
1898
- * @see /other/playground/playground.ts
1899
- * TODO: [🍓] Make better
1900
- * TODO: Change model titles to human-readable ones, e.g. "gpt-4-turbo-2024-04-09" -> "GPT-4 Turbo (2024-04-09)"
1901
- * TODO: [🚸] Not all models are compatible with JSON mode, add this information here and use it
1902
- */
1903
-
1904
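Every `pricing` entry above funnels through `computeUsage`, whose implementation is outside this diff. A plausible minimal sketch, assuming prices are always quoted as `$X / 1M tokens`:

```ts
/**
 * Parses a price string like "$5.00 / 1M tokens" into USD per single token.
 * Hypothetical reimplementation for illustration only.
 */
function computeUsage(price: string): number {
    const match = /^\$([0-9.]+) \/ 1M tokens$/.exec(price);
    if (match === null) {
        throw new Error(`Unparsable price string: "${price}"`);
    }
    return Number(match[1]) / 1_000_000;
}

computeUsage('$5.00 / 1M tokens'); // -> 0.000005 USD per token
```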
- /**
1905
- * Execution Tools for calling Azure OpenAI API.
1906
- *
1907
- * @public exported from `@promptbook/azure-openai`
1908
- */
1909
- var AzureOpenAiExecutionTools = /** @class */ (function () {
1910
- /**
1911
- * Creates OpenAI Execution Tools.
1912
- *
1913
- * @param options which are relevant are directly passed to the OpenAI client
1914
- */
1915
- function AzureOpenAiExecutionTools(options) {
1916
- this.options = options;
1917
- /**
1918
- * OpenAI Azure API client.
1919
- */
1920
- this.client = null;
1921
- }
1922
- Object.defineProperty(AzureOpenAiExecutionTools.prototype, "title", {
1923
- get: function () {
1924
- return 'Azure OpenAI';
1925
- },
1926
- enumerable: false,
1927
- configurable: true
1928
- });
1929
- Object.defineProperty(AzureOpenAiExecutionTools.prototype, "description", {
1930
- get: function () {
1931
- return 'Use all models trained by OpenAI provided by Azure';
1932
- },
1933
- enumerable: false,
1934
- configurable: true
1935
- });
1936
- AzureOpenAiExecutionTools.prototype.getClient = function () {
1937
- return __awaiter(this, void 0, void 0, function () {
1938
- return __generator(this, function (_a) {
1939
- if (this.client === null) {
1940
- this.client = new OpenAIClient("https://".concat(this.options.resourceName, ".openai.azure.com/"), new AzureKeyCredential(this.options.apiKey));
1941
- }
1942
- return [2 /*return*/, this.client];
1943
- });
1944
- });
1945
- };
1946
- /**
1947
- * Check the `options` passed to `constructor`
1948
- */
1949
- AzureOpenAiExecutionTools.prototype.checkConfiguration = function () {
1950
- return __awaiter(this, void 0, void 0, function () {
1951
- return __generator(this, function (_a) {
1952
- switch (_a.label) {
1953
- case 0: return [4 /*yield*/, this.getClient()];
1954
- case 1:
1955
- _a.sent();
1956
- return [2 /*return*/];
1957
- }
1958
- });
1959
- });
1960
- };
1961
- /**
1962
- * List all available Azure OpenAI models that can be used
1963
- */
1964
- AzureOpenAiExecutionTools.prototype.listModels = function () {
1965
- return __awaiter(this, void 0, void 0, function () {
1966
- return __generator(this, function (_a) {
1967
- // TODO: !!! Filter here which models are really available as deployments
1968
- // @see https://management.azure.com/subscriptions/subscriptionId/resourceGroups/resourceGroupName/providers/Microsoft.CognitiveServices/accounts/accountName/deployments?api-version=2023-05-01
1969
- return [2 /*return*/, OPENAI_MODELS.map(function (_a) {
1970
- var modelTitle = _a.modelTitle, modelName = _a.modelName, modelVariant = _a.modelVariant;
1971
- return ({
1972
- modelTitle: "Azure ".concat(modelTitle),
1973
- modelName: modelName,
1974
- modelVariant: modelVariant,
1975
- });
1976
- })];
1977
- });
1978
- });
1979
- };
1980
- /**
1981
- * Calls OpenAI API to use a chat model.
1982
- */
1983
- AzureOpenAiExecutionTools.prototype.callChatModel = function (prompt) {
1984
- var _a, _b;
1985
- return __awaiter(this, void 0, void 0, function () {
1986
- var content, parameters, modelRequirements, client, modelName, modelSettings, rawPromptContent, messages, start, complete, rawRequest, rawResponse, resultContent, usage, error_1;
1987
- return __generator(this, function (_c) {
1988
- switch (_c.label) {
1989
- case 0:
1990
- if (this.options.isVerbose) {
1991
- console.info('💬 OpenAI callChatModel call');
1992
- }
1993
- content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
1994
- return [4 /*yield*/, this.getClient()];
1995
- case 1:
1996
- client = _c.sent();
1997
- // TODO: [☂] Use more of the modelRequirements here
1998
- if (modelRequirements.modelVariant !== 'CHAT') {
1999
- throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
2000
- }
2001
- _c.label = 2;
2002
- case 2:
2003
- _c.trys.push([2, 4, , 5]);
2004
- modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
2005
- modelSettings = {
2006
- maxTokens: modelRequirements.maxTokens,
2007
- // <- TODO: [🌾] Make some global max cap for maxTokens
2008
- temperature: modelRequirements.temperature,
2009
- user: this.options.user,
2010
- // <- TODO: [🈁] Use `seed` here AND/OR use `isDeterministic` for the entire execution tools
2011
- // <- Note: [🧆]
2012
- };
2013
- rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
2014
- messages = __spreadArray(__spreadArray([], __read((modelRequirements.systemMessage === undefined
2015
- ? []
2016
- : [
2017
- {
2018
- role: 'system',
2019
- content: modelRequirements.systemMessage,
2020
- },
2021
- ])), false), [
2022
- {
2023
- role: 'user',
2024
- content: rawPromptContent,
2025
- },
2026
- ], false);
2027
- start = getCurrentIsoDate();
2028
- complete = void 0;
2029
- if (this.options.isVerbose) {
2030
- console.info(colors.bgWhite('messages'), JSON.stringify(messages, null, 4));
2031
- }
2032
- rawRequest = [modelName, messages, modelSettings];
2033
- return [4 /*yield*/, client.getChatCompletions.apply(client, __spreadArray([], __read(rawRequest), false))];
2034
- case 3:
2035
- rawResponse = _c.sent();
2036
- if (this.options.isVerbose) {
2037
- console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
2038
- }
2039
- if (!rawResponse.choices[0]) {
2040
- throw new PipelineExecutionError('No choices from Azure OpenAI');
2041
- }
2042
- if (rawResponse.choices.length > 1) {
2043
- // TODO: This should be maybe only warning
2044
- throw new PipelineExecutionError('More than one choice from Azure OpenAI');
2045
- }
2046
- if (!rawResponse.choices[0].message || !rawResponse.choices[0].message.content) {
2047
- throw new PipelineExecutionError('Empty response from Azure OpenAI');
2048
- }
2049
- resultContent = rawResponse.choices[0].message.content;
2050
- // eslint-disable-next-line prefer-const
2051
- complete = getCurrentIsoDate();
2052
- usage = {
2053
- price: uncertainNumber() /* <- TODO: [🐞] Compute usage */,
2054
- input: __assign({ tokensCount: uncertainNumber((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.promptTokens) }, computeUsageCounts(prompt.content)),
2055
- output: __assign({ tokensCount: uncertainNumber((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completionTokens) }, computeUsageCounts(prompt.content)),
2056
- };
2057
- return [2 /*return*/, {
2058
- content: resultContent,
2059
- modelName: modelName,
2060
- timing: {
2061
- start: start,
2062
- complete: complete,
2063
- },
2064
- usage: usage,
2065
- rawPromptContent: rawPromptContent,
2066
- rawRequest: rawRequest,
2067
- rawResponse: rawResponse,
2068
- // <- [🗯]
2069
- }];
2070
- case 4:
2071
- error_1 = _c.sent();
2072
- throw this.transformAzureError(error_1);
2073
- case 5: return [2 /*return*/];
2074
- }
2075
- });
2076
- });
2077
- };
2078
- /**
2079
- * Calls Azure OpenAI API to use a completion model.
2080
- */
2081
- AzureOpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
2082
- var _a, _b;
2083
- return __awaiter(this, void 0, void 0, function () {
2084
- var content, parameters, modelRequirements, client, modelName, modelSettings, start, complete, rawPromptContent, rawRequest, rawResponse, resultContent, usage, error_2;
2085
- return __generator(this, function (_c) {
2086
- switch (_c.label) {
2087
- case 0:
2088
- if (this.options.isVerbose) {
2089
- console.info('🖋 OpenAI callCompletionModel call');
2090
- }
2091
- content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
2092
- return [4 /*yield*/, this.getClient()];
2093
- case 1:
2094
- client = _c.sent();
2095
- // TODO: [☂] Use more of the modelRequirements here
2096
- if (modelRequirements.modelVariant !== 'COMPLETION') {
2097
- throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
2098
- }
2099
- _c.label = 2;
2100
- case 2:
2101
- _c.trys.push([2, 4, , 5]);
2102
- modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
2103
- modelSettings = {
2104
- maxTokens: modelRequirements.maxTokens || 2000,
2105
- // <- TODO: [🌾] Make some global max cap for maxTokens
2106
- temperature: modelRequirements.temperature,
2107
- user: this.options.user,
2108
- // <- TODO: [🈁] Use `seed` here AND/OR use `isDeterministic` for the entire execution tools
2109
- // <- Note: [🧆]
2110
- };
2111
- start = getCurrentIsoDate();
2112
- complete = void 0;
2113
- if (this.options.isVerbose) {
2114
- console.info(colors.bgWhite('content'), JSON.stringify(content, null, 4));
2115
- console.info(colors.bgWhite('parameters'), JSON.stringify(parameters, null, 4));
2116
- }
2117
- rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
2118
- rawRequest = [
2119
- modelName,
2120
- [rawPromptContent],
2121
- modelSettings,
2122
- ];
2123
- return [4 /*yield*/, client.getCompletions.apply(client, __spreadArray([], __read(rawRequest), false))];
2124
- case 3:
2125
- rawResponse = _c.sent();
2126
- if (this.options.isVerbose) {
2127
- console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
2128
- }
2129
- if (!rawResponse.choices[0]) {
2130
- throw new PipelineExecutionError('No choices from OpenAI');
2131
- }
2132
- if (rawResponse.choices.length > 1) {
2133
- // TODO: This should be maybe only warning
2134
- throw new PipelineExecutionError('More than one choice from OpenAI');
2135
- }
2136
- resultContent = rawResponse.choices[0].text;
2137
- // eslint-disable-next-line prefer-const
2138
- complete = getCurrentIsoDate();
2139
- usage = {
2140
- price: uncertainNumber() /* <- TODO: [🐞] Compute usage */,
2141
- input: __assign({ tokensCount: uncertainNumber((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.promptTokens) }, computeUsageCounts(prompt.content)),
2142
- output: __assign({ tokensCount: uncertainNumber((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completionTokens) }, computeUsageCounts(prompt.content)),
2143
- };
2144
- return [2 /*return*/, {
2145
- content: resultContent,
2146
- modelName: modelName,
2147
- timing: {
2148
- start: start,
2149
- complete: complete,
2150
- },
2151
- usage: usage,
2152
- rawPromptContent: rawPromptContent,
2153
- rawRequest: rawRequest,
2154
- rawResponse: rawResponse,
2155
- // <- [🗯]
2156
- }];
2157
- case 4:
2158
- error_2 = _c.sent();
2159
- throw this.transformAzureError(error_2);
2160
- case 5: return [2 /*return*/];
2161
- }
2162
- });
2163
- });
2164
- };
2165
- // <- Note: [🤖] callXxxModel
2166
- /**
2167
- * Converts an Azure error (which is not a proper Error but a plain object) into a proper Error
2168
- */
2169
- AzureOpenAiExecutionTools.prototype.transformAzureError = function (azureError) {
2170
- if (typeof azureError !== 'object' || azureError === null) {
2171
- return new PipelineExecutionError("Unknown Azure OpenAI error");
2172
- }
2173
- var code = azureError.code, message = azureError.message;
2174
- return new PipelineExecutionError("".concat(code, ": ").concat(message));
2175
- };
2176
- return AzureOpenAiExecutionTools;
2177
- }());
2178
- /**
2179
- * TODO: Maybe Create some common util for callChatModel and callCompletionModel
2180
- * TODO: Maybe make custom AzureOpenAiError
2181
- * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
2182
- * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
2183
- */
2184
-
2185
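The options used by the removed class map directly onto an Azure resource. A usage sketch grounded in the `getClient`/`listModels` code above:

```ts
import { AzureOpenAiExecutionTools } from '@promptbook/azure-openai';

const tools = new AzureOpenAiExecutionTools({
    resourceName: 'my-resource', // -> client endpoint https://my-resource.openai.azure.com/
    deploymentName: 'gpt-4o',    // fallback used when the prompt does not pin a modelName
    apiKey: process.env.AZURE_OPENAI_API_KEY!,
    isVerbose: true,
});

await tools.checkConfiguration();        // currently only instantiates the OpenAIClient
const models = await tools.listModels(); // OPENAI_MODELS re-titled as "Azure ..."
```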
- /**
2186
- * Computes the usage of the OpenAI API based on the response from OpenAI
2187
- *
2188
- * @param promptContent The content of the prompt
2189
- * @param resultContent The content of the result (for embedding prompts or failed prompts pass empty string)
2190
- * @param rawResponse The raw response from OpenAI API
2191
- * @throws {PipelineExecutionError} If the usage is not defined in the response from OpenAI
2192
- * @private internal utility of `OpenAiExecutionTools`
2193
- */
2194
- function computeOpenAiUsage(promptContent, // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
2195
- resultContent, rawResponse) {
2196
- var _a, _b;
2197
- if (rawResponse.usage === undefined) {
2198
- throw new PipelineExecutionError('The usage is not defined in the response from OpenAI');
2199
- }
2200
- if (((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.prompt_tokens) === undefined) {
2201
- throw new PipelineExecutionError('In OpenAI response `usage.prompt_tokens` not defined');
2202
- }
2203
- var inputTokens = rawResponse.usage.prompt_tokens;
2204
- var outputTokens = ((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completion_tokens) || 0;
2205
- var modelInfo = OPENAI_MODELS.find(function (model) { return model.modelName === rawResponse.model; });
2206
- var price;
2207
- if (modelInfo === undefined || modelInfo.pricing === undefined) {
2208
- price = uncertainNumber();
2209
- }
2210
- else {
2211
- price = uncertainNumber(inputTokens * modelInfo.pricing.prompt + outputTokens * modelInfo.pricing.output);
2212
- }
2213
- return {
2214
- price: price,
2215
- input: __assign({ tokensCount: uncertainNumber(rawResponse.usage.prompt_tokens) }, computeUsageCounts(promptContent)),
2216
- output: __assign({ tokensCount: uncertainNumber(outputTokens) }, computeUsageCounts(resultContent)),
2217
- };
2218
- }
2219
- /**
2220
- * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
2221
- */
2222
-
2223
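A worked example of the pricing arithmetic in `computeOpenAiUsage`, using the `gpt-4o` row from `OPENAI_MODELS` ($5.00 / 1M prompt tokens, $15.00 / 1M output tokens):

```ts
const inputTokens = 1_000;  // rawResponse.usage.prompt_tokens
const outputTokens = 500;   // rawResponse.usage.completion_tokens

const price =
    inputTokens * (5.0 / 1_000_000) +  // 0.005000 USD for the prompt
    outputTokens * (15.0 / 1_000_000); // 0.007500 USD for the output

console.info(price.toFixed(6)); // "0.012500" — wrapped in uncertainNumber() by computeOpenAiUsage
```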
- /**
2224
- * Execution Tools for calling OpenAI API
2225
- *
2226
- * @public exported from `@promptbook/openai`
2227
- */
2228
- var OpenAiExecutionTools = /** @class */ (function () {
2229
- /**
2230
- * Creates OpenAI Execution Tools.
2231
- *
2232
- * @param options which are relevant are directly passed to the OpenAI client
2233
- */
2234
- function OpenAiExecutionTools(options) {
2235
- if (options === void 0) { options = {}; }
2236
- this.options = options;
2237
- /**
2238
- * OpenAI API client.
2239
- */
2240
- this.client = null;
2241
- }
2242
- Object.defineProperty(OpenAiExecutionTools.prototype, "title", {
2243
- get: function () {
2244
- return 'OpenAI';
2245
- },
2246
- enumerable: false,
2247
- configurable: true
2248
- });
2249
- Object.defineProperty(OpenAiExecutionTools.prototype, "description", {
2250
- get: function () {
2251
- return 'Use all models provided by OpenAI';
2252
- },
2253
- enumerable: false,
2254
- configurable: true
2255
- });
2256
- OpenAiExecutionTools.prototype.getClient = function () {
2257
- return __awaiter(this, void 0, void 0, function () {
2258
- var openAiOptions;
2259
- return __generator(this, function (_a) {
2260
- if (this.client === null) {
2261
- openAiOptions = __assign({}, this.options);
2262
- delete openAiOptions.isVerbose;
2263
- delete openAiOptions.user;
2264
- this.client = new OpenAI(__assign({}, openAiOptions));
2265
- }
2266
- return [2 /*return*/, this.client];
2267
- });
2268
- });
2269
- };
2270
- /**
2271
- * Check the `options` passed to `constructor`
2272
- */
2273
- OpenAiExecutionTools.prototype.checkConfiguration = function () {
2274
- return __awaiter(this, void 0, void 0, function () {
2275
- return __generator(this, function (_a) {
2276
- switch (_a.label) {
2277
- case 0: return [4 /*yield*/, this.getClient()];
2278
- case 1:
2279
- _a.sent();
2280
- return [2 /*return*/];
2281
- }
2282
- });
2283
- });
2284
- };
2285
- /**
2286
- * List all available OpenAI models that can be used
2287
- */
2288
- OpenAiExecutionTools.prototype.listModels = function () {
2289
- /*
2290
- Note: Dynamic listing of the models
2291
- const models = await this.openai.models.list({});
2292
-
2293
- console.log({ models });
2294
- console.log(models.data);
2295
- */
2296
- return OPENAI_MODELS;
2297
- };
2298
- /**
2299
- * Calls OpenAI API to use a chat model.
2300
- */
2301
- OpenAiExecutionTools.prototype.callChatModel = function (prompt) {
2302
- return __awaiter(this, void 0, void 0, function () {
2303
- var content, parameters, modelRequirements, expectFormat, client, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
2304
- return __generator(this, function (_a) {
2305
- switch (_a.label) {
2306
- case 0:
2307
- if (this.options.isVerbose) {
2308
- console.info('💬 OpenAI callChatModel call', { prompt: prompt });
2309
- }
2310
- content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements, expectFormat = prompt.expectFormat;
2311
- return [4 /*yield*/, this.getClient()];
2312
- case 1:
2313
- client = _a.sent();
2314
- // TODO: [☂] Use more of the modelRequirements here
2315
- if (modelRequirements.modelVariant !== 'CHAT') {
2316
- throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
2317
- }
2318
- modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
2319
- modelSettings = {
2320
- model: modelName,
2321
- max_tokens: modelRequirements.maxTokens,
2322
- // <- TODO: [🌾] Make some global max cap for maxTokens
2323
- temperature: modelRequirements.temperature,
2324
- // <- TODO: [🈁] Use `seed` here AND/OR use `isDeterministic` for the entire execution tools
2325
- // <- Note: [🧆]
2326
- };
2327
- if (expectFormat === 'JSON') {
2328
- modelSettings.response_format = {
2329
- type: 'json_object',
2330
- };
2331
- }
2332
- rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
2333
- rawRequest = __assign(__assign({}, modelSettings), { messages: __spreadArray(__spreadArray([], __read((modelRequirements.systemMessage === undefined
2334
- ? []
2335
- : [
2336
- {
2337
- role: 'system',
2338
- content: modelRequirements.systemMessage,
2339
- },
2340
- ])), false), [
2341
- {
2342
- role: 'user',
2343
- content: rawPromptContent,
2344
- },
2345
- ], false), user: this.options.user });
2346
- start = getCurrentIsoDate();
2347
- if (this.options.isVerbose) {
2348
- console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
2349
- }
2350
- return [4 /*yield*/, client.chat.completions.create(rawRequest)];
2351
- case 2:
2352
- rawResponse = _a.sent();
2353
- if (this.options.isVerbose) {
2354
- console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
2355
- }
2356
- if (!rawResponse.choices[0]) {
2357
- throw new PipelineExecutionError('No choices from OpenAI');
2358
- }
2359
- if (rawResponse.choices.length > 1) {
2360
- // TODO: This should be maybe only warning
2361
- throw new PipelineExecutionError('More than one choice from OpenAI');
2362
- }
2363
- resultContent = rawResponse.choices[0].message.content;
2364
- // eslint-disable-next-line prefer-const
2365
- complete = getCurrentIsoDate();
2366
- usage = computeOpenAiUsage(content, resultContent || '', rawResponse);
2367
- if (resultContent === null) {
2368
- throw new PipelineExecutionError('No response message from OpenAI');
2369
- }
2370
- return [2 /*return*/, {
2371
- content: resultContent,
2372
- modelName: rawResponse.model || modelName,
2373
- timing: {
2374
- start: start,
2375
- complete: complete,
2376
- },
2377
- usage: usage,
2378
- rawPromptContent: rawPromptContent,
2379
- rawRequest: rawRequest,
2380
- rawResponse: rawResponse,
2381
- // <- [🗯]
2382
- }];
2383
- }
2384
- });
2385
- });
2386
- };
2387
- /**
2388
- * Calls OpenAI API to use a completion model.
2389
- */
2390
- OpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
2391
- return __awaiter(this, void 0, void 0, function () {
2392
- var content, parameters, modelRequirements, client, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
2393
- return __generator(this, function (_a) {
2394
- switch (_a.label) {
2395
- case 0:
2396
- if (this.options.isVerbose) {
2397
- console.info('🖋 OpenAI callCompletionModel call', { prompt: prompt });
2398
- }
2399
- content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
2400
- return [4 /*yield*/, this.getClient()];
2401
- case 1:
2402
- client = _a.sent();
2403
- // TODO: [☂] Use more of the modelRequirements here
2404
- if (modelRequirements.modelVariant !== 'COMPLETION') {
2405
- throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
2406
- }
2407
- modelName = modelRequirements.modelName || this.getDefaultCompletionModel().modelName;
2408
- modelSettings = {
2409
- model: modelName,
2410
- max_tokens: modelRequirements.maxTokens || 2000,
2411
- // <- TODO: [🌾] Make some global max cap for maxTokens
2412
- temperature: modelRequirements.temperature,
2413
- // <- TODO: [🈁] Use `seed` here AND/OR use `isDeterministic` for the entire execution tools
2414
- // <- Note: [🧆]
2415
- };
2416
- rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
2417
- rawRequest = __assign(__assign({}, modelSettings), { prompt: rawPromptContent, user: this.options.user });
2418
- start = getCurrentIsoDate();
2419
- if (this.options.isVerbose) {
2420
- console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
2421
- }
2422
- return [4 /*yield*/, client.completions.create(rawRequest)];
2423
- case 2:
2424
- rawResponse = _a.sent();
2425
- if (this.options.isVerbose) {
2426
- console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
2427
- }
2428
- if (!rawResponse.choices[0]) {
2429
- throw new PipelineExecutionError('No choices from OpenAI');
2430
- }
2431
- if (rawResponse.choices.length > 1) {
2432
- // TODO: This should be maybe only warning
2433
- throw new PipelineExecutionError('More than one choice from OpenAI');
2434
- }
2435
- resultContent = rawResponse.choices[0].text;
2436
- // eslint-disable-next-line prefer-const
2437
- complete = getCurrentIsoDate();
2438
- usage = computeOpenAiUsage(content, resultContent || '', rawResponse);
2439
- return [2 /*return*/, {
2440
- content: resultContent,
2441
- modelName: rawResponse.model || modelName,
2442
- timing: {
2443
- start: start,
2444
- complete: complete,
2445
- },
2446
- usage: usage,
2447
- rawPromptContent: rawPromptContent,
2448
- rawRequest: rawRequest,
2449
- rawResponse: rawResponse,
2450
- // <- [🗯]
2451
- }];
2452
- }
2453
- });
2454
- });
2455
- };
2456
- /**
2457
- * Calls OpenAI API to use an embedding model
2458
- */
2459
- OpenAiExecutionTools.prototype.callEmbeddingModel = function (prompt) {
2460
- return __awaiter(this, void 0, void 0, function () {
2461
- var content, parameters, modelRequirements, client, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
2462
- return __generator(this, function (_a) {
2463
- switch (_a.label) {
2464
- case 0:
2465
- if (this.options.isVerbose) {
2466
- console.info('🖋 OpenAI embedding call', { prompt: prompt });
2467
- }
2468
- content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
2469
- return [4 /*yield*/, this.getClient()];
2470
- case 1:
2471
- client = _a.sent();
2472
- // TODO: [☂] Use more of the modelRequirements here
2473
- if (modelRequirements.modelVariant !== 'EMBEDDING') {
2474
- throw new PipelineExecutionError('Use callEmbeddingModel only for EMBEDDING variant');
2475
- }
2476
- modelName = modelRequirements.modelName || this.getDefaultEmbeddingModel().modelName;
2477
- rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
2478
- rawRequest = {
2479
- input: rawPromptContent,
2480
- model: modelName,
2481
- };
2482
- start = getCurrentIsoDate();
2483
- if (this.options.isVerbose) {
2484
- console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
2485
- }
2486
- return [4 /*yield*/, client.embeddings.create(rawRequest)];
2487
- case 2:
2488
- rawResponse = _a.sent();
2489
- if (this.options.isVerbose) {
2490
- console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
2491
- }
2492
- if (rawResponse.data.length !== 1) {
2493
- throw new PipelineExecutionError("Expected exactly 1 data item in response, got ".concat(rawResponse.data.length));
2494
- }
2495
- resultContent = rawResponse.data[0].embedding;
2496
- // eslint-disable-next-line prefer-const
2497
- complete = getCurrentIsoDate();
2498
- usage = computeOpenAiUsage(content, '', rawResponse);
2499
- return [2 /*return*/, {
2500
- content: resultContent,
2501
- modelName: rawResponse.model || modelName,
2502
- timing: {
2503
- start: start,
2504
- complete: complete,
2505
- },
2506
- usage: usage,
2507
- rawPromptContent: rawPromptContent,
2508
- rawRequest: rawRequest,
2509
- rawResponse: rawResponse,
2510
- // <- [🗯]
2511
- }];
2512
- }
2513
- });
2514
- });
2515
- };
2516
- // <- Note: [🤖] callXxxModel
2517
- /**
2518
- * Get the model that should be used as default
2519
- */
2520
- OpenAiExecutionTools.prototype.getDefaultModel = function (defaultModelName) {
2521
- var model = OPENAI_MODELS.find(function (_a) {
2522
- var modelName = _a.modelName;
2523
- return modelName === defaultModelName;
2524
- });
2525
- if (model === undefined) {
2526
- throw new UnexpectedError(spaceTrim$1(function (block) {
2527
- return "\n Cannot find model in OpenAI models with name \"".concat(defaultModelName, "\" which should be used as default.\n\n Available models:\n ").concat(block(OPENAI_MODELS.map(function (_a) {
2528
- var modelName = _a.modelName;
2529
- return "- \"".concat(modelName, "\"");
2530
- }).join('\n')), "\n\n ");
2531
- }));
2532
- }
2533
- return model;
2534
- };
2535
- /**
2536
- * Default model for chat variant.
2537
- */
2538
- OpenAiExecutionTools.prototype.getDefaultChatModel = function () {
2539
- return this.getDefaultModel('gpt-4o');
2540
- };
2541
- /**
2542
- * Default model for completion variant.
2543
- */
2544
- OpenAiExecutionTools.prototype.getDefaultCompletionModel = function () {
2545
- return this.getDefaultModel('gpt-3.5-turbo-instruct');
2546
- };
2547
- /**
2548
- * Default model for embedding variant.
2549
- */
2550
- OpenAiExecutionTools.prototype.getDefaultEmbeddingModel = function () {
2551
- return this.getDefaultModel('text-embedding-3-large');
2552
- };
2553
- return OpenAiExecutionTools;
2554
- }());
2555
- /**
2556
- * TODO: [🧠][🧙‍♂️] Maybe there can be some wizard for those who want to use just OpenAI
2557
- * TODO: Maybe Create some common util for callChatModel and callCompletionModel
2558
- * TODO: Maybe make custom OpenAiError
2559
- * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
2560
- * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
2561
- */
2562
-
2563
- /**
2564
- * Execution Tools for calling OpenAI API
2565
- *
2566
- * @public exported from `@promptbook/openai`
2567
- */
2568
- var createOpenAiExecutionTools = Object.assign(function (options) {
2569
- // TODO: !!!!!! If browser, auto add `dangerouslyAllowBrowser`
2570
- return new OpenAiExecutionTools(options);
2571
- }, {
2572
- packageName: '@promptbook/openai',
2573
- className: 'OpenAiExecutionTools',
2574
- });
2575
- /**
2576
- * TODO: [🦺] Is there some way how to put `packageName` and `className` on top and function definition on bottom?
2577
- * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
2578
- */
2579
-
2580
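The `Object.assign` pattern above is what lets registry code address the factory by name rather than by import; `createLlmToolsFromConfiguration` below rebuilds the lookup key as `'create' + className`:

```ts
createOpenAiExecutionTools.packageName; // '@promptbook/openai'
createOpenAiExecutionTools.className;   // 'OpenAiExecutionTools'

// Key used by the EXECUTION_TOOLS_CLASSES lookup below:
const key = `create${createOpenAiExecutionTools.className}`; // 'createOpenAiExecutionTools'
```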
- /**
2581
- * @@@
2582
- *
2583
- * TODO: !!!!!! Remove EXECUTION_TOOLS_CLASSES and use $llmToolsRegister instead
2584
- *
2585
- * @private internal type for `createLlmToolsFromConfiguration`
2586
- */
2587
- var EXECUTION_TOOLS_CLASSES = {
2588
- createOpenAiExecutionTools: createOpenAiExecutionTools,
2589
- createAnthropicClaudeExecutionTools: createAnthropicClaudeExecutionTools,
2590
- createAzureOpenAiExecutionTools: function (options) {
2591
- return new AzureOpenAiExecutionTools(
2592
- // <- TODO: [🧱] Implement in a functional (not new Class) way
2593
- options);
2594
- },
2595
- // <- Note: [🦑] Add here new LLM provider
2596
- };
2597
- /**
2598
- * TODO: !!!!!!! Make global register for this
2599
- * TODO: [🧠][🎌] Adding this should be responsibility of each provider package NOT this one central place
2600
- */
2601
-
2602
- /**
2603
- * @@@
2604
- *
2605
- * Note: This function is not cached; every call creates a new instance of `MultipleLlmExecutionTools`
2606
- *
2607
- * @returns @@@
2608
- * @public exported from `@promptbook/core`
2609
- */
2610
- function createLlmToolsFromConfiguration(configuration, options) {
2611
- if (options === void 0) { options = {}; }
2612
- var _a = options.isVerbose, isVerbose = _a === void 0 ? false : _a;
2613
- var llmTools = configuration.map(function (llmConfiguration) {
2614
- var constructor = EXECUTION_TOOLS_CLASSES["create".concat(llmConfiguration.className)];
2615
- if (!constructor) {
2616
- throw new Error(spaceTrim$1(function (block) { return "\n There is no constructor for LLM provider `".concat(llmConfiguration.className, "`\n\n\n @@@\n\n Available constructors are:\n ").concat(block('@@@'), "\n\n\n "); }));
2617
- }
2618
- return constructor(__assign({ isVerbose: isVerbose }, llmConfiguration.options));
2619
- });
2620
- return joinLlmExecutionTools.apply(void 0, __spreadArray([], __read(llmTools), false));
2621
- }
2622
- /**
2623
- * TODO: [🎌] Together with `createLlmToolsFromConfiguration` + 'EXECUTION_TOOLS_CLASSES', this pulls ALL model providers into `@promptbook/core`; make this more efficient
2624
- * TODO: [🧠][🎌] Dynamically install required providers
2625
- * TODO: @@@ write discussion about this - wizard
2626
- * TODO: [🧠][🍛] Which name is better `createLlmToolsFromConfig` or `createLlmToolsFromConfiguration`?
2627
- * TODO: [🧠] Is there some meaningful way to test this util
2628
- * TODO: This should be maybe not under `_common` but under `utils`
2629
- */
2630
-
2631
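The configuration shape accepted here mirrors the `llmToolsConfiguration` entries built by the proxied Anthropic factory above. A sketch; the option names inside `options` are assumptions forwarded verbatim to each constructor:

```ts
const llm = createLlmToolsFromConfiguration(
    [
        {
            title: 'OpenAI',
            packageName: '@promptbook/openai',
            className: 'OpenAiExecutionTools',
            options: { apiKey: process.env.OPENAI_API_KEY }, // <- assumption: forwarded to the OpenAI client
        },
        {
            title: 'Anthropic Claude',
            packageName: '@promptbook/anthropic-claude',
            className: 'AnthropicClaudeExecutionTools',
            options: { apiKey: process.env.ANTHROPIC_CLAUDE_API_KEY },
        },
    ],
    { isVerbose: true },
);
// -> a single LlmExecutionTools facade joined via joinLlmExecutionTools
```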
- /**
2632
- * Remote server is a proxy server that uses its execution tools internally and exposes the executor interface externally.
2633
- *
2634
- * You can simply use `RemoteExecutionTools` on client-side javascript and connect to your remote server.
2635
- * This is useful to keep all logic on the browser side without exposing your API keys or requiring the customer's GPU.
683
+ * You can simply use `RemoteExecutionTools` on client-side javascript and connect to your remote server.
684
+ * This is useful to keep all logic on the browser side without exposing your API keys or requiring the customer's GPU.
2636
685
  *
2637
686
  * @see https://github.com/webgptorg/promptbook#remote-server
2638
687
  * @public exported from `@promptbook/remote-server`
@@ -2696,12 +745,12 @@ function startRemoteServer(options) {
2696
745
  });
2697
746
  server.on('connection', function (socket) {
2698
747
  console.info(colors.gray("Client connected"), socket.id);
2699
- socket.on('request', function (request) { return __awaiter(_this, void 0, void 0, function () {
2700
- var _a, prompt, clientId, llmToolsConfiguration, llmExecutionTools, promptResult, _b, error_1;
748
+ socket.on('prompt-request', function (request) { return __awaiter(_this, void 0, void 0, function () {
749
+ var _a, isAnonymous, prompt, clientId, llmToolsConfiguration, llmExecutionTools, promptResult, _b, error_1;
2701
750
  return __generator(this, function (_c) {
2702
751
  switch (_c.label) {
2703
752
  case 0:
2704
- _a = __assign({ clientId: null, llmToolsConfiguration: null }, request), prompt = _a.prompt, clientId = _a.clientId, llmToolsConfiguration = _a.llmToolsConfiguration;
753
+ _a = __assign({ clientId: null, llmToolsConfiguration: null }, request), isAnonymous = _a.isAnonymous, prompt = _a.prompt, clientId = _a.clientId, llmToolsConfiguration = _a.llmToolsConfiguration;
2705
754
  // <- TODO: [🦪] Some helper type to be able to use discriminant union types with destructuring
2706
755
  if (isVerbose) {
2707
756
  console.info(colors.bgWhite("Prompt:"), colors.gray(JSON.stringify(request, null, 4)));
@@ -2709,20 +758,20 @@ function startRemoteServer(options) {
2709
758
  _c.label = 1;
2710
759
  case 1:
2711
760
  _c.trys.push([1, 14, 15, 16]);
2712
- if (llmToolsConfiguration !== null && !isAnonymousModeAllowed) {
761
+ if (isAnonymous === true && !isAnonymousModeAllowed) {
2713
762
  throw new PipelineExecutionError("Anonymous mode is not allowed"); // <- TODO: !!! Test
2714
763
  }
2715
- if (clientId !== null && !isCollectionModeAllowed) {
764
+ if (isAnonymous === false && !isCollectionModeAllowed) {
2716
765
  throw new PipelineExecutionError("Collection mode is not allowed"); // <- TODO: !!! Test
2717
766
  }
2718
767
  llmExecutionTools = void 0;
2719
- if (!(llmToolsConfiguration !== null)) return [3 /*break*/, 2];
768
+ if (!(isAnonymous === true && llmToolsConfiguration !== null)) return [3 /*break*/, 2];
2720
769
  // Note: Anonymous mode
2721
770
  // TODO: Maybe check that configuration is not empty
2722
771
  llmExecutionTools = createLlmToolsFromConfiguration(llmToolsConfiguration, { isVerbose: isVerbose });
2723
772
  return [3 /*break*/, 5];
2724
773
  case 2:
2725
- if (!(createLlmExecutionTools !== null)) return [3 /*break*/, 4];
774
+ if (!(isAnonymous === false && createLlmExecutionTools !== null)) return [3 /*break*/, 4];
2726
775
  // Note: Collection mode
2727
776
  llmExecutionTools = createLlmExecutionTools(clientId);
2728
777
  return [4 /*yield*/, collection.isResponsibleForPrompt(prompt)];
@@ -2731,7 +780,7 @@ function startRemoteServer(options) {
2731
780
  throw new PipelineExecutionError("Pipeline is not in the collection of this server");
2732
781
  }
2733
782
  return [3 /*break*/, 5];
2734
- case 4: throw new PipelineExecutionError("You must provide either llmToolsConfiguration or createLlmExecutionTools");
783
+ case 4: throw new PipelineExecutionError("You must provide either llmToolsConfiguration or non-anonymous mode must be propperly configured");
2735
784
  case 5:
2736
785
  promptResult = void 0;
2737
786
  _b = prompt.modelRequirements.modelVariant;
@@ -2773,7 +822,7 @@ function startRemoteServer(options) {
2773
822
  if (isVerbose) {
2774
823
  console.info(colors.bgGreen("PromptResult:"), colors.green(JSON.stringify(promptResult, null, 4)));
2775
824
  }
2776
- socket.emit('response', { promptResult: promptResult });
825
+ socket.emit('prompt-response', { promptResult: promptResult });
2777
826
  return [3 /*break*/, 16];
2778
827
  case 14:
2779
828
  error_1 = _c.sent();
@@ -2789,6 +838,55 @@ function startRemoteServer(options) {
2789
838
  }
2790
839
  });
2791
840
  }); });
841
+ // TODO: [👒] Listing models (and checking configuration) should probably go through a REST API, not Socket.io
842
+ socket.on('listModels-request', function (request) { return __awaiter(_this, void 0, void 0, function () {
843
+ var _a, isAnonymous, clientId, llmToolsConfiguration, llmExecutionTools, models, error_2;
844
+ return __generator(this, function (_b) {
845
+ switch (_b.label) {
846
+ case 0:
847
+ _a = __assign({ clientId: null, llmToolsConfiguration: null }, request), isAnonymous = _a.isAnonymous, clientId = _a.clientId, llmToolsConfiguration = _a.llmToolsConfiguration;
848
+ // <- TODO: [🦪] Some helper type to be able to use discriminant union types with destructuring
849
+ if (isVerbose) {
850
+ console.info(colors.bgWhite("Listing models"));
851
+ }
852
+ _b.label = 1;
853
+ case 1:
854
+ _b.trys.push([1, 3, 4, 5]);
855
+ if (isAnonymous === true && !isAnonymousModeAllowed) {
856
+ throw new PipelineExecutionError("Anonymous mode is not allowed"); // <- TODO: !!! Test
857
+ }
858
+ if (isAnonymous === false && !isCollectionModeAllowed) {
859
+ throw new PipelineExecutionError("Collection mode is not allowed"); // <- TODO: !!! Test
860
+ }
861
+ llmExecutionTools = void 0;
862
+ if (isAnonymous === true) {
863
+ // Note: Anonymouse mode
864
+ // TODO: Maybe check that configuration is not empty
865
+ llmExecutionTools = createLlmToolsFromConfiguration(llmToolsConfiguration, { isVerbose: isVerbose });
866
+ }
867
+ else {
868
+ // Note: Collection mode
869
+ llmExecutionTools = createLlmExecutionTools(clientId);
870
+ }
871
+ return [4 /*yield*/, llmExecutionTools.listModels()];
872
+ case 2:
873
+ models = _b.sent();
874
+ socket.emit('prompt-response', { models: models });
875
+ return [3 /*break*/, 5];
876
+ case 3:
877
+ error_2 = _b.sent();
878
+ if (!(error_2 instanceof Error)) {
879
+ throw error_2;
880
+ }
881
+ socket.emit('error', { errorMessage: error_2.message });
882
+ return [3 /*break*/, 5];
883
+ case 4:
884
+ socket.disconnect();
885
+ return [7 /*endfinally*/];
886
+ case 5: return [2 /*return*/];
887
+ }
888
+ });
889
+ }); });
2792
890
  socket.on('disconnect', function () {
2793
891
  // TODO: Destroy here executionToolsForClient
2794
892
  if (isVerbose) {
@@ -2818,6 +916,7 @@ function startRemoteServer(options) {
2818
916
  };
2819
917
  }
2820
918
  /**
919
+ * TODO: [🧠][🛍] Maybe not `isAnonymous: boolean` BUT `mode: 'ANONYMOUS'|'COLLECTION'`
2821
920
  * TODO: [⚖] Expose the collection to be able to connect to same collection via createCollectionFromUrl
2822
921
  * TODO: Handle progress - support streaming
2823
922
  * TODO: [🗯] Do not hang up immediately but wait until client closes OR timeout
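Taken together, the handlers above define the socket protocol a client speaks. A minimal client-side sketch; the server URL is hypothetical, and note that as wired above the model list is also emitted on the 'prompt-response' event:

```ts
import { io } from 'socket.io-client';

const socket = io('https://promptbook.example.com'); // <- hypothetical server URL

socket.emit('prompt-request', {
    isAnonymous: true,
    llmToolsConfiguration: [/* provider configurations, see createLlmToolsFromConfiguration */],
    clientId: null,
    prompt: {
        content: 'Hello!',
        parameters: {},
        modelRequirements: { modelVariant: 'CHAT' },
    },
});

socket.on('prompt-response', ({ promptResult, models }) => {
    console.info(promptResult ?? models);
});
socket.on('error', ({ errorMessage }) => {
    console.error(errorMessage); // the server disconnects after emitting this
});
```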