@promptbook/remote-server 0.65.0-2 → 0.65.0-4

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that public registry.
Files changed (25)
  1. package/esm/index.es.js +2462 -82
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/anthropic-claude.index.d.ts +6 -0
  4. package/esm/typings/src/_packages/core.index.d.ts +2 -0
  5. package/esm/typings/src/_packages/node.index.d.ts +0 -2
  6. package/esm/typings/src/_packages/remote-client.index.d.ts +2 -2
  7. package/esm/typings/src/_packages/types.index.d.ts +16 -2
  8. package/esm/typings/src/llm-providers/_common/config.d.ts +3 -3
  9. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +5 -3
  10. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions.d.ts +23 -2
  11. package/esm/typings/src/llm-providers/anthropic-claude/createAnthropicClaudeExecutionTools.d.ts +13 -0
  12. package/esm/typings/src/llm-providers/anthropic-claude/playground/playground.d.ts +1 -1
  13. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +3 -2
  14. package/esm/typings/src/llm-providers/remote/interfaces/Promptbook_Server_Error.d.ts +2 -2
  15. package/esm/typings/src/llm-providers/remote/interfaces/Promptbook_Server_Progress.d.ts +2 -2
  16. package/esm/typings/src/llm-providers/remote/interfaces/Promptbook_Server_Request.d.ts +14 -2
  17. package/esm/typings/src/llm-providers/remote/interfaces/RemoteLlmExecutionToolsOptions.d.ts +49 -0
  18. package/esm/typings/src/llm-providers/remote/interfaces/RemoteServerOptions.d.ts +23 -2
  19. package/esm/typings/src/llm-providers/remote/playground/playground.d.ts +2 -0
  20. package/esm/typings/src/llm-providers/remote/startRemoteServer.d.ts +2 -1
  21. package/esm/typings/src/types/typeAliases.d.ts +6 -0
  22. package/package.json +6 -2
  23. package/umd/index.umd.js +2464 -85
  24. package/umd/index.umd.js.map +1 -1
  25. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionToolsOptions.d.ts +0 -26
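
The headline change in this range is the new `createAnthropicClaudeExecutionTools` factory (file 11) together with the proxied/anonymous mode wired through `RemoteLlmExecutionTools` (files 13–20). Below is a minimal usage sketch based only on the typings and bundle code in this diff; option names other than `isProxied`, `remoteUrl` and `path` are assumptions, not documented API.

```ts
import { createAnthropicClaudeExecutionTools } from '@promptbook/anthropic-claude';

// Direct mode: everything except `isVerbose`/`isProxied` is handed to the Anthropic SDK
// client, so `apiKey` behaves the same as in `new Anthropic({ apiKey })`.
const anthropicTools = createAnthropicClaudeExecutionTools({
    isProxied: false,
    apiKey: process.env.ANTHROPIC_CLAUDE_API_KEY, // <- assumed environment variable name
});

// Proxied mode: the factory returns RemoteLlmExecutionTools with `isAnonymous: true`,
// forwarding the prompt and these options to a server started via `startRemoteServer`,
// so the key never has to ship to the browser. The default anonymous `remoteUrl`/`path`
// is still a TODO in this diff, so both values here are hypothetical.
const proxiedTools = createAnthropicClaudeExecutionTools({
    isProxied: true,
    remoteUrl: 'https://example.org',
    path: '/promptbook/socket.io',
    apiKey: process.env.ANTHROPIC_CLAUDE_API_KEY,
});

anthropicTools.listModels().then((models) => console.info(models));
```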
package/esm/index.es.js CHANGED
@@ -1,13 +1,17 @@
1
1
  import colors from 'colors';
2
2
  import http from 'http';
3
3
  import { Server } from 'socket.io';
4
- import { spaceTrim } from 'spacetrim';
4
+ import spaceTrim$1, { spaceTrim } from 'spacetrim';
5
+ import { io } from 'socket.io-client';
6
+ import Anthropic from '@anthropic-ai/sdk';
7
+ import { OpenAIClient, AzureKeyCredential } from '@azure/openai';
8
+ import OpenAI from 'openai';
5
9
 
6
10
  // ⚠️ WARNING: This code has been generated so that any manual changes will be overwritten
7
11
  /**
8
12
  * The version of the Promptbook library
9
13
  */
10
- var PROMPTBOOK_VERSION = '0.65.0-1';
14
+ var PROMPTBOOK_VERSION = '0.65.0-3';
11
15
  // TODO: !!!! List here all the versions and annotate + put into script
12
16
 
13
17
  /*! *****************************************************************************
@@ -41,6 +45,17 @@ function __extends(d, b) {
41
45
  d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
42
46
  }
43
47
 
48
+ var __assign = function() {
49
+ __assign = Object.assign || function __assign(t) {
50
+ for (var s, i = 1, n = arguments.length; i < n; i++) {
51
+ s = arguments[i];
52
+ for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p)) t[p] = s[p];
53
+ }
54
+ return t;
55
+ };
56
+ return __assign.apply(this, arguments);
57
+ };
58
+
44
59
  function __awaiter(thisArg, _arguments, P, generator) {
45
60
  function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
46
61
  return new (P || (P = Promise))(function (resolve, reject) {
@@ -77,6 +92,45 @@ function __generator(thisArg, body) {
77
92
  } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }
78
93
  if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };
79
94
  }
95
+ }
96
+
97
+ function __values(o) {
98
+ var s = typeof Symbol === "function" && Symbol.iterator, m = s && o[s], i = 0;
99
+ if (m) return m.call(o);
100
+ if (o && typeof o.length === "number") return {
101
+ next: function () {
102
+ if (o && i >= o.length) o = void 0;
103
+ return { value: o && o[i++], done: !o };
104
+ }
105
+ };
106
+ throw new TypeError(s ? "Object is not iterable." : "Symbol.iterator is not defined.");
107
+ }
108
+
109
+ function __read(o, n) {
110
+ var m = typeof Symbol === "function" && o[Symbol.iterator];
111
+ if (!m) return o;
112
+ var i = m.call(o), r, ar = [], e;
113
+ try {
114
+ while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);
115
+ }
116
+ catch (error) { e = { error: error }; }
117
+ finally {
118
+ try {
119
+ if (r && !r.done && (m = i["return"])) m.call(i);
120
+ }
121
+ finally { if (e) throw e.error; }
122
+ }
123
+ return ar;
124
+ }
125
+
126
+ function __spreadArray(to, from, pack) {
127
+ if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {
128
+ if (ar || !(i in from)) {
129
+ if (!ar) ar = Array.prototype.slice.call(from, 0, i);
130
+ ar[i] = from[i];
131
+ }
132
+ }
133
+ return to.concat(ar || Array.prototype.slice.call(from));
80
134
  }
81
135
 
82
136
  /**
@@ -96,107 +150,2432 @@ var PipelineExecutionError = /** @class */ (function (_super) {
96
150
  }(Error));
97
151
 
98
152
  /**
99
- * Remote server is a proxy server that uses its execution tools internally and exposes the executor interface externally.
153
+ * This error type indicates that the error should not happen and its last check before crashing with some other error
100
154
  *
101
- * You can simply use `RemoteExecutionTools` on client-side javascript and connect to your remote server.
102
- * This is useful to make all logic on browser side but not expose your API keys or no need to use customer's GPU.
155
+ * @public exported from `@promptbook/core`
156
+ */
157
+ var UnexpectedError = /** @class */ (function (_super) {
158
+ __extends(UnexpectedError, _super);
159
+ function UnexpectedError(message) {
160
+ var _this = _super.call(this, spaceTrim(function (block) { return "\n ".concat(block(message), "\n\n Note: This error should not happen.\n It's probbably a bug in the pipeline collection\n\n Please report issue:\n https://github.com/webgptorg/promptbook/issues\n\n Or contact us on me@pavolhejny.com\n\n "); })) || this;
161
+ _this.name = 'UnexpectedError';
162
+ Object.setPrototypeOf(_this, UnexpectedError.prototype);
163
+ return _this;
164
+ }
165
+ return UnexpectedError;
166
+ }(Error));
167
+
168
+ /**
169
+ * Multiple LLM Execution Tools is a proxy server that uses multiple execution tools internally and exposes the executor interface externally.
103
170
  *
104
- * @see https://github.com/webgptorg/promptbook#remote-server
105
- * @public exported from `@promptbook/remote-server`
171
+ * Note: Internal utility of `joinLlmExecutionTools` but exposed type
172
+ * @public exported from `@promptbook/types`
106
173
  */
107
- function startRemoteServer(options) {
108
- var _this = this;
109
- var port = options.port, path = options.path, collection = options.collection, createLlmExecutionTools = options.createLlmExecutionTools, _a = options.isVerbose, isVerbose = _a === void 0 ? false : _a;
110
- var httpServer = http.createServer({}, function (request, response) {
111
- var _a;
112
- if ((_a = request.url) === null || _a === void 0 ? void 0 : _a.includes('socket.io')) {
113
- return;
114
- }
115
- response.write(spaceTrim("\n Server for processing promptbook remote requests is running.\n\n Version: ".concat(PROMPTBOOK_VERSION, "\n\n For more information look at:\n https://github.com/webgptorg/promptbook\n\n ")));
116
- response.end();
174
+ var MultipleLlmExecutionTools = /** @class */ (function () {
175
+ /**
176
+ * Gets array of execution tools in order of priority
177
+ */
178
+ function MultipleLlmExecutionTools() {
179
+ var llmExecutionTools = [];
180
+ for (var _i = 0; _i < arguments.length; _i++) {
181
+ llmExecutionTools[_i] = arguments[_i];
182
+ }
183
+ this.llmExecutionTools = llmExecutionTools;
184
+ }
185
+ Object.defineProperty(MultipleLlmExecutionTools.prototype, "title", {
186
+ get: function () {
187
+ return 'Multiple LLM Providers';
188
+ },
189
+ enumerable: false,
190
+ configurable: true
117
191
  });
118
- var server = new Server(httpServer, {
119
- path: path,
120
- transports: [/*'websocket', <- TODO: [🌬] Make websocket transport work */ 'polling'],
121
- cors: {
122
- origin: '*',
123
- methods: ['GET', 'POST'],
192
+ Object.defineProperty(MultipleLlmExecutionTools.prototype, "description", {
193
+ get: function () {
194
+ return this.llmExecutionTools
195
+ .map(function (tools, index) { return "".concat(index + 1, ") ").concat(tools.title, " ").concat(tools.description || ''); })
196
+ .join('\n');
124
197
  },
198
+ enumerable: false,
199
+ configurable: true
125
200
  });
126
- server.on('connection', function (socket) {
127
- console.info(colors.gray("Client connected"), socket.id);
128
- socket.on('request', function (request) { return __awaiter(_this, void 0, void 0, function () {
129
- var prompt, clientId, executionToolsForClient, promptResult, _a, error_1;
130
- return __generator(this, function (_b) {
131
- switch (_b.label) {
201
+ /**
202
+ * Calls the best available chat model
203
+ */
204
+ MultipleLlmExecutionTools.prototype.callChatModel = function (prompt) {
205
+ return this.callCommonModel(prompt);
206
+ };
207
+ /**
208
+ * Calls the best available completion model
209
+ */
210
+ MultipleLlmExecutionTools.prototype.callCompletionModel = function (prompt) {
211
+ return this.callCommonModel(prompt);
212
+ };
213
+ /**
214
+ * Calls the best available embedding model
215
+ */
216
+ MultipleLlmExecutionTools.prototype.callEmbeddingModel = function (prompt) {
217
+ return this.callCommonModel(prompt);
218
+ };
219
+ // <- Note: [🤖]
220
+ /**
221
+ * Calls the best available model
222
+ *
223
+ * Note: This should be private or protected but is public to be usable with duck typing
224
+ */
225
+ MultipleLlmExecutionTools.prototype.callCommonModel = function (prompt) {
226
+ return __awaiter(this, void 0, void 0, function () {
227
+ var errors, _a, _b, llmExecutionTools, _c, error_1, e_1_1;
228
+ var e_1, _d;
229
+ var _this = this;
230
+ return __generator(this, function (_e) {
231
+ switch (_e.label) {
132
232
  case 0:
133
- prompt = request.prompt, clientId = request.clientId;
134
- // TODO: !! Validate here clientId (pass validator as dependency)
135
- if (isVerbose) {
136
- console.info(colors.bgWhite("Prompt:"), colors.gray(JSON.stringify(request, null, 4)));
137
- }
138
- _b.label = 1;
233
+ errors = [];
234
+ _e.label = 1;
139
235
  case 1:
140
- _b.trys.push([1, 11, 12, 13]);
141
- executionToolsForClient = createLlmExecutionTools(clientId);
142
- return [4 /*yield*/, collection.isResponsibleForPrompt(prompt)];
236
+ _e.trys.push([1, 15, 16, 17]);
237
+ _a = __values(this.llmExecutionTools), _b = _a.next();
238
+ _e.label = 2;
143
239
  case 2:
144
- if (!(_b.sent())) {
145
- throw new PipelineExecutionError("Pipeline is not in the collection of this server");
146
- }
147
- promptResult = void 0;
148
- _a = prompt.modelRequirements.modelVariant;
149
- switch (_a) {
150
- case 'CHAT': return [3 /*break*/, 3];
151
- case 'COMPLETION': return [3 /*break*/, 5];
152
- case 'EMBEDDING': return [3 /*break*/, 7];
153
- }
154
- return [3 /*break*/, 9];
240
+ if (!!_b.done) return [3 /*break*/, 14];
241
+ llmExecutionTools = _b.value;
242
+ _e.label = 3;
155
243
  case 3:
156
- if (executionToolsForClient.callChatModel === undefined) {
157
- // Note: [0] This check should not be a thing
158
- throw new PipelineExecutionError("Chat model is not available");
244
+ _e.trys.push([3, 12, , 13]);
245
+ _c = prompt.modelRequirements.modelVariant;
246
+ switch (_c) {
247
+ case 'CHAT': return [3 /*break*/, 4];
248
+ case 'COMPLETION': return [3 /*break*/, 6];
249
+ case 'EMBEDDING': return [3 /*break*/, 8];
159
250
  }
160
- return [4 /*yield*/, executionToolsForClient.callChatModel(prompt)];
161
- case 4:
162
- promptResult = _b.sent();
163
251
  return [3 /*break*/, 10];
164
- case 5:
165
- if (executionToolsForClient.callCompletionModel === undefined) {
166
- // Note: [0] This check should not be a thing
167
- throw new PipelineExecutionError("Completion model is not available");
252
+ case 4:
253
+ if (llmExecutionTools.callChatModel === undefined) {
254
+ return [3 /*break*/, 13];
168
255
  }
169
- return [4 /*yield*/, executionToolsForClient.callCompletionModel(prompt)];
256
+ return [4 /*yield*/, llmExecutionTools.callChatModel(prompt)];
257
+ case 5: return [2 /*return*/, _e.sent()];
170
258
  case 6:
171
- promptResult = _b.sent();
172
- return [3 /*break*/, 10];
173
- case 7:
174
- if (executionToolsForClient.callEmbeddingModel === undefined) {
175
- // Note: [0] This check should not be a thing
176
- throw new PipelineExecutionError("Embedding model is not available");
259
+ if (llmExecutionTools.callCompletionModel === undefined) {
260
+ return [3 /*break*/, 13];
177
261
  }
178
- return [4 /*yield*/, executionToolsForClient.callEmbeddingModel(prompt)];
262
+ return [4 /*yield*/, llmExecutionTools.callCompletionModel(prompt)];
263
+ case 7: return [2 /*return*/, _e.sent()];
179
264
  case 8:
180
- promptResult = _b.sent();
181
- return [3 /*break*/, 10];
182
- case 9: throw new PipelineExecutionError("Unknown model variant \"".concat(prompt.modelRequirements.modelVariant, "\""));
183
- case 10:
184
- if (isVerbose) {
185
- console.info(colors.bgGreen("PromptResult:"), colors.green(JSON.stringify(promptResult, null, 4)));
265
+ if (llmExecutionTools.callEmbeddingModel === undefined) {
266
+ return [3 /*break*/, 13];
186
267
  }
187
- socket.emit('response', { promptResult: promptResult });
188
- return [3 /*break*/, 13];
189
- case 11:
190
- error_1 = _b.sent();
191
- if (!(error_1 instanceof Error)) {
268
+ return [4 /*yield*/, llmExecutionTools.callEmbeddingModel(prompt)];
269
+ case 9: return [2 /*return*/, _e.sent()];
270
+ case 10: throw new UnexpectedError("Unknown model variant \"".concat(prompt.modelRequirements.modelVariant, "\""));
271
+ case 11: return [3 /*break*/, 13];
272
+ case 12:
273
+ error_1 = _e.sent();
274
+ if (!(error_1 instanceof Error) || error_1 instanceof UnexpectedError) {
192
275
  throw error_1;
193
276
  }
194
- socket.emit('error', { errorMessage: error_1.message });
277
+ errors.push(error_1);
195
278
  return [3 /*break*/, 13];
196
- case 12:
279
+ case 13:
280
+ _b = _a.next();
281
+ return [3 /*break*/, 2];
282
+ case 14: return [3 /*break*/, 17];
283
+ case 15:
284
+ e_1_1 = _e.sent();
285
+ e_1 = { error: e_1_1 };
286
+ return [3 /*break*/, 17];
287
+ case 16:
288
+ try {
289
+ if (_b && !_b.done && (_d = _a.return)) _d.call(_a);
290
+ }
291
+ finally { if (e_1) throw e_1.error; }
292
+ return [7 /*endfinally*/];
293
+ case 17:
294
+ if (errors.length === 1) {
295
+ throw errors[0];
296
+ }
297
+ else if (errors.length > 1) {
298
+ throw new PipelineExecutionError(
299
+ // TODO: Tell which execution tools failed like
300
+ // 1) OpenAI throw PipelineExecutionError: Parameter {knowledge} is not defined
301
+ // 2) AnthropicClaude throw PipelineExecutionError: Parameter {knowledge} is not defined
302
+ // 3) ...
303
+ spaceTrim$1(function (block) { return "\n All execution tools failed:\n\n ".concat(block(errors
304
+ .map(function (error, i) { return "".concat(i + 1, ") **").concat(error.name || 'Error', ":** ").concat(error.message); })
305
+ .join('\n')), "\n\n "); }));
306
+ }
307
+ else if (this.llmExecutionTools.length === 0) {
308
+ throw new PipelineExecutionError("You have not provided any `LlmExecutionTools`");
309
+ }
310
+ else {
311
+ throw new PipelineExecutionError(spaceTrim$1(function (block) { return "\n You have not provided any `LlmExecutionTools` that support model variant \"".concat(prompt.modelRequirements.modelVariant, "\n\n Available `LlmExecutionTools`:\n ").concat(block(_this.llmExecutionTools
312
+ .map(function (tools) { return "- ".concat(tools.title, " ").concat(tools.description || ''); })
313
+ .join('\n')), "\n\n "); }));
314
+ }
315
+ }
316
+ });
317
+ });
318
+ };
319
+ /**
320
+ * List all available models that can be used
321
+ * This lists is a combination of all available models from all execution tools
322
+ */
323
+ MultipleLlmExecutionTools.prototype.listModels = function () {
324
+ return __awaiter(this, void 0, void 0, function () {
325
+ var availableModels, _a, _b, llmExecutionTools, models, e_2_1;
326
+ var e_2, _c;
327
+ return __generator(this, function (_d) {
328
+ switch (_d.label) {
329
+ case 0:
330
+ availableModels = [];
331
+ _d.label = 1;
332
+ case 1:
333
+ _d.trys.push([1, 6, 7, 8]);
334
+ _a = __values(this.llmExecutionTools), _b = _a.next();
335
+ _d.label = 2;
336
+ case 2:
337
+ if (!!_b.done) return [3 /*break*/, 5];
338
+ llmExecutionTools = _b.value;
339
+ return [4 /*yield*/, llmExecutionTools.listModels()];
340
+ case 3:
341
+ models = _d.sent();
342
+ availableModels.push.apply(availableModels, __spreadArray([], __read(models), false));
343
+ _d.label = 4;
344
+ case 4:
345
+ _b = _a.next();
346
+ return [3 /*break*/, 2];
347
+ case 5: return [3 /*break*/, 8];
348
+ case 6:
349
+ e_2_1 = _d.sent();
350
+ e_2 = { error: e_2_1 };
351
+ return [3 /*break*/, 8];
352
+ case 7:
353
+ try {
354
+ if (_b && !_b.done && (_c = _a.return)) _c.call(_a);
355
+ }
356
+ finally { if (e_2) throw e_2.error; }
357
+ return [7 /*endfinally*/];
358
+ case 8: return [2 /*return*/, availableModels];
359
+ }
360
+ });
361
+ });
362
+ };
363
+ return MultipleLlmExecutionTools;
364
+ }());
365
+ /**
366
+ * TODO: [🧠][🎛] Aggregating multiple models - have result not only from one first aviable model BUT all of them
367
+ * TODO: [🏖] If no llmTools have for example not defined `callCompletionModel` this will still return object with defined `callCompletionModel` which just throws `PipelineExecutionError`, make it undefined instead
368
+ * Look how `countTotalUsage` (and `cacheLlmTools`) implements it
369
+ */
370
+
371
+ /**
372
+ * Joins multiple LLM Execution Tools into one
373
+ *
374
+ * @returns {LlmExecutionTools} Single wrapper for multiple LlmExecutionTools
375
+ *
376
+ * 0) If there is no LlmExecutionTools, it warns and returns valid but empty LlmExecutionTools
377
+ * 1) If there is only one LlmExecutionTools, it returns it wrapped in a proxy object
378
+ * 2) If there are multiple LlmExecutionTools, first will be used first, second will be used if the first hasn`t defined model variant or fails, etc.
379
+ * 3) When all LlmExecutionTools fail, it throws an error with a list of all errors merged into one
380
+ *
381
+ *
382
+ * Tip: You don't have to use this function directly, just pass an array of LlmExecutionTools to the `ExecutionTools`
383
+ *
384
+ * @public exported from `@promptbook/core`
385
+ */
386
+ function joinLlmExecutionTools() {
387
+ var llmExecutionTools = [];
388
+ for (var _i = 0; _i < arguments.length; _i++) {
389
+ llmExecutionTools[_i] = arguments[_i];
390
+ }
391
+ if (llmExecutionTools.length === 0) {
392
+ var warningMessage = spaceTrim$1("\n You have not provided any `LlmExecutionTools`\n This means that you won't be able to execute any prompts that require large language models like GPT-4 or Anthropic's Claude.\n\n Technically, it's not an error, but it's probably not what you want because it does not make sense to use Promptbook without language models.\n ");
393
+ // TODO: [🟥] Detect browser / node and make it colorfull
394
+ console.warn(warningMessage);
395
+ /*
396
+ return {
397
+ async listModels() {
398
+ // TODO: [🟥] Detect browser / node and make it colorfull
399
+ console.warn(
400
+ spaceTrim(
401
+ (block) => `
402
+
403
+ You can't list models because you have no LLM Execution Tools defined:
404
+
405
+ tl;dr
406
+
407
+ ${block(warningMessage)}
408
+ `,
409
+ ),
410
+ );
411
+ return [];
412
+ },
413
+ };
414
+ */
415
+ }
416
+ return new (MultipleLlmExecutionTools.bind.apply(MultipleLlmExecutionTools, __spreadArray([void 0], __read(llmExecutionTools), false)))();
417
+ }
418
+ /**
419
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
420
+ */
421
+
422
+ /**
423
+ * Remote server is a proxy server that uses its execution tools internally and exposes the executor interface externally.
424
+ *
425
+ * You can simply use `RemoteExecutionTools` on client-side javascript and connect to your remote server.
426
+ * This is useful to make all logic on browser side but not expose your API keys or no need to use customer's GPU.
427
+ *
428
+ * @see https://github.com/webgptorg/promptbook#remote-server
429
+ * @public exported from `@promptbook/remote-client`
430
+ */
431
+ var RemoteLlmExecutionTools = /** @class */ (function () {
432
+ function RemoteLlmExecutionTools(options) {
433
+ this.options = options;
434
+ }
435
+ Object.defineProperty(RemoteLlmExecutionTools.prototype, "title", {
436
+ get: function () {
437
+ // TODO: [🧠] Maybe fetch title+description from the remote server (as well as if model methods are defined)
438
+ return 'Remote server';
439
+ },
440
+ enumerable: false,
441
+ configurable: true
442
+ });
443
+ Object.defineProperty(RemoteLlmExecutionTools.prototype, "description", {
444
+ get: function () {
445
+ return 'Use all models by your remote server';
446
+ },
447
+ enumerable: false,
448
+ configurable: true
449
+ });
450
+ /**
451
+ * Creates a connection to the remote proxy server.
452
+ */
453
+ RemoteLlmExecutionTools.prototype.makeConnection = function () {
454
+ var _this = this;
455
+ return new Promise(function (resolve, reject) {
456
+ var socket = io(_this.options.remoteUrl, {
457
+ path: _this.options.path,
458
+ // path: `${this.remoteUrl.pathname}/socket.io`,
459
+ transports: [/*'websocket', <- TODO: [🌬] Make websocket transport work */ 'polling'],
460
+ });
461
+ // console.log('Connecting to', this.options.remoteUrl.href, { socket });
462
+ socket.on('connect', function () {
463
+ resolve(socket);
464
+ });
465
+ setTimeout(function () {
466
+ reject(new Error("Timeout while connecting to ".concat(_this.options.remoteUrl)));
467
+ }, 60000 /* <- TODO: Timeout to config */);
468
+ });
469
+ };
470
+ /**
471
+ * Calls remote proxy server to use a chat model
472
+ */
473
+ RemoteLlmExecutionTools.prototype.callChatModel = function (prompt) {
474
+ if (this.options.isVerbose) {
475
+ console.info("\uD83D\uDD8B Remote callChatModel call");
476
+ }
477
+ return /* not await */ this.callCommonModel(prompt);
478
+ };
479
+ /**
480
+ * Calls remote proxy server to use a completion model
481
+ */
482
+ RemoteLlmExecutionTools.prototype.callCompletionModel = function (prompt) {
483
+ if (this.options.isVerbose) {
484
+ console.info("\uD83D\uDCAC Remote callCompletionModel call");
485
+ }
486
+ return /* not await */ this.callCommonModel(prompt);
487
+ };
488
+ /**
489
+ * Calls remote proxy server to use a embedding model
490
+ */
491
+ RemoteLlmExecutionTools.prototype.callEmbeddingModel = function (prompt) {
492
+ if (this.options.isVerbose) {
493
+ console.info("\uD83D\uDCAC Remote callEmbeddingModel call");
494
+ }
495
+ return /* not await */ this.callCommonModel(prompt);
496
+ };
497
+ // <- Note: [🤖] callXxxModel
498
+ /**
499
+ * Calls remote proxy server to use both completion or chat model
500
+ */
501
+ RemoteLlmExecutionTools.prototype.callCommonModel = function (prompt) {
502
+ return __awaiter(this, void 0, void 0, function () {
503
+ var socket, promptResult;
504
+ return __generator(this, function (_a) {
505
+ switch (_a.label) {
506
+ case 0: return [4 /*yield*/, this.makeConnection()];
507
+ case 1:
508
+ socket = _a.sent();
509
+ if (this.options.isAnonymous) {
510
+ socket.emit('request', {
511
+ llmToolsConfiguration: this.options.llmToolsConfiguration,
512
+ prompt: prompt,
513
+ // <- TODO: [🛫] `prompt` is NOT fully serializable as JSON, it contains functions which are not serializable
514
+ });
515
+ }
516
+ else {
517
+ socket.emit('request', {
518
+ clientId: this.options.clientId,
519
+ prompt: prompt,
520
+ // <- TODO: [🛫] `prompt` is NOT fully serializable as JSON, it contains functions which are not serializable
521
+ });
522
+ }
523
+ return [4 /*yield*/, new Promise(function (resolve, reject) {
524
+ socket.on('response', function (response) {
525
+ resolve(response.promptResult);
526
+ socket.disconnect();
527
+ });
528
+ socket.on('error', function (error) {
529
+ reject(new PipelineExecutionError(error.errorMessage));
530
+ socket.disconnect();
531
+ });
532
+ })];
533
+ case 2:
534
+ promptResult = _a.sent();
535
+ socket.disconnect();
536
+ return [2 /*return*/, promptResult];
537
+ }
538
+ });
539
+ });
540
+ };
541
+ /**
542
+ * List all available models that can be used
543
+ */
544
+ RemoteLlmExecutionTools.prototype.listModels = function () {
545
+ return __awaiter(this, void 0, void 0, function () {
546
+ return __generator(this, function (_a) {
547
+ return [2 /*return*/, [
548
+ /* !!! */
549
+ ]];
550
+ });
551
+ });
552
+ };
553
+ return RemoteLlmExecutionTools;
554
+ }());
555
+ /**
556
+ * TODO: [🍜] !!!!!! Default remote remoteUrl and path for anonymous server
557
+ * TODO: [🍓] Allow to list compatible models with each variant
558
+ * TODO: [🗯] RemoteLlmExecutionTools should extend Destroyable and implement IDestroyable
559
+ * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
560
+ * TODO: [🧠] Maybe remove `@promptbook/remote-client` and just use `@promptbook/core`
561
+ */
562
+
563
+ /**
564
+ * Counts number of characters in the text
565
+ *
566
+ * @public exported from `@promptbook/utils`
567
+ */
568
+ function countCharacters(text) {
569
+ // Remove null characters
570
+ text = text.replace(/\0/g, '');
571
+ // Replace emojis (and also ZWJ sequence) with hyphens
572
+ text = text.replace(/(\p{Extended_Pictographic})\p{Modifier_Symbol}/gu, '$1');
573
+ text = text.replace(/(\p{Extended_Pictographic})[\u{FE00}-\u{FE0F}]/gu, '$1');
574
+ text = text.replace(/\p{Extended_Pictographic}(\u{200D}\p{Extended_Pictographic})*/gu, '-');
575
+ return text.length;
576
+ }
577
+
578
+ /**
579
+ * Counts number of lines in the text
580
+ *
581
+ * @public exported from `@promptbook/utils`
582
+ */
583
+ function countLines(text) {
584
+ if (text === '') {
585
+ return 0;
586
+ }
587
+ return text.split('\n').length;
588
+ }
589
+
590
+ /**
591
+ * Counts number of pages in the text
592
+ *
593
+ * @public exported from `@promptbook/utils`
594
+ */
595
+ function countPages(text) {
596
+ var sentencesPerPage = 5; // Assuming each page has 5 sentences
597
+ var sentences = text.split(/[.!?]+/).filter(function (sentence) { return sentence.trim() !== ''; });
598
+ var pageCount = Math.ceil(sentences.length / sentencesPerPage);
599
+ return pageCount;
600
+ }
601
+
602
+ /**
603
+ * Counts number of paragraphs in the text
604
+ *
605
+ * @public exported from `@promptbook/utils`
606
+ */
607
+ function countParagraphs(text) {
608
+ return text.split(/\n\s*\n/).filter(function (paragraph) { return paragraph.trim() !== ''; }).length;
609
+ }
610
+
611
+ /**
612
+ * Split text into sentences
613
+ *
614
+ * @public exported from `@promptbook/utils`
615
+ */
616
+ function splitIntoSentences(text) {
617
+ return text.split(/[.!?]+/).filter(function (sentence) { return sentence.trim() !== ''; });
618
+ }
619
+ /**
620
+ * Counts number of sentences in the text
621
+ *
622
+ * @public exported from `@promptbook/utils`
623
+ */
624
+ function countSentences(text) {
625
+ return splitIntoSentences(text).length;
626
+ }
627
+
628
+ var defaultDiacriticsRemovalMap = [
629
+ {
630
+ base: 'A',
631
+ letters: '\u0041\u24B6\uFF21\u00C0\u00C1\u00C2\u1EA6\u1EA4\u1EAA\u1EA8\u00C3\u0100\u0102\u1EB0\u1EAE\u1EB4\u1EB2\u0226\u01E0\u00C4\u01DE\u1EA2\u00C5\u01FA\u01CD\u0200\u0202\u1EA0\u1EAC\u1EB6\u1E00\u0104\u023A\u2C6F',
632
+ },
633
+ { base: 'AA', letters: '\uA732' },
634
+ { base: 'AE', letters: '\u00C6\u01FC\u01E2' },
635
+ { base: 'AO', letters: '\uA734' },
636
+ { base: 'AU', letters: '\uA736' },
637
+ { base: 'AV', letters: '\uA738\uA73A' },
638
+ { base: 'AY', letters: '\uA73C' },
639
+ {
640
+ base: 'B',
641
+ letters: '\u0042\u24B7\uFF22\u1E02\u1E04\u1E06\u0243\u0182\u0181',
642
+ },
643
+ {
644
+ base: 'C',
645
+ letters: '\u0043\u24B8\uFF23\u0106\u0108\u010A\u010C\u00C7\u1E08\u0187\u023B\uA73E',
646
+ },
647
+ {
648
+ base: 'D',
649
+ letters: '\u0044\u24B9\uFF24\u1E0A\u010E\u1E0C\u1E10\u1E12\u1E0E\u0110\u018B\u018A\u0189\uA779\u00D0',
650
+ },
651
+ { base: 'DZ', letters: '\u01F1\u01C4' },
652
+ { base: 'Dz', letters: '\u01F2\u01C5' },
653
+ {
654
+ base: 'E',
655
+ letters: '\u0045\u24BA\uFF25\u00C8\u00C9\u00CA\u1EC0\u1EBE\u1EC4\u1EC2\u1EBC\u0112\u1E14\u1E16\u0114\u0116\u00CB\u1EBA\u011A\u0204\u0206\u1EB8\u1EC6\u0228\u1E1C\u0118\u1E18\u1E1A\u0190\u018E',
656
+ },
657
+ { base: 'F', letters: '\u0046\u24BB\uFF26\u1E1E\u0191\uA77B' },
658
+ {
659
+ base: 'G',
660
+ letters: '\u0047\u24BC\uFF27\u01F4\u011C\u1E20\u011E\u0120\u01E6\u0122\u01E4\u0193\uA7A0\uA77D\uA77E',
661
+ },
662
+ {
663
+ base: 'H',
664
+ letters: '\u0048\u24BD\uFF28\u0124\u1E22\u1E26\u021E\u1E24\u1E28\u1E2A\u0126\u2C67\u2C75\uA78D',
665
+ },
666
+ {
667
+ base: 'I',
668
+ letters: '\u0049\u24BE\uFF29\u00CC\u00CD\u00CE\u0128\u012A\u012C\u0130\u00CF\u1E2E\u1EC8\u01CF\u0208\u020A\u1ECA\u012E\u1E2C\u0197',
669
+ },
670
+ { base: 'J', letters: '\u004A\u24BF\uFF2A\u0134\u0248' },
671
+ {
672
+ base: 'K',
673
+ letters: '\u004B\u24C0\uFF2B\u1E30\u01E8\u1E32\u0136\u1E34\u0198\u2C69\uA740\uA742\uA744\uA7A2',
674
+ },
675
+ {
676
+ base: 'L',
677
+ letters: '\u004C\u24C1\uFF2C\u013F\u0139\u013D\u1E36\u1E38\u013B\u1E3C\u1E3A\u0141\u023D\u2C62\u2C60\uA748\uA746\uA780',
678
+ },
679
+ { base: 'LJ', letters: '\u01C7' },
680
+ { base: 'Lj', letters: '\u01C8' },
681
+ { base: 'M', letters: '\u004D\u24C2\uFF2D\u1E3E\u1E40\u1E42\u2C6E\u019C' },
682
+ {
683
+ base: 'N',
684
+ letters: '\u004E\u24C3\uFF2E\u01F8\u0143\u00D1\u1E44\u0147\u1E46\u0145\u1E4A\u1E48\u0220\u019D\uA790\uA7A4',
685
+ },
686
+ { base: 'NJ', letters: '\u01CA' },
687
+ { base: 'Nj', letters: '\u01CB' },
688
+ {
689
+ base: 'O',
690
+ letters: '\u004F\u24C4\uFF2F\u00D2\u00D3\u00D4\u1ED2\u1ED0\u1ED6\u1ED4\u00D5\u1E4C\u022C\u1E4E\u014C\u1E50\u1E52\u014E\u022E\u0230\u00D6\u022A\u1ECE\u0150\u01D1\u020C\u020E\u01A0\u1EDC\u1EDA\u1EE0\u1EDE\u1EE2\u1ECC\u1ED8\u01EA\u01EC\u00D8\u01FE\u0186\u019F\uA74A\uA74C',
691
+ },
692
+ { base: 'OI', letters: '\u01A2' },
693
+ { base: 'OO', letters: '\uA74E' },
694
+ { base: 'OU', letters: '\u0222' },
695
+ { base: 'OE', letters: '\u008C\u0152' },
696
+ { base: 'oe', letters: '\u009C\u0153' },
697
+ {
698
+ base: 'P',
699
+ letters: '\u0050\u24C5\uFF30\u1E54\u1E56\u01A4\u2C63\uA750\uA752\uA754',
700
+ },
701
+ { base: 'Q', letters: '\u0051\u24C6\uFF31\uA756\uA758\u024A' },
702
+ {
703
+ base: 'R',
704
+ letters: '\u0052\u24C7\uFF32\u0154\u1E58\u0158\u0210\u0212\u1E5A\u1E5C\u0156\u1E5E\u024C\u2C64\uA75A\uA7A6\uA782',
705
+ },
706
+ {
707
+ base: 'S',
708
+ letters: '\u0053\u24C8\uFF33\u1E9E\u015A\u1E64\u015C\u1E60\u0160\u1E66\u1E62\u1E68\u0218\u015E\u2C7E\uA7A8\uA784',
709
+ },
710
+ {
711
+ base: 'T',
712
+ letters: '\u0054\u24C9\uFF34\u1E6A\u0164\u1E6C\u021A\u0162\u1E70\u1E6E\u0166\u01AC\u01AE\u023E\uA786',
713
+ },
714
+ { base: 'TZ', letters: '\uA728' },
715
+ {
716
+ base: 'U',
717
+ letters: '\u0055\u24CA\uFF35\u00D9\u00DA\u00DB\u0168\u1E78\u016A\u1E7A\u016C\u00DC\u01DB\u01D7\u01D5\u01D9\u1EE6\u016E\u0170\u01D3\u0214\u0216\u01AF\u1EEA\u1EE8\u1EEE\u1EEC\u1EF0\u1EE4\u1E72\u0172\u1E76\u1E74\u0244',
718
+ },
719
+ { base: 'V', letters: '\u0056\u24CB\uFF36\u1E7C\u1E7E\u01B2\uA75E\u0245' },
720
+ { base: 'VY', letters: '\uA760' },
721
+ {
722
+ base: 'W',
723
+ letters: '\u0057\u24CC\uFF37\u1E80\u1E82\u0174\u1E86\u1E84\u1E88\u2C72',
724
+ },
725
+ { base: 'X', letters: '\u0058\u24CD\uFF38\u1E8A\u1E8C' },
726
+ {
727
+ base: 'Y',
728
+ letters: '\u0059\u24CE\uFF39\u1EF2\u00DD\u0176\u1EF8\u0232\u1E8E\u0178\u1EF6\u1EF4\u01B3\u024E\u1EFE',
729
+ },
730
+ {
731
+ base: 'Z',
732
+ letters: '\u005A\u24CF\uFF3A\u0179\u1E90\u017B\u017D\u1E92\u1E94\u01B5\u0224\u2C7F\u2C6B\uA762',
733
+ },
734
+ {
735
+ base: 'a',
736
+ letters: '\u0061\u24D0\uFF41\u1E9A\u00E0\u00E1\u00E2\u1EA7\u1EA5\u1EAB\u1EA9\u00E3\u0101\u0103\u1EB1\u1EAF\u1EB5\u1EB3\u0227\u01E1\u00E4\u01DF\u1EA3\u00E5\u01FB\u01CE\u0201\u0203\u1EA1\u1EAD\u1EB7\u1E01\u0105\u2C65\u0250',
737
+ },
738
+ { base: 'aa', letters: '\uA733' },
739
+ { base: 'ae', letters: '\u00E6\u01FD\u01E3' },
740
+ { base: 'ao', letters: '\uA735' },
741
+ { base: 'au', letters: '\uA737' },
742
+ { base: 'av', letters: '\uA739\uA73B' },
743
+ { base: 'ay', letters: '\uA73D' },
744
+ {
745
+ base: 'b',
746
+ letters: '\u0062\u24D1\uFF42\u1E03\u1E05\u1E07\u0180\u0183\u0253',
747
+ },
748
+ {
749
+ base: 'c',
750
+ letters: '\u0063\u24D2\uFF43\u0107\u0109\u010B\u010D\u00E7\u1E09\u0188\u023C\uA73F\u2184',
751
+ },
752
+ {
753
+ base: 'd',
754
+ letters: '\u0064\u24D3\uFF44\u1E0B\u010F\u1E0D\u1E11\u1E13\u1E0F\u0111\u018C\u0256\u0257\uA77A',
755
+ },
756
+ { base: 'dz', letters: '\u01F3\u01C6' },
757
+ {
758
+ base: 'e',
759
+ letters: '\u0065\u24D4\uFF45\u00E8\u00E9\u00EA\u1EC1\u1EBF\u1EC5\u1EC3\u1EBD\u0113\u1E15\u1E17\u0115\u0117\u00EB\u1EBB\u011B\u0205\u0207\u1EB9\u1EC7\u0229\u1E1D\u0119\u1E19\u1E1B\u0247\u025B\u01DD',
760
+ },
761
+ { base: 'f', letters: '\u0066\u24D5\uFF46\u1E1F\u0192\uA77C' },
762
+ {
763
+ base: 'g',
764
+ letters: '\u0067\u24D6\uFF47\u01F5\u011D\u1E21\u011F\u0121\u01E7\u0123\u01E5\u0260\uA7A1\u1D79\uA77F',
765
+ },
766
+ {
767
+ base: 'h',
768
+ letters: '\u0068\u24D7\uFF48\u0125\u1E23\u1E27\u021F\u1E25\u1E29\u1E2B\u1E96\u0127\u2C68\u2C76\u0265',
769
+ },
770
+ { base: 'hv', letters: '\u0195' },
771
+ {
772
+ base: 'i',
773
+ letters: '\u0069\u24D8\uFF49\u00EC\u00ED\u00EE\u0129\u012B\u012D\u00EF\u1E2F\u1EC9\u01D0\u0209\u020B\u1ECB\u012F\u1E2D\u0268\u0131',
774
+ },
775
+ { base: 'j', letters: '\u006A\u24D9\uFF4A\u0135\u01F0\u0249' },
776
+ {
777
+ base: 'k',
778
+ letters: '\u006B\u24DA\uFF4B\u1E31\u01E9\u1E33\u0137\u1E35\u0199\u2C6A\uA741\uA743\uA745\uA7A3',
779
+ },
780
+ {
781
+ base: 'l',
782
+ letters: '\u006C\u24DB\uFF4C\u0140\u013A\u013E\u1E37\u1E39\u013C\u1E3D\u1E3B\u017F\u0142\u019A\u026B\u2C61\uA749\uA781\uA747',
783
+ },
784
+ { base: 'lj', letters: '\u01C9' },
785
+ { base: 'm', letters: '\u006D\u24DC\uFF4D\u1E3F\u1E41\u1E43\u0271\u026F' },
786
+ {
787
+ base: 'n',
788
+ letters: '\u006E\u24DD\uFF4E\u01F9\u0144\u00F1\u1E45\u0148\u1E47\u0146\u1E4B\u1E49\u019E\u0272\u0149\uA791\uA7A5',
789
+ },
790
+ { base: 'nj', letters: '\u01CC' },
791
+ {
792
+ base: 'o',
793
+ letters: '\u006F\u24DE\uFF4F\u00F2\u00F3\u00F4\u1ED3\u1ED1\u1ED7\u1ED5\u00F5\u1E4D\u022D\u1E4F\u014D\u1E51\u1E53\u014F\u022F\u0231\u00F6\u022B\u1ECF\u0151\u01D2\u020D\u020F\u01A1\u1EDD\u1EDB\u1EE1\u1EDF\u1EE3\u1ECD\u1ED9\u01EB\u01ED\u00F8\u01FF\u0254\uA74B\uA74D\u0275',
794
+ },
795
+ { base: 'oi', letters: '\u01A3' },
796
+ { base: 'ou', letters: '\u0223' },
797
+ { base: 'oo', letters: '\uA74F' },
798
+ {
799
+ base: 'p',
800
+ letters: '\u0070\u24DF\uFF50\u1E55\u1E57\u01A5\u1D7D\uA751\uA753\uA755',
801
+ },
802
+ { base: 'q', letters: '\u0071\u24E0\uFF51\u024B\uA757\uA759' },
803
+ {
804
+ base: 'r',
805
+ letters: '\u0072\u24E1\uFF52\u0155\u1E59\u0159\u0211\u0213\u1E5B\u1E5D\u0157\u1E5F\u024D\u027D\uA75B\uA7A7\uA783',
806
+ },
807
+ {
808
+ base: 's',
809
+ letters: '\u0073\u24E2\uFF53\u00DF\u015B\u1E65\u015D\u1E61\u0161\u1E67\u1E63\u1E69\u0219\u015F\u023F\uA7A9\uA785\u1E9B',
810
+ },
811
+ {
812
+ base: 't',
813
+ letters: '\u0074\u24E3\uFF54\u1E6B\u1E97\u0165\u1E6D\u021B\u0163\u1E71\u1E6F\u0167\u01AD\u0288\u2C66\uA787',
814
+ },
815
+ { base: 'tz', letters: '\uA729' },
816
+ {
817
+ base: 'u',
818
+ letters: '\u0075\u24E4\uFF55\u00F9\u00FA\u00FB\u0169\u1E79\u016B\u1E7B\u016D\u00FC\u01DC\u01D8\u01D6\u01DA\u1EE7\u016F\u0171\u01D4\u0215\u0217\u01B0\u1EEB\u1EE9\u1EEF\u1EED\u1EF1\u1EE5\u1E73\u0173\u1E77\u1E75\u0289',
819
+ },
820
+ { base: 'v', letters: '\u0076\u24E5\uFF56\u1E7D\u1E7F\u028B\uA75F\u028C' },
821
+ { base: 'vy', letters: '\uA761' },
822
+ {
823
+ base: 'w',
824
+ letters: '\u0077\u24E6\uFF57\u1E81\u1E83\u0175\u1E87\u1E85\u1E98\u1E89\u2C73',
825
+ },
826
+ { base: 'x', letters: '\u0078\u24E7\uFF58\u1E8B\u1E8D' },
827
+ {
828
+ base: 'y',
829
+ letters: '\u0079\u24E8\uFF59\u1EF3\u00FD\u0177\u1EF9\u0233\u1E8F\u00FF\u1EF7\u1E99\u1EF5\u01B4\u024F\u1EFF',
830
+ },
831
+ {
832
+ base: 'z',
833
+ letters: '\u007A\u24E9\uFF5A\u017A\u1E91\u017C\u017E\u1E93\u1E95\u01B6\u0225\u0240\u2C6C\uA763',
834
+ },
835
+ ];
836
+ /**
837
+ * Map of letters from diacritic variant to diacritless variant
838
+ * Contains lowercase and uppercase separatelly
839
+ *
840
+ * > "á" => "a"
841
+ * > "ě" => "e"
842
+ * > "Ă" => "A"
843
+ * > ...
844
+ *
845
+ * @public exported from `@promptbook/utils`
846
+ */
847
+ var DIACRITIC_VARIANTS_LETTERS = {};
848
+ // tslint:disable-next-line: prefer-for-of
849
+ for (var i = 0; i < defaultDiacriticsRemovalMap.length; i++) {
850
+ var letters = defaultDiacriticsRemovalMap[i].letters;
851
+ // tslint:disable-next-line: prefer-for-of
852
+ for (var j = 0; j < letters.length; j++) {
853
+ DIACRITIC_VARIANTS_LETTERS[letters[j]] = defaultDiacriticsRemovalMap[i].base;
854
+ }
855
+ }
856
+ // <- TODO: [🍓] Put to maker function to save execution time if not needed
857
+ /*
858
+ @see https://stackoverflow.com/questions/990904/remove-accents-diacritics-in-a-string-in-javascript
859
+ Licensed under the Apache License, Version 2.0 (the "License");
860
+ you may not use this file except in compliance with the License.
861
+ You may obtain a copy of the License at
862
+
863
+ http://www.apache.org/licenses/LICENSE-2.0
864
+
865
+ Unless required by applicable law or agreed to in writing, software
866
+ distributed under the License is distributed on an "AS IS" BASIS,
867
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
868
+ See the License for the specific language governing permissions and
869
+ limitations under the License.
870
+ */
871
+
872
+ /**
873
+ * @@@
874
+ *
875
+ * @param input @@@
876
+ * @returns @@@
877
+ * @public exported from `@promptbook/utils`
878
+ */
879
+ function removeDiacritics(input) {
880
+ /*eslint no-control-regex: "off"*/
881
+ return input.replace(/[^\u0000-\u007E]/g, function (a) {
882
+ return DIACRITIC_VARIANTS_LETTERS[a] || a;
883
+ });
884
+ }
885
+ /**
886
+ * TODO: [Ж] Variant for cyrillic (and in general non-latin) letters
887
+ */
888
+
889
+ /**
890
+ * Counts number of words in the text
891
+ *
892
+ * @public exported from `@promptbook/utils`
893
+ */
894
+ function countWords(text) {
895
+ text = text.replace(/[\p{Extended_Pictographic}]/gu, 'a');
896
+ text = removeDiacritics(text);
897
+ return text.split(/[^a-zа-я0-9]+/i).filter(function (word) { return word.length > 0; }).length;
898
+ }
899
+
900
+ /**
901
+ * Helper of usage compute
902
+ *
903
+ * @param content the content of prompt or response
904
+ * @returns part of PromptResultUsageCounts
905
+ *
906
+ * @private internal utility of LlmExecutionTools
907
+ */
908
+ function computeUsageCounts(content) {
909
+ return {
910
+ charactersCount: { value: countCharacters(content) },
911
+ wordsCount: { value: countWords(content) },
912
+ sentencesCount: { value: countSentences(content) },
913
+ linesCount: { value: countLines(content) },
914
+ paragraphsCount: { value: countParagraphs(content) },
915
+ pagesCount: { value: countPages(content) },
916
+ };
917
+ }
918
+
919
+ /**
920
+ * Make UncertainNumber
921
+ *
922
+ * @param value
923
+ *
924
+ * @private utility for initializating UncertainNumber
925
+ */
926
+ function uncertainNumber(value) {
927
+ if (value === null || value === undefined || Number.isNaN(value)) {
928
+ return { value: 0, isUncertain: true };
929
+ }
930
+ return { value: value };
931
+ }
932
+
933
+ /**
934
+ * Get current date in ISO 8601 format
935
+ *
936
+ * @private internal utility
937
+ */
938
+ function getCurrentIsoDate() {
939
+ return new Date().toISOString();
940
+ }
941
+
942
+ /**
943
+ * @@@
944
+ *
945
+ * Note: This function mutates the object and returns the original (but mutated-deep-freezed) object
946
+ *
947
+ * @returns The same object as the input, but deeply frozen
948
+ * @public exported from `@promptbook/utils`
949
+ */
950
+ function deepFreeze(objectValue) {
951
+ var e_1, _a;
952
+ var propertyNames = Object.getOwnPropertyNames(objectValue);
953
+ try {
954
+ for (var propertyNames_1 = __values(propertyNames), propertyNames_1_1 = propertyNames_1.next(); !propertyNames_1_1.done; propertyNames_1_1 = propertyNames_1.next()) {
955
+ var propertyName = propertyNames_1_1.value;
956
+ var value = objectValue[propertyName];
957
+ if (value && typeof value === 'object') {
958
+ deepFreeze(value);
959
+ }
960
+ }
961
+ }
962
+ catch (e_1_1) { e_1 = { error: e_1_1 }; }
963
+ finally {
964
+ try {
965
+ if (propertyNames_1_1 && !propertyNames_1_1.done && (_a = propertyNames_1.return)) _a.call(propertyNames_1);
966
+ }
967
+ finally { if (e_1) throw e_1.error; }
968
+ }
969
+ return Object.freeze(objectValue);
970
+ }
971
+ /**
972
+ * TODO: [🧠] Is there a way how to meaningfully test this utility
973
+ */
974
+
975
+ // <- TODO: [🧠] Better system for generator warnings - not always "code" and "by `@promptbook/cli`"
976
+ /**
977
+ * The maximum number of iterations for a loops
978
+ *
979
+ * @private within the repository - too low-level in comparison with other `MAX_...`
980
+ */
981
+ var LOOP_LIMIT = 1000;
982
+ /**
983
+ * Nonce which is used for replacing things in strings
984
+ *
985
+ * @private within the repository
986
+ */
987
+ var REPLACING_NONCE = 'u$k42k%!V2zo34w7Fu#@QUHYPW';
988
+ /**
989
+ * The names of the parameters that are reserved for special purposes
990
+ *
991
+ * @public exported from `@promptbook/core`
992
+ */
993
+ deepFreeze([
994
+ 'content',
995
+ 'context',
996
+ 'knowledge',
997
+ 'samples',
998
+ 'modelName',
999
+ 'currentDate',
1000
+ // <- TODO: Add more like 'date', 'modelName',...
1001
+ // <- TODO: Add [emoji] + instructions ACRY when adding new reserved parameter
1002
+ ]);
1003
+ /**
1004
+ * @@@
1005
+ *
1006
+ * @private within the repository
1007
+ */
1008
+ var RESERVED_PARAMETER_MISSING_VALUE = 'MISSING-' + REPLACING_NONCE;
1009
+ /**
1010
+ * @@@
1011
+ *
1012
+ * @private within the repository
1013
+ */
1014
+ var RESERVED_PARAMETER_RESTRICTED = 'RESTRICTED-' + REPLACING_NONCE;
1015
+
1016
+ /**
1017
+ * This error type indicates that some limit was reached
1018
+ *
1019
+ * @public exported from `@promptbook/core`
1020
+ */
1021
+ var LimitReachedError = /** @class */ (function (_super) {
1022
+ __extends(LimitReachedError, _super);
1023
+ function LimitReachedError(message) {
1024
+ var _this = _super.call(this, message) || this;
1025
+ _this.name = 'LimitReachedError';
1026
+ Object.setPrototypeOf(_this, LimitReachedError.prototype);
1027
+ return _this;
1028
+ }
1029
+ return LimitReachedError;
1030
+ }(Error));
1031
+
1032
+ /**
1033
+ * Replaces parameters in template with values from parameters object
1034
+ *
1035
+ * @param template the template with parameters in {curly} braces
1036
+ * @param parameters the object with parameters
1037
+ * @returns the template with replaced parameters
1038
+ * @throws {PipelineExecutionError} if parameter is not defined, not closed, or not opened
1039
+ * @public exported from `@promptbook/utils`
1040
+ */
1041
+ function replaceParameters(template, parameters) {
1042
+ var e_1, _a;
1043
+ try {
1044
+ for (var _b = __values(Object.entries(parameters)), _c = _b.next(); !_c.done; _c = _b.next()) {
1045
+ var _d = __read(_c.value, 2), parameterName = _d[0], parameterValue = _d[1];
1046
+ if (parameterValue === RESERVED_PARAMETER_MISSING_VALUE) {
1047
+ throw new UnexpectedError("Parameter {".concat(parameterName, "} has missing value"));
1048
+ }
1049
+ else if (parameterValue === RESERVED_PARAMETER_RESTRICTED) {
1050
+ // TODO: [🍵]
1051
+ throw new UnexpectedError("Parameter {".concat(parameterName, "} is restricted to use"));
1052
+ }
1053
+ }
1054
+ }
1055
+ catch (e_1_1) { e_1 = { error: e_1_1 }; }
1056
+ finally {
1057
+ try {
1058
+ if (_c && !_c.done && (_a = _b.return)) _a.call(_b);
1059
+ }
1060
+ finally { if (e_1) throw e_1.error; }
1061
+ }
1062
+ var replacedTemplate = template;
1063
+ var match;
1064
+ var loopLimit = LOOP_LIMIT;
1065
+ var _loop_1 = function () {
1066
+ if (loopLimit-- < 0) {
1067
+ throw new LimitReachedError('Loop limit reached during parameters replacement in `replaceParameters`');
1068
+ }
1069
+ var precol = match.groups.precol;
1070
+ var parameterName = match.groups.parameterName;
1071
+ if (parameterName === '') {
1072
+ return "continue";
1073
+ }
1074
+ if (parameterName.indexOf('{') !== -1 || parameterName.indexOf('}') !== -1) {
1075
+ throw new PipelineExecutionError('Parameter is already opened or not closed');
1076
+ }
1077
+ if (parameters[parameterName] === undefined) {
1078
+ throw new PipelineExecutionError("Parameter {".concat(parameterName, "} is not defined"));
1079
+ }
1080
+ var parameterValue = parameters[parameterName];
1081
+ if (parameterValue === undefined) {
1082
+ throw new PipelineExecutionError("Parameter {".concat(parameterName, "} is not defined"));
1083
+ }
1084
+ parameterValue = parameterValue.toString();
1085
+ if (parameterValue.includes('\n') && /^\s*\W{0,3}\s*$/.test(precol)) {
1086
+ parameterValue = parameterValue
1087
+ .split('\n')
1088
+ .map(function (line, index) { return (index === 0 ? line : "".concat(precol).concat(line)); })
1089
+ .join('\n');
1090
+ }
1091
+ replacedTemplate =
1092
+ replacedTemplate.substring(0, match.index + precol.length) +
1093
+ parameterValue +
1094
+ replacedTemplate.substring(match.index + precol.length + parameterName.length + 2);
1095
+ };
1096
+ while ((match = /^(?<precol>.*){(?<parameterName>\w+)}(.*)/m /* <- Not global */
1097
+ .exec(replacedTemplate))) {
1098
+ _loop_1();
1099
+ }
1100
+ // [💫] Check if there are parameters that are not closed properly
1101
+ if (/{\w+$/.test(replacedTemplate)) {
1102
+ throw new PipelineExecutionError('Parameter is not closed');
1103
+ }
1104
+ // [💫] Check if there are parameters that are not opened properly
1105
+ if (/^\w+}/.test(replacedTemplate)) {
1106
+ throw new PipelineExecutionError('Parameter is not opened');
1107
+ }
1108
+ return replacedTemplate;
1109
+ }
1110
+
1111
+ /**
1112
+ * Function computeUsage will create price per one token based on the string value found on openai page
1113
+ *
1114
+ * @private within the repository, used only as internal helper for `OPENAI_MODELS`
1115
+ */
1116
+ function computeUsage(value) {
1117
+ var _a = __read(value.split(' / '), 2), price = _a[0], tokens = _a[1];
1118
+ return parseFloat(price.replace('$', '')) / parseFloat(tokens.replace('M tokens', '')) / 1000000;
1119
+ }
1120
+
1121
+ /**
1122
+ * List of available Anthropic Claude models with pricing
1123
+ *
1124
+ * Note: Done at 2024-05-25
1125
+ *
1126
+ * @see https://docs.anthropic.com/en/docs/models-overview
1127
+ * @public exported from `@promptbook/anthropic-claude`
1128
+ */
1129
+ var ANTHROPIC_CLAUDE_MODELS = [
1130
+ {
1131
+ modelVariant: 'CHAT',
1132
+ modelTitle: 'Claude 3 Opus',
1133
+ modelName: 'claude-3-opus-20240229',
1134
+ pricing: {
1135
+ prompt: computeUsage("$15.00 / 1M tokens"),
1136
+ output: computeUsage("$75.00 / 1M tokens"),
1137
+ },
1138
+ },
1139
+ {
1140
+ modelVariant: 'CHAT',
1141
+ modelTitle: 'Claude 3 Sonnet',
1142
+ modelName: 'claude-3-sonnet-20240229',
1143
+ pricing: {
1144
+ prompt: computeUsage("$3.00 / 1M tokens"),
1145
+ output: computeUsage("$15.00 / 1M tokens"),
1146
+ },
1147
+ },
1148
+ {
1149
+ modelVariant: 'CHAT',
1150
+ modelTitle: 'Claude 3 Haiku',
1151
+ modelName: ' claude-3-haiku-20240307',
1152
+ pricing: {
1153
+ prompt: computeUsage("$0.25 / 1M tokens"),
1154
+ output: computeUsage("$1.25 / 1M tokens"),
1155
+ },
1156
+ },
1157
+ {
1158
+ modelVariant: 'CHAT',
1159
+ modelTitle: 'Claude 2.1',
1160
+ modelName: 'claude-2.1',
1161
+ pricing: {
1162
+ prompt: computeUsage("$8.00 / 1M tokens"),
1163
+ output: computeUsage("$24.00 / 1M tokens"),
1164
+ },
1165
+ },
1166
+ {
1167
+ modelVariant: 'CHAT',
1168
+ modelTitle: 'Claude 2',
1169
+ modelName: 'claude-2.0',
1170
+ pricing: {
1171
+ prompt: computeUsage("$8.00 / 1M tokens"),
1172
+ output: computeUsage("$24.00 / 1M tokens"),
1173
+ },
1174
+ },
1175
+ {
1176
+ modelVariant: 'CHAT',
1177
+ modelTitle: ' Claude Instant 1.2',
1178
+ modelName: 'claude-instant-1.2',
1179
+ pricing: {
1180
+ prompt: computeUsage("$0.80 / 1M tokens"),
1181
+ output: computeUsage("$2.40 / 1M tokens"),
1182
+ },
1183
+ },
1184
+ // TODO: !!! Claude 1 and 2 has also completion versions - ask Hoagy
1185
+ ];
1186
+ /**
1187
+ * Note: [🤖] Add models of new variant
1188
+ * TODO: [🧠] !!! Add embedding models OR Anthropic has only chat+completion models?
1189
+ * TODO: [🧠] Some mechanism to propagate unsureness
1190
+ * TODO: [🧠][👮‍♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...
1191
+ * TODO: [🕚] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing
1192
+ */
1193
+
1194
+ /**
1195
+ * Execution Tools for calling Anthropic Claude API.
1196
+ *
1197
+ * @public exported from `@promptbook/anthropic-claude`
1198
+ * @deprecated use `createAnthropicClaudeExecutionTools` instead
1199
+ */
1200
+ var AnthropicClaudeExecutionTools = /** @class */ (function () {
1201
+ /**
1202
+ * Creates Anthropic Claude Execution Tools.
1203
+ *
1204
+ * @param options which are relevant are directly passed to the Anthropic Claude client
1205
+ */
1206
+ function AnthropicClaudeExecutionTools(options) {
1207
+ if (options === void 0) { options = { isProxied: false }; }
1208
+ this.options = options;
1209
+ // Note: Passing only Anthropic Claude relevant options to Anthropic constructor
1210
+ var anthropicOptions = __assign({}, options);
1211
+ delete anthropicOptions.isVerbose;
1212
+ delete anthropicOptions.isProxied;
1213
+ this.client = new Anthropic(anthropicOptions);
1214
+ }
1215
+ Object.defineProperty(AnthropicClaudeExecutionTools.prototype, "title", {
1216
+ get: function () {
1217
+ return 'Anthropic Claude';
1218
+ },
1219
+ enumerable: false,
1220
+ configurable: true
1221
+ });
1222
+ Object.defineProperty(AnthropicClaudeExecutionTools.prototype, "description", {
1223
+ get: function () {
1224
+ return 'Use all models provided by Anthropic Claude';
1225
+ },
1226
+ enumerable: false,
1227
+ configurable: true
1228
+ });
1229
+ /**
1230
+ * Calls Anthropic Claude API to use a chat model.
1231
+ */
1232
+ AnthropicClaudeExecutionTools.prototype.callChatModel = function (prompt) {
1233
+ return __awaiter(this, void 0, void 0, function () {
1234
+ var content, parameters, modelRequirements, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
1235
+ return __generator(this, function (_a) {
1236
+ switch (_a.label) {
1237
+ case 0:
1238
+ if (this.options.isVerbose) {
1239
+ console.info('💬 Anthropic Claude callChatModel call');
1240
+ }
1241
+ content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
1242
+ // TODO: [☂] Use here more modelRequirements
1243
+ if (modelRequirements.modelVariant !== 'CHAT') {
1244
+ throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
1245
+ }
1246
+ modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
1247
+ rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
1248
+ rawRequest = {
1249
+ model: modelRequirements.modelName || this.getDefaultChatModel().modelName,
1250
+ max_tokens: modelRequirements.maxTokens || 4096,
1251
+ // <- TODO: [🌾] Make some global max cap for maxTokens
1252
+ temperature: modelRequirements.temperature,
1253
+ system: modelRequirements.systemMessage,
1254
+ // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
1255
+ // <- Note: [🧆]
1256
+ messages: [
1257
+ {
1258
+ role: 'user',
1259
+ content: rawPromptContent,
1260
+ },
1261
+ ],
1262
+ // TODO: Is here some equivalent of user identification?> user: this.options.user,
1263
+ };
1264
+ start = getCurrentIsoDate();
1265
+ if (this.options.isVerbose) {
1266
+ console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
1267
+ }
1268
+ return [4 /*yield*/, this.client.messages.create(rawRequest)];
1269
+ case 1:
1270
+ rawResponse = _a.sent();
1271
+ if (this.options.isVerbose) {
1272
+ console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
1273
+ }
1274
+ if (!rawResponse.content[0]) {
1275
+ throw new PipelineExecutionError('No content from Anthropic Claude');
1276
+ }
1277
+ if (rawResponse.content.length > 1) {
1278
+ throw new PipelineExecutionError('More than one content blocks from Anthropic Claude');
1279
+ }
1280
+ resultContent = rawResponse.content[0].text;
1281
+ // eslint-disable-next-line prefer-const
1282
+ complete = getCurrentIsoDate();
1283
+ usage = {
1284
+ price: { value: 0, isUncertain: true } /* <- TODO: [🐞] Compute usage */,
1285
+ input: __assign({ tokensCount: uncertainNumber(rawResponse.usage.input_tokens) }, computeUsageCounts(prompt.content)),
1286
+ output: __assign({ tokensCount: uncertainNumber(rawResponse.usage.output_tokens) }, computeUsageCounts(prompt.content)),
1287
+ };
1288
+ return [2 /*return*/, {
1289
+ content: resultContent,
1290
+ modelName: rawResponse.model,
1291
+ timing: {
1292
+ start: start,
1293
+ complete: complete,
1294
+ },
1295
+ usage: usage,
1296
+ rawPromptContent: rawPromptContent,
1297
+ rawRequest: rawRequest,
1298
+ rawResponse: rawResponse,
1299
+ // <- [🗯]
1300
+ }];
1301
+ }
1302
+ });
1303
+ });
1304
+ };
1305
+ /*
1306
+ TODO: [👏]
1307
+ public async callCompletionModel(
1308
+ prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements'>,
1309
+ ): Promise<PromptCompletionResult> {
1310
+
1311
+ if (this.options.isVerbose) {
1312
+ console.info('🖋 Anthropic Claude callCompletionModel call');
1313
+ }
1314
+
1315
+ const { content, parameters, modelRequirements } = prompt;
1316
+
1317
+ // TODO: [☂] Use here more modelRequirements
1318
+ if (modelRequirements.modelVariant !== 'COMPLETION') {
1319
+ throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
1320
+ }
1321
+
1322
+ const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
1323
+ const modelSettings = {
1324
+ model: modelName,
1325
+ max_tokens: modelRequirements.maxTokens || 2000, // <- Note: 2000 is for lagacy reasons
1326
+ // <- TODO: [🌾] Make some global max cap for maxTokens
1327
+ // <- TODO: Use here `systemMessage`, `temperature` and `seed`
1328
+ };
1329
+
1330
+ const rawRequest: xxxx.Completions.CompletionCreateParamsNonStreaming = {
1331
+ ...modelSettings,
1332
+ prompt: rawPromptContent,
1333
+ user: this.options.user,
1334
+ };
1335
+ const start: string_date_iso8601 = getCurrentIsoDate();
1336
+ let complete: string_date_iso8601;
1337
+
1338
+ if (this.options.isVerbose) {
1339
+ console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
1340
+ }
1341
+ const rawResponse = await this.client.completions.create(rawRequest);
1342
+ if (this.options.isVerbose) {
1343
+ console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
1344
+ }
1345
+
1346
+ if (!rawResponse.choices[0]) {
1347
+ throw new PipelineExecutionError('No choises from Anthropic Claude');
1348
+ }
1349
+
1350
+ if (rawResponse.choices.length > 1) {
1351
+ // TODO: This should be maybe only warning
1352
+ throw new PipelineExecutionError('More than one choise from Anthropic Claude');
1353
+ }
1354
+
1355
+ const resultContent = rawResponse.choices[0].text;
1356
+ // eslint-disable-next-line prefer-const
1357
+ complete = getCurrentIsoDate();
1358
+ const usage = { price: 'UNKNOWN', inputTokens: 0, outputTokens: 0 /* <- TODO: [🐞] Compute usage * / } satisfies PromptResultUsage;
1359
+
1360
+
1361
+
1362
+ return {
1363
+ content: resultContent,
1364
+ modelName: rawResponse.model || model,
1365
+ timing: {
1366
+ start,
1367
+ complete,
1368
+ },
1369
+ usage,
1370
+ rawResponse,
1371
+ // <- [🗯]
1372
+ };
1373
+ }
1374
+ */
1375
+ // <- Note: [🤖] callXxxModel
1376
+ /**
1377
+ * Get the model that should be used as default
1378
+ */
1379
+ AnthropicClaudeExecutionTools.prototype.getDefaultModel = function (defaultModelName) {
1380
+ var model = ANTHROPIC_CLAUDE_MODELS.find(function (_a) {
1381
+ var modelName = _a.modelName;
1382
+ return modelName.startsWith(defaultModelName);
1383
+ });
1384
+ if (model === undefined) {
1385
+ throw new UnexpectedError(spaceTrim$1(function (block) {
1386
+ return "\n Cannot find model in OpenAI models with name \"".concat(defaultModelName, "\" which should be used as default.\n\n Available models:\n ").concat(block(ANTHROPIC_CLAUDE_MODELS.map(function (_a) {
1387
+ var modelName = _a.modelName;
1388
+ return "- \"".concat(modelName, "\"");
1389
+ }).join('\n')), "\n\n ");
1390
+ }));
1391
+ }
1392
+ return model;
1393
+ };
1394
+ /**
1395
+ * Default model for chat variant.
1396
+ */
1397
+ AnthropicClaudeExecutionTools.prototype.getDefaultChatModel = function () {
1398
+ return this.getDefaultModel('claude-3-opus');
1399
+ };
1400
+ // <- Note: [🤖] getDefaultXxxModel
1401
+ /**
1402
+ * List all available Anthropic Claude models that can be used
1403
+ */
1404
+ AnthropicClaudeExecutionTools.prototype.listModels = function () {
1405
+ return ANTHROPIC_CLAUDE_MODELS;
1406
+ };
1407
+ return AnthropicClaudeExecutionTools;
1408
+ }());
1409
+ /**
1410
+ * TODO: [🍆] JSON mode
1411
+ * TODO: [🧠] Maybe handle errors via transformAnthropicError (like transformAzureError)
1412
+ * TODO: Maybe Create some common util for callChatModel and callCompletionModel
1413
+ * TODO: Maybe make custom AnthropicError
1414
+ * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
1415
+ * TODO: [🍜] !!!!!! Auto use anonymous server in browser
1416
+ * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
1417
+ * TODO: [📅] Maybe instead of `RemoteLlmExecutionToolsOptions` use `proxyWithAnonymousRemoteServer` (if implemented)
1418
+ */
1419
+
1420
+ /**
1421
+ * Execution Tools for calling Anthropic Claude API.
1422
+ *
1423
+ * @public exported from `@promptbook/anthropic-claude`
1424
+ */
1425
+ function createAnthropicClaudeExecutionTools(options) {
1426
+ if (options.isProxied) {
1427
+ return new RemoteLlmExecutionTools(__assign(__assign({}, options), { isAnonymous: true, llmToolsConfiguration: [
1428
+ {
1429
+ title: 'Anthropic Claude (proxied)',
1430
+ packageName: '@promptbook/anthropic-claude',
1431
+ className: 'AnthropicClaudeExecutionTools',
1432
+ options: __assign(__assign({}, options), { isProxied: false }),
1433
+ },
1434
+ ] }));
1435
+ }
1436
+ return new AnthropicClaudeExecutionTools(options);
1437
+ }
1438
+ /**
1439
+ * TODO: !!!!!! Make this with all LLM providers
1440
+ * TODO: !!!!!! Maybe change all `new AnthropicClaudeExecutionTools` -> `createAnthropicClaudeExecutionTools` in manual
1441
+ */
1442
+
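A minimal usage sketch of the factory above (illustrative only; the `apiKey` option and the env variable name are assumptions, since the options are forwarded to the Anthropic SDK client, while `isProxied` comes straight from the code above):

    import { createAnthropicClaudeExecutionTools } from '@promptbook/anthropic-claude';

    // Direct mode: the Anthropic SDK is called from this process
    const anthropicTools = createAnthropicClaudeExecutionTools({
        apiKey: process.env.ANTHROPIC_CLAUDE_API_KEY, // <- Assumption: standard Anthropic SDK option
        isVerbose: true,
    });

    // Proxied mode: the same options are wrapped into RemoteLlmExecutionTools with `isAnonymous: true`
    // const proxiedTools = createAnthropicClaudeExecutionTools({ isProxied: true, /* ...remote server options... */ });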
1443
+ /**
1444
+ * List of available OpenAI models with pricing
1445
+ *
1446
+ * Note: Done at 2024-05-20
1447
+ *
1448
+ * @see https://platform.openai.com/docs/models/
1449
+ * @see https://openai.com/api/pricing/
1450
+ * @public exported from `@promptbook/openai`
1451
+ */
1452
+ var OPENAI_MODELS = [
1453
+ /*/
1454
+ {
1455
+ modelTitle: 'dall-e-3',
1456
+ modelName: 'dall-e-3',
1457
+ },
1458
+ /**/
1459
+ /*/
1460
+ {
1461
+ modelTitle: 'whisper-1',
1462
+ modelName: 'whisper-1',
1463
+ },
1464
+ /**/
1465
+ /**/
1466
+ {
1467
+ modelVariant: 'COMPLETION',
1468
+ modelTitle: 'davinci-002',
1469
+ modelName: 'davinci-002',
1470
+ pricing: {
1471
+ prompt: computeUsage("$2.00 / 1M tokens"),
1472
+ output: computeUsage("$2.00 / 1M tokens"), // <- not sure
1473
+ },
1474
+ },
1475
+ /**/
1476
+ /*/
1477
+ {
1478
+ modelTitle: 'dall-e-2',
1479
+ modelName: 'dall-e-2',
1480
+ },
1481
+ /**/
1482
+ /**/
1483
+ {
1484
+ modelVariant: 'CHAT',
1485
+ modelTitle: 'gpt-3.5-turbo-16k',
1486
+ modelName: 'gpt-3.5-turbo-16k',
1487
+ pricing: {
1488
+ prompt: computeUsage("$3.00 / 1M tokens"),
1489
+ output: computeUsage("$4.00 / 1M tokens"),
1490
+ },
1491
+ },
1492
+ /**/
1493
+ /*/
1494
+ {
1495
+ modelTitle: 'tts-1-hd-1106',
1496
+ modelName: 'tts-1-hd-1106',
1497
+ },
1498
+ /**/
1499
+ /*/
1500
+ {
1501
+ modelTitle: 'tts-1-hd',
1502
+ modelName: 'tts-1-hd',
1503
+ },
1504
+ /**/
1505
+ /**/
1506
+ {
1507
+ modelVariant: 'CHAT',
1508
+ modelTitle: 'gpt-4',
1509
+ modelName: 'gpt-4',
1510
+ pricing: {
1511
+ prompt: computeUsage("$30.00 / 1M tokens"),
1512
+ output: computeUsage("$60.00 / 1M tokens"),
1513
+ },
1514
+ },
1515
+ /**/
1516
+ /**/
1517
+ {
1518
+ modelVariant: 'CHAT',
1519
+ modelTitle: 'gpt-4-32k',
1520
+ modelName: 'gpt-4-32k',
1521
+ pricing: {
1522
+ prompt: computeUsage("$60.00 / 1M tokens"),
1523
+ output: computeUsage("$120.00 / 1M tokens"),
1524
+ },
1525
+ },
1526
+ /**/
1527
+ /*/
1528
+ {
1529
+ modelVariant: 'CHAT',
1530
+ modelTitle: 'gpt-4-0613',
1531
+ modelName: 'gpt-4-0613',
1532
+ pricing: {
1533
+ prompt: computeUsage(` / 1M tokens`),
1534
+ output: computeUsage(` / 1M tokens`),
1535
+ },
1536
+ },
1537
+ /**/
1538
+ /**/
1539
+ {
1540
+ modelVariant: 'CHAT',
1541
+ modelTitle: 'gpt-4-turbo-2024-04-09',
1542
+ modelName: 'gpt-4-turbo-2024-04-09',
1543
+ pricing: {
1544
+ prompt: computeUsage("$10.00 / 1M tokens"),
1545
+ output: computeUsage("$30.00 / 1M tokens"),
1546
+ },
1547
+ },
1548
+ /**/
1549
+ /**/
1550
+ {
1551
+ modelVariant: 'CHAT',
1552
+ modelTitle: 'gpt-3.5-turbo-1106',
1553
+ modelName: 'gpt-3.5-turbo-1106',
1554
+ pricing: {
1555
+ prompt: computeUsage("$1.00 / 1M tokens"),
1556
+ output: computeUsage("$2.00 / 1M tokens"),
1557
+ },
1558
+ },
1559
+ /**/
1560
+ /**/
1561
+ {
1562
+ modelVariant: 'CHAT',
1563
+ modelTitle: 'gpt-4-turbo',
1564
+ modelName: 'gpt-4-turbo',
1565
+ pricing: {
1566
+ prompt: computeUsage("$10.00 / 1M tokens"),
1567
+ output: computeUsage("$30.00 / 1M tokens"),
1568
+ },
1569
+ },
1570
+ /**/
1571
+ /**/
1572
+ {
1573
+ modelVariant: 'COMPLETION',
1574
+ modelTitle: 'gpt-3.5-turbo-instruct-0914',
1575
+ modelName: 'gpt-3.5-turbo-instruct-0914',
1576
+ pricing: {
1577
+ prompt: computeUsage("$1.50 / 1M tokens"),
1578
+ output: computeUsage("$2.00 / 1M tokens"), // <- For gpt-3.5-turbo-instruct
1579
+ },
1580
+ },
1581
+ /**/
1582
+ /**/
1583
+ {
1584
+ modelVariant: 'COMPLETION',
1585
+ modelTitle: 'gpt-3.5-turbo-instruct',
1586
+ modelName: 'gpt-3.5-turbo-instruct',
1587
+ pricing: {
1588
+ prompt: computeUsage("$1.50 / 1M tokens"),
1589
+ output: computeUsage("$2.00 / 1M tokens"),
1590
+ },
1591
+ },
1592
+ /**/
1593
+ /*/
1594
+ {
1595
+ modelTitle: 'tts-1',
1596
+ modelName: 'tts-1',
1597
+ },
1598
+ /**/
1599
+ /**/
1600
+ {
1601
+ modelVariant: 'CHAT',
1602
+ modelTitle: 'gpt-3.5-turbo',
1603
+ modelName: 'gpt-3.5-turbo',
1604
+ pricing: {
1605
+ prompt: computeUsage("$3.00 / 1M tokens"),
1606
+ output: computeUsage("$6.00 / 1M tokens"), // <- Not sure, refer to gpt-3.5-turbo in Fine-tuning models
1607
+ },
1608
+ },
1609
+ /**/
1610
+ /**/
1611
+ {
1612
+ modelVariant: 'CHAT',
1613
+ modelTitle: 'gpt-3.5-turbo-0301',
1614
+ modelName: 'gpt-3.5-turbo-0301',
1615
+ pricing: {
1616
+ prompt: computeUsage("$1.50 / 1M tokens"),
1617
+ output: computeUsage("$2.00 / 1M tokens"),
1618
+ },
1619
+ },
1620
+ /**/
1621
+ /**/
1622
+ {
1623
+ modelVariant: 'COMPLETION',
1624
+ modelTitle: 'babbage-002',
1625
+ modelName: 'babbage-002',
1626
+ pricing: {
1627
+ prompt: computeUsage("$0.40 / 1M tokens"),
1628
+ output: computeUsage("$0.40 / 1M tokens"), // <- Not sure
1629
+ },
1630
+ },
1631
+ /**/
1632
+ /**/
1633
+ {
1634
+ modelVariant: 'CHAT',
1635
+ modelTitle: 'gpt-4-1106-preview',
1636
+ modelName: 'gpt-4-1106-preview',
1637
+ pricing: {
1638
+ prompt: computeUsage("$10.00 / 1M tokens"),
1639
+ output: computeUsage("$30.00 / 1M tokens"),
1640
+ },
1641
+ },
1642
+ /**/
1643
+ /**/
1644
+ {
1645
+ modelVariant: 'CHAT',
1646
+ modelTitle: 'gpt-4-0125-preview',
1647
+ modelName: 'gpt-4-0125-preview',
1648
+ pricing: {
1649
+ prompt: computeUsage("$10.00 / 1M tokens"),
1650
+ output: computeUsage("$30.00 / 1M tokens"),
1651
+ },
1652
+ },
1653
+ /**/
1654
+ /*/
1655
+ {
1656
+ modelTitle: 'tts-1-1106',
1657
+ modelName: 'tts-1-1106',
1658
+ },
1659
+ /**/
1660
+ /**/
1661
+ {
1662
+ modelVariant: 'CHAT',
1663
+ modelTitle: 'gpt-3.5-turbo-0125',
1664
+ modelName: 'gpt-3.5-turbo-0125',
1665
+ pricing: {
1666
+ prompt: computeUsage("$0.50 / 1M tokens"),
1667
+ output: computeUsage("$1.50 / 1M tokens"),
1668
+ },
1669
+ },
1670
+ /**/
1671
+ /**/
1672
+ {
1673
+ modelVariant: 'CHAT',
1674
+ modelTitle: 'gpt-4-turbo-preview',
1675
+ modelName: 'gpt-4-turbo-preview',
1676
+ pricing: {
1677
+ prompt: computeUsage("$10.00 / 1M tokens"),
1678
+ output: computeUsage("$30.00 / 1M tokens"), // <- Not sure, just for gpt-4-turbo
1679
+ },
1680
+ },
1681
+ /**/
1682
+ /**/
1683
+ {
1684
+ modelVariant: 'EMBEDDING',
1685
+ modelTitle: 'text-embedding-3-large',
1686
+ modelName: 'text-embedding-3-large',
1687
+ pricing: {
1688
+ prompt: computeUsage("$0.13 / 1M tokens"),
1689
+ // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
1690
+ output: 0, // <- Note: [🆖] In Embedding models you don't pay for output
1691
+ },
1692
+ },
1693
+ /**/
1694
+ /**/
1695
+ {
1696
+ modelVariant: 'EMBEDDING',
1697
+ modelTitle: 'text-embedding-3-small',
1698
+ modelName: 'text-embedding-3-small',
1699
+ pricing: {
1700
+ prompt: computeUsage("$0.02 / 1M tokens"),
1701
+ // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
1702
+ output: 0, // <- Note: [🆖] In Embedding models you don't pay for output
1703
+ },
1704
+ },
1705
+ /**/
1706
+ /**/
1707
+ {
1708
+ modelVariant: 'CHAT',
1709
+ modelTitle: 'gpt-3.5-turbo-0613',
1710
+ modelName: 'gpt-3.5-turbo-0613',
1711
+ pricing: {
1712
+ prompt: computeUsage("$1.50 / 1M tokens"),
1713
+ output: computeUsage("$2.00 / 1M tokens"),
1714
+ },
1715
+ },
1716
+ /**/
1717
+ /**/
1718
+ {
1719
+ modelVariant: 'EMBEDDING',
1720
+ modelTitle: 'text-embedding-ada-002',
1721
+ modelName: 'text-embedding-ada-002',
1722
+ pricing: {
1723
+ prompt: computeUsage("$0.1 / 1M tokens"),
1724
+ // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
1725
+ output: 0, // <- Note: [🆖] In Embedding models you don't pay for output
1726
+ },
1727
+ },
1728
+ /**/
1729
+ /*/
1730
+ {
1731
+ modelVariant: 'CHAT',
1732
+ modelTitle: 'gpt-4-1106-vision-preview',
1733
+ modelName: 'gpt-4-1106-vision-preview',
1734
+ },
1735
+ /**/
1736
+ /*/
1737
+ {
1738
+ modelVariant: 'CHAT',
1739
+ modelTitle: 'gpt-4-vision-preview',
1740
+ modelName: 'gpt-4-vision-preview',
1741
+ pricing: {
1742
+ prompt: computeUsage(`$10.00 / 1M tokens`),
1743
+ output: computeUsage(`$30.00 / 1M tokens`),
1744
+ },
1745
+ },
1746
+ /**/
1747
+ /**/
1748
+ {
1749
+ modelVariant: 'CHAT',
1750
+ modelTitle: 'gpt-4o-2024-05-13',
1751
+ modelName: 'gpt-4o-2024-05-13',
1752
+ pricing: {
1753
+ prompt: computeUsage("$5.00 / 1M tokens"),
1754
+ output: computeUsage("$15.00 / 1M tokens"),
1755
+ },
1756
+ },
1757
+ /**/
1758
+ /**/
1759
+ {
1760
+ modelVariant: 'CHAT',
1761
+ modelTitle: 'gpt-4o',
1762
+ modelName: 'gpt-4o',
1763
+ pricing: {
1764
+ prompt: computeUsage("$5.00 / 1M tokens"),
1765
+ output: computeUsage("$15.00 / 1M tokens"),
1766
+ },
1767
+ },
1768
+ /**/
1769
+ /**/
1770
+ {
1771
+ modelVariant: 'CHAT',
1772
+ modelTitle: 'gpt-3.5-turbo-16k-0613',
1773
+ modelName: 'gpt-3.5-turbo-16k-0613',
1774
+ pricing: {
1775
+ prompt: computeUsage("$3.00 / 1M tokens"),
1776
+ output: computeUsage("$4.00 / 1M tokens"),
1777
+ },
1778
+ },
1779
+ /**/
1780
+ ];
1781
+ /**
1782
+ * Note: [🤖] Add models of new variant
1783
+ * TODO: [🧠] Some mechanism to propagate uncertainty
1784
+ * TODO: [🕚][👮‍♀️] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing
1785
+ * TODO: [🧠][👮‍♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...
1786
+ * @see https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4
1787
+ * @see https://openai.com/api/pricing/
1788
+ * @see /other/playground/playground.ts
1789
+ * TODO: [🍓] Make better
1790
+ * TODO: Change model titles to human-readable ones, e.g. "gpt-4-turbo-2024-04-09" -> "GPT-4 Turbo (2024-04-09)"
1791
+ * TODO: [🚸] Not all models are compatible with JSON mode, add this information here and use it
1792
+ */
1793
+
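For orientation, a rough cost estimate can be derived from the pricing table above in the same way `computeOpenaiUsage` below does it. A sketch, assuming `pricing.prompt` and `pricing.output` are USD per single token as produced by `computeUsage`; the token counts are made up:

    // Hypothetical token counts, for illustration only
    const model = OPENAI_MODELS.find(({ modelName }) => modelName === 'gpt-4o');
    if (model !== undefined && model.pricing !== undefined) {
        const inputTokens = 1000;
        const outputTokens = 500;
        const priceUsd = inputTokens * model.pricing.prompt + outputTokens * model.pricing.output;
        console.info(priceUsd); // ~0.0125 at $5.00 / 1M input and $15.00 / 1M output tokens
    }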
1794
+ /**
1795
+ * Execution Tools for calling Azure OpenAI API.
1796
+ *
1797
+ * @public exported from `@promptbook/azure-openai`
1798
+ */
1799
+ var AzureOpenAiExecutionTools = /** @class */ (function () {
1800
+ /**
1801
+ * Creates OpenAI Execution Tools.
1802
+ *
1803
+ * @param options which are relevant are directly passed to the OpenAI client
1804
+ */
1805
+ function AzureOpenAiExecutionTools(options) {
1806
+ this.options = options;
1807
+ this.client = new OpenAIClient("https://".concat(options.resourceName, ".openai.azure.com/"), new AzureKeyCredential(options.apiKey));
1808
+ }
1809
+ Object.defineProperty(AzureOpenAiExecutionTools.prototype, "title", {
1810
+ get: function () {
1811
+ return 'Azure OpenAI';
1812
+ },
1813
+ enumerable: false,
1814
+ configurable: true
1815
+ });
1816
+ Object.defineProperty(AzureOpenAiExecutionTools.prototype, "description", {
1817
+ get: function () {
1818
+ return 'Use all models trained by OpenAI provided by Azure';
1819
+ },
1820
+ enumerable: false,
1821
+ configurable: true
1822
+ });
1823
+ /**
1824
+ * Calls Azure OpenAI API to use a chat model.
1825
+ */
1826
+ AzureOpenAiExecutionTools.prototype.callChatModel = function (prompt) {
1827
+ var _a, _b;
1828
+ return __awaiter(this, void 0, void 0, function () {
1829
+ var content, parameters, modelRequirements, modelName, modelSettings, rawPromptContent, messages, start, complete, rawRequest, rawResponse, resultContent, usage, error_1;
1830
+ var _c;
1831
+ return __generator(this, function (_d) {
1832
+ switch (_d.label) {
1833
+ case 0:
1834
+ if (this.options.isVerbose) {
1835
+ console.info('💬 OpenAI callChatModel call');
1836
+ }
1837
+ content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
1838
+ // TODO: [☂] Use here more modelRequirements
1839
+ if (modelRequirements.modelVariant !== 'CHAT') {
1840
+ throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
1841
+ }
1842
+ _d.label = 1;
1843
+ case 1:
1844
+ _d.trys.push([1, 3, , 4]);
1845
+ modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
1846
+ modelSettings = {
1847
+ maxTokens: modelRequirements.maxTokens,
1848
+ // <- TODO: [🌾] Make some global max cap for maxTokens
1849
+ temperature: modelRequirements.temperature,
1850
+ user: this.options.user,
1851
+ // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
1852
+ // <- Note: [🧆]
1853
+ };
1854
+ rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
1855
+ messages = __spreadArray(__spreadArray([], __read((modelRequirements.systemMessage === undefined
1856
+ ? []
1857
+ : [
1858
+ {
1859
+ role: 'system',
1860
+ content: modelRequirements.systemMessage,
1861
+ },
1862
+ ])), false), [
1863
+ {
1864
+ role: 'user',
1865
+ content: rawPromptContent,
1866
+ },
1867
+ ], false);
1868
+ start = getCurrentIsoDate();
1869
+ complete = void 0;
1870
+ if (this.options.isVerbose) {
1871
+ console.info(colors.bgWhite('messages'), JSON.stringify(messages, null, 4));
1872
+ }
1873
+ rawRequest = [modelName, messages, modelSettings];
1874
+ return [4 /*yield*/, (_c = this.client).getChatCompletions.apply(_c, __spreadArray([], __read(rawRequest), false))];
1875
+ case 2:
1876
+ rawResponse = _d.sent();
1877
+ if (this.options.isVerbose) {
1878
+ console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
1879
+ }
1880
+ if (!rawResponse.choices[0]) {
1881
+ throw new PipelineExecutionError('No choices from Azure OpenAI');
1882
+ }
1883
+ if (rawResponse.choices.length > 1) {
1884
+ // TODO: Maybe this should be only a warning
1885
+ throw new PipelineExecutionError('More than one choice from Azure OpenAI');
1886
+ }
1887
+ if (!rawResponse.choices[0].message || !rawResponse.choices[0].message.content) {
1888
+ throw new PipelineExecutionError('Empty response from Azure OpenAI');
1889
+ }
1890
+ resultContent = rawResponse.choices[0].message.content;
1891
+ // eslint-disable-next-line prefer-const
1892
+ complete = getCurrentIsoDate();
1893
+ usage = {
1894
+ price: uncertainNumber() /* <- TODO: [🐞] Compute usage */,
1895
+ input: __assign({ tokensCount: uncertainNumber((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.promptTokens) }, computeUsageCounts(prompt.content)),
1896
+ output: __assign({ tokensCount: uncertainNumber((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completionTokens) }, computeUsageCounts(resultContent)),
1897
+ };
1898
+ return [2 /*return*/, {
1899
+ content: resultContent,
1900
+ modelName: modelName,
1901
+ timing: {
1902
+ start: start,
1903
+ complete: complete,
1904
+ },
1905
+ usage: usage,
1906
+ rawPromptContent: rawPromptContent,
1907
+ rawRequest: rawRequest,
1908
+ rawResponse: rawResponse,
1909
+ // <- [🗯]
1910
+ }];
1911
+ case 3:
1912
+ error_1 = _d.sent();
1913
+ throw this.transformAzureError(error_1);
1914
+ case 4: return [2 /*return*/];
1915
+ }
1916
+ });
1917
+ });
1918
+ };
1919
+ /**
1920
+ * Calls Azure OpenAI API to use a completion model.
1921
+ */
1922
+ AzureOpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
1923
+ var _a, _b;
1924
+ return __awaiter(this, void 0, void 0, function () {
1925
+ var content, parameters, modelRequirements, modelName, modelSettings, start, complete, rawPromptContent, rawRequest, rawResponse, resultContent, usage, error_2;
1926
+ var _c;
1927
+ return __generator(this, function (_d) {
1928
+ switch (_d.label) {
1929
+ case 0:
1930
+ if (this.options.isVerbose) {
1931
+ console.info('🖋 OpenAI callCompletionModel call');
1932
+ }
1933
+ content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
1934
+ // TODO: [☂] Use here more modelRequirements
1935
+ if (modelRequirements.modelVariant !== 'COMPLETION') {
1936
+ throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
1937
+ }
1938
+ _d.label = 1;
1939
+ case 1:
1940
+ _d.trys.push([1, 3, , 4]);
1941
+ modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
1942
+ modelSettings = {
1943
+ maxTokens: modelRequirements.maxTokens || 2000,
1944
+ // <- TODO: [🌾] Make some global max cap for maxTokens
1945
+ temperature: modelRequirements.temperature,
1946
+ user: this.options.user,
1947
+ // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
1948
+ // <- Note: [🧆]
1949
+ };
1950
+ start = getCurrentIsoDate();
1951
+ complete = void 0;
1952
+ if (this.options.isVerbose) {
1953
+ console.info(colors.bgWhite('content'), JSON.stringify(content, null, 4));
1954
+ console.info(colors.bgWhite('parameters'), JSON.stringify(parameters, null, 4));
1955
+ }
1956
+ rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
1957
+ rawRequest = [
1958
+ modelName,
1959
+ [rawPromptContent],
1960
+ modelSettings,
1961
+ ];
1962
+ return [4 /*yield*/, (_c = this.client).getCompletions.apply(_c, __spreadArray([], __read(rawRequest), false))];
1963
+ case 2:
1964
+ rawResponse = _d.sent();
1965
+ if (this.options.isVerbose) {
1966
+ console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
1967
+ }
1968
+ if (!rawResponse.choices[0]) {
1969
+ throw new PipelineExecutionError('No choices from OpenAI');
1970
+ }
1971
+ if (rawResponse.choices.length > 1) {
1972
+ // TODO: Maybe this should be only a warning
1973
+ throw new PipelineExecutionError('More than one choice from OpenAI');
1974
+ }
1975
+ resultContent = rawResponse.choices[0].text;
1976
+ // eslint-disable-next-line prefer-const
1977
+ complete = getCurrentIsoDate();
1978
+ usage = {
1979
+ price: uncertainNumber() /* <- TODO: [🐞] Compute usage */,
1980
+ input: __assign({ tokensCount: uncertainNumber((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.promptTokens) }, computeUsageCounts(prompt.content)),
1981
+ output: __assign({ tokensCount: uncertainNumber((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completionTokens) }, computeUsageCounts(resultContent)),
1982
+ };
1983
+ return [2 /*return*/, {
1984
+ content: resultContent,
1985
+ modelName: modelName,
1986
+ timing: {
1987
+ start: start,
1988
+ complete: complete,
1989
+ },
1990
+ usage: usage,
1991
+ rawPromptContent: rawPromptContent,
1992
+ rawRequest: rawRequest,
1993
+ rawResponse: rawResponse,
1994
+ // <- [🗯]
1995
+ }];
1996
+ case 3:
1997
+ error_2 = _d.sent();
1998
+ throw this.transformAzureError(error_2);
1999
+ case 4: return [2 /*return*/];
2000
+ }
2001
+ });
2002
+ });
2003
+ };
2004
+ // <- Note: [🤖] callXxxModel
2005
+ /**
2006
+ * Changes Azure error (which is not a proper Error but a plain object) to a proper Error
2007
+ */
2008
+ AzureOpenAiExecutionTools.prototype.transformAzureError = function (azureError) {
2009
+ if (typeof azureError !== 'object' || azureError === null) {
2010
+ return new PipelineExecutionError("Unknown Azure OpenAI error");
2011
+ }
2012
+ var code = azureError.code, message = azureError.message;
2013
+ return new PipelineExecutionError("".concat(code, ": ").concat(message));
2014
+ };
2015
+ /**
2016
+ * List all available Azure OpenAI models that can be used
2017
+ */
2018
+ AzureOpenAiExecutionTools.prototype.listModels = function () {
2019
+ return __awaiter(this, void 0, void 0, function () {
2020
+ return __generator(this, function (_a) {
2021
+ // TODO: !!! Do some filtering here for which models are really available as deployments
2022
+ // @see https://management.azure.com/subscriptions/subscriptionId/resourceGroups/resourceGroupName/providers/Microsoft.CognitiveServices/accounts/accountName/deployments?api-version=2023-05-01
2023
+ return [2 /*return*/, OPENAI_MODELS.map(function (_a) {
2024
+ var modelTitle = _a.modelTitle, modelName = _a.modelName, modelVariant = _a.modelVariant;
2025
+ return ({
2026
+ modelTitle: "Azure ".concat(modelTitle),
2027
+ modelName: modelName,
2028
+ modelVariant: modelVariant,
2029
+ });
2030
+ })];
2031
+ });
2032
+ });
2033
+ };
2034
+ return AzureOpenAiExecutionTools;
2035
+ }());
2036
+ /**
2037
+ * TODO: Maybe Create some common util for callChatModel and callCompletionModel
2038
+ * TODO: Maybe make custom AzureOpenaiError
2039
+ * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
2040
+ * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
2041
+ */
2042
+
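A minimal usage sketch of the class above, inside an async ES module context, using only the option names visible in the constructor and in `callChatModel` (`resourceName`, `deploymentName`, `apiKey`, `isVerbose`); the env variable name and the `{name}` template syntax of `replaceParameters` are assumptions:

    import { AzureOpenAiExecutionTools } from '@promptbook/azure-openai';

    const azureTools = new AzureOpenAiExecutionTools({
        resourceName: 'my-resource', // <- becomes https://my-resource.openai.azure.com/
        deploymentName: 'gpt-4o', // <- used as the model name when the prompt does not specify one
        apiKey: process.env.AZURE_OPENAI_API_KEY,
        isVerbose: true,
    });

    const result = await azureTools.callChatModel({
        content: 'Hello {name}!',
        parameters: { name: 'world' },
        modelRequirements: { modelVariant: 'CHAT' },
    });
    console.info(result.content, result.usage);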
2043
+ /**
2044
+ * Computes the usage of the OpenAI API based on the response from OpenAI
2045
+ *
2046
+ * @param promptContent The content of the prompt
2047
+ * @param resultContent The content of the result (for embedding prompts or failed prompts pass empty string)
2048
+ * @param rawResponse The raw response from OpenAI API
2049
+ * @throws {PipelineExecutionError} If the usage is not defined in the response from OpenAI
2050
+ * @private internal utility of `OpenAiExecutionTools`
2051
+ */
2052
+ function computeOpenaiUsage(promptContent, // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
2053
+ resultContent, rawResponse) {
2054
+ var _a, _b;
2055
+ if (rawResponse.usage === undefined) {
2056
+ throw new PipelineExecutionError('The usage is not defined in the response from OpenAI');
2057
+ }
2058
+ if (((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.prompt_tokens) === undefined) {
2059
+ throw new PipelineExecutionError('In OpenAI response `usage.prompt_tokens` not defined');
2060
+ }
2061
+ var inputTokens = rawResponse.usage.prompt_tokens;
2062
+ var outputTokens = ((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completion_tokens) || 0;
2063
+ var modelInfo = OPENAI_MODELS.find(function (model) { return model.modelName === rawResponse.model; });
2064
+ var price;
2065
+ if (modelInfo === undefined || modelInfo.pricing === undefined) {
2066
+ price = uncertainNumber();
2067
+ }
2068
+ else {
2069
+ price = uncertainNumber(inputTokens * modelInfo.pricing.prompt + outputTokens * modelInfo.pricing.output);
2070
+ }
2071
+ return {
2072
+ price: price,
2073
+ input: __assign({ tokensCount: uncertainNumber(rawResponse.usage.prompt_tokens) }, computeUsageCounts(promptContent)),
2074
+ output: __assign({ tokensCount: uncertainNumber(outputTokens) }, computeUsageCounts(resultContent)),
2075
+ };
2076
+ }
2077
+
2078
+ /**
2079
+ * Execution Tools for calling OpenAI API.
2080
+ *
2081
+ * @public exported from `@promptbook/openai`
2082
+ */
2083
+ var OpenAiExecutionTools = /** @class */ (function () {
2084
+ /**
2085
+ * Creates OpenAI Execution Tools.
2086
+ *
2087
+ * @param options which are relevant are directly passed to the OpenAI client
2088
+ */
2089
+ function OpenAiExecutionTools(options) {
2090
+ if (options === void 0) { options = {}; }
2091
+ this.options = options;
2092
+ // Note: Passing only OpenAI relevant options to OpenAI constructor
2093
+ var openAiOptions = __assign({}, options);
2094
+ delete openAiOptions.isVerbose;
2095
+ delete openAiOptions.user;
2096
+ this.client = new OpenAI(__assign({}, openAiOptions));
2097
+ }
2098
+ Object.defineProperty(OpenAiExecutionTools.prototype, "title", {
2099
+ get: function () {
2100
+ return 'OpenAI';
2101
+ },
2102
+ enumerable: false,
2103
+ configurable: true
2104
+ });
2105
+ Object.defineProperty(OpenAiExecutionTools.prototype, "description", {
2106
+ get: function () {
2107
+ return 'Use all models provided by OpenAI';
2108
+ },
2109
+ enumerable: false,
2110
+ configurable: true
2111
+ });
2112
+ /**
2113
+ * Calls OpenAI API to use a chat model.
2114
+ */
2115
+ OpenAiExecutionTools.prototype.callChatModel = function (prompt) {
2116
+ return __awaiter(this, void 0, void 0, function () {
2117
+ var content, parameters, modelRequirements, expectFormat, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
2118
+ return __generator(this, function (_a) {
2119
+ switch (_a.label) {
2120
+ case 0:
2121
+ if (this.options.isVerbose) {
2122
+ console.info('💬 OpenAI callChatModel call', { prompt: prompt });
2123
+ }
2124
+ content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements, expectFormat = prompt.expectFormat;
2125
+ // TODO: [☂] Use here more modelRequirements
2126
+ if (modelRequirements.modelVariant !== 'CHAT') {
2127
+ throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
2128
+ }
2129
+ modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
2130
+ modelSettings = {
2131
+ model: modelName,
2132
+ max_tokens: modelRequirements.maxTokens,
2133
+ // <- TODO: [🌾] Make some global max cap for maxTokens
2134
+ temperature: modelRequirements.temperature,
2135
+ // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
2136
+ // <- Note: [🧆]
2137
+ };
2138
+ if (expectFormat === 'JSON') {
2139
+ modelSettings.response_format = {
2140
+ type: 'json_object',
2141
+ };
2142
+ }
2143
+ rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
2144
+ rawRequest = __assign(__assign({}, modelSettings), { messages: __spreadArray(__spreadArray([], __read((modelRequirements.systemMessage === undefined
2145
+ ? []
2146
+ : [
2147
+ {
2148
+ role: 'system',
2149
+ content: modelRequirements.systemMessage,
2150
+ },
2151
+ ])), false), [
2152
+ {
2153
+ role: 'user',
2154
+ content: rawPromptContent,
2155
+ },
2156
+ ], false), user: this.options.user });
2157
+ start = getCurrentIsoDate();
2158
+ if (this.options.isVerbose) {
2159
+ console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
2160
+ }
2161
+ return [4 /*yield*/, this.client.chat.completions.create(rawRequest)];
2162
+ case 1:
2163
+ rawResponse = _a.sent();
2164
+ if (this.options.isVerbose) {
2165
+ console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
2166
+ }
2167
+ if (!rawResponse.choices[0]) {
2168
+ throw new PipelineExecutionError('No choices from OpenAI');
2169
+ }
2170
+ if (rawResponse.choices.length > 1) {
2171
+ // TODO: Maybe this should be only a warning
2172
+ throw new PipelineExecutionError('More than one choice from OpenAI');
2173
+ }
2174
+ resultContent = rawResponse.choices[0].message.content;
2175
+ // eslint-disable-next-line prefer-const
2176
+ complete = getCurrentIsoDate();
2177
+ usage = computeOpenaiUsage(content, resultContent || '', rawResponse);
2178
+ if (resultContent === null) {
2179
+ throw new PipelineExecutionError('No response message from OpenAI');
2180
+ }
2181
+ return [2 /*return*/, {
2182
+ content: resultContent,
2183
+ modelName: rawResponse.model || modelName,
2184
+ timing: {
2185
+ start: start,
2186
+ complete: complete,
2187
+ },
2188
+ usage: usage,
2189
+ rawPromptContent: rawPromptContent,
2190
+ rawRequest: rawRequest,
2191
+ rawResponse: rawResponse,
2192
+ // <- [🗯]
2193
+ }];
2194
+ }
2195
+ });
2196
+ });
2197
+ };
2198
+ /**
2199
+ * Calls OpenAI API to use a completion model.
2200
+ */
2201
+ OpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
2202
+ return __awaiter(this, void 0, void 0, function () {
2203
+ var content, parameters, modelRequirements, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
2204
+ return __generator(this, function (_a) {
2205
+ switch (_a.label) {
2206
+ case 0:
2207
+ if (this.options.isVerbose) {
2208
+ console.info('🖋 OpenAI callCompletionModel call', { prompt: prompt });
2209
+ }
2210
+ content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
2211
+ // TODO: [☂] Use here more modelRequirements
2212
+ if (modelRequirements.modelVariant !== 'COMPLETION') {
2213
+ throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
2214
+ }
2215
+ modelName = modelRequirements.modelName || this.getDefaultCompletionModel().modelName;
2216
+ modelSettings = {
2217
+ model: modelName,
2218
+ max_tokens: modelRequirements.maxTokens || 2000,
2219
+ // <- TODO: [🌾] Make some global max cap for maxTokens
2220
+ temperature: modelRequirements.temperature,
2221
+ // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
2222
+ // <- Note: [🧆]
2223
+ };
2224
+ rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
2225
+ rawRequest = __assign(__assign({}, modelSettings), { prompt: rawPromptContent, user: this.options.user });
2226
+ start = getCurrentIsoDate();
2227
+ if (this.options.isVerbose) {
2228
+ console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
2229
+ }
2230
+ return [4 /*yield*/, this.client.completions.create(rawRequest)];
2231
+ case 1:
2232
+ rawResponse = _a.sent();
2233
+ if (this.options.isVerbose) {
2234
+ console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
2235
+ }
2236
+ if (!rawResponse.choices[0]) {
2237
+ throw new PipelineExecutionError('No choices from OpenAI');
2238
+ }
2239
+ if (rawResponse.choices.length > 1) {
2240
+ // TODO: Maybe this should be only a warning
2241
+ throw new PipelineExecutionError('More than one choice from OpenAI');
2242
+ }
2243
+ resultContent = rawResponse.choices[0].text;
2244
+ // eslint-disable-next-line prefer-const
2245
+ complete = getCurrentIsoDate();
2246
+ usage = computeOpenaiUsage(content, resultContent || '', rawResponse);
2247
+ return [2 /*return*/, {
2248
+ content: resultContent,
2249
+ modelName: rawResponse.model || modelName,
2250
+ timing: {
2251
+ start: start,
2252
+ complete: complete,
2253
+ },
2254
+ usage: usage,
2255
+ rawPromptContent: rawPromptContent,
2256
+ rawRequest: rawRequest,
2257
+ rawResponse: rawResponse,
2258
+ // <- [🗯]
2259
+ }];
2260
+ }
2261
+ });
2262
+ });
2263
+ };
2264
+ /**
2265
+ * Calls OpenAI API to use an embedding model
2266
+ */
2267
+ OpenAiExecutionTools.prototype.callEmbeddingModel = function (prompt) {
2268
+ return __awaiter(this, void 0, void 0, function () {
2269
+ var content, parameters, modelRequirements, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
2270
+ return __generator(this, function (_a) {
2271
+ switch (_a.label) {
2272
+ case 0:
2273
+ if (this.options.isVerbose) {
2274
+ console.info('🖋 OpenAI embedding call', { prompt: prompt });
2275
+ }
2276
+ content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
2277
+ // TODO: [☂] Use here more modelRequirements
2278
+ if (modelRequirements.modelVariant !== 'EMBEDDING') {
2279
+ throw new PipelineExecutionError('Use callEmbeddingModel only for EMBEDDING variant');
2280
+ }
2281
+ modelName = modelRequirements.modelName || this.getDefaultEmbeddingModel().modelName;
2282
+ rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
2283
+ rawRequest = {
2284
+ input: rawPromptContent,
2285
+ model: modelName,
2286
+ };
2287
+ start = getCurrentIsoDate();
2288
+ if (this.options.isVerbose) {
2289
+ console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
2290
+ }
2291
+ return [4 /*yield*/, this.client.embeddings.create(rawRequest)];
2292
+ case 1:
2293
+ rawResponse = _a.sent();
2294
+ if (this.options.isVerbose) {
2295
+ console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
2296
+ }
2297
+ if (rawResponse.data.length !== 1) {
2298
+ throw new PipelineExecutionError("Expected exactly 1 data item in response, got ".concat(rawResponse.data.length));
2299
+ }
2300
+ resultContent = rawResponse.data[0].embedding;
2301
+ // eslint-disable-next-line prefer-const
2302
+ complete = getCurrentIsoDate();
2303
+ usage = computeOpenaiUsage(content, '', rawResponse);
2304
+ return [2 /*return*/, {
2305
+ content: resultContent,
2306
+ modelName: rawResponse.model || modelName,
2307
+ timing: {
2308
+ start: start,
2309
+ complete: complete,
2310
+ },
2311
+ usage: usage,
2312
+ rawPromptContent: rawPromptContent,
2313
+ rawRequest: rawRequest,
2314
+ rawResponse: rawResponse,
2315
+ // <- [🗯]
2316
+ }];
2317
+ }
2318
+ });
2319
+ });
2320
+ };
2321
+ // <- Note: [🤖] callXxxModel
2322
+ /**
2323
+ * Get the model that should be used as default
2324
+ */
2325
+ OpenAiExecutionTools.prototype.getDefaultModel = function (defaultModelName) {
2326
+ var model = OPENAI_MODELS.find(function (_a) {
2327
+ var modelName = _a.modelName;
2328
+ return modelName === defaultModelName;
2329
+ });
2330
+ if (model === undefined) {
2331
+ throw new UnexpectedError(spaceTrim$1(function (block) {
2332
+ return "\n Cannot find model in OpenAI models with name \"".concat(defaultModelName, "\" which should be used as default.\n\n Available models:\n ").concat(block(OPENAI_MODELS.map(function (_a) {
2333
+ var modelName = _a.modelName;
2334
+ return "- \"".concat(modelName, "\"");
2335
+ }).join('\n')), "\n\n ");
2336
+ }));
2337
+ }
2338
+ return model;
2339
+ };
2340
+ /**
2341
+ * Default model for chat variant.
2342
+ */
2343
+ OpenAiExecutionTools.prototype.getDefaultChatModel = function () {
2344
+ return this.getDefaultModel('gpt-4o');
2345
+ };
2346
+ /**
2347
+ * Default model for completion variant.
2348
+ */
2349
+ OpenAiExecutionTools.prototype.getDefaultCompletionModel = function () {
2350
+ return this.getDefaultModel('gpt-3.5-turbo-instruct');
2351
+ };
2352
+ /**
2353
+ * Default model for embedding variant.
2354
+ */
2355
+ OpenAiExecutionTools.prototype.getDefaultEmbeddingModel = function () {
2356
+ return this.getDefaultModel('text-embedding-3-large');
2357
+ };
2358
+ // <- Note: [🤖] getDefaultXxxModel
2359
+ /**
2360
+ * List all available OpenAI models that can be used
2361
+ */
2362
+ OpenAiExecutionTools.prototype.listModels = function () {
2363
+ /*
2364
+ Note: Dynamic listing of the models
2365
+ const models = await this.openai.models.list({});
2366
+
2367
+ console.log({ models });
2368
+ console.log(models.data);
2369
+ */
2370
+ return OPENAI_MODELS;
2371
+ };
2372
+ return OpenAiExecutionTools;
2373
+ }());
2374
+ /**
2375
+ * TODO: [🧠][🧙‍♂️] Maybe there can be some wizard for those who want to use just OpenAI
2376
+ * TODO: Maybe Create some common util for callChatModel and callCompletionModel
2377
+ * TODO: Maybe make custom OpenaiError
2378
+ * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
2379
+ * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
2380
+ */
2381
+
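A minimal usage sketch of the class above, inside an async ES module context. Only `isVerbose` and `user` are stripped before the remaining options are handed to the `openai` client, so `apiKey` here is the usual OpenAI SDK option; the prompt shape and `expectFormat` follow `callChatModel` above, and the parameter values are placeholders:

    import { OpenAiExecutionTools } from '@promptbook/openai';

    const openAiTools = new OpenAiExecutionTools({
        apiKey: process.env.OPENAI_API_KEY, // <- forwarded to the OpenAI client
        user: 'user-1234',
        isVerbose: true,
    });

    const chatResult = await openAiTools.callChatModel({
        content: 'Summarize {topic} in one sentence.',
        parameters: { topic: 'the Promptbook remote server' },
        modelRequirements: { modelVariant: 'CHAT', modelName: 'gpt-4o' },
        // expectFormat: 'JSON', // <- would switch on response_format: { type: 'json_object' }
    });
    console.info(chatResult.content, chatResult.usage);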
2382
+ /**
2383
+ * @private internal type for `createLlmToolsFromConfiguration`
2384
+ */
2385
+ var EXECUTION_TOOLS_CLASSES = {
2386
+ createOpenAiExecutionTools: function (options) {
2387
+ return new OpenAiExecutionTools(__assign(__assign({}, options), { dangerouslyAllowBrowser: true /* <- TODO: [🧠] !!! Some mechanism for auto-detection of browser, maybe hide in `OpenAiExecutionTools` */ }));
2388
+ },
2389
+ createAnthropicClaudeExecutionTools: createAnthropicClaudeExecutionTools,
2390
+ createAzureOpenAiExecutionTools: function (options) { return new AzureOpenAiExecutionTools(options); },
2391
+ // <- Note: [🦑] Add here new LLM provider
2392
+ };
2393
+ /**
2394
+ * TODO: [🧠] Better file name than `config.ts` + maybe move to two separate files
2395
+ * TODO: [🧠][🎌] Adding this should be responsibility of each provider package NOT this one central place
2396
+ */
2397
+
2398
+ /**
2399
+ * @@@
2400
+ *
2401
+ * Note: This function is not cached, every call creates new instance of `MultipleLlmExecutionTools`
2402
+ *
2403
+ * @returns @@@
2404
+ * @public exported from `@promptbook/core`
2405
+ */
2406
+ function createLlmToolsFromConfiguration(configuration, options) {
2407
+ if (options === void 0) { options = {}; }
2408
+ var _a = options.isVerbose, isVerbose = _a === void 0 ? false : _a;
2409
+ var llmTools = configuration.map(function (llmConfiguration) {
2410
+ return EXECUTION_TOOLS_CLASSES["create".concat(llmConfiguration.className)](__assign({ isVerbose: isVerbose }, llmConfiguration.options));
2411
+ });
2412
+ return joinLlmExecutionTools.apply(void 0, __spreadArray([], __read(llmTools), false));
2413
+ }
2414
+ /**
2415
+ * TODO: [🎌] Together with `createLlmToolsFromConfiguration` + 'EXECUTION_TOOLS_CLASSES', ALL model providers get into `@promptbook/core`; make this more efficient
2416
+ * TODO: [🧠][🎌] Dynamically install required providers
2417
+ * TODO: @@@ write discussion about this - wizard
2418
+ * TODO: [🧠][🍛] Which name is better `createLlmToolsFromConfig` or `createLlmToolsFromConfiguration`?
2419
+ * TODO: [🧠] Is there some meaningful way to test this util
2420
+ * TODO: Maybe this should not be under `_common` but under `utils`
2421
+ */
2422
+
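A sketch of the configuration shape consumed above. The `title` / `packageName` / `className` / `options` fields mirror the entry that `createAnthropicClaudeExecutionTools` builds for its proxied mode, and `className` must match one of the `EXECUTION_TOOLS_CLASSES` factories; the API key is a placeholder:

    import { createLlmToolsFromConfiguration } from '@promptbook/core';

    const llmTools = createLlmToolsFromConfiguration(
        [
            {
                title: 'OpenAI',
                packageName: '@promptbook/openai',
                className: 'OpenAiExecutionTools', // <- resolved as EXECUTION_TOOLS_CLASSES.createOpenAiExecutionTools
                options: { apiKey: process.env.OPENAI_API_KEY },
            },
        ],
        { isVerbose: true },
    );
    // llmTools joins all configured providers; use llmTools.callChatModel(...) etc.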
2423
+ /**
2424
+ * Remote server is a proxy server that uses its execution tools internally and exposes the executor interface externally.
2425
+ *
2426
+ * You can simply use `RemoteLlmExecutionTools` in client-side JavaScript and connect to your remote server.
2427
+ * This is useful to keep all the logic on the browser side without exposing your API keys, or when there is no need to use the customer's GPU.
2428
+ *
2429
+ * @see https://github.com/webgptorg/promptbook#remote-server
2430
+ * @public exported from `@promptbook/remote-server`
2431
+ */
2432
+ function startRemoteServer(options) {
2433
+ var _this = this;
2434
+ var _a = __assign({ isAnonymousModeAllowed: false, isCollectionModeAllowed: false, collection: null, createLlmExecutionTools: null }, options), port = _a.port, path = _a.path, collection = _a.collection, createLlmExecutionTools = _a.createLlmExecutionTools,
2435
+ // <- TODO: [🧠][🤺] Remove `createLlmExecutionTools`, pass just `llmExecutionTools`
2436
+ isAnonymousModeAllowed = _a.isAnonymousModeAllowed, isCollectionModeAllowed = _a.isCollectionModeAllowed, _b = _a.isVerbose, isVerbose = _b === void 0 ? false : _b;
2437
+ // <- TODO: [🦪] Some helper type to be able to use discriminant union types with destructuring
2438
+ var httpServer = http.createServer({}, function (request, response) { return __awaiter(_this, void 0, void 0, function () {
2439
+ var _a, _b;
2440
+ var _this = this;
2441
+ var _c;
2442
+ return __generator(this, function (_d) {
2443
+ switch (_d.label) {
2444
+ case 0:
2445
+ if ((_c = request.url) === null || _c === void 0 ? void 0 : _c.includes('socket.io')) {
2446
+ return [2 /*return*/];
2447
+ }
2448
+ _b = (_a = response).write;
2449
+ return [4 /*yield*/, spaceTrim(function (block) { return __awaiter(_this, void 0, void 0, function () {
2450
+ var _a, _b, _c, _d, _e;
2451
+ return __generator(this, function (_f) {
2452
+ switch (_f.label) {
2453
+ case 0:
2454
+ _b = (_a = "\n Server for processing promptbook remote requests is running.\n\n Version: ".concat(PROMPTBOOK_VERSION, "\n Anonymouse mode: ").concat(isAnonymousModeAllowed ? 'enabled' : 'disabled', "\n Collection mode: ").concat(isCollectionModeAllowed ? 'enabled' : 'disabled', "\n ")).concat;
2455
+ _c = block;
2456
+ if (!!isCollectionModeAllowed) return [3 /*break*/, 1];
2457
+ _d = '';
2458
+ return [3 /*break*/, 3];
2459
+ case 1:
2460
+ _e = 'Pipelines in collection:\n';
2461
+ return [4 /*yield*/, collection.listPipelines()];
2462
+ case 2:
2463
+ _d = _e +
2464
+ (_f.sent())
2465
+ .map(function (pipelineUrl) { return "- ".concat(pipelineUrl); })
2466
+ .join('\n');
2467
+ _f.label = 3;
2468
+ case 3: return [2 /*return*/, _b.apply(_a, [_c.apply(void 0, [_d]), "\n\n For more information look at:\n https://github.com/webgptorg/promptbook\n "])];
2469
+ }
2470
+ });
2471
+ }); })];
2472
+ case 1:
2473
+ _b.apply(_a, [_d.sent()]);
2474
+ response.end();
2475
+ return [2 /*return*/];
2476
+ }
2477
+ });
2478
+ }); });
2479
+ var server = new Server(httpServer, {
2480
+ path: path,
2481
+ transports: [/*'websocket', <- TODO: [🌬] Make websocket transport work */ 'polling'],
2482
+ cors: {
2483
+ origin: '*',
2484
+ methods: ['GET', 'POST'],
2485
+ },
2486
+ });
2487
+ server.on('connection', function (socket) {
2488
+ console.info(colors.gray("Client connected"), socket.id);
2489
+ socket.on('request', function (request) { return __awaiter(_this, void 0, void 0, function () {
2490
+ var _a, prompt, clientId, llmToolsConfiguration, llmExecutionTools, promptResult, _b, error_1;
2491
+ return __generator(this, function (_c) {
2492
+ switch (_c.label) {
2493
+ case 0:
2494
+ _a = __assign({ clientId: null, llmToolsConfiguration: null }, request), prompt = _a.prompt, clientId = _a.clientId, llmToolsConfiguration = _a.llmToolsConfiguration;
2495
+ // <- TODO: [🦪] Some helper type to be able to use discriminant union types with destructuring
2496
+ if (isVerbose) {
2497
+ console.info(colors.bgWhite("Prompt:"), colors.gray(JSON.stringify(request, null, 4)));
2498
+ }
2499
+ _c.label = 1;
2500
+ case 1:
2501
+ _c.trys.push([1, 14, 15, 16]);
2502
+ if (llmToolsConfiguration !== null && !isAnonymousModeAllowed) {
2503
+ throw new PipelineExecutionError("Anonymous mode is not allowed"); // <- TODO: !!!!!! Test
2504
+ }
2505
+ if (clientId !== null && !isCollectionModeAllowed) {
2506
+ throw new PipelineExecutionError("Collection mode is not allowed"); // <- TODO: !!!!!! Test
2507
+ }
2508
+ llmExecutionTools = void 0;
2509
+ if (!(llmToolsConfiguration !== null)) return [3 /*break*/, 2];
2510
+ // Note: Anonymous mode
2511
+ // TODO: Maybe check that configuration is not empty
2512
+ llmExecutionTools = createLlmToolsFromConfiguration(llmToolsConfiguration);
2513
+ return [3 /*break*/, 5];
2514
+ case 2:
2515
+ if (!(createLlmExecutionTools !== null)) return [3 /*break*/, 4];
2516
+ // Note: Collection mode
2517
+ llmExecutionTools = createLlmExecutionTools(clientId);
2518
+ return [4 /*yield*/, collection.isResponsibleForPrompt(prompt)];
2519
+ case 3:
2520
+ if (!(_c.sent())) {
2521
+ throw new PipelineExecutionError("Pipeline is not in the collection of this server");
2522
+ }
2523
+ return [3 /*break*/, 5];
2524
+ case 4: throw new PipelineExecutionError("You must provide either llmToolsConfiguration or createLlmExecutionTools");
2525
+ case 5:
2526
+ promptResult = void 0;
2527
+ _b = prompt.modelRequirements.modelVariant;
2528
+ switch (_b) {
2529
+ case 'CHAT': return [3 /*break*/, 6];
2530
+ case 'COMPLETION': return [3 /*break*/, 8];
2531
+ case 'EMBEDDING': return [3 /*break*/, 10];
2532
+ }
2533
+ return [3 /*break*/, 12];
2534
+ case 6:
2535
+ if (llmExecutionTools.callChatModel === undefined) {
2536
+ // Note: [0] This check should not be a thing
2537
+ throw new PipelineExecutionError("Chat model is not available");
2538
+ }
2539
+ return [4 /*yield*/, llmExecutionTools.callChatModel(prompt)];
2540
+ case 7:
2541
+ promptResult = _c.sent();
2542
+ return [3 /*break*/, 13];
2543
+ case 8:
2544
+ if (llmExecutionTools.callCompletionModel === undefined) {
2545
+ // Note: [0] This check should not be a thing
2546
+ throw new PipelineExecutionError("Completion model is not available");
2547
+ }
2548
+ return [4 /*yield*/, llmExecutionTools.callCompletionModel(prompt)];
2549
+ case 9:
2550
+ promptResult = _c.sent();
2551
+ return [3 /*break*/, 13];
2552
+ case 10:
2553
+ if (llmExecutionTools.callEmbeddingModel === undefined) {
2554
+ // Note: [0] This check should not be a thing
2555
+ throw new PipelineExecutionError("Embedding model is not available");
2556
+ }
2557
+ return [4 /*yield*/, llmExecutionTools.callEmbeddingModel(prompt)];
2558
+ case 11:
2559
+ promptResult = _c.sent();
2560
+ return [3 /*break*/, 13];
2561
+ case 12: throw new PipelineExecutionError("Unknown model variant \"".concat(prompt.modelRequirements.modelVariant, "\""));
2562
+ case 13:
2563
+ if (isVerbose) {
2564
+ console.info(colors.bgGreen("PromptResult:"), colors.green(JSON.stringify(promptResult, null, 4)));
2565
+ }
2566
+ socket.emit('response', { promptResult: promptResult });
2567
+ return [3 /*break*/, 16];
2568
+ case 14:
2569
+ error_1 = _c.sent();
2570
+ if (!(error_1 instanceof Error)) {
2571
+ throw error_1;
2572
+ }
2573
+ socket.emit('error', { errorMessage: error_1.message });
2574
+ return [3 /*break*/, 16];
2575
+ case 15:
197
2576
  socket.disconnect();
198
2577
  return [7 /*endfinally*/];
199
- case 13: return [2 /*return*/];
2578
+ case 16: return [2 /*return*/];
200
2579
  }
201
2580
  });
202
2581
  }); });
@@ -229,13 +2608,14 @@ function startRemoteServer(options) {
229
2608
  };
230
2609
  }
231
2610
  /**
232
- * TODO: [🍜] Add anonymous option
2611
+ * TODO: [🍜] !!!!!! Add anonymous option
233
2612
  * TODO: [⚖] Expose the collection to be able to connect to same collection via createCollectionFromUrl
234
2613
  * TODO: Handle progress - support streaming
235
2614
  * TODO: [🗯] Do not hang up immediately but wait until client closes OR timeout
236
2615
  * TODO: [🗯] Timeout on chat to free up resources
237
2616
  * TODO: [🃏] Pass here some security token to prevent malicious usage and/or DDoS
238
2617
  * TODO: [0] Set unavailable models as undefined in `RemoteLlmExecutionTools` NOT throw error here
2618
+ * TODO: Constrain anonymous mode for specific models / providers
239
2619
  */
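A minimal sketch of starting the server in anonymous mode, using only options visible in the code above; the port number and path are placeholders:

    import { startRemoteServer } from '@promptbook/remote-server';

    startRemoteServer({
        port: 4460, // <- placeholder
        path: '/promptbook', // <- placeholder, passed to the socket.io server
        isAnonymousModeAllowed: true, // <- clients send their own llmToolsConfiguration with each request
        isCollectionModeAllowed: false,
        isVerbose: true,
    });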
240
2620
 
241
2621
  export { PROMPTBOOK_VERSION, startRemoteServer };