@promptbook/remote-server 0.65.0-2 → 0.65.0-3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (25)
  1. package/esm/index.es.js +2464 -82
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/anthropic-claude.index.d.ts +6 -0
  4. package/esm/typings/src/_packages/core.index.d.ts +2 -0
  5. package/esm/typings/src/_packages/node.index.d.ts +0 -2
  6. package/esm/typings/src/_packages/remote-client.index.d.ts +2 -2
  7. package/esm/typings/src/_packages/types.index.d.ts +16 -2
  8. package/esm/typings/src/llm-providers/_common/config.d.ts +3 -3
  9. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +5 -3
  10. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions.d.ts +23 -2
  11. package/esm/typings/src/llm-providers/anthropic-claude/createAnthropicClaudeExecutionTools.d.ts +13 -0
  12. package/esm/typings/src/llm-providers/anthropic-claude/playground/playground.d.ts +1 -1
  13. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +3 -2
  14. package/esm/typings/src/llm-providers/remote/interfaces/Promptbook_Server_Error.d.ts +2 -2
  15. package/esm/typings/src/llm-providers/remote/interfaces/Promptbook_Server_Progress.d.ts +2 -2
  16. package/esm/typings/src/llm-providers/remote/interfaces/Promptbook_Server_Request.d.ts +14 -2
  17. package/esm/typings/src/llm-providers/remote/interfaces/RemoteLlmExecutionToolsOptions.d.ts +49 -0
  18. package/esm/typings/src/llm-providers/remote/interfaces/RemoteServerOptions.d.ts +23 -2
  19. package/esm/typings/src/llm-providers/remote/playground/playground.d.ts +2 -0
  20. package/esm/typings/src/llm-providers/remote/startRemoteServer.d.ts +2 -1
  21. package/esm/typings/src/types/typeAliases.d.ts +6 -0
  22. package/package.json +6 -2
  23. package/umd/index.umd.js +2484 -85
  24. package/umd/index.umd.js.map +1 -1
  25. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionToolsOptions.d.ts +0 -26
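The main functional change visible in this release is the new `createAnthropicClaudeExecutionTools` factory and the reworked `RemoteLlmExecutionToolsOptions`, which together allow Anthropic Claude calls to be made either directly or proxied through an anonymous Promptbook remote server. Below is a minimal usage sketch inferred from the bundled code in this diff; the option names `isProxied`, `remoteUrl` and `path` appear in that code, while the `apiKey` option and the concrete values are assumptions, not documented API.

    import { createAnthropicClaudeExecutionTools } from '@promptbook/anthropic-claude';

    // Direct mode: options other than `isProxied`/`isVerbose` are passed to the Anthropic SDK client
    const directTools = createAnthropicClaudeExecutionTools({
        isProxied: false,
        apiKey: process.env.ANTHROPIC_CLAUDE_API_KEY, // <- assumed environment variable name
    });

    // Proxied mode: wraps `RemoteLlmExecutionTools` with `isAnonymous: true`,
    // so requests go through a Promptbook remote server instead of calling the API from the browser
    const proxiedTools = createAnthropicClaudeExecutionTools({
        isProxied: true,
        remoteUrl: 'https://example.com', // <- assumed URL of a running Promptbook remote server
        path: '/promptbook/socket.io', // <- assumed socket.io path
    });

    console.info(directTools.title, proxiedTools.title);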
package/esm/index.es.js CHANGED
@@ -1,13 +1,18 @@
  import colors from 'colors';
  import http from 'http';
  import { Server } from 'socket.io';
- import { spaceTrim } from 'spacetrim';
+ import spaceTrim$1, { spaceTrim } from 'spacetrim';
+ import * as dotenv from 'dotenv';
+ import { io } from 'socket.io-client';
+ import Anthropic from '@anthropic-ai/sdk';
+ import { OpenAIClient, AzureKeyCredential } from '@azure/openai';
+ import OpenAI from 'openai';
 
  // ⚠️ WARNING: This code has been generated so that any manual changes will be overwritten
  /**
  * The version of the Promptbook library
  */
- var PROMPTBOOK_VERSION = '0.65.0-1';
+ var PROMPTBOOK_VERSION = '0.65.0-2';
  // TODO: !!!! List here all the versions and annotate + put into script
 
  /*! *****************************************************************************
@@ -41,6 +46,17 @@ function __extends(d, b) {
  d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
  }
 
+ var __assign = function() {
+ __assign = Object.assign || function __assign(t) {
+ for (var s, i = 1, n = arguments.length; i < n; i++) {
+ s = arguments[i];
+ for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p)) t[p] = s[p];
+ }
+ return t;
+ };
+ return __assign.apply(this, arguments);
+ };
+
  function __awaiter(thisArg, _arguments, P, generator) {
  function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
  return new (P || (P = Promise))(function (resolve, reject) {
@@ -77,6 +93,45 @@ function __generator(thisArg, body) {
  } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }
  if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };
  }
+ }
+
+ function __values(o) {
+ var s = typeof Symbol === "function" && Symbol.iterator, m = s && o[s], i = 0;
+ if (m) return m.call(o);
+ if (o && typeof o.length === "number") return {
+ next: function () {
+ if (o && i >= o.length) o = void 0;
+ return { value: o && o[i++], done: !o };
+ }
+ };
+ throw new TypeError(s ? "Object is not iterable." : "Symbol.iterator is not defined.");
+ }
+
+ function __read(o, n) {
+ var m = typeof Symbol === "function" && o[Symbol.iterator];
+ if (!m) return o;
+ var i = m.call(o), r, ar = [], e;
+ try {
+ while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);
+ }
+ catch (error) { e = { error: error }; }
+ finally {
+ try {
+ if (r && !r.done && (m = i["return"])) m.call(i);
+ }
+ finally { if (e) throw e.error; }
+ }
+ return ar;
+ }
+
+ function __spreadArray(to, from, pack) {
+ if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {
+ if (ar || !(i in from)) {
+ if (!ar) ar = Array.prototype.slice.call(from, 0, i);
+ ar[i] = from[i];
+ }
+ }
+ return to.concat(ar || Array.prototype.slice.call(from));
  }
 
  /**
@@ -96,107 +151,2433 @@ var PipelineExecutionError = /** @class */ (function (_super) {
96
151
  }(Error));
97
152
 
98
153
  /**
99
- * Remote server is a proxy server that uses its execution tools internally and exposes the executor interface externally.
154
+ * This error type indicates that the error should not happen and its last check before crashing with some other error
100
155
  *
101
- * You can simply use `RemoteExecutionTools` on client-side javascript and connect to your remote server.
102
- * This is useful to make all logic on browser side but not expose your API keys or no need to use customer's GPU.
156
+ * @public exported from `@promptbook/core`
157
+ */
158
+ var UnexpectedError = /** @class */ (function (_super) {
159
+ __extends(UnexpectedError, _super);
160
+ function UnexpectedError(message) {
161
+ var _this = _super.call(this, spaceTrim(function (block) { return "\n ".concat(block(message), "\n\n Note: This error should not happen.\n It's probbably a bug in the pipeline collection\n\n Please report issue:\n https://github.com/webgptorg/promptbook/issues\n\n Or contact us on me@pavolhejny.com\n\n "); })) || this;
162
+ _this.name = 'UnexpectedError';
163
+ Object.setPrototypeOf(_this, UnexpectedError.prototype);
164
+ return _this;
165
+ }
166
+ return UnexpectedError;
167
+ }(Error));
168
+
169
+ /**
170
+ * Multiple LLM Execution Tools is a proxy server that uses multiple execution tools internally and exposes the executor interface externally.
103
171
  *
104
- * @see https://github.com/webgptorg/promptbook#remote-server
105
- * @public exported from `@promptbook/remote-server`
172
+ * Note: Internal utility of `joinLlmExecutionTools` but exposed type
173
+ * @public exported from `@promptbook/types`
106
174
  */
107
- function startRemoteServer(options) {
108
- var _this = this;
109
- var port = options.port, path = options.path, collection = options.collection, createLlmExecutionTools = options.createLlmExecutionTools, _a = options.isVerbose, isVerbose = _a === void 0 ? false : _a;
110
- var httpServer = http.createServer({}, function (request, response) {
111
- var _a;
112
- if ((_a = request.url) === null || _a === void 0 ? void 0 : _a.includes('socket.io')) {
113
- return;
114
- }
115
- response.write(spaceTrim("\n Server for processing promptbook remote requests is running.\n\n Version: ".concat(PROMPTBOOK_VERSION, "\n\n For more information look at:\n https://github.com/webgptorg/promptbook\n\n ")));
116
- response.end();
175
+ var MultipleLlmExecutionTools = /** @class */ (function () {
176
+ /**
177
+ * Gets array of execution tools in order of priority
178
+ */
179
+ function MultipleLlmExecutionTools() {
180
+ var llmExecutionTools = [];
181
+ for (var _i = 0; _i < arguments.length; _i++) {
182
+ llmExecutionTools[_i] = arguments[_i];
183
+ }
184
+ this.llmExecutionTools = llmExecutionTools;
185
+ }
186
+ Object.defineProperty(MultipleLlmExecutionTools.prototype, "title", {
187
+ get: function () {
188
+ return 'Multiple LLM Providers';
189
+ },
190
+ enumerable: false,
191
+ configurable: true
117
192
  });
118
- var server = new Server(httpServer, {
119
- path: path,
120
- transports: [/*'websocket', <- TODO: [🌬] Make websocket transport work */ 'polling'],
121
- cors: {
122
- origin: '*',
123
- methods: ['GET', 'POST'],
193
+ Object.defineProperty(MultipleLlmExecutionTools.prototype, "description", {
194
+ get: function () {
195
+ return this.llmExecutionTools
196
+ .map(function (tools, index) { return "".concat(index + 1, ") ").concat(tools.title, " ").concat(tools.description || ''); })
197
+ .join('\n');
124
198
  },
199
+ enumerable: false,
200
+ configurable: true
125
201
  });
126
- server.on('connection', function (socket) {
127
- console.info(colors.gray("Client connected"), socket.id);
128
- socket.on('request', function (request) { return __awaiter(_this, void 0, void 0, function () {
129
- var prompt, clientId, executionToolsForClient, promptResult, _a, error_1;
130
- return __generator(this, function (_b) {
131
- switch (_b.label) {
202
+ /**
203
+ * Calls the best available chat model
204
+ */
205
+ MultipleLlmExecutionTools.prototype.callChatModel = function (prompt) {
206
+ return this.callCommonModel(prompt);
207
+ };
208
+ /**
209
+ * Calls the best available completion model
210
+ */
211
+ MultipleLlmExecutionTools.prototype.callCompletionModel = function (prompt) {
212
+ return this.callCommonModel(prompt);
213
+ };
214
+ /**
215
+ * Calls the best available embedding model
216
+ */
217
+ MultipleLlmExecutionTools.prototype.callEmbeddingModel = function (prompt) {
218
+ return this.callCommonModel(prompt);
219
+ };
220
+ // <- Note: [🤖]
221
+ /**
222
+ * Calls the best available model
223
+ *
224
+ * Note: This should be private or protected but is public to be usable with duck typing
225
+ */
226
+ MultipleLlmExecutionTools.prototype.callCommonModel = function (prompt) {
227
+ return __awaiter(this, void 0, void 0, function () {
228
+ var errors, _a, _b, llmExecutionTools, _c, error_1, e_1_1;
229
+ var e_1, _d;
230
+ var _this = this;
231
+ return __generator(this, function (_e) {
232
+ switch (_e.label) {
132
233
  case 0:
133
- prompt = request.prompt, clientId = request.clientId;
134
- // TODO: !! Validate here clientId (pass validator as dependency)
135
- if (isVerbose) {
136
- console.info(colors.bgWhite("Prompt:"), colors.gray(JSON.stringify(request, null, 4)));
137
- }
138
- _b.label = 1;
234
+ errors = [];
235
+ _e.label = 1;
139
236
  case 1:
140
- _b.trys.push([1, 11, 12, 13]);
141
- executionToolsForClient = createLlmExecutionTools(clientId);
142
- return [4 /*yield*/, collection.isResponsibleForPrompt(prompt)];
237
+ _e.trys.push([1, 15, 16, 17]);
238
+ _a = __values(this.llmExecutionTools), _b = _a.next();
239
+ _e.label = 2;
143
240
  case 2:
144
- if (!(_b.sent())) {
145
- throw new PipelineExecutionError("Pipeline is not in the collection of this server");
146
- }
147
- promptResult = void 0;
148
- _a = prompt.modelRequirements.modelVariant;
149
- switch (_a) {
150
- case 'CHAT': return [3 /*break*/, 3];
151
- case 'COMPLETION': return [3 /*break*/, 5];
152
- case 'EMBEDDING': return [3 /*break*/, 7];
153
- }
154
- return [3 /*break*/, 9];
241
+ if (!!_b.done) return [3 /*break*/, 14];
242
+ llmExecutionTools = _b.value;
243
+ _e.label = 3;
155
244
  case 3:
156
- if (executionToolsForClient.callChatModel === undefined) {
157
- // Note: [0] This check should not be a thing
158
- throw new PipelineExecutionError("Chat model is not available");
245
+ _e.trys.push([3, 12, , 13]);
246
+ _c = prompt.modelRequirements.modelVariant;
247
+ switch (_c) {
248
+ case 'CHAT': return [3 /*break*/, 4];
249
+ case 'COMPLETION': return [3 /*break*/, 6];
250
+ case 'EMBEDDING': return [3 /*break*/, 8];
159
251
  }
160
- return [4 /*yield*/, executionToolsForClient.callChatModel(prompt)];
161
- case 4:
162
- promptResult = _b.sent();
163
252
  return [3 /*break*/, 10];
164
- case 5:
165
- if (executionToolsForClient.callCompletionModel === undefined) {
166
- // Note: [0] This check should not be a thing
167
- throw new PipelineExecutionError("Completion model is not available");
253
+ case 4:
254
+ if (llmExecutionTools.callChatModel === undefined) {
255
+ return [3 /*break*/, 13];
168
256
  }
169
- return [4 /*yield*/, executionToolsForClient.callCompletionModel(prompt)];
257
+ return [4 /*yield*/, llmExecutionTools.callChatModel(prompt)];
258
+ case 5: return [2 /*return*/, _e.sent()];
170
259
  case 6:
171
- promptResult = _b.sent();
172
- return [3 /*break*/, 10];
173
- case 7:
174
- if (executionToolsForClient.callEmbeddingModel === undefined) {
175
- // Note: [0] This check should not be a thing
176
- throw new PipelineExecutionError("Embedding model is not available");
260
+ if (llmExecutionTools.callCompletionModel === undefined) {
261
+ return [3 /*break*/, 13];
177
262
  }
178
- return [4 /*yield*/, executionToolsForClient.callEmbeddingModel(prompt)];
263
+ return [4 /*yield*/, llmExecutionTools.callCompletionModel(prompt)];
264
+ case 7: return [2 /*return*/, _e.sent()];
179
265
  case 8:
180
- promptResult = _b.sent();
181
- return [3 /*break*/, 10];
182
- case 9: throw new PipelineExecutionError("Unknown model variant \"".concat(prompt.modelRequirements.modelVariant, "\""));
183
- case 10:
184
- if (isVerbose) {
185
- console.info(colors.bgGreen("PromptResult:"), colors.green(JSON.stringify(promptResult, null, 4)));
266
+ if (llmExecutionTools.callEmbeddingModel === undefined) {
267
+ return [3 /*break*/, 13];
186
268
  }
187
- socket.emit('response', { promptResult: promptResult });
188
- return [3 /*break*/, 13];
189
- case 11:
190
- error_1 = _b.sent();
191
- if (!(error_1 instanceof Error)) {
269
+ return [4 /*yield*/, llmExecutionTools.callEmbeddingModel(prompt)];
270
+ case 9: return [2 /*return*/, _e.sent()];
271
+ case 10: throw new UnexpectedError("Unknown model variant \"".concat(prompt.modelRequirements.modelVariant, "\""));
272
+ case 11: return [3 /*break*/, 13];
273
+ case 12:
274
+ error_1 = _e.sent();
275
+ if (!(error_1 instanceof Error) || error_1 instanceof UnexpectedError) {
192
276
  throw error_1;
193
277
  }
194
- socket.emit('error', { errorMessage: error_1.message });
278
+ errors.push(error_1);
195
279
  return [3 /*break*/, 13];
196
- case 12:
280
+ case 13:
281
+ _b = _a.next();
282
+ return [3 /*break*/, 2];
283
+ case 14: return [3 /*break*/, 17];
284
+ case 15:
285
+ e_1_1 = _e.sent();
286
+ e_1 = { error: e_1_1 };
287
+ return [3 /*break*/, 17];
288
+ case 16:
289
+ try {
290
+ if (_b && !_b.done && (_d = _a.return)) _d.call(_a);
291
+ }
292
+ finally { if (e_1) throw e_1.error; }
293
+ return [7 /*endfinally*/];
294
+ case 17:
295
+ if (errors.length === 1) {
296
+ throw errors[0];
297
+ }
298
+ else if (errors.length > 1) {
299
+ throw new PipelineExecutionError(
300
+ // TODO: Tell which execution tools failed like
301
+ // 1) OpenAI throw PipelineExecutionError: Parameter {knowledge} is not defined
302
+ // 2) AnthropicClaude throw PipelineExecutionError: Parameter {knowledge} is not defined
303
+ // 3) ...
304
+ spaceTrim$1(function (block) { return "\n All execution tools failed:\n\n ".concat(block(errors
305
+ .map(function (error, i) { return "".concat(i + 1, ") **").concat(error.name || 'Error', ":** ").concat(error.message); })
306
+ .join('\n')), "\n\n "); }));
307
+ }
308
+ else if (this.llmExecutionTools.length === 0) {
309
+ throw new PipelineExecutionError("You have not provided any `LlmExecutionTools`");
310
+ }
311
+ else {
312
+ throw new PipelineExecutionError(spaceTrim$1(function (block) { return "\n You have not provided any `LlmExecutionTools` that support model variant \"".concat(prompt.modelRequirements.modelVariant, "\n\n Available `LlmExecutionTools`:\n ").concat(block(_this.llmExecutionTools
313
+ .map(function (tools) { return "- ".concat(tools.title, " ").concat(tools.description || ''); })
314
+ .join('\n')), "\n\n "); }));
315
+ }
316
+ }
317
+ });
318
+ });
319
+ };
320
+ /**
321
+ * List all available models that can be used
322
+ * This lists is a combination of all available models from all execution tools
323
+ */
324
+ MultipleLlmExecutionTools.prototype.listModels = function () {
325
+ return __awaiter(this, void 0, void 0, function () {
326
+ var availableModels, _a, _b, llmExecutionTools, models, e_2_1;
327
+ var e_2, _c;
328
+ return __generator(this, function (_d) {
329
+ switch (_d.label) {
330
+ case 0:
331
+ availableModels = [];
332
+ _d.label = 1;
333
+ case 1:
334
+ _d.trys.push([1, 6, 7, 8]);
335
+ _a = __values(this.llmExecutionTools), _b = _a.next();
336
+ _d.label = 2;
337
+ case 2:
338
+ if (!!_b.done) return [3 /*break*/, 5];
339
+ llmExecutionTools = _b.value;
340
+ return [4 /*yield*/, llmExecutionTools.listModels()];
341
+ case 3:
342
+ models = _d.sent();
343
+ availableModels.push.apply(availableModels, __spreadArray([], __read(models), false));
344
+ _d.label = 4;
345
+ case 4:
346
+ _b = _a.next();
347
+ return [3 /*break*/, 2];
348
+ case 5: return [3 /*break*/, 8];
349
+ case 6:
350
+ e_2_1 = _d.sent();
351
+ e_2 = { error: e_2_1 };
352
+ return [3 /*break*/, 8];
353
+ case 7:
354
+ try {
355
+ if (_b && !_b.done && (_c = _a.return)) _c.call(_a);
356
+ }
357
+ finally { if (e_2) throw e_2.error; }
358
+ return [7 /*endfinally*/];
359
+ case 8: return [2 /*return*/, availableModels];
360
+ }
361
+ });
362
+ });
363
+ };
364
+ return MultipleLlmExecutionTools;
365
+ }());
366
+ /**
367
+ * TODO: [🧠][🎛] Aggregating multiple models - have result not only from one first aviable model BUT all of them
368
+ * TODO: [🏖] If no llmTools have for example not defined `callCompletionModel` this will still return object with defined `callCompletionModel` which just throws `PipelineExecutionError`, make it undefined instead
369
+ * Look how `countTotalUsage` (and `cacheLlmTools`) implements it
370
+ */
371
+
372
+ /**
373
+ * Joins multiple LLM Execution Tools into one
374
+ *
375
+ * @returns {LlmExecutionTools} Single wrapper for multiple LlmExecutionTools
376
+ *
377
+ * 0) If there is no LlmExecutionTools, it warns and returns valid but empty LlmExecutionTools
378
+ * 1) If there is only one LlmExecutionTools, it returns it wrapped in a proxy object
379
+ * 2) If there are multiple LlmExecutionTools, first will be used first, second will be used if the first hasn`t defined model variant or fails, etc.
380
+ * 3) When all LlmExecutionTools fail, it throws an error with a list of all errors merged into one
381
+ *
382
+ *
383
+ * Tip: You don't have to use this function directly, just pass an array of LlmExecutionTools to the `ExecutionTools`
384
+ *
385
+ * @public exported from `@promptbook/core`
386
+ */
387
+ function joinLlmExecutionTools() {
388
+ var llmExecutionTools = [];
389
+ for (var _i = 0; _i < arguments.length; _i++) {
390
+ llmExecutionTools[_i] = arguments[_i];
391
+ }
392
+ if (llmExecutionTools.length === 0) {
393
+ var warningMessage = spaceTrim$1("\n You have not provided any `LlmExecutionTools`\n This means that you won't be able to execute any prompts that require large language models like GPT-4 or Anthropic's Claude.\n\n Technically, it's not an error, but it's probably not what you want because it does not make sense to use Promptbook without language models.\n ");
394
+ // TODO: [🟥] Detect browser / node and make it colorfull
395
+ console.warn(warningMessage);
396
+ /*
397
+ return {
398
+ async listModels() {
399
+ // TODO: [🟥] Detect browser / node and make it colorfull
400
+ console.warn(
401
+ spaceTrim(
402
+ (block) => `
403
+
404
+ You can't list models because you have no LLM Execution Tools defined:
405
+
406
+ tl;dr
407
+
408
+ ${block(warningMessage)}
409
+ `,
410
+ ),
411
+ );
412
+ return [];
413
+ },
414
+ };
415
+ */
416
+ }
417
+ return new (MultipleLlmExecutionTools.bind.apply(MultipleLlmExecutionTools, __spreadArray([void 0], __read(llmExecutionTools), false)))();
418
+ }
419
+ /**
420
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
421
+ */
422
+
423
+ /**
424
+ * Remote server is a proxy server that uses its execution tools internally and exposes the executor interface externally.
425
+ *
426
+ * You can simply use `RemoteExecutionTools` on client-side javascript and connect to your remote server.
427
+ * This is useful to make all logic on browser side but not expose your API keys or no need to use customer's GPU.
428
+ *
429
+ * @see https://github.com/webgptorg/promptbook#remote-server
430
+ * @public exported from `@promptbook/remote-client`
431
+ */
432
+ var RemoteLlmExecutionTools = /** @class */ (function () {
433
+ function RemoteLlmExecutionTools(options) {
434
+ this.options = options;
435
+ }
436
+ Object.defineProperty(RemoteLlmExecutionTools.prototype, "title", {
437
+ get: function () {
438
+ // TODO: [🧠] Maybe fetch title+description from the remote server (as well as if model methods are defined)
439
+ return 'Remote server';
440
+ },
441
+ enumerable: false,
442
+ configurable: true
443
+ });
444
+ Object.defineProperty(RemoteLlmExecutionTools.prototype, "description", {
445
+ get: function () {
446
+ return 'Use all models by your remote server';
447
+ },
448
+ enumerable: false,
449
+ configurable: true
450
+ });
451
+ /**
452
+ * Creates a connection to the remote proxy server.
453
+ */
454
+ RemoteLlmExecutionTools.prototype.makeConnection = function () {
455
+ var _this = this;
456
+ return new Promise(function (resolve, reject) {
457
+ var socket = io(_this.options.remoteUrl, {
458
+ path: _this.options.path,
459
+ // path: `${this.remoteUrl.pathname}/socket.io`,
460
+ transports: [/*'websocket', <- TODO: [🌬] Make websocket transport work */ 'polling'],
461
+ });
462
+ // console.log('Connecting to', this.options.remoteUrl.href, { socket });
463
+ socket.on('connect', function () {
464
+ resolve(socket);
465
+ });
466
+ setTimeout(function () {
467
+ reject(new Error("Timeout while connecting to ".concat(_this.options.remoteUrl)));
468
+ }, 60000 /* <- TODO: Timeout to config */);
469
+ });
470
+ };
471
+ /**
472
+ * Calls remote proxy server to use a chat model
473
+ */
474
+ RemoteLlmExecutionTools.prototype.callChatModel = function (prompt) {
475
+ if (this.options.isVerbose) {
476
+ console.info("\uD83D\uDD8B Remote callChatModel call");
477
+ }
478
+ return /* not await */ this.callCommonModel(prompt);
479
+ };
480
+ /**
481
+ * Calls remote proxy server to use a completion model
482
+ */
483
+ RemoteLlmExecutionTools.prototype.callCompletionModel = function (prompt) {
484
+ if (this.options.isVerbose) {
485
+ console.info("\uD83D\uDCAC Remote callCompletionModel call");
486
+ }
487
+ return /* not await */ this.callCommonModel(prompt);
488
+ };
489
+ /**
490
+ * Calls remote proxy server to use a embedding model
491
+ */
492
+ RemoteLlmExecutionTools.prototype.callEmbeddingModel = function (prompt) {
493
+ if (this.options.isVerbose) {
494
+ console.info("\uD83D\uDCAC Remote callEmbeddingModel call");
495
+ }
496
+ return /* not await */ this.callCommonModel(prompt);
497
+ };
498
+ // <- Note: [🤖] callXxxModel
499
+ /**
500
+ * Calls remote proxy server to use both completion or chat model
501
+ */
502
+ RemoteLlmExecutionTools.prototype.callCommonModel = function (prompt) {
503
+ return __awaiter(this, void 0, void 0, function () {
504
+ var socket, promptResult;
505
+ return __generator(this, function (_a) {
506
+ switch (_a.label) {
507
+ case 0: return [4 /*yield*/, this.makeConnection()];
508
+ case 1:
509
+ socket = _a.sent();
510
+ if (this.options.isAnonymous) {
511
+ socket.emit('request', {
512
+ llmToolsConfiguration: this.options.llmToolsConfiguration,
513
+ prompt: prompt,
514
+ // <- TODO: [🛫] `prompt` is NOT fully serializable as JSON, it contains functions which are not serializable
515
+ });
516
+ }
517
+ else {
518
+ socket.emit('request', {
519
+ clientId: this.options.clientId,
520
+ prompt: prompt,
521
+ // <- TODO: [🛫] `prompt` is NOT fully serializable as JSON, it contains functions which are not serializable
522
+ });
523
+ }
524
+ return [4 /*yield*/, new Promise(function (resolve, reject) {
525
+ socket.on('response', function (response) {
526
+ resolve(response.promptResult);
527
+ socket.disconnect();
528
+ });
529
+ socket.on('error', function (error) {
530
+ reject(new PipelineExecutionError(error.errorMessage));
531
+ socket.disconnect();
532
+ });
533
+ })];
534
+ case 2:
535
+ promptResult = _a.sent();
536
+ socket.disconnect();
537
+ return [2 /*return*/, promptResult];
538
+ }
539
+ });
540
+ });
541
+ };
542
+ /**
543
+ * List all available models that can be used
544
+ */
545
+ RemoteLlmExecutionTools.prototype.listModels = function () {
546
+ return __awaiter(this, void 0, void 0, function () {
547
+ return __generator(this, function (_a) {
548
+ return [2 /*return*/, [
549
+ /* !!! */
550
+ ]];
551
+ });
552
+ });
553
+ };
554
+ return RemoteLlmExecutionTools;
555
+ }());
556
+ /**
557
+ * TODO: [🍜] !!!!!! Default remote remoteUrl and path for anonymous server
558
+ * TODO: [🍓] Allow to list compatible models with each variant
559
+ * TODO: [🗯] RemoteLlmExecutionTools should extend Destroyable and implement IDestroyable
560
+ * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
561
+ * TODO: [🧠] Maybe remove `@promptbook/remote-client` and just use `@promptbook/core`
562
+ */
563
+
564
+ /**
565
+ * Counts number of characters in the text
566
+ *
567
+ * @public exported from `@promptbook/utils`
568
+ */
569
+ function countCharacters(text) {
570
+ // Remove null characters
571
+ text = text.replace(/\0/g, '');
572
+ // Replace emojis (and also ZWJ sequence) with hyphens
573
+ text = text.replace(/(\p{Extended_Pictographic})\p{Modifier_Symbol}/gu, '$1');
574
+ text = text.replace(/(\p{Extended_Pictographic})[\u{FE00}-\u{FE0F}]/gu, '$1');
575
+ text = text.replace(/\p{Extended_Pictographic}(\u{200D}\p{Extended_Pictographic})*/gu, '-');
576
+ return text.length;
577
+ }
578
+
579
+ /**
580
+ * Counts number of lines in the text
581
+ *
582
+ * @public exported from `@promptbook/utils`
583
+ */
584
+ function countLines(text) {
585
+ if (text === '') {
586
+ return 0;
587
+ }
588
+ return text.split('\n').length;
589
+ }
590
+
591
+ /**
592
+ * Counts number of pages in the text
593
+ *
594
+ * @public exported from `@promptbook/utils`
595
+ */
596
+ function countPages(text) {
597
+ var sentencesPerPage = 5; // Assuming each page has 5 sentences
598
+ var sentences = text.split(/[.!?]+/).filter(function (sentence) { return sentence.trim() !== ''; });
599
+ var pageCount = Math.ceil(sentences.length / sentencesPerPage);
600
+ return pageCount;
601
+ }
602
+
603
+ /**
604
+ * Counts number of paragraphs in the text
605
+ *
606
+ * @public exported from `@promptbook/utils`
607
+ */
608
+ function countParagraphs(text) {
609
+ return text.split(/\n\s*\n/).filter(function (paragraph) { return paragraph.trim() !== ''; }).length;
610
+ }
611
+
612
+ /**
613
+ * Split text into sentences
614
+ *
615
+ * @public exported from `@promptbook/utils`
616
+ */
617
+ function splitIntoSentences(text) {
618
+ return text.split(/[.!?]+/).filter(function (sentence) { return sentence.trim() !== ''; });
619
+ }
620
+ /**
621
+ * Counts number of sentences in the text
622
+ *
623
+ * @public exported from `@promptbook/utils`
624
+ */
625
+ function countSentences(text) {
626
+ return splitIntoSentences(text).length;
627
+ }
628
+
629
+ var defaultDiacriticsRemovalMap = [
630
+ {
631
+ base: 'A',
632
+ letters: '\u0041\u24B6\uFF21\u00C0\u00C1\u00C2\u1EA6\u1EA4\u1EAA\u1EA8\u00C3\u0100\u0102\u1EB0\u1EAE\u1EB4\u1EB2\u0226\u01E0\u00C4\u01DE\u1EA2\u00C5\u01FA\u01CD\u0200\u0202\u1EA0\u1EAC\u1EB6\u1E00\u0104\u023A\u2C6F',
633
+ },
634
+ { base: 'AA', letters: '\uA732' },
635
+ { base: 'AE', letters: '\u00C6\u01FC\u01E2' },
636
+ { base: 'AO', letters: '\uA734' },
637
+ { base: 'AU', letters: '\uA736' },
638
+ { base: 'AV', letters: '\uA738\uA73A' },
639
+ { base: 'AY', letters: '\uA73C' },
640
+ {
641
+ base: 'B',
642
+ letters: '\u0042\u24B7\uFF22\u1E02\u1E04\u1E06\u0243\u0182\u0181',
643
+ },
644
+ {
645
+ base: 'C',
646
+ letters: '\u0043\u24B8\uFF23\u0106\u0108\u010A\u010C\u00C7\u1E08\u0187\u023B\uA73E',
647
+ },
648
+ {
649
+ base: 'D',
650
+ letters: '\u0044\u24B9\uFF24\u1E0A\u010E\u1E0C\u1E10\u1E12\u1E0E\u0110\u018B\u018A\u0189\uA779\u00D0',
651
+ },
652
+ { base: 'DZ', letters: '\u01F1\u01C4' },
653
+ { base: 'Dz', letters: '\u01F2\u01C5' },
654
+ {
655
+ base: 'E',
656
+ letters: '\u0045\u24BA\uFF25\u00C8\u00C9\u00CA\u1EC0\u1EBE\u1EC4\u1EC2\u1EBC\u0112\u1E14\u1E16\u0114\u0116\u00CB\u1EBA\u011A\u0204\u0206\u1EB8\u1EC6\u0228\u1E1C\u0118\u1E18\u1E1A\u0190\u018E',
657
+ },
658
+ { base: 'F', letters: '\u0046\u24BB\uFF26\u1E1E\u0191\uA77B' },
659
+ {
660
+ base: 'G',
661
+ letters: '\u0047\u24BC\uFF27\u01F4\u011C\u1E20\u011E\u0120\u01E6\u0122\u01E4\u0193\uA7A0\uA77D\uA77E',
662
+ },
663
+ {
664
+ base: 'H',
665
+ letters: '\u0048\u24BD\uFF28\u0124\u1E22\u1E26\u021E\u1E24\u1E28\u1E2A\u0126\u2C67\u2C75\uA78D',
666
+ },
667
+ {
668
+ base: 'I',
669
+ letters: '\u0049\u24BE\uFF29\u00CC\u00CD\u00CE\u0128\u012A\u012C\u0130\u00CF\u1E2E\u1EC8\u01CF\u0208\u020A\u1ECA\u012E\u1E2C\u0197',
670
+ },
671
+ { base: 'J', letters: '\u004A\u24BF\uFF2A\u0134\u0248' },
672
+ {
673
+ base: 'K',
674
+ letters: '\u004B\u24C0\uFF2B\u1E30\u01E8\u1E32\u0136\u1E34\u0198\u2C69\uA740\uA742\uA744\uA7A2',
675
+ },
676
+ {
677
+ base: 'L',
678
+ letters: '\u004C\u24C1\uFF2C\u013F\u0139\u013D\u1E36\u1E38\u013B\u1E3C\u1E3A\u0141\u023D\u2C62\u2C60\uA748\uA746\uA780',
679
+ },
680
+ { base: 'LJ', letters: '\u01C7' },
681
+ { base: 'Lj', letters: '\u01C8' },
682
+ { base: 'M', letters: '\u004D\u24C2\uFF2D\u1E3E\u1E40\u1E42\u2C6E\u019C' },
683
+ {
684
+ base: 'N',
685
+ letters: '\u004E\u24C3\uFF2E\u01F8\u0143\u00D1\u1E44\u0147\u1E46\u0145\u1E4A\u1E48\u0220\u019D\uA790\uA7A4',
686
+ },
687
+ { base: 'NJ', letters: '\u01CA' },
688
+ { base: 'Nj', letters: '\u01CB' },
689
+ {
690
+ base: 'O',
691
+ letters: '\u004F\u24C4\uFF2F\u00D2\u00D3\u00D4\u1ED2\u1ED0\u1ED6\u1ED4\u00D5\u1E4C\u022C\u1E4E\u014C\u1E50\u1E52\u014E\u022E\u0230\u00D6\u022A\u1ECE\u0150\u01D1\u020C\u020E\u01A0\u1EDC\u1EDA\u1EE0\u1EDE\u1EE2\u1ECC\u1ED8\u01EA\u01EC\u00D8\u01FE\u0186\u019F\uA74A\uA74C',
692
+ },
693
+ { base: 'OI', letters: '\u01A2' },
694
+ { base: 'OO', letters: '\uA74E' },
695
+ { base: 'OU', letters: '\u0222' },
696
+ { base: 'OE', letters: '\u008C\u0152' },
697
+ { base: 'oe', letters: '\u009C\u0153' },
698
+ {
699
+ base: 'P',
700
+ letters: '\u0050\u24C5\uFF30\u1E54\u1E56\u01A4\u2C63\uA750\uA752\uA754',
701
+ },
702
+ { base: 'Q', letters: '\u0051\u24C6\uFF31\uA756\uA758\u024A' },
703
+ {
704
+ base: 'R',
705
+ letters: '\u0052\u24C7\uFF32\u0154\u1E58\u0158\u0210\u0212\u1E5A\u1E5C\u0156\u1E5E\u024C\u2C64\uA75A\uA7A6\uA782',
706
+ },
707
+ {
708
+ base: 'S',
709
+ letters: '\u0053\u24C8\uFF33\u1E9E\u015A\u1E64\u015C\u1E60\u0160\u1E66\u1E62\u1E68\u0218\u015E\u2C7E\uA7A8\uA784',
710
+ },
711
+ {
712
+ base: 'T',
713
+ letters: '\u0054\u24C9\uFF34\u1E6A\u0164\u1E6C\u021A\u0162\u1E70\u1E6E\u0166\u01AC\u01AE\u023E\uA786',
714
+ },
715
+ { base: 'TZ', letters: '\uA728' },
716
+ {
717
+ base: 'U',
718
+ letters: '\u0055\u24CA\uFF35\u00D9\u00DA\u00DB\u0168\u1E78\u016A\u1E7A\u016C\u00DC\u01DB\u01D7\u01D5\u01D9\u1EE6\u016E\u0170\u01D3\u0214\u0216\u01AF\u1EEA\u1EE8\u1EEE\u1EEC\u1EF0\u1EE4\u1E72\u0172\u1E76\u1E74\u0244',
719
+ },
720
+ { base: 'V', letters: '\u0056\u24CB\uFF36\u1E7C\u1E7E\u01B2\uA75E\u0245' },
721
+ { base: 'VY', letters: '\uA760' },
722
+ {
723
+ base: 'W',
724
+ letters: '\u0057\u24CC\uFF37\u1E80\u1E82\u0174\u1E86\u1E84\u1E88\u2C72',
725
+ },
726
+ { base: 'X', letters: '\u0058\u24CD\uFF38\u1E8A\u1E8C' },
727
+ {
728
+ base: 'Y',
729
+ letters: '\u0059\u24CE\uFF39\u1EF2\u00DD\u0176\u1EF8\u0232\u1E8E\u0178\u1EF6\u1EF4\u01B3\u024E\u1EFE',
730
+ },
731
+ {
732
+ base: 'Z',
733
+ letters: '\u005A\u24CF\uFF3A\u0179\u1E90\u017B\u017D\u1E92\u1E94\u01B5\u0224\u2C7F\u2C6B\uA762',
734
+ },
735
+ {
736
+ base: 'a',
737
+ letters: '\u0061\u24D0\uFF41\u1E9A\u00E0\u00E1\u00E2\u1EA7\u1EA5\u1EAB\u1EA9\u00E3\u0101\u0103\u1EB1\u1EAF\u1EB5\u1EB3\u0227\u01E1\u00E4\u01DF\u1EA3\u00E5\u01FB\u01CE\u0201\u0203\u1EA1\u1EAD\u1EB7\u1E01\u0105\u2C65\u0250',
738
+ },
739
+ { base: 'aa', letters: '\uA733' },
740
+ { base: 'ae', letters: '\u00E6\u01FD\u01E3' },
741
+ { base: 'ao', letters: '\uA735' },
742
+ { base: 'au', letters: '\uA737' },
743
+ { base: 'av', letters: '\uA739\uA73B' },
744
+ { base: 'ay', letters: '\uA73D' },
745
+ {
746
+ base: 'b',
747
+ letters: '\u0062\u24D1\uFF42\u1E03\u1E05\u1E07\u0180\u0183\u0253',
748
+ },
749
+ {
750
+ base: 'c',
751
+ letters: '\u0063\u24D2\uFF43\u0107\u0109\u010B\u010D\u00E7\u1E09\u0188\u023C\uA73F\u2184',
752
+ },
753
+ {
754
+ base: 'd',
755
+ letters: '\u0064\u24D3\uFF44\u1E0B\u010F\u1E0D\u1E11\u1E13\u1E0F\u0111\u018C\u0256\u0257\uA77A',
756
+ },
757
+ { base: 'dz', letters: '\u01F3\u01C6' },
758
+ {
759
+ base: 'e',
760
+ letters: '\u0065\u24D4\uFF45\u00E8\u00E9\u00EA\u1EC1\u1EBF\u1EC5\u1EC3\u1EBD\u0113\u1E15\u1E17\u0115\u0117\u00EB\u1EBB\u011B\u0205\u0207\u1EB9\u1EC7\u0229\u1E1D\u0119\u1E19\u1E1B\u0247\u025B\u01DD',
761
+ },
762
+ { base: 'f', letters: '\u0066\u24D5\uFF46\u1E1F\u0192\uA77C' },
763
+ {
764
+ base: 'g',
765
+ letters: '\u0067\u24D6\uFF47\u01F5\u011D\u1E21\u011F\u0121\u01E7\u0123\u01E5\u0260\uA7A1\u1D79\uA77F',
766
+ },
767
+ {
768
+ base: 'h',
769
+ letters: '\u0068\u24D7\uFF48\u0125\u1E23\u1E27\u021F\u1E25\u1E29\u1E2B\u1E96\u0127\u2C68\u2C76\u0265',
770
+ },
771
+ { base: 'hv', letters: '\u0195' },
772
+ {
773
+ base: 'i',
774
+ letters: '\u0069\u24D8\uFF49\u00EC\u00ED\u00EE\u0129\u012B\u012D\u00EF\u1E2F\u1EC9\u01D0\u0209\u020B\u1ECB\u012F\u1E2D\u0268\u0131',
775
+ },
776
+ { base: 'j', letters: '\u006A\u24D9\uFF4A\u0135\u01F0\u0249' },
777
+ {
778
+ base: 'k',
779
+ letters: '\u006B\u24DA\uFF4B\u1E31\u01E9\u1E33\u0137\u1E35\u0199\u2C6A\uA741\uA743\uA745\uA7A3',
780
+ },
781
+ {
782
+ base: 'l',
783
+ letters: '\u006C\u24DB\uFF4C\u0140\u013A\u013E\u1E37\u1E39\u013C\u1E3D\u1E3B\u017F\u0142\u019A\u026B\u2C61\uA749\uA781\uA747',
784
+ },
785
+ { base: 'lj', letters: '\u01C9' },
786
+ { base: 'm', letters: '\u006D\u24DC\uFF4D\u1E3F\u1E41\u1E43\u0271\u026F' },
787
+ {
788
+ base: 'n',
789
+ letters: '\u006E\u24DD\uFF4E\u01F9\u0144\u00F1\u1E45\u0148\u1E47\u0146\u1E4B\u1E49\u019E\u0272\u0149\uA791\uA7A5',
790
+ },
791
+ { base: 'nj', letters: '\u01CC' },
792
+ {
793
+ base: 'o',
794
+ letters: '\u006F\u24DE\uFF4F\u00F2\u00F3\u00F4\u1ED3\u1ED1\u1ED7\u1ED5\u00F5\u1E4D\u022D\u1E4F\u014D\u1E51\u1E53\u014F\u022F\u0231\u00F6\u022B\u1ECF\u0151\u01D2\u020D\u020F\u01A1\u1EDD\u1EDB\u1EE1\u1EDF\u1EE3\u1ECD\u1ED9\u01EB\u01ED\u00F8\u01FF\u0254\uA74B\uA74D\u0275',
795
+ },
796
+ { base: 'oi', letters: '\u01A3' },
797
+ { base: 'ou', letters: '\u0223' },
798
+ { base: 'oo', letters: '\uA74F' },
799
+ {
800
+ base: 'p',
801
+ letters: '\u0070\u24DF\uFF50\u1E55\u1E57\u01A5\u1D7D\uA751\uA753\uA755',
802
+ },
803
+ { base: 'q', letters: '\u0071\u24E0\uFF51\u024B\uA757\uA759' },
804
+ {
805
+ base: 'r',
806
+ letters: '\u0072\u24E1\uFF52\u0155\u1E59\u0159\u0211\u0213\u1E5B\u1E5D\u0157\u1E5F\u024D\u027D\uA75B\uA7A7\uA783',
807
+ },
808
+ {
809
+ base: 's',
810
+ letters: '\u0073\u24E2\uFF53\u00DF\u015B\u1E65\u015D\u1E61\u0161\u1E67\u1E63\u1E69\u0219\u015F\u023F\uA7A9\uA785\u1E9B',
811
+ },
812
+ {
813
+ base: 't',
814
+ letters: '\u0074\u24E3\uFF54\u1E6B\u1E97\u0165\u1E6D\u021B\u0163\u1E71\u1E6F\u0167\u01AD\u0288\u2C66\uA787',
815
+ },
816
+ { base: 'tz', letters: '\uA729' },
817
+ {
818
+ base: 'u',
819
+ letters: '\u0075\u24E4\uFF55\u00F9\u00FA\u00FB\u0169\u1E79\u016B\u1E7B\u016D\u00FC\u01DC\u01D8\u01D6\u01DA\u1EE7\u016F\u0171\u01D4\u0215\u0217\u01B0\u1EEB\u1EE9\u1EEF\u1EED\u1EF1\u1EE5\u1E73\u0173\u1E77\u1E75\u0289',
820
+ },
821
+ { base: 'v', letters: '\u0076\u24E5\uFF56\u1E7D\u1E7F\u028B\uA75F\u028C' },
822
+ { base: 'vy', letters: '\uA761' },
823
+ {
824
+ base: 'w',
825
+ letters: '\u0077\u24E6\uFF57\u1E81\u1E83\u0175\u1E87\u1E85\u1E98\u1E89\u2C73',
826
+ },
827
+ { base: 'x', letters: '\u0078\u24E7\uFF58\u1E8B\u1E8D' },
828
+ {
829
+ base: 'y',
830
+ letters: '\u0079\u24E8\uFF59\u1EF3\u00FD\u0177\u1EF9\u0233\u1E8F\u00FF\u1EF7\u1E99\u1EF5\u01B4\u024F\u1EFF',
831
+ },
832
+ {
833
+ base: 'z',
834
+ letters: '\u007A\u24E9\uFF5A\u017A\u1E91\u017C\u017E\u1E93\u1E95\u01B6\u0225\u0240\u2C6C\uA763',
835
+ },
836
+ ];
837
+ /**
838
+ * Map of letters from diacritic variant to diacritless variant
839
+ * Contains lowercase and uppercase separatelly
840
+ *
841
+ * > "á" => "a"
842
+ * > "ě" => "e"
843
+ * > "Ă" => "A"
844
+ * > ...
845
+ *
846
+ * @public exported from `@promptbook/utils`
847
+ */
848
+ var DIACRITIC_VARIANTS_LETTERS = {};
849
+ // tslint:disable-next-line: prefer-for-of
850
+ for (var i = 0; i < defaultDiacriticsRemovalMap.length; i++) {
851
+ var letters = defaultDiacriticsRemovalMap[i].letters;
852
+ // tslint:disable-next-line: prefer-for-of
853
+ for (var j = 0; j < letters.length; j++) {
854
+ DIACRITIC_VARIANTS_LETTERS[letters[j]] = defaultDiacriticsRemovalMap[i].base;
855
+ }
856
+ }
857
+ // <- TODO: [🍓] Put to maker function to save execution time if not needed
858
+ /*
859
+ @see https://stackoverflow.com/questions/990904/remove-accents-diacritics-in-a-string-in-javascript
860
+ Licensed under the Apache License, Version 2.0 (the "License");
861
+ you may not use this file except in compliance with the License.
862
+ You may obtain a copy of the License at
863
+
864
+ http://www.apache.org/licenses/LICENSE-2.0
865
+
866
+ Unless required by applicable law or agreed to in writing, software
867
+ distributed under the License is distributed on an "AS IS" BASIS,
868
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
869
+ See the License for the specific language governing permissions and
870
+ limitations under the License.
871
+ */
872
+
873
+ /**
874
+ * @@@
875
+ *
876
+ * @param input @@@
877
+ * @returns @@@
878
+ * @public exported from `@promptbook/utils`
879
+ */
880
+ function removeDiacritics(input) {
881
+ /*eslint no-control-regex: "off"*/
882
+ return input.replace(/[^\u0000-\u007E]/g, function (a) {
883
+ return DIACRITIC_VARIANTS_LETTERS[a] || a;
884
+ });
885
+ }
886
+ /**
887
+ * TODO: [Ж] Variant for cyrillic (and in general non-latin) letters
888
+ */
889
+
890
+ /**
891
+ * Counts number of words in the text
892
+ *
893
+ * @public exported from `@promptbook/utils`
894
+ */
895
+ function countWords(text) {
896
+ text = text.replace(/[\p{Extended_Pictographic}]/gu, 'a');
897
+ text = removeDiacritics(text);
898
+ return text.split(/[^a-zа-я0-9]+/i).filter(function (word) { return word.length > 0; }).length;
899
+ }
900
+
901
+ /**
902
+ * Helper of usage compute
903
+ *
904
+ * @param content the content of prompt or response
905
+ * @returns part of PromptResultUsageCounts
906
+ *
907
+ * @private internal utility of LlmExecutionTools
908
+ */
909
+ function computeUsageCounts(content) {
910
+ return {
911
+ charactersCount: { value: countCharacters(content) },
912
+ wordsCount: { value: countWords(content) },
913
+ sentencesCount: { value: countSentences(content) },
914
+ linesCount: { value: countLines(content) },
915
+ paragraphsCount: { value: countParagraphs(content) },
916
+ pagesCount: { value: countPages(content) },
917
+ };
918
+ }
919
+
920
+ /**
921
+ * Make UncertainNumber
922
+ *
923
+ * @param value
924
+ *
925
+ * @private utility for initializating UncertainNumber
926
+ */
927
+ function uncertainNumber(value) {
928
+ if (value === null || value === undefined || Number.isNaN(value)) {
929
+ return { value: 0, isUncertain: true };
930
+ }
931
+ return { value: value };
932
+ }
933
+
934
+ /**
935
+ * Get current date in ISO 8601 format
936
+ *
937
+ * @private internal utility
938
+ */
939
+ function getCurrentIsoDate() {
940
+ return new Date().toISOString();
941
+ }
942
+
943
+ /**
944
+ * @@@
945
+ *
946
+ * Note: This function mutates the object and returns the original (but mutated-deep-freezed) object
947
+ *
948
+ * @returns The same object as the input, but deeply frozen
949
+ * @public exported from `@promptbook/utils`
950
+ */
951
+ function deepFreeze(objectValue) {
952
+ var e_1, _a;
953
+ var propertyNames = Object.getOwnPropertyNames(objectValue);
954
+ try {
955
+ for (var propertyNames_1 = __values(propertyNames), propertyNames_1_1 = propertyNames_1.next(); !propertyNames_1_1.done; propertyNames_1_1 = propertyNames_1.next()) {
956
+ var propertyName = propertyNames_1_1.value;
957
+ var value = objectValue[propertyName];
958
+ if (value && typeof value === 'object') {
959
+ deepFreeze(value);
960
+ }
961
+ }
962
+ }
963
+ catch (e_1_1) { e_1 = { error: e_1_1 }; }
964
+ finally {
965
+ try {
966
+ if (propertyNames_1_1 && !propertyNames_1_1.done && (_a = propertyNames_1.return)) _a.call(propertyNames_1);
967
+ }
968
+ finally { if (e_1) throw e_1.error; }
969
+ }
970
+ return Object.freeze(objectValue);
971
+ }
972
+ /**
973
+ * TODO: [🧠] Is there a way how to meaningfully test this utility
974
+ */
975
+
976
+ // <- TODO: [🧠] Better system for generator warnings - not always "code" and "by `@promptbook/cli`"
977
+ /**
978
+ * The maximum number of iterations for a loops
979
+ *
980
+ * @private within the repository - too low-level in comparison with other `MAX_...`
981
+ */
982
+ var LOOP_LIMIT = 1000;
983
+ /**
984
+ * Nonce which is used for replacing things in strings
985
+ *
986
+ * @private within the repository
987
+ */
988
+ var REPLACING_NONCE = 'u$k42k%!V2zo34w7Fu#@QUHYPW';
989
+ /**
990
+ * The names of the parameters that are reserved for special purposes
991
+ *
992
+ * @public exported from `@promptbook/core`
993
+ */
994
+ deepFreeze([
995
+ 'content',
996
+ 'context',
997
+ 'knowledge',
998
+ 'samples',
999
+ 'modelName',
1000
+ 'currentDate',
1001
+ // <- TODO: Add more like 'date', 'modelName',...
1002
+ // <- TODO: Add [emoji] + instructions ACRY when adding new reserved parameter
1003
+ ]);
1004
+ /**
1005
+ * @@@
1006
+ *
1007
+ * @private within the repository
1008
+ */
1009
+ var RESERVED_PARAMETER_MISSING_VALUE = 'MISSING-' + REPLACING_NONCE;
1010
+ /**
1011
+ * @@@
1012
+ *
1013
+ * @private within the repository
1014
+ */
1015
+ var RESERVED_PARAMETER_RESTRICTED = 'RESTRICTED-' + REPLACING_NONCE;
1016
+
1017
+ /**
1018
+ * This error type indicates that some limit was reached
1019
+ *
1020
+ * @public exported from `@promptbook/core`
1021
+ */
1022
+ var LimitReachedError = /** @class */ (function (_super) {
1023
+ __extends(LimitReachedError, _super);
1024
+ function LimitReachedError(message) {
1025
+ var _this = _super.call(this, message) || this;
1026
+ _this.name = 'LimitReachedError';
1027
+ Object.setPrototypeOf(_this, LimitReachedError.prototype);
1028
+ return _this;
1029
+ }
1030
+ return LimitReachedError;
1031
+ }(Error));
1032
+
1033
+ /**
1034
+ * Replaces parameters in template with values from parameters object
1035
+ *
1036
+ * @param template the template with parameters in {curly} braces
1037
+ * @param parameters the object with parameters
1038
+ * @returns the template with replaced parameters
1039
+ * @throws {PipelineExecutionError} if parameter is not defined, not closed, or not opened
1040
+ * @public exported from `@promptbook/utils`
1041
+ */
1042
+ function replaceParameters(template, parameters) {
1043
+ var e_1, _a;
1044
+ try {
1045
+ for (var _b = __values(Object.entries(parameters)), _c = _b.next(); !_c.done; _c = _b.next()) {
1046
+ var _d = __read(_c.value, 2), parameterName = _d[0], parameterValue = _d[1];
1047
+ if (parameterValue === RESERVED_PARAMETER_MISSING_VALUE) {
1048
+ throw new UnexpectedError("Parameter {".concat(parameterName, "} has missing value"));
1049
+ }
1050
+ else if (parameterValue === RESERVED_PARAMETER_RESTRICTED) {
1051
+ // TODO: [🍵]
1052
+ throw new UnexpectedError("Parameter {".concat(parameterName, "} is restricted to use"));
1053
+ }
1054
+ }
1055
+ }
1056
+ catch (e_1_1) { e_1 = { error: e_1_1 }; }
1057
+ finally {
1058
+ try {
1059
+ if (_c && !_c.done && (_a = _b.return)) _a.call(_b);
1060
+ }
1061
+ finally { if (e_1) throw e_1.error; }
1062
+ }
1063
+ var replacedTemplate = template;
1064
+ var match;
1065
+ var loopLimit = LOOP_LIMIT;
1066
+ var _loop_1 = function () {
1067
+ if (loopLimit-- < 0) {
1068
+ throw new LimitReachedError('Loop limit reached during parameters replacement in `replaceParameters`');
1069
+ }
1070
+ var precol = match.groups.precol;
1071
+ var parameterName = match.groups.parameterName;
1072
+ if (parameterName === '') {
1073
+ return "continue";
1074
+ }
1075
+ if (parameterName.indexOf('{') !== -1 || parameterName.indexOf('}') !== -1) {
1076
+ throw new PipelineExecutionError('Parameter is already opened or not closed');
1077
+ }
1078
+ if (parameters[parameterName] === undefined) {
1079
+ throw new PipelineExecutionError("Parameter {".concat(parameterName, "} is not defined"));
1080
+ }
1081
+ var parameterValue = parameters[parameterName];
1082
+ if (parameterValue === undefined) {
1083
+ throw new PipelineExecutionError("Parameter {".concat(parameterName, "} is not defined"));
1084
+ }
1085
+ parameterValue = parameterValue.toString();
1086
+ if (parameterValue.includes('\n') && /^\s*\W{0,3}\s*$/.test(precol)) {
1087
+ parameterValue = parameterValue
1088
+ .split('\n')
1089
+ .map(function (line, index) { return (index === 0 ? line : "".concat(precol).concat(line)); })
1090
+ .join('\n');
1091
+ }
1092
+ replacedTemplate =
1093
+ replacedTemplate.substring(0, match.index + precol.length) +
1094
+ parameterValue +
1095
+ replacedTemplate.substring(match.index + precol.length + parameterName.length + 2);
1096
+ };
1097
+ while ((match = /^(?<precol>.*){(?<parameterName>\w+)}(.*)/m /* <- Not global */
1098
+ .exec(replacedTemplate))) {
1099
+ _loop_1();
1100
+ }
1101
+ // [💫] Check if there are parameters that are not closed properly
1102
+ if (/{\w+$/.test(replacedTemplate)) {
1103
+ throw new PipelineExecutionError('Parameter is not closed');
1104
+ }
1105
+ // [💫] Check if there are parameters that are not opened properly
1106
+ if (/^\w+}/.test(replacedTemplate)) {
1107
+ throw new PipelineExecutionError('Parameter is not opened');
1108
+ }
1109
+ return replacedTemplate;
1110
+ }
1111
+
1112
+ /**
1113
+ * Function computeUsage will create price per one token based on the string value found on openai page
1114
+ *
1115
+ * @private within the repository, used only as internal helper for `OPENAI_MODELS`
1116
+ */
1117
+ function computeUsage(value) {
1118
+ var _a = __read(value.split(' / '), 2), price = _a[0], tokens = _a[1];
1119
+ return parseFloat(price.replace('$', '')) / parseFloat(tokens.replace('M tokens', '')) / 1000000;
1120
+ }
1121
+
1122
+ /**
1123
+ * List of available Anthropic Claude models with pricing
1124
+ *
1125
+ * Note: Done at 2024-05-25
1126
+ *
1127
+ * @see https://docs.anthropic.com/en/docs/models-overview
1128
+ * @public exported from `@promptbook/anthropic-claude`
1129
+ */
1130
+ var ANTHROPIC_CLAUDE_MODELS = [
1131
+ {
1132
+ modelVariant: 'CHAT',
1133
+ modelTitle: 'Claude 3 Opus',
1134
+ modelName: 'claude-3-opus-20240229',
1135
+ pricing: {
1136
+ prompt: computeUsage("$15.00 / 1M tokens"),
1137
+ output: computeUsage("$75.00 / 1M tokens"),
1138
+ },
1139
+ },
1140
+ {
1141
+ modelVariant: 'CHAT',
1142
+ modelTitle: 'Claude 3 Sonnet',
1143
+ modelName: 'claude-3-sonnet-20240229',
1144
+ pricing: {
1145
+ prompt: computeUsage("$3.00 / 1M tokens"),
1146
+ output: computeUsage("$15.00 / 1M tokens"),
1147
+ },
1148
+ },
1149
+ {
1150
+ modelVariant: 'CHAT',
1151
+ modelTitle: 'Claude 3 Haiku',
1152
+ modelName: ' claude-3-haiku-20240307',
1153
+ pricing: {
1154
+ prompt: computeUsage("$0.25 / 1M tokens"),
1155
+ output: computeUsage("$1.25 / 1M tokens"),
1156
+ },
1157
+ },
1158
+ {
1159
+ modelVariant: 'CHAT',
1160
+ modelTitle: 'Claude 2.1',
1161
+ modelName: 'claude-2.1',
1162
+ pricing: {
1163
+ prompt: computeUsage("$8.00 / 1M tokens"),
1164
+ output: computeUsage("$24.00 / 1M tokens"),
1165
+ },
1166
+ },
1167
+ {
1168
+ modelVariant: 'CHAT',
1169
+ modelTitle: 'Claude 2',
1170
+ modelName: 'claude-2.0',
1171
+ pricing: {
1172
+ prompt: computeUsage("$8.00 / 1M tokens"),
1173
+ output: computeUsage("$24.00 / 1M tokens"),
1174
+ },
1175
+ },
1176
+ {
1177
+ modelVariant: 'CHAT',
1178
+ modelTitle: ' Claude Instant 1.2',
1179
+ modelName: 'claude-instant-1.2',
1180
+ pricing: {
1181
+ prompt: computeUsage("$0.80 / 1M tokens"),
1182
+ output: computeUsage("$2.40 / 1M tokens"),
1183
+ },
1184
+ },
1185
+ // TODO: !!! Claude 1 and 2 has also completion versions - ask Hoagy
1186
+ ];
1187
+ /**
1188
+ * Note: [🤖] Add models of new variant
1189
+ * TODO: [🧠] !!! Add embedding models OR Anthropic has only chat+completion models?
1190
+ * TODO: [🧠] Some mechanism to propagate unsureness
1191
+ * TODO: [🧠][👮‍♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...
1192
+ * TODO: [🕚] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing
1193
+ */
1194
+
1195
+ /**
1196
+ * Execution Tools for calling Anthropic Claude API.
1197
+ *
1198
+ * @public exported from `@promptbook/anthropic-claude`
1199
+ * @deprecated use `createAnthropicClaudeExecutionTools` instead
1200
+ */
1201
+ var AnthropicClaudeExecutionTools = /** @class */ (function () {
1202
+ /**
1203
+ * Creates Anthropic Claude Execution Tools.
1204
+ *
1205
+ * @param options which are relevant are directly passed to the Anthropic Claude client
1206
+ */
1207
+ function AnthropicClaudeExecutionTools(options) {
1208
+ if (options === void 0) { options = { isProxied: false }; }
1209
+ this.options = options;
1210
+ // Note: Passing only Anthropic Claude relevant options to Anthropic constructor
1211
+ var anthropicOptions = __assign({}, options);
1212
+ delete anthropicOptions.isVerbose;
1213
+ delete anthropicOptions.isProxied;
1214
+ this.client = new Anthropic(anthropicOptions);
1215
+ }
1216
+ Object.defineProperty(AnthropicClaudeExecutionTools.prototype, "title", {
1217
+ get: function () {
1218
+ return 'Anthropic Claude';
1219
+ },
1220
+ enumerable: false,
1221
+ configurable: true
1222
+ });
1223
+ Object.defineProperty(AnthropicClaudeExecutionTools.prototype, "description", {
1224
+ get: function () {
1225
+ return 'Use all models provided by Anthropic Claude';
1226
+ },
1227
+ enumerable: false,
1228
+ configurable: true
1229
+ });
1230
+ /**
1231
+ * Calls Anthropic Claude API to use a chat model.
1232
+ */
1233
+ AnthropicClaudeExecutionTools.prototype.callChatModel = function (prompt) {
1234
+ return __awaiter(this, void 0, void 0, function () {
1235
+ var content, parameters, modelRequirements, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
1236
+ return __generator(this, function (_a) {
1237
+ switch (_a.label) {
1238
+ case 0:
1239
+ if (this.options.isVerbose) {
1240
+ console.info('💬 Anthropic Claude callChatModel call');
1241
+ }
1242
+ content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
1243
+ // TODO: [☂] Use here more modelRequirements
1244
+ if (modelRequirements.modelVariant !== 'CHAT') {
1245
+ throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
1246
+ }
1247
+ modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
1248
+ rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
1249
+ rawRequest = {
1250
+ model: modelRequirements.modelName || this.getDefaultChatModel().modelName,
1251
+ max_tokens: modelRequirements.maxTokens || 4096,
1252
+ // <- TODO: [🌾] Make some global max cap for maxTokens
1253
+ temperature: modelRequirements.temperature,
1254
+ system: modelRequirements.systemMessage,
1255
+ // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
1256
+ // <- Note: [🧆]
1257
+ messages: [
1258
+ {
1259
+ role: 'user',
1260
+ content: rawPromptContent,
1261
+ },
1262
+ ],
1263
+ // TODO: Is here some equivalent of user identification?> user: this.options.user,
1264
+ };
1265
+ start = getCurrentIsoDate();
1266
+ if (this.options.isVerbose) {
1267
+ console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
1268
+ }
1269
+ return [4 /*yield*/, this.client.messages.create(rawRequest)];
1270
+ case 1:
1271
+ rawResponse = _a.sent();
1272
+ if (this.options.isVerbose) {
1273
+ console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
1274
+ }
1275
+ if (!rawResponse.content[0]) {
1276
+ throw new PipelineExecutionError('No content from Anthropic Claude');
1277
+ }
1278
+ if (rawResponse.content.length > 1) {
1279
+ throw new PipelineExecutionError('More than one content blocks from Anthropic Claude');
1280
+ }
1281
+ resultContent = rawResponse.content[0].text;
1282
+ // eslint-disable-next-line prefer-const
1283
+ complete = getCurrentIsoDate();
1284
+ usage = {
1285
+ price: { value: 0, isUncertain: true } /* <- TODO: [🐞] Compute usage */,
1286
+ input: __assign({ tokensCount: uncertainNumber(rawResponse.usage.input_tokens) }, computeUsageCounts(prompt.content)),
1287
+ output: __assign({ tokensCount: uncertainNumber(rawResponse.usage.output_tokens) }, computeUsageCounts(prompt.content)),
1288
+ };
1289
+ return [2 /*return*/, {
1290
+ content: resultContent,
1291
+ modelName: rawResponse.model,
1292
+ timing: {
1293
+ start: start,
1294
+ complete: complete,
1295
+ },
1296
+ usage: usage,
1297
+ rawPromptContent: rawPromptContent,
1298
+ rawRequest: rawRequest,
1299
+ rawResponse: rawResponse,
1300
+ // <- [🗯]
1301
+ }];
1302
+ }
1303
+ });
1304
+ });
1305
+ };
1306
+ /*
1307
+ TODO: [👏]
1308
+ public async callCompletionModel(
1309
+ prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements'>,
1310
+ ): Promise<PromptCompletionResult> {
1311
+
1312
+ if (this.options.isVerbose) {
1313
+ console.info('🖋 Anthropic Claude callCompletionModel call');
1314
+ }
1315
+
1316
+ const { content, parameters, modelRequirements } = prompt;
1317
+
1318
+ // TODO: [☂] Use here more modelRequirements
1319
+ if (modelRequirements.modelVariant !== 'COMPLETION') {
1320
+ throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
1321
+ }
1322
+
1323
+ const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
1324
+ const modelSettings = {
1325
+ model: modelName,
1326
+ max_tokens: modelRequirements.maxTokens || 2000, // <- Note: 2000 is for lagacy reasons
1327
+ // <- TODO: [🌾] Make some global max cap for maxTokens
1328
+ // <- TODO: Use here `systemMessage`, `temperature` and `seed`
1329
+ };
1330
+
1331
+ const rawRequest: xxxx.Completions.CompletionCreateParamsNonStreaming = {
1332
+ ...modelSettings,
1333
+ prompt: rawPromptContent,
1334
+ user: this.options.user,
1335
+ };
1336
+ const start: string_date_iso8601 = getCurrentIsoDate();
1337
+ let complete: string_date_iso8601;
1338
+
1339
+ if (this.options.isVerbose) {
1340
+ console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
1341
+ }
1342
+ const rawResponse = await this.client.completions.create(rawRequest);
1343
+ if (this.options.isVerbose) {
1344
+ console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
1345
+ }
1346
+
1347
+ if (!rawResponse.choices[0]) {
1348
+ throw new PipelineExecutionError('No choises from Anthropic Claude');
1349
+ }
1350
+
1351
+ if (rawResponse.choices.length > 1) {
1352
+ // TODO: This should be maybe only warning
1353
+ throw new PipelineExecutionError('More than one choise from Anthropic Claude');
1354
+ }
1355
+
1356
+ const resultContent = rawResponse.choices[0].text;
1357
+ // eslint-disable-next-line prefer-const
1358
+ complete = getCurrentIsoDate();
1359
+ const usage = { price: 'UNKNOWN', inputTokens: 0, outputTokens: 0 /* <- TODO: [🐞] Compute usage * / } satisfies PromptResultUsage;
1360
+
1361
+
1362
+
1363
+ return {
1364
+ content: resultContent,
1365
+ modelName: rawResponse.model || model,
1366
+ timing: {
1367
+ start,
1368
+ complete,
1369
+ },
1370
+ usage,
1371
+ rawResponse,
1372
+ // <- [🗯]
1373
+ };
1374
+ }
1375
+ */
1376
+ // <- Note: [🤖] callXxxModel
1377
+ /**
1378
+ * Get the model that should be used as default
1379
+ */
1380
+ AnthropicClaudeExecutionTools.prototype.getDefaultModel = function (defaultModelName) {
1381
+ var model = ANTHROPIC_CLAUDE_MODELS.find(function (_a) {
1382
+ var modelName = _a.modelName;
1383
+ return modelName.startsWith(defaultModelName);
1384
+ });
1385
+ if (model === undefined) {
1386
+ throw new UnexpectedError(spaceTrim$1(function (block) {
1387
+ return "\n Cannot find model in OpenAI models with name \"".concat(defaultModelName, "\" which should be used as default.\n\n Available models:\n ").concat(block(ANTHROPIC_CLAUDE_MODELS.map(function (_a) {
1388
+ var modelName = _a.modelName;
1389
+ return "- \"".concat(modelName, "\"");
1390
+ }).join('\n')), "\n\n ");
1391
+ }));
1392
+ }
1393
+ return model;
1394
+ };
1395
+ /**
1396
+ * Default model for chat variant.
1397
+ */
1398
+ AnthropicClaudeExecutionTools.prototype.getDefaultChatModel = function () {
1399
+ return this.getDefaultModel('claude-3-opus');
1400
+ };
1401
+ // <- Note: [🤖] getDefaultXxxModel
1402
+ /**
1403
+ * List all available Anthropic Claude models that can be used
1404
+ */
1405
+ AnthropicClaudeExecutionTools.prototype.listModels = function () {
1406
+ return ANTHROPIC_CLAUDE_MODELS;
1407
+ };
1408
+ return AnthropicClaudeExecutionTools;
1409
+ }());
1410
+ /**
1411
+ * TODO: [🍆] JSON mode
1412
+ * TODO: [🧠] Maybe handle errors via transformAnthropicError (like transformAzureError)
1413
+ * TODO: Maybe create a common util for callChatModel and callCompletionModel
1414
+ * TODO: Maybe make a custom AnthropicClaudeError
1415
+ * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
1416
+ * TODO: [🍜] !!!!!! Auto use anonymous server in browser
1417
+ * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
1418
+ * TODO: [📅] Maybe instead of `RemoteLlmExecutionToolsOptions` use `proxyWithAnonymousRemoteServer` (if implemented)
1419
+ */
1420
+
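The getDefaultModel helper above resolves the default by prefix (startsWith), so the hard-coded 'claude-3-opus' default matches any entry in ANTHROPIC_CLAUDE_MODELS whose name begins with that string. A minimal sketch of that behaviour (the constructor options shape is an assumption, only the prototype methods are visible in this hunk):

    // Hypothetical construction - the options shape is not shown in this hunk:
    const anthropicTools = new AnthropicClaudeExecutionTools({ apiKey: 'sk-ant-...' });

    // Resolved by prefix match against ANTHROPIC_CLAUDE_MODELS,
    // e.g. a dated entry such as 'claude-3-opus-20240229' would satisfy 'claude-3-opus':
    console.log(anthropicTools.getDefaultChatModel().modelName);

    // Lists the whole static model table:
    console.log(anthropicTools.listModels().map(({ modelName }) => modelName));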
1421
+ /**
1422
+ * Execution Tools for calling Anthropic Claude API.
1423
+ *
1424
+ * @public exported from `@promptbook/anthropic-claude`
1425
+ */
1426
+ function createAnthropicClaudeExecutionTools(options) {
1427
+ if (options.isProxied) {
1428
+ return new RemoteLlmExecutionTools(__assign(__assign({}, options), { isAnonymous: true, llmToolsConfiguration: [
1429
+ {
1430
+ title: 'Anthropic Claude (proxied)',
1431
+ packageName: '@promptbook/anthropic-claude',
1432
+ className: 'AnthropicClaudeExecutionTools',
1433
+ options: __assign(__assign({}, options), { isProxied: false }),
1434
+ },
1435
+ ] }));
1436
+ }
1437
+ return new AnthropicClaudeExecutionTools(options);
1438
+ }
1439
+ /**
1440
+ * TODO: !!!!!! Make this with all LLM providers
1441
+ * TODO: !!!!!! Maybe change all `new AnthropicClaudeExecutionTools` -> `createAnthropicClaudeExecutionTools` in manual
1442
+ */
1443
+
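A usage sketch for the new factory: with isProxied set to true the options are wrapped into an anonymous RemoteLlmExecutionTools configuration (as the branch above shows), otherwise the Anthropic tools are constructed directly. The apiKey and remoteUrl option names are assumptions, they are not visible in this hunk:

    import { createAnthropicClaudeExecutionTools } from '@promptbook/anthropic-claude';

    // Direct mode - calls the Anthropic API from this process:
    const directTools = createAnthropicClaudeExecutionTools({
        apiKey: process.env.ANTHROPIC_CLAUDE_API_KEY, // <- assumed option name
        isVerbose: true,
    });

    // Proxied (anonymous) mode - the same options travel to a remote Promptbook server
    // inside llmToolsConfiguration, with isProxied flipped back to false there:
    const proxiedTools = createAnthropicClaudeExecutionTools({
        apiKey: process.env.ANTHROPIC_CLAUDE_API_KEY,
        isProxied: true,
        remoteUrl: 'https://example.org/promptbook', // <- hypothetical remote server option
    });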
1444
+ /**
1445
+ * List of available OpenAI models with pricing
1446
+ *
1447
+ * Note: Done at 2024-05-20
1448
+ *
1449
+ * @see https://platform.openai.com/docs/models/
1450
+ * @see https://openai.com/api/pricing/
1451
+ * @public exported from `@promptbook/openai`
1452
+ */
1453
+ var OPENAI_MODELS = [
1454
+ /*/
1455
+ {
1456
+ modelTitle: 'dall-e-3',
1457
+ modelName: 'dall-e-3',
1458
+ },
1459
+ /**/
1460
+ /*/
1461
+ {
1462
+ modelTitle: 'whisper-1',
1463
+ modelName: 'whisper-1',
1464
+ },
1465
+ /**/
1466
+ /**/
1467
+ {
1468
+ modelVariant: 'COMPLETION',
1469
+ modelTitle: 'davinci-002',
1470
+ modelName: 'davinci-002',
1471
+ pricing: {
1472
+ prompt: computeUsage("$2.00 / 1M tokens"),
1473
+ output: computeUsage("$2.00 / 1M tokens"), // <- not sure
1474
+ },
1475
+ },
1476
+ /**/
1477
+ /*/
1478
+ {
1479
+ modelTitle: 'dall-e-2',
1480
+ modelName: 'dall-e-2',
1481
+ },
1482
+ /**/
1483
+ /**/
1484
+ {
1485
+ modelVariant: 'CHAT',
1486
+ modelTitle: 'gpt-3.5-turbo-16k',
1487
+ modelName: 'gpt-3.5-turbo-16k',
1488
+ pricing: {
1489
+ prompt: computeUsage("$3.00 / 1M tokens"),
1490
+ output: computeUsage("$4.00 / 1M tokens"),
1491
+ },
1492
+ },
1493
+ /**/
1494
+ /*/
1495
+ {
1496
+ modelTitle: 'tts-1-hd-1106',
1497
+ modelName: 'tts-1-hd-1106',
1498
+ },
1499
+ /**/
1500
+ /*/
1501
+ {
1502
+ modelTitle: 'tts-1-hd',
1503
+ modelName: 'tts-1-hd',
1504
+ },
1505
+ /**/
1506
+ /**/
1507
+ {
1508
+ modelVariant: 'CHAT',
1509
+ modelTitle: 'gpt-4',
1510
+ modelName: 'gpt-4',
1511
+ pricing: {
1512
+ prompt: computeUsage("$30.00 / 1M tokens"),
1513
+ output: computeUsage("$60.00 / 1M tokens"),
1514
+ },
1515
+ },
1516
+ /**/
1517
+ /**/
1518
+ {
1519
+ modelVariant: 'CHAT',
1520
+ modelTitle: 'gpt-4-32k',
1521
+ modelName: 'gpt-4-32k',
1522
+ pricing: {
1523
+ prompt: computeUsage("$60.00 / 1M tokens"),
1524
+ output: computeUsage("$120.00 / 1M tokens"),
1525
+ },
1526
+ },
1527
+ /**/
1528
+ /*/
1529
+ {
1530
+ modelVariant: 'CHAT',
1531
+ modelTitle: 'gpt-4-0613',
1532
+ modelName: 'gpt-4-0613',
1533
+ pricing: {
1534
+ prompt: computeUsage(` / 1M tokens`),
1535
+ output: computeUsage(` / 1M tokens`),
1536
+ },
1537
+ },
1538
+ /**/
1539
+ /**/
1540
+ {
1541
+ modelVariant: 'CHAT',
1542
+ modelTitle: 'gpt-4-turbo-2024-04-09',
1543
+ modelName: 'gpt-4-turbo-2024-04-09',
1544
+ pricing: {
1545
+ prompt: computeUsage("$10.00 / 1M tokens"),
1546
+ output: computeUsage("$30.00 / 1M tokens"),
1547
+ },
1548
+ },
1549
+ /**/
1550
+ /**/
1551
+ {
1552
+ modelVariant: 'CHAT',
1553
+ modelTitle: 'gpt-3.5-turbo-1106',
1554
+ modelName: 'gpt-3.5-turbo-1106',
1555
+ pricing: {
1556
+ prompt: computeUsage("$1.00 / 1M tokens"),
1557
+ output: computeUsage("$2.00 / 1M tokens"),
1558
+ },
1559
+ },
1560
+ /**/
1561
+ /**/
1562
+ {
1563
+ modelVariant: 'CHAT',
1564
+ modelTitle: 'gpt-4-turbo',
1565
+ modelName: 'gpt-4-turbo',
1566
+ pricing: {
1567
+ prompt: computeUsage("$10.00 / 1M tokens"),
1568
+ output: computeUsage("$30.00 / 1M tokens"),
1569
+ },
1570
+ },
1571
+ /**/
1572
+ /**/
1573
+ {
1574
+ modelVariant: 'COMPLETION',
1575
+ modelTitle: 'gpt-3.5-turbo-instruct-0914',
1576
+ modelName: 'gpt-3.5-turbo-instruct-0914',
1577
+ pricing: {
1578
+ prompt: computeUsage("$1.50 / 1M tokens"),
1579
+ output: computeUsage("$2.00 / 1M tokens"), // <- For gpt-3.5-turbo-instruct
1580
+ },
1581
+ },
1582
+ /**/
1583
+ /**/
1584
+ {
1585
+ modelVariant: 'COMPLETION',
1586
+ modelTitle: 'gpt-3.5-turbo-instruct',
1587
+ modelName: 'gpt-3.5-turbo-instruct',
1588
+ pricing: {
1589
+ prompt: computeUsage("$1.50 / 1M tokens"),
1590
+ output: computeUsage("$2.00 / 1M tokens"),
1591
+ },
1592
+ },
1593
+ /**/
1594
+ /*/
1595
+ {
1596
+ modelTitle: 'tts-1',
1597
+ modelName: 'tts-1',
1598
+ },
1599
+ /**/
1600
+ /**/
1601
+ {
1602
+ modelVariant: 'CHAT',
1603
+ modelTitle: 'gpt-3.5-turbo',
1604
+ modelName: 'gpt-3.5-turbo',
1605
+ pricing: {
1606
+ prompt: computeUsage("$3.00 / 1M tokens"),
1607
+ output: computeUsage("$6.00 / 1M tokens"), // <- Not sure, refer to gpt-3.5-turbo in Fine-tuning models
1608
+ },
1609
+ },
1610
+ /**/
1611
+ /**/
1612
+ {
1613
+ modelVariant: 'CHAT',
1614
+ modelTitle: 'gpt-3.5-turbo-0301',
1615
+ modelName: 'gpt-3.5-turbo-0301',
1616
+ pricing: {
1617
+ prompt: computeUsage("$1.50 / 1M tokens"),
1618
+ output: computeUsage("$2.00 / 1M tokens"),
1619
+ },
1620
+ },
1621
+ /**/
1622
+ /**/
1623
+ {
1624
+ modelVariant: 'COMPLETION',
1625
+ modelTitle: 'babbage-002',
1626
+ modelName: 'babbage-002',
1627
+ pricing: {
1628
+ prompt: computeUsage("$0.40 / 1M tokens"),
1629
+ output: computeUsage("$0.40 / 1M tokens"), // <- Not sure
1630
+ },
1631
+ },
1632
+ /**/
1633
+ /**/
1634
+ {
1635
+ modelVariant: 'CHAT',
1636
+ modelTitle: 'gpt-4-1106-preview',
1637
+ modelName: 'gpt-4-1106-preview',
1638
+ pricing: {
1639
+ prompt: computeUsage("$10.00 / 1M tokens"),
1640
+ output: computeUsage("$30.00 / 1M tokens"),
1641
+ },
1642
+ },
1643
+ /**/
1644
+ /**/
1645
+ {
1646
+ modelVariant: 'CHAT',
1647
+ modelTitle: 'gpt-4-0125-preview',
1648
+ modelName: 'gpt-4-0125-preview',
1649
+ pricing: {
1650
+ prompt: computeUsage("$10.00 / 1M tokens"),
1651
+ output: computeUsage("$30.00 / 1M tokens"),
1652
+ },
1653
+ },
1654
+ /**/
1655
+ /*/
1656
+ {
1657
+ modelTitle: 'tts-1-1106',
1658
+ modelName: 'tts-1-1106',
1659
+ },
1660
+ /**/
1661
+ /**/
1662
+ {
1663
+ modelVariant: 'CHAT',
1664
+ modelTitle: 'gpt-3.5-turbo-0125',
1665
+ modelName: 'gpt-3.5-turbo-0125',
1666
+ pricing: {
1667
+ prompt: computeUsage("$0.50 / 1M tokens"),
1668
+ output: computeUsage("$1.50 / 1M tokens"),
1669
+ },
1670
+ },
1671
+ /**/
1672
+ /**/
1673
+ {
1674
+ modelVariant: 'CHAT',
1675
+ modelTitle: 'gpt-4-turbo-preview',
1676
+ modelName: 'gpt-4-turbo-preview',
1677
+ pricing: {
1678
+ prompt: computeUsage("$10.00 / 1M tokens"),
1679
+ output: computeUsage("$30.00 / 1M tokens"), // <- Not sure, just for gpt-4-turbo
1680
+ },
1681
+ },
1682
+ /**/
1683
+ /**/
1684
+ {
1685
+ modelVariant: 'EMBEDDING',
1686
+ modelTitle: 'text-embedding-3-large',
1687
+ modelName: 'text-embedding-3-large',
1688
+ pricing: {
1689
+ prompt: computeUsage("$0.13 / 1M tokens"),
1690
+ // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
1691
+ output: 0, // <- Note: [🆖] In Embedding models you don't pay for output
1692
+ },
1693
+ },
1694
+ /**/
1695
+ /**/
1696
+ {
1697
+ modelVariant: 'EMBEDDING',
1698
+ modelTitle: 'text-embedding-3-small',
1699
+ modelName: 'text-embedding-3-small',
1700
+ pricing: {
1701
+ prompt: computeUsage("$0.02 / 1M tokens"),
1702
+ // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
1703
+ output: 0, // <- Note: [🆖] In Embedding models you don't pay for output
1704
+ },
1705
+ },
1706
+ /**/
1707
+ /**/
1708
+ {
1709
+ modelVariant: 'CHAT',
1710
+ modelTitle: 'gpt-3.5-turbo-0613',
1711
+ modelName: 'gpt-3.5-turbo-0613',
1712
+ pricing: {
1713
+ prompt: computeUsage("$1.50 / 1M tokens"),
1714
+ output: computeUsage("$2.00 / 1M tokens"),
1715
+ },
1716
+ },
1717
+ /**/
1718
+ /**/
1719
+ {
1720
+ modelVariant: 'EMBEDDING',
1721
+ modelTitle: 'text-embedding-ada-002',
1722
+ modelName: 'text-embedding-ada-002',
1723
+ pricing: {
1724
+ prompt: computeUsage("$0.1 / 1M tokens"),
1725
+ // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
1726
+ output: 0, // <- Note: [🆖] In Embedding models you don't pay for output
1727
+ },
1728
+ },
1729
+ /**/
1730
+ /*/
1731
+ {
1732
+ modelVariant: 'CHAT',
1733
+ modelTitle: 'gpt-4-1106-vision-preview',
1734
+ modelName: 'gpt-4-1106-vision-preview',
1735
+ },
1736
+ /**/
1737
+ /*/
1738
+ {
1739
+ modelVariant: 'CHAT',
1740
+ modelTitle: 'gpt-4-vision-preview',
1741
+ modelName: 'gpt-4-vision-preview',
1742
+ pricing: {
1743
+ prompt: computeUsage(`$10.00 / 1M tokens`),
1744
+ output: computeUsage(`$30.00 / 1M tokens`),
1745
+ },
1746
+ },
1747
+ /**/
1748
+ /**/
1749
+ {
1750
+ modelVariant: 'CHAT',
1751
+ modelTitle: 'gpt-4o-2024-05-13',
1752
+ modelName: 'gpt-4o-2024-05-13',
1753
+ pricing: {
1754
+ prompt: computeUsage("$5.00 / 1M tokens"),
1755
+ output: computeUsage("$15.00 / 1M tokens"),
1756
+ },
1757
+ },
1758
+ /**/
1759
+ /**/
1760
+ {
1761
+ modelVariant: 'CHAT',
1762
+ modelTitle: 'gpt-4o',
1763
+ modelName: 'gpt-4o',
1764
+ pricing: {
1765
+ prompt: computeUsage("$5.00 / 1M tokens"),
1766
+ output: computeUsage("$15.00 / 1M tokens"),
1767
+ },
1768
+ },
1769
+ /**/
1770
+ /**/
1771
+ {
1772
+ modelVariant: 'CHAT',
1773
+ modelTitle: 'gpt-3.5-turbo-16k-0613',
1774
+ modelName: 'gpt-3.5-turbo-16k-0613',
1775
+ pricing: {
1776
+ prompt: computeUsage("$3.00 / 1M tokens"),
1777
+ output: computeUsage("$4.00 / 1M tokens"),
1778
+ },
1779
+ },
1780
+ /**/
1781
+ ];
1782
+ /**
1783
+ * Note: [🤖] Add models of new variant
1784
+ * TODO: [🧠] Some mechanism to propagate unsureness
1785
+ * TODO: [🕚][👮‍♀️] Make this list dynamic - modelNames can be listed dynamically, but not modelVariant, legacy status, context length and pricing
1786
+ * TODO: [🧠][👮‍♀️] Put more info here like description, isVision, trainingDateCutoff, languages, strengths (top-level performance, intelligence, fluency, and understanding), contextWindow,...
1787
+ * @see https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4
1788
+ * @see https://openai.com/api/pricing/
1789
+ * @see /other/playground/playground.ts
1790
+ * TODO: [🍓] Make better
1791
+ * TODO: Change model titles to human-readable form, e.g. "gpt-4-turbo-2024-04-09" -> "GPT-4 Turbo (2024-04-09)"
1792
+ * TODO: [🚸] Not all models are compatible with JSON mode, add this information here and use it
1793
+ */
1794
+
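The pricing entries above feed the cost formula used by computeOpenaiUsage further below (inputTokens * pricing.prompt + outputTokens * pricing.output). A small worked sketch, assuming computeUsage("$5.00 / 1M tokens") resolves to a per-token rate of 5.00 / 1 000 000:

    // gpt-4o rates from the table above, expressed per token:
    const promptRate = 5.0 / 1_000_000; // "$5.00 / 1M tokens"
    const outputRate = 15.0 / 1_000_000; // "$15.00 / 1M tokens"

    // Illustrative call with 1200 prompt tokens and 300 completion tokens:
    const price = 1200 * promptRate + 300 * outputRate;
    console.log(price); // ~0.0105 (USD)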
1795
+ /**
1796
+ * Execution Tools for calling Azure OpenAI API.
1797
+ *
1798
+ * @public exported from `@promptbook/azure-openai`
1799
+ */
1800
+ var AzureOpenAiExecutionTools = /** @class */ (function () {
1801
+ /**
1802
+ * Creates Azure OpenAI Execution Tools.
1803
+ *
1804
+ * @param options Options which are relevant are passed directly to the Azure OpenAI client
1805
+ */
1806
+ function AzureOpenAiExecutionTools(options) {
1807
+ this.options = options;
1808
+ this.client = new OpenAIClient("https://".concat(options.resourceName, ".openai.azure.com/"), new AzureKeyCredential(options.apiKey));
1809
+ }
1810
+ Object.defineProperty(AzureOpenAiExecutionTools.prototype, "title", {
1811
+ get: function () {
1812
+ return 'Azure OpenAI';
1813
+ },
1814
+ enumerable: false,
1815
+ configurable: true
1816
+ });
1817
+ Object.defineProperty(AzureOpenAiExecutionTools.prototype, "description", {
1818
+ get: function () {
1819
+ return 'Use all models trained by OpenAI provided by Azure';
1820
+ },
1821
+ enumerable: false,
1822
+ configurable: true
1823
+ });
1824
+ /**
1825
+ * Calls Azure OpenAI API to use a chat model.
1826
+ */
1827
+ AzureOpenAiExecutionTools.prototype.callChatModel = function (prompt) {
1828
+ var _a, _b;
1829
+ return __awaiter(this, void 0, void 0, function () {
1830
+ var content, parameters, modelRequirements, modelName, modelSettings, rawPromptContent, messages, start, complete, rawRequest, rawResponse, resultContent, usage, error_1;
1831
+ var _c;
1832
+ return __generator(this, function (_d) {
1833
+ switch (_d.label) {
1834
+ case 0:
1835
+ if (this.options.isVerbose) {
1836
+ console.info('💬 Azure OpenAI callChatModel call');
1837
+ }
1838
+ content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
1839
+ // TODO: [☂] Use here more modelRequirements
1840
+ if (modelRequirements.modelVariant !== 'CHAT') {
1841
+ throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
1842
+ }
1843
+ _d.label = 1;
1844
+ case 1:
1845
+ _d.trys.push([1, 3, , 4]);
1846
+ modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
1847
+ modelSettings = {
1848
+ maxTokens: modelRequirements.maxTokens,
1849
+ // <- TODO: [🌾] Make some global max cap for maxTokens
1850
+ temperature: modelRequirements.temperature,
1851
+ user: this.options.user,
1852
+ // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
1853
+ // <- Note: [🧆]
1854
+ };
1855
+ rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
1856
+ messages = __spreadArray(__spreadArray([], __read((modelRequirements.systemMessage === undefined
1857
+ ? []
1858
+ : [
1859
+ {
1860
+ role: 'system',
1861
+ content: modelRequirements.systemMessage,
1862
+ },
1863
+ ])), false), [
1864
+ {
1865
+ role: 'user',
1866
+ content: rawPromptContent,
1867
+ },
1868
+ ], false);
1869
+ start = getCurrentIsoDate();
1870
+ complete = void 0;
1871
+ if (this.options.isVerbose) {
1872
+ console.info(colors.bgWhite('messages'), JSON.stringify(messages, null, 4));
1873
+ }
1874
+ rawRequest = [modelName, messages, modelSettings];
1875
+ return [4 /*yield*/, (_c = this.client).getChatCompletions.apply(_c, __spreadArray([], __read(rawRequest), false))];
1876
+ case 2:
1877
+ rawResponse = _d.sent();
1878
+ if (this.options.isVerbose) {
1879
+ console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
1880
+ }
1881
+ if (!rawResponse.choices[0]) {
1882
+ throw new PipelineExecutionError('No choices from Azure OpenAI');
1883
+ }
1884
+ if (rawResponse.choices.length > 1) {
1885
+ // TODO: This should maybe be only a warning
1886
+ throw new PipelineExecutionError('More than one choice from Azure OpenAI');
1887
+ }
1888
+ if (!rawResponse.choices[0].message || !rawResponse.choices[0].message.content) {
1889
+ throw new PipelineExecutionError('Empty response from Azure OpenAI');
1890
+ }
1891
+ resultContent = rawResponse.choices[0].message.content;
1892
+ // eslint-disable-next-line prefer-const
1893
+ complete = getCurrentIsoDate();
1894
+ usage = {
1895
+ price: uncertainNumber() /* <- TODO: [🐞] Compute usage */,
1896
+ input: __assign({ tokensCount: uncertainNumber((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.promptTokens) }, computeUsageCounts(prompt.content)),
1897
+ output: __assign({ tokensCount: uncertainNumber((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completionTokens) }, computeUsageCounts(resultContent)),
1898
+ };
1899
+ return [2 /*return*/, {
1900
+ content: resultContent,
1901
+ modelName: modelName,
1902
+ timing: {
1903
+ start: start,
1904
+ complete: complete,
1905
+ },
1906
+ usage: usage,
1907
+ rawPromptContent: rawPromptContent,
1908
+ rawRequest: rawRequest,
1909
+ rawResponse: rawResponse,
1910
+ // <- [🗯]
1911
+ }];
1912
+ case 3:
1913
+ error_1 = _d.sent();
1914
+ throw this.transformAzureError(error_1);
1915
+ case 4: return [2 /*return*/];
1916
+ }
1917
+ });
1918
+ });
1919
+ };
1920
+ /**
1921
+ * Calls Azure OpenAI API to use a completion model.
1922
+ */
1923
+ AzureOpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
1924
+ var _a, _b;
1925
+ return __awaiter(this, void 0, void 0, function () {
1926
+ var content, parameters, modelRequirements, modelName, modelSettings, start, complete, rawPromptContent, rawRequest, rawResponse, resultContent, usage, error_2;
1927
+ var _c;
1928
+ return __generator(this, function (_d) {
1929
+ switch (_d.label) {
1930
+ case 0:
1931
+ if (this.options.isVerbose) {
1932
+ console.info('🖋 Azure OpenAI callCompletionModel call');
1933
+ }
1934
+ content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
1935
+ // TODO: [☂] Use here more modelRequirements
1936
+ if (modelRequirements.modelVariant !== 'COMPLETION') {
1937
+ throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
1938
+ }
1939
+ _d.label = 1;
1940
+ case 1:
1941
+ _d.trys.push([1, 3, , 4]);
1942
+ modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
1943
+ modelSettings = {
1944
+ maxTokens: modelRequirements.maxTokens || 2000,
1945
+ // <- TODO: [🌾] Make some global max cap for maxTokens
1946
+ temperature: modelRequirements.temperature,
1947
+ user: this.options.user,
1948
+ // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
1949
+ // <- Note: [🧆]
1950
+ };
1951
+ start = getCurrentIsoDate();
1952
+ complete = void 0;
1953
+ if (this.options.isVerbose) {
1954
+ console.info(colors.bgWhite('content'), JSON.stringify(content, null, 4));
1955
+ console.info(colors.bgWhite('parameters'), JSON.stringify(parameters, null, 4));
1956
+ }
1957
+ rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
1958
+ rawRequest = [
1959
+ modelName,
1960
+ [rawPromptContent],
1961
+ modelSettings,
1962
+ ];
1963
+ return [4 /*yield*/, (_c = this.client).getCompletions.apply(_c, __spreadArray([], __read(rawRequest), false))];
1964
+ case 2:
1965
+ rawResponse = _d.sent();
1966
+ if (this.options.isVerbose) {
1967
+ console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
1968
+ }
1969
+ if (!rawResponse.choices[0]) {
1970
+ throw new PipelineExecutionError('No choices from Azure OpenAI');
1971
+ }
1972
+ if (rawResponse.choices.length > 1) {
1973
+ // TODO: This should maybe be only a warning
1974
+ throw new PipelineExecutionError('More than one choice from Azure OpenAI');
1975
+ }
1976
+ resultContent = rawResponse.choices[0].text;
1977
+ // eslint-disable-next-line prefer-const
1978
+ complete = getCurrentIsoDate();
1979
+ usage = {
1980
+ price: uncertainNumber() /* <- TODO: [🐞] Compute usage */,
1981
+ input: __assign({ tokensCount: uncertainNumber((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.promptTokens) }, computeUsageCounts(prompt.content)),
1982
+ output: __assign({ tokensCount: uncertainNumber((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completionTokens) }, computeUsageCounts(resultContent)),
1983
+ };
1984
+ return [2 /*return*/, {
1985
+ content: resultContent,
1986
+ modelName: modelName,
1987
+ timing: {
1988
+ start: start,
1989
+ complete: complete,
1990
+ },
1991
+ usage: usage,
1992
+ rawPromptContent: rawPromptContent,
1993
+ rawRequest: rawRequest,
1994
+ rawResponse: rawResponse,
1995
+ // <- [🗯]
1996
+ }];
1997
+ case 3:
1998
+ error_2 = _d.sent();
1999
+ throw this.transformAzureError(error_2);
2000
+ case 4: return [2 /*return*/];
2001
+ }
2002
+ });
2003
+ });
2004
+ };
2005
+ // <- Note: [🤖] callXxxModel
2006
+ /**
2007
+ * Changes an Azure error (which is not a proper Error but a plain object) into a proper Error
2008
+ */
2009
+ AzureOpenAiExecutionTools.prototype.transformAzureError = function (azureError) {
2010
+ if (typeof azureError !== 'object' || azureError === null) {
2011
+ return new PipelineExecutionError("Unknown Azure OpenAI error");
2012
+ }
2013
+ var code = azureError.code, message = azureError.message;
2014
+ return new PipelineExecutionError("".concat(code, ": ").concat(message));
2015
+ };
2016
+ /**
2017
+ * List all available Azure OpenAI models that can be used
2018
+ */
2019
+ AzureOpenAiExecutionTools.prototype.listModels = function () {
2020
+ return __awaiter(this, void 0, void 0, function () {
2021
+ return __generator(this, function (_a) {
2022
+ // TODO: !!! Do some filtering here for which models are really available as deployments
2023
+ // @see https://management.azure.com/subscriptions/subscriptionId/resourceGroups/resourceGroupName/providers/Microsoft.CognitiveServices/accounts/accountName/deployments?api-version=2023-05-01
2024
+ return [2 /*return*/, OPENAI_MODELS.map(function (_a) {
2025
+ var modelTitle = _a.modelTitle, modelName = _a.modelName, modelVariant = _a.modelVariant;
2026
+ return ({
2027
+ modelTitle: "Azure ".concat(modelTitle),
2028
+ modelName: modelName,
2029
+ modelVariant: modelVariant,
2030
+ });
2031
+ })];
2032
+ });
2033
+ });
2034
+ };
2035
+ return AzureOpenAiExecutionTools;
2036
+ }());
2037
+ /**
2038
+ * TODO: Maybe create a common util for callChatModel and callCompletionModel
2039
+ * TODO: Maybe make custom AzureOpenaiError
2040
+ * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
2041
+ * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
2042
+ */
2043
+
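A minimal usage sketch for the Azure tools above. The option names (resourceName, deploymentName, apiKey) and the prompt shape follow what the constructor and callChatModel read; the {name} placeholder syntax of replaceParameters and the top-level await (ES-module context) are assumptions:

    import { AzureOpenAiExecutionTools } from '@promptbook/azure-openai';

    const azureTools = new AzureOpenAiExecutionTools({
        resourceName: 'my-resource', // <- becomes https://my-resource.openai.azure.com/
        deploymentName: 'gpt-4o', // <- used when the prompt does not name a model
        apiKey: process.env.AZURE_OPENAI_API_KEY,
        isVerbose: true,
    });

    const result = await azureTools.callChatModel({
        content: 'Hello {name}!',
        parameters: { name: 'Paul' },
        modelRequirements: { modelVariant: 'CHAT' },
    });
    console.log(result.content);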
2044
+ /**
2045
+ * Computes the usage of the OpenAI API based on the response from OpenAI
2046
+ *
2047
+ * @param promptContent The content of the prompt
2048
+ * @param resultContent The content of the result (for embedding prompts or failed prompts pass empty string)
2049
+ * @param rawResponse The raw response from OpenAI API
2050
+ * @throws {PipelineExecutionError} If the usage is not defined in the response from OpenAI
2051
+ * @private internal utility of `OpenAiExecutionTools`
2052
+ */
2053
+ function computeOpenaiUsage(promptContent, // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
2054
+ resultContent, rawResponse) {
2055
+ var _a, _b;
2056
+ if (rawResponse.usage === undefined) {
2057
+ throw new PipelineExecutionError('The usage is not defined in the response from OpenAI');
2058
+ }
2059
+ if (((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.prompt_tokens) === undefined) {
2060
+ throw new PipelineExecutionError('In OpenAI response `usage.prompt_tokens` not defined');
2061
+ }
2062
+ var inputTokens = rawResponse.usage.prompt_tokens;
2063
+ var outputTokens = ((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completion_tokens) || 0;
2064
+ var modelInfo = OPENAI_MODELS.find(function (model) { return model.modelName === rawResponse.model; });
2065
+ var price;
2066
+ if (modelInfo === undefined || modelInfo.pricing === undefined) {
2067
+ price = uncertainNumber();
2068
+ }
2069
+ else {
2070
+ price = uncertainNumber(inputTokens * modelInfo.pricing.prompt + outputTokens * modelInfo.pricing.output);
2071
+ }
2072
+ return {
2073
+ price: price,
2074
+ input: __assign({ tokensCount: uncertainNumber(rawResponse.usage.prompt_tokens) }, computeUsageCounts(promptContent)),
2075
+ output: __assign({ tokensCount: uncertainNumber(outputTokens) }, computeUsageCounts(resultContent)),
2076
+ };
2077
+ }
2078
+
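A sketch of calling the usage helper above directly. Only the fields the function actually reads (model, usage.prompt_tokens, usage.completion_tokens) are passed; a real raw response carries much more, and the .value access assumes uncertainNumber wraps its argument as { value }:

    const usage = computeOpenaiUsage('Hello!', 'Hi, how can I help?', {
        model: 'gpt-4o',
        usage: { prompt_tokens: 10, completion_tokens: 8 },
    });

    // With the gpt-4o pricing above:
    // usage.price.value ≈ 10 * (5.00 / 1M) + 8 * (15.00 / 1M) = 0.00017 USD
    // usage.input.tokensCount.value === 10
    // usage.output.tokensCount.value === 8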
2079
+ /**
2080
+ * Execution Tools for calling OpenAI API.
2081
+ *
2082
+ * @public exported from `@promptbook/openai`
2083
+ */
2084
+ var OpenAiExecutionTools = /** @class */ (function () {
2085
+ /**
2086
+ * Creates OpenAI Execution Tools.
2087
+ *
2088
+ * @param options Options which are relevant are passed directly to the OpenAI client
2089
+ */
2090
+ function OpenAiExecutionTools(options) {
2091
+ if (options === void 0) { options = {}; }
2092
+ this.options = options;
2093
+ // Note: Passing only OpenAI relevant options to OpenAI constructor
2094
+ var openAiOptions = __assign({}, options);
2095
+ delete openAiOptions.isVerbose;
2096
+ delete openAiOptions.user;
2097
+ this.client = new OpenAI(__assign({}, openAiOptions));
2098
+ }
2099
+ Object.defineProperty(OpenAiExecutionTools.prototype, "title", {
2100
+ get: function () {
2101
+ return 'OpenAI';
2102
+ },
2103
+ enumerable: false,
2104
+ configurable: true
2105
+ });
2106
+ Object.defineProperty(OpenAiExecutionTools.prototype, "description", {
2107
+ get: function () {
2108
+ return 'Use all models provided by OpenAI';
2109
+ },
2110
+ enumerable: false,
2111
+ configurable: true
2112
+ });
2113
+ /**
2114
+ * Calls OpenAI API to use a chat model.
2115
+ */
2116
+ OpenAiExecutionTools.prototype.callChatModel = function (prompt) {
2117
+ return __awaiter(this, void 0, void 0, function () {
2118
+ var content, parameters, modelRequirements, expectFormat, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
2119
+ return __generator(this, function (_a) {
2120
+ switch (_a.label) {
2121
+ case 0:
2122
+ if (this.options.isVerbose) {
2123
+ console.info('💬 OpenAI callChatModel call', { prompt: prompt });
2124
+ }
2125
+ content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements, expectFormat = prompt.expectFormat;
2126
+ // TODO: [☂] Use here more modelRequirements
2127
+ if (modelRequirements.modelVariant !== 'CHAT') {
2128
+ throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
2129
+ }
2130
+ modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
2131
+ modelSettings = {
2132
+ model: modelName,
2133
+ max_tokens: modelRequirements.maxTokens,
2134
+ // <- TODO: [🌾] Make some global max cap for maxTokens
2135
+ temperature: modelRequirements.temperature,
2136
+ // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
2137
+ // <- Note: [🧆]
2138
+ };
2139
+ if (expectFormat === 'JSON') {
2140
+ modelSettings.response_format = {
2141
+ type: 'json_object',
2142
+ };
2143
+ }
2144
+ rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
2145
+ rawRequest = __assign(__assign({}, modelSettings), { messages: __spreadArray(__spreadArray([], __read((modelRequirements.systemMessage === undefined
2146
+ ? []
2147
+ : [
2148
+ {
2149
+ role: 'system',
2150
+ content: modelRequirements.systemMessage,
2151
+ },
2152
+ ])), false), [
2153
+ {
2154
+ role: 'user',
2155
+ content: rawPromptContent,
2156
+ },
2157
+ ], false), user: this.options.user });
2158
+ start = getCurrentIsoDate();
2159
+ if (this.options.isVerbose) {
2160
+ console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
2161
+ }
2162
+ return [4 /*yield*/, this.client.chat.completions.create(rawRequest)];
2163
+ case 1:
2164
+ rawResponse = _a.sent();
2165
+ if (this.options.isVerbose) {
2166
+ console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
2167
+ }
2168
+ if (!rawResponse.choices[0]) {
2169
+ throw new PipelineExecutionError('No choices from OpenAI');
2170
+ }
2171
+ if (rawResponse.choices.length > 1) {
2172
+ // TODO: This should maybe be only a warning
2173
+ throw new PipelineExecutionError('More than one choice from OpenAI');
2174
+ }
2175
+ resultContent = rawResponse.choices[0].message.content;
2176
+ // eslint-disable-next-line prefer-const
2177
+ complete = getCurrentIsoDate();
2178
+ usage = computeOpenaiUsage(content, resultContent || '', rawResponse);
2179
+ if (resultContent === null) {
2180
+ throw new PipelineExecutionError('No response message from OpenAI');
2181
+ }
2182
+ return [2 /*return*/, {
2183
+ content: resultContent,
2184
+ modelName: rawResponse.model || modelName,
2185
+ timing: {
2186
+ start: start,
2187
+ complete: complete,
2188
+ },
2189
+ usage: usage,
2190
+ rawPromptContent: rawPromptContent,
2191
+ rawRequest: rawRequest,
2192
+ rawResponse: rawResponse,
2193
+ // <- [🗯]
2194
+ }];
2195
+ }
2196
+ });
2197
+ });
2198
+ };
2199
+ /**
2200
+ * Calls OpenAI API to use a completion model.
2201
+ */
2202
+ OpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
2203
+ return __awaiter(this, void 0, void 0, function () {
2204
+ var content, parameters, modelRequirements, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
2205
+ return __generator(this, function (_a) {
2206
+ switch (_a.label) {
2207
+ case 0:
2208
+ if (this.options.isVerbose) {
2209
+ console.info('🖋 OpenAI callCompletionModel call', { prompt: prompt });
2210
+ }
2211
+ content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
2212
+ // TODO: [☂] Use here more modelRequirements
2213
+ if (modelRequirements.modelVariant !== 'COMPLETION') {
2214
+ throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
2215
+ }
2216
+ modelName = modelRequirements.modelName || this.getDefaultCompletionModel().modelName;
2217
+ modelSettings = {
2218
+ model: modelName,
2219
+ max_tokens: modelRequirements.maxTokens || 2000,
2220
+ // <- TODO: [🌾] Make some global max cap for maxTokens
2221
+ temperature: modelRequirements.temperature,
2222
+ // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
2223
+ // <- Note: [🧆]
2224
+ };
2225
+ rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
2226
+ rawRequest = __assign(__assign({}, modelSettings), { prompt: rawPromptContent, user: this.options.user });
2227
+ start = getCurrentIsoDate();
2228
+ if (this.options.isVerbose) {
2229
+ console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
2230
+ }
2231
+ return [4 /*yield*/, this.client.completions.create(rawRequest)];
2232
+ case 1:
2233
+ rawResponse = _a.sent();
2234
+ if (this.options.isVerbose) {
2235
+ console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
2236
+ }
2237
+ if (!rawResponse.choices[0]) {
2238
+ throw new PipelineExecutionError('No choices from OpenAI');
2239
+ }
2240
+ if (rawResponse.choices.length > 1) {
2241
+ // TODO: This should maybe be only a warning
2242
+ throw new PipelineExecutionError('More than one choice from OpenAI');
2243
+ }
2244
+ resultContent = rawResponse.choices[0].text;
2245
+ // eslint-disable-next-line prefer-const
2246
+ complete = getCurrentIsoDate();
2247
+ usage = computeOpenaiUsage(content, resultContent || '', rawResponse);
2248
+ return [2 /*return*/, {
2249
+ content: resultContent,
2250
+ modelName: rawResponse.model || modelName,
2251
+ timing: {
2252
+ start: start,
2253
+ complete: complete,
2254
+ },
2255
+ usage: usage,
2256
+ rawPromptContent: rawPromptContent,
2257
+ rawRequest: rawRequest,
2258
+ rawResponse: rawResponse,
2259
+ // <- [🗯]
2260
+ }];
2261
+ }
2262
+ });
2263
+ });
2264
+ };
2265
+ /**
2266
+ * Calls OpenAI API to use an embedding model
2267
+ */
2268
+ OpenAiExecutionTools.prototype.callEmbeddingModel = function (prompt) {
2269
+ return __awaiter(this, void 0, void 0, function () {
2270
+ var content, parameters, modelRequirements, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
2271
+ return __generator(this, function (_a) {
2272
+ switch (_a.label) {
2273
+ case 0:
2274
+ if (this.options.isVerbose) {
2275
+ console.info('🖋 OpenAI embedding call', { prompt: prompt });
2276
+ }
2277
+ content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
2278
+ // TODO: [☂] Use here more modelRequirements
2279
+ if (modelRequirements.modelVariant !== 'EMBEDDING') {
2280
+ throw new PipelineExecutionError('Use embed only for EMBEDDING variant');
2281
+ }
2282
+ modelName = modelRequirements.modelName || this.getDefaultEmbeddingModel().modelName;
2283
+ rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
2284
+ rawRequest = {
2285
+ input: rawPromptContent,
2286
+ model: modelName,
2287
+ };
2288
+ start = getCurrentIsoDate();
2289
+ if (this.options.isVerbose) {
2290
+ console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
2291
+ }
2292
+ return [4 /*yield*/, this.client.embeddings.create(rawRequest)];
2293
+ case 1:
2294
+ rawResponse = _a.sent();
2295
+ if (this.options.isVerbose) {
2296
+ console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
2297
+ }
2298
+ if (rawResponse.data.length !== 1) {
2299
+ throw new PipelineExecutionError("Expected exactly 1 data item in response, got ".concat(rawResponse.data.length));
2300
+ }
2301
+ resultContent = rawResponse.data[0].embedding;
2302
+ // eslint-disable-next-line prefer-const
2303
+ complete = getCurrentIsoDate();
2304
+ usage = computeOpenaiUsage(content, '', rawResponse);
2305
+ return [2 /*return*/, {
2306
+ content: resultContent,
2307
+ modelName: rawResponse.model || modelName,
2308
+ timing: {
2309
+ start: start,
2310
+ complete: complete,
2311
+ },
2312
+ usage: usage,
2313
+ rawPromptContent: rawPromptContent,
2314
+ rawRequest: rawRequest,
2315
+ rawResponse: rawResponse,
2316
+ // <- [🗯]
2317
+ }];
2318
+ }
2319
+ });
2320
+ });
2321
+ };
2322
+ // <- Note: [🤖] callXxxModel
2323
+ /**
2324
+ * Get the model that should be used as default
2325
+ */
2326
+ OpenAiExecutionTools.prototype.getDefaultModel = function (defaultModelName) {
2327
+ var model = OPENAI_MODELS.find(function (_a) {
2328
+ var modelName = _a.modelName;
2329
+ return modelName === defaultModelName;
2330
+ });
2331
+ if (model === undefined) {
2332
+ throw new UnexpectedError(spaceTrim$1(function (block) {
2333
+ return "\n Cannot find model in OpenAI models with name \"".concat(defaultModelName, "\" which should be used as default.\n\n Available models:\n ").concat(block(OPENAI_MODELS.map(function (_a) {
2334
+ var modelName = _a.modelName;
2335
+ return "- \"".concat(modelName, "\"");
2336
+ }).join('\n')), "\n\n ");
2337
+ }));
2338
+ }
2339
+ return model;
2340
+ };
2341
+ /**
2342
+ * Default model for chat variant.
2343
+ */
2344
+ OpenAiExecutionTools.prototype.getDefaultChatModel = function () {
2345
+ return this.getDefaultModel('gpt-4o');
2346
+ };
2347
+ /**
2348
+ * Default model for completion variant.
2349
+ */
2350
+ OpenAiExecutionTools.prototype.getDefaultCompletionModel = function () {
2351
+ return this.getDefaultModel('gpt-3.5-turbo-instruct');
2352
+ };
2353
+ /**
2354
+ * Default model for embedding variant.
2355
+ */
2356
+ OpenAiExecutionTools.prototype.getDefaultEmbeddingModel = function () {
2357
+ return this.getDefaultModel('text-embedding-3-large');
2358
+ };
2359
+ // <- Note: [🤖] getDefaultXxxModel
2360
+ /**
2361
+ * List all available OpenAI models that can be used
2362
+ */
2363
+ OpenAiExecutionTools.prototype.listModels = function () {
2364
+ /*
2365
+ Note: Dynamic listing of the models
2366
+ const models = await this.openai.models.list({});
2367
+
2368
+ console.log({ models });
2369
+ console.log(models.data);
2370
+ */
2371
+ return OPENAI_MODELS;
2372
+ };
2373
+ return OpenAiExecutionTools;
2374
+ }());
2375
+ /**
2376
+ * TODO: [🧠][🧙‍♂️] Maybe there can be some wizard for those who want to use just OpenAI
2377
+ * TODO: Maybe create a common util for callChatModel and callCompletionModel
2378
+ * TODO: Maybe make custom OpenaiError
2379
+ * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
2380
+ * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
2381
+ */
2382
+
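A usage sketch for the OpenAI tools above; apiKey is a standard option of the underlying OpenAI client (everything except isVerbose and user is passed through), and the defaults are the ones hard-coded in getDefaultChatModel / getDefaultEmbeddingModel:

    import { OpenAiExecutionTools } from '@promptbook/openai';

    const openAiTools = new OpenAiExecutionTools({
        apiKey: process.env.OPENAI_API_KEY, // <- forwarded to the OpenAI client
        user: 'user-1234', // <- forwarded as the OpenAI `user` field
        isVerbose: true,
    });

    // Falls back to the default chat model (gpt-4o) when the prompt names none:
    const chat = await openAiTools.callChatModel({
        content: 'Write a haiku about {topic}',
        parameters: { topic: 'the sea' },
        modelRequirements: { modelVariant: 'CHAT' },
    });

    // The content of an embedding result is the embedding vector (an array of numbers):
    const embedding = await openAiTools.callEmbeddingModel({
        content: 'The sea',
        parameters: {},
        modelRequirements: { modelVariant: 'EMBEDDING' },
    });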
2383
+ /**
2384
+ * @private internal type for `createLlmToolsFromConfiguration`
2385
+ */
2386
+ var EXECUTION_TOOLS_CLASSES = {
2387
+ createOpenAiExecutionTools: function (options) {
2388
+ return new OpenAiExecutionTools(__assign(__assign({}, options), { dangerouslyAllowBrowser: true /* <- TODO: [🧠] !!! Some mechanism for auto-detection of browser, maybe hide in `OpenAiExecutionTools` */ }));
2389
+ },
2390
+ createAnthropicClaudeExecutionTools: createAnthropicClaudeExecutionTools,
2391
+ createAzureOpenAiExecutionTools: function (options) { return new AzureOpenAiExecutionTools(options); },
2392
+ // <- Note: [🦑] Add here new LLM provider
2393
+ };
2394
+ /**
2395
+ * TODO: [🧠] Better file name than `config.ts` + maybe move to two separate files
2396
+ * TODO: [🧠][🎌] Adding this should be the responsibility of each provider package, NOT this one central place
2397
+ */
2398
+
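The registry above is keyed by factory name, and createLlmToolsFromConfiguration below builds that key as 'create' + className. A tiny sketch of the lookup:

    // A configuration entry with className 'OpenAiExecutionTools'
    // resolves to the createOpenAiExecutionTools factory:
    const className = 'OpenAiExecutionTools';
    const factory = EXECUTION_TOOLS_CLASSES['create' + className];
    const tools = factory({ apiKey: process.env.OPENAI_API_KEY, isVerbose: false });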
2399
+ /**
2400
+ * @@@
2401
+ *
2402
+ * Note: This function is not cached, every call creates a new instance of `MultipleLlmExecutionTools`
2403
+ *
2404
+ * @returns @@@
2405
+ * @public exported from `@promptbook/core`
2406
+ */
2407
+ function createLlmToolsFromConfiguration(configuration, options) {
2408
+ if (options === void 0) { options = {}; }
2409
+ var _a = options.isVerbose, isVerbose = _a === void 0 ? false : _a;
2410
+ dotenv.config();
2411
+ var llmTools = configuration.map(function (llmConfiguration) {
2412
+ return EXECUTION_TOOLS_CLASSES["create".concat(llmConfiguration.className)](__assign({ isVerbose: isVerbose }, llmConfiguration.options));
2413
+ });
2414
+ return joinLlmExecutionTools.apply(void 0, __spreadArray([], __read(llmTools), false));
2415
+ }
2416
+ /**
2417
+ * TODO: [🎌] Together with `createLlmToolsFromConfiguration` + 'EXECUTION_TOOLS_CLASSES', this pulls ALL model providers into `@promptbook/core`, make this more efficient
2418
+ * TODO: [🧠][🎌] Dynamically install required providers
2419
+ * TODO: @@@ write discussion about this - wizard
2420
+ * TODO: [🧠][🍛] Which name is better `createLlmToolsFromConfig` or `createLlmToolsFromConfiguration`?
2421
+ * TODO: [🧠] Is there some meaningful way to test this util
2422
+ * TODO: This should maybe be under `utils` rather than `_common`
2423
+ */
2424
+
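A configuration sketch for the function above. The entry shape (title, packageName, className, options) mirrors the one used in the proxied Anthropic branch earlier; the apiKey option names are assumptions:

    import { createLlmToolsFromConfiguration } from '@promptbook/core';

    const llmTools = createLlmToolsFromConfiguration(
        [
            {
                title: 'OpenAI',
                packageName: '@promptbook/openai',
                className: 'OpenAiExecutionTools',
                options: { apiKey: process.env.OPENAI_API_KEY },
            },
            {
                title: 'Anthropic Claude',
                packageName: '@promptbook/anthropic-claude',
                className: 'AnthropicClaudeExecutionTools',
                options: { apiKey: process.env.ANTHROPIC_CLAUDE_API_KEY },
            },
        ],
        { isVerbose: true },
    );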
2425
+ /**
2426
+ * Remote server is a proxy server that uses its execution tools internally and exposes the executor interface externally.
2427
+ *
2428
+ * You can simply use `RemoteExecutionTools` in client-side JavaScript and connect to your remote server.
2429
+ * This is useful when you want to keep all the logic on the browser side but not expose your API keys, or when there is no need to use the customer's GPU.
2430
+ *
2431
+ * @see https://github.com/webgptorg/promptbook#remote-server
2432
+ * @public exported from `@promptbook/remote-server`
2433
+ */
2434
+ function startRemoteServer(options) {
2435
+ var _this = this;
2436
+ var _a = __assign({ isAnonymousModeAllowed: false, isCollectionModeAllowed: false, collection: null, createLlmExecutionTools: null }, options), port = _a.port, path = _a.path, collection = _a.collection, createLlmExecutionTools = _a.createLlmExecutionTools,
2437
+ // <- TODO: [🧠][🤺] Remove `createLlmExecutionTools`, pass just `llmExecutionTools`
2438
+ isAnonymousModeAllowed = _a.isAnonymousModeAllowed, isCollectionModeAllowed = _a.isCollectionModeAllowed, _b = _a.isVerbose, isVerbose = _b === void 0 ? false : _b;
2439
+ // <- TODO: [🦪] Some helper type to be able to use discriminant union types with destructuring
2440
+ var httpServer = http.createServer({}, function (request, response) { return __awaiter(_this, void 0, void 0, function () {
2441
+ var _a, _b;
2442
+ var _this = this;
2443
+ var _c;
2444
+ return __generator(this, function (_d) {
2445
+ switch (_d.label) {
2446
+ case 0:
2447
+ if ((_c = request.url) === null || _c === void 0 ? void 0 : _c.includes('socket.io')) {
2448
+ return [2 /*return*/];
2449
+ }
2450
+ _b = (_a = response).write;
2451
+ return [4 /*yield*/, spaceTrim(function (block) { return __awaiter(_this, void 0, void 0, function () {
2452
+ var _a, _b, _c, _d, _e;
2453
+ return __generator(this, function (_f) {
2454
+ switch (_f.label) {
2455
+ case 0:
2456
+ _b = (_a = "\n Server for processing promptbook remote requests is running.\n\n Version: ".concat(PROMPTBOOK_VERSION, "\n Anonymouse mode: ").concat(isAnonymousModeAllowed ? 'enabled' : 'disabled', "\n Collection mode: ").concat(isCollectionModeAllowed ? 'enabled' : 'disabled', "\n ")).concat;
2457
+ _c = block;
2458
+ if (!!isCollectionModeAllowed) return [3 /*break*/, 1];
2459
+ _d = '';
2460
+ return [3 /*break*/, 3];
2461
+ case 1:
2462
+ _e = 'Pipelines in collection:\n';
2463
+ return [4 /*yield*/, collection.listPipelines()];
2464
+ case 2:
2465
+ _d = _e +
2466
+ (_f.sent())
2467
+ .map(function (pipelineUrl) { return "- ".concat(pipelineUrl); })
2468
+ .join('\n');
2469
+ _f.label = 3;
2470
+ case 3: return [2 /*return*/, _b.apply(_a, [_c.apply(void 0, [_d]), "\n\n For more information look at:\n https://github.com/webgptorg/promptbook\n "])];
2471
+ }
2472
+ });
2473
+ }); })];
2474
+ case 1:
2475
+ _b.apply(_a, [_d.sent()]);
2476
+ response.end();
2477
+ return [2 /*return*/];
2478
+ }
2479
+ });
2480
+ }); });
2481
+ var server = new Server(httpServer, {
2482
+ path: path,
2483
+ transports: [/*'websocket', <- TODO: [🌬] Make websocket transport work */ 'polling'],
2484
+ cors: {
2485
+ origin: '*',
2486
+ methods: ['GET', 'POST'],
2487
+ },
2488
+ });
2489
+ server.on('connection', function (socket) {
2490
+ console.info(colors.gray("Client connected"), socket.id);
2491
+ socket.on('request', function (request) { return __awaiter(_this, void 0, void 0, function () {
2492
+ var _a, prompt, clientId, llmToolsConfiguration, llmExecutionTools, promptResult, _b, error_1;
2493
+ return __generator(this, function (_c) {
2494
+ switch (_c.label) {
2495
+ case 0:
2496
+ _a = __assign({ clientId: null, llmToolsConfiguration: null }, request), prompt = _a.prompt, clientId = _a.clientId, llmToolsConfiguration = _a.llmToolsConfiguration;
2497
+ // <- TODO: [🦪] Some helper type to be able to use discriminant union types with destructuring
2498
+ if (isVerbose) {
2499
+ console.info(colors.bgWhite("Prompt:"), colors.gray(JSON.stringify(request, null, 4)));
2500
+ }
2501
+ _c.label = 1;
2502
+ case 1:
2503
+ _c.trys.push([1, 14, 15, 16]);
2504
+ if (llmToolsConfiguration !== null && !isAnonymousModeAllowed) {
2505
+ throw new PipelineExecutionError("Anonymous mode is not allowed"); // <- TODO: !!!!!! Test
2506
+ }
2507
+ if (clientId !== null && !isCollectionModeAllowed) {
2508
+ throw new PipelineExecutionError("Collection mode is not allowed"); // <- TODO: !!!!!! Test
2509
+ }
2510
+ llmExecutionTools = void 0;
2511
+ if (!(llmToolsConfiguration !== null)) return [3 /*break*/, 2];
2512
+ // Note: Anonymous mode
2513
+ // TODO: Maybe check that configuration is not empty
2514
+ llmExecutionTools = createLlmToolsFromConfiguration(llmToolsConfiguration);
2515
+ return [3 /*break*/, 5];
2516
+ case 2:
2517
+ if (!(createLlmExecutionTools !== null)) return [3 /*break*/, 4];
2518
+ // Note: Collection mode
2519
+ llmExecutionTools = createLlmExecutionTools(clientId);
2520
+ return [4 /*yield*/, collection.isResponsibleForPrompt(prompt)];
2521
+ case 3:
2522
+ if (!(_c.sent())) {
2523
+ throw new PipelineExecutionError("Pipeline is not in the collection of this server");
2524
+ }
2525
+ return [3 /*break*/, 5];
2526
+ case 4: throw new PipelineExecutionError("You must provide either llmToolsConfiguration or createLlmExecutionTools");
2527
+ case 5:
2528
+ promptResult = void 0;
2529
+ _b = prompt.modelRequirements.modelVariant;
2530
+ switch (_b) {
2531
+ case 'CHAT': return [3 /*break*/, 6];
2532
+ case 'COMPLETION': return [3 /*break*/, 8];
2533
+ case 'EMBEDDING': return [3 /*break*/, 10];
2534
+ }
2535
+ return [3 /*break*/, 12];
2536
+ case 6:
2537
+ if (llmExecutionTools.callChatModel === undefined) {
2538
+ // Note: [0] This check should not be a thing
2539
+ throw new PipelineExecutionError("Chat model is not available");
2540
+ }
2541
+ return [4 /*yield*/, llmExecutionTools.callChatModel(prompt)];
2542
+ case 7:
2543
+ promptResult = _c.sent();
2544
+ return [3 /*break*/, 13];
2545
+ case 8:
2546
+ if (llmExecutionTools.callCompletionModel === undefined) {
2547
+ // Note: [0] This check should not be a thing
2548
+ throw new PipelineExecutionError("Completion model is not available");
2549
+ }
2550
+ return [4 /*yield*/, llmExecutionTools.callCompletionModel(prompt)];
2551
+ case 9:
2552
+ promptResult = _c.sent();
2553
+ return [3 /*break*/, 13];
2554
+ case 10:
2555
+ if (llmExecutionTools.callEmbeddingModel === undefined) {
2556
+ // Note: [0] This check should not be a thing
2557
+ throw new PipelineExecutionError("Embedding model is not available");
2558
+ }
2559
+ return [4 /*yield*/, llmExecutionTools.callEmbeddingModel(prompt)];
2560
+ case 11:
2561
+ promptResult = _c.sent();
2562
+ return [3 /*break*/, 13];
2563
+ case 12: throw new PipelineExecutionError("Unknown model variant \"".concat(prompt.modelRequirements.modelVariant, "\""));
2564
+ case 13:
2565
+ if (isVerbose) {
2566
+ console.info(colors.bgGreen("PromptResult:"), colors.green(JSON.stringify(promptResult, null, 4)));
2567
+ }
2568
+ socket.emit('response', { promptResult: promptResult });
2569
+ return [3 /*break*/, 16];
2570
+ case 14:
2571
+ error_1 = _c.sent();
2572
+ if (!(error_1 instanceof Error)) {
2573
+ throw error_1;
2574
+ }
2575
+ socket.emit('error', { errorMessage: error_1.message });
2576
+ return [3 /*break*/, 16];
2577
+ case 15:
197
2578
  socket.disconnect();
198
2579
  return [7 /*endfinally*/];
199
- case 13: return [2 /*return*/];
2580
+ case 16: return [2 /*return*/];
200
2581
  }
201
2582
  });
202
2583
  }); });
@@ -229,13 +2610,14 @@ function startRemoteServer(options) {
229
2610
  };
230
2611
  }
231
2612
  /**
232
- * TODO: [🍜] Add anonymous option
2613
+ * TODO: [🍜] !!!!!! Add anonymous option
233
2614
  * TODO: [⚖] Expose the collection to be able to connect to same collection via createCollectionFromUrl
234
2615
  * TODO: Handle progress - support streaming
235
2616
  * TODO: [🗯] Do not hang up immediately but wait until client closes OR timeout
236
2617
  * TODO: [🗯] Timeout on chat to free up resources
237
2618
  * TODO: [🃏] Pass some security token here to prevent malicious usage and/or DDoS
238
2619
  * TODO: [0] Set unavailable models as undefined in `RemoteLlmExecutionTools` NOT throw error here
2620
+ * TODO: Constrain anonymous mode for specific models / providers
239
2621
  */
240
2622
 
241
2623
  export { PROMPTBOOK_VERSION, startRemoteServer };
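Finally, a sketch of starting the server exported above in anonymous mode. All option names come from the destructuring inside startRemoteServer; the port value is only illustrative:

    import { startRemoteServer } from '@promptbook/remote-server';

    startRemoteServer({
        port: 4460, // <- illustrative port
        path: '/promptbook',
        isAnonymousModeAllowed: true, // <- clients send their own llmToolsConfiguration
        isCollectionModeAllowed: false, // <- no collection / createLlmExecutionTools needed
        isVerbose: true,
    });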