@promptbook/remote-server 0.65.0-2 → 0.65.0-3

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
Files changed (25)
  1. package/esm/index.es.js +2464 -82
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/anthropic-claude.index.d.ts +6 -0
  4. package/esm/typings/src/_packages/core.index.d.ts +2 -0
  5. package/esm/typings/src/_packages/node.index.d.ts +0 -2
  6. package/esm/typings/src/_packages/remote-client.index.d.ts +2 -2
  7. package/esm/typings/src/_packages/types.index.d.ts +16 -2
  8. package/esm/typings/src/llm-providers/_common/config.d.ts +3 -3
  9. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +5 -3
  10. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions.d.ts +23 -2
  11. package/esm/typings/src/llm-providers/anthropic-claude/createAnthropicClaudeExecutionTools.d.ts +13 -0
  12. package/esm/typings/src/llm-providers/anthropic-claude/playground/playground.d.ts +1 -1
  13. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +3 -2
  14. package/esm/typings/src/llm-providers/remote/interfaces/Promptbook_Server_Error.d.ts +2 -2
  15. package/esm/typings/src/llm-providers/remote/interfaces/Promptbook_Server_Progress.d.ts +2 -2
  16. package/esm/typings/src/llm-providers/remote/interfaces/Promptbook_Server_Request.d.ts +14 -2
  17. package/esm/typings/src/llm-providers/remote/interfaces/RemoteLlmExecutionToolsOptions.d.ts +49 -0
  18. package/esm/typings/src/llm-providers/remote/interfaces/RemoteServerOptions.d.ts +23 -2
  19. package/esm/typings/src/llm-providers/remote/playground/playground.d.ts +2 -0
  20. package/esm/typings/src/llm-providers/remote/startRemoteServer.d.ts +2 -1
  21. package/esm/typings/src/types/typeAliases.d.ts +6 -0
  22. package/package.json +6 -2
  23. package/umd/index.umd.js +2484 -85
  24. package/umd/index.umd.js.map +1 -1
  25. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionToolsOptions.d.ts +0 -26
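The bulk of this release wires the new Anthropic Claude factory (createAnthropicClaudeExecutionTools) and an anonymous mode for the remote server/client into the bundles, and moves RemoteLlmExecutionToolsOptions under interfaces/. For orientation only, the following sketch shows how the new remote-client options surface appears to be used, based solely on the option and field names visible in this diff (remoteUrl, path, isAnonymous, llmToolsConfiguration, clientId); the actual exported types and required fields may differ.

// Sketch only — names are taken from this diff, not from published documentation.
import { RemoteLlmExecutionTools } from '@promptbook/remote-client';

const llmTools = new RemoteLlmExecutionTools({
    remoteUrl: 'https://example.com',   // <- hypothetical server URL
    path: '/promptbook/socket.io',      // <- hypothetical socket.io path
    isAnonymous: true,                  // new in this release: per-request provider configuration
    llmToolsConfiguration: [],          // <- shape not shown in this diff; forwarded to the server
    isVerbose: true,
});

// In non-anonymous mode the client sends a `clientId` instead of `llmToolsConfiguration`.
const promptResult = await llmTools.callChatModel({
    content: 'Hello {name}',
    parameters: { name: 'world' },
    modelRequirements: { modelVariant: 'CHAT' },
    // <- the full `Prompt` type may require additional fields not visible in this diff
});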
package/umd/index.umd.js CHANGED
@@ -1,19 +1,41 @@
1
1
  (function (global, factory) {
2
- typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('colors'), require('http'), require('socket.io'), require('spacetrim')) :
3
- typeof define === 'function' && define.amd ? define(['exports', 'colors', 'http', 'socket.io', 'spacetrim'], factory) :
4
- (global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global["promptbook-remote-server"] = {}, global.colors, global.http, global.socket_io, global.spacetrim));
5
- })(this, (function (exports, colors, http, socket_io, spacetrim) { 'use strict';
2
+ typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('colors'), require('http'), require('socket.io'), require('spacetrim'), require('dotenv'), require('socket.io-client'), require('@anthropic-ai/sdk'), require('@azure/openai'), require('openai')) :
3
+ typeof define === 'function' && define.amd ? define(['exports', 'colors', 'http', 'socket.io', 'spacetrim', 'dotenv', 'socket.io-client', '@anthropic-ai/sdk', '@azure/openai', 'openai'], factory) :
4
+ (global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global["promptbook-remote-server"] = {}, global.colors, global.http, global.socket_io, global.spaceTrim, global.dotenv, global.socket_ioClient, global.Anthropic, global.openai, global.OpenAI));
5
+ })(this, (function (exports, colors, http, socket_io, spaceTrim, dotenv, socket_ioClient, Anthropic, openai, OpenAI) { 'use strict';
6
6
 
7
7
  function _interopDefaultLegacy (e) { return e && typeof e === 'object' && 'default' in e ? e : { 'default': e }; }
8
8
 
9
+ function _interopNamespace(e) {
10
+ if (e && e.__esModule) return e;
11
+ var n = Object.create(null);
12
+ if (e) {
13
+ Object.keys(e).forEach(function (k) {
14
+ if (k !== 'default') {
15
+ var d = Object.getOwnPropertyDescriptor(e, k);
16
+ Object.defineProperty(n, k, d.get ? d : {
17
+ enumerable: true,
18
+ get: function () { return e[k]; }
19
+ });
20
+ }
21
+ });
22
+ }
23
+ n["default"] = e;
24
+ return Object.freeze(n);
25
+ }
26
+
9
27
  var colors__default = /*#__PURE__*/_interopDefaultLegacy(colors);
10
28
  var http__default = /*#__PURE__*/_interopDefaultLegacy(http);
29
+ var spaceTrim__default = /*#__PURE__*/_interopDefaultLegacy(spaceTrim);
30
+ var dotenv__namespace = /*#__PURE__*/_interopNamespace(dotenv);
31
+ var Anthropic__default = /*#__PURE__*/_interopDefaultLegacy(Anthropic);
32
+ var OpenAI__default = /*#__PURE__*/_interopDefaultLegacy(OpenAI);
11
33
 
12
34
  // ⚠️ WARNING: This code has been generated so that any manual changes will be overwritten
13
35
  /**
14
36
  * The version of the Promptbook library
15
37
  */
16
- var PROMPTBOOK_VERSION = '0.65.0-1';
38
+ var PROMPTBOOK_VERSION = '0.65.0-2';
17
39
  // TODO: !!!! List here all the versions and annotate + put into script
18
40
 
19
41
  /*! *****************************************************************************
@@ -47,6 +69,17 @@
47
69
  d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
48
70
  }
49
71
 
72
+ var __assign = function() {
73
+ __assign = Object.assign || function __assign(t) {
74
+ for (var s, i = 1, n = arguments.length; i < n; i++) {
75
+ s = arguments[i];
76
+ for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p)) t[p] = s[p];
77
+ }
78
+ return t;
79
+ };
80
+ return __assign.apply(this, arguments);
81
+ };
82
+
50
83
  function __awaiter(thisArg, _arguments, P, generator) {
51
84
  function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
52
85
  return new (P || (P = Promise))(function (resolve, reject) {
@@ -83,6 +116,45 @@
83
116
  } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }
84
117
  if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };
85
118
  }
119
+ }
120
+
121
+ function __values(o) {
122
+ var s = typeof Symbol === "function" && Symbol.iterator, m = s && o[s], i = 0;
123
+ if (m) return m.call(o);
124
+ if (o && typeof o.length === "number") return {
125
+ next: function () {
126
+ if (o && i >= o.length) o = void 0;
127
+ return { value: o && o[i++], done: !o };
128
+ }
129
+ };
130
+ throw new TypeError(s ? "Object is not iterable." : "Symbol.iterator is not defined.");
131
+ }
132
+
133
+ function __read(o, n) {
134
+ var m = typeof Symbol === "function" && o[Symbol.iterator];
135
+ if (!m) return o;
136
+ var i = m.call(o), r, ar = [], e;
137
+ try {
138
+ while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);
139
+ }
140
+ catch (error) { e = { error: error }; }
141
+ finally {
142
+ try {
143
+ if (r && !r.done && (m = i["return"])) m.call(i);
144
+ }
145
+ finally { if (e) throw e.error; }
146
+ }
147
+ return ar;
148
+ }
149
+
150
+ function __spreadArray(to, from, pack) {
151
+ if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {
152
+ if (ar || !(i in from)) {
153
+ if (!ar) ar = Array.prototype.slice.call(from, 0, i);
154
+ ar[i] = from[i];
155
+ }
156
+ }
157
+ return to.concat(ar || Array.prototype.slice.call(from));
86
158
  }
87
159
 
88
160
  /**
@@ -102,107 +174,2433 @@
102
174
  }(Error));
103
175
 
104
176
  /**
105
- * Remote server is a proxy server that uses its execution tools internally and exposes the executor interface externally.
177
+ * This error type indicates that the error should not happen and its last check before crashing with some other error
106
178
  *
107
- * You can simply use `RemoteExecutionTools` on client-side javascript and connect to your remote server.
108
- * This is useful to make all logic on browser side but not expose your API keys or no need to use customer's GPU.
179
+ * @public exported from `@promptbook/core`
180
+ */
181
+ var UnexpectedError = /** @class */ (function (_super) {
182
+ __extends(UnexpectedError, _super);
183
+ function UnexpectedError(message) {
184
+ var _this = _super.call(this, spaceTrim.spaceTrim(function (block) { return "\n ".concat(block(message), "\n\n Note: This error should not happen.\n It's probbably a bug in the pipeline collection\n\n Please report issue:\n https://github.com/webgptorg/promptbook/issues\n\n Or contact us on me@pavolhejny.com\n\n "); })) || this;
185
+ _this.name = 'UnexpectedError';
186
+ Object.setPrototypeOf(_this, UnexpectedError.prototype);
187
+ return _this;
188
+ }
189
+ return UnexpectedError;
190
+ }(Error));
191
+
192
+ /**
193
+ * Multiple LLM Execution Tools is a proxy server that uses multiple execution tools internally and exposes the executor interface externally.
109
194
  *
110
- * @see https://github.com/webgptorg/promptbook#remote-server
111
- * @public exported from `@promptbook/remote-server`
195
+ * Note: Internal utility of `joinLlmExecutionTools` but exposed type
196
+ * @public exported from `@promptbook/types`
112
197
  */
113
- function startRemoteServer(options) {
114
- var _this = this;
115
- var port = options.port, path = options.path, collection = options.collection, createLlmExecutionTools = options.createLlmExecutionTools, _a = options.isVerbose, isVerbose = _a === void 0 ? false : _a;
116
- var httpServer = http__default["default"].createServer({}, function (request, response) {
117
- var _a;
118
- if ((_a = request.url) === null || _a === void 0 ? void 0 : _a.includes('socket.io')) {
119
- return;
120
- }
121
- response.write(spacetrim.spaceTrim("\n Server for processing promptbook remote requests is running.\n\n Version: ".concat(PROMPTBOOK_VERSION, "\n\n For more information look at:\n https://github.com/webgptorg/promptbook\n\n ")));
122
- response.end();
198
+ var MultipleLlmExecutionTools = /** @class */ (function () {
199
+ /**
200
+ * Gets array of execution tools in order of priority
201
+ */
202
+ function MultipleLlmExecutionTools() {
203
+ var llmExecutionTools = [];
204
+ for (var _i = 0; _i < arguments.length; _i++) {
205
+ llmExecutionTools[_i] = arguments[_i];
206
+ }
207
+ this.llmExecutionTools = llmExecutionTools;
208
+ }
209
+ Object.defineProperty(MultipleLlmExecutionTools.prototype, "title", {
210
+ get: function () {
211
+ return 'Multiple LLM Providers';
212
+ },
213
+ enumerable: false,
214
+ configurable: true
123
215
  });
124
- var server = new socket_io.Server(httpServer, {
125
- path: path,
126
- transports: [/*'websocket', <- TODO: [🌬] Make websocket transport work */ 'polling'],
127
- cors: {
128
- origin: '*',
129
- methods: ['GET', 'POST'],
216
+ Object.defineProperty(MultipleLlmExecutionTools.prototype, "description", {
217
+ get: function () {
218
+ return this.llmExecutionTools
219
+ .map(function (tools, index) { return "".concat(index + 1, ") ").concat(tools.title, " ").concat(tools.description || ''); })
220
+ .join('\n');
130
221
  },
222
+ enumerable: false,
223
+ configurable: true
131
224
  });
132
- server.on('connection', function (socket) {
133
- console.info(colors__default["default"].gray("Client connected"), socket.id);
134
- socket.on('request', function (request) { return __awaiter(_this, void 0, void 0, function () {
135
- var prompt, clientId, executionToolsForClient, promptResult, _a, error_1;
136
- return __generator(this, function (_b) {
137
- switch (_b.label) {
225
+ /**
226
+ * Calls the best available chat model
227
+ */
228
+ MultipleLlmExecutionTools.prototype.callChatModel = function (prompt) {
229
+ return this.callCommonModel(prompt);
230
+ };
231
+ /**
232
+ * Calls the best available completion model
233
+ */
234
+ MultipleLlmExecutionTools.prototype.callCompletionModel = function (prompt) {
235
+ return this.callCommonModel(prompt);
236
+ };
237
+ /**
238
+ * Calls the best available embedding model
239
+ */
240
+ MultipleLlmExecutionTools.prototype.callEmbeddingModel = function (prompt) {
241
+ return this.callCommonModel(prompt);
242
+ };
243
+ // <- Note: [🤖]
244
+ /**
245
+ * Calls the best available model
246
+ *
247
+ * Note: This should be private or protected but is public to be usable with duck typing
248
+ */
249
+ MultipleLlmExecutionTools.prototype.callCommonModel = function (prompt) {
250
+ return __awaiter(this, void 0, void 0, function () {
251
+ var errors, _a, _b, llmExecutionTools, _c, error_1, e_1_1;
252
+ var e_1, _d;
253
+ var _this = this;
254
+ return __generator(this, function (_e) {
255
+ switch (_e.label) {
138
256
  case 0:
139
- prompt = request.prompt, clientId = request.clientId;
140
- // TODO: !! Validate here clientId (pass validator as dependency)
141
- if (isVerbose) {
142
- console.info(colors__default["default"].bgWhite("Prompt:"), colors__default["default"].gray(JSON.stringify(request, null, 4)));
143
- }
144
- _b.label = 1;
257
+ errors = [];
258
+ _e.label = 1;
145
259
  case 1:
146
- _b.trys.push([1, 11, 12, 13]);
147
- executionToolsForClient = createLlmExecutionTools(clientId);
148
- return [4 /*yield*/, collection.isResponsibleForPrompt(prompt)];
260
+ _e.trys.push([1, 15, 16, 17]);
261
+ _a = __values(this.llmExecutionTools), _b = _a.next();
262
+ _e.label = 2;
149
263
  case 2:
150
- if (!(_b.sent())) {
151
- throw new PipelineExecutionError("Pipeline is not in the collection of this server");
152
- }
153
- promptResult = void 0;
154
- _a = prompt.modelRequirements.modelVariant;
155
- switch (_a) {
156
- case 'CHAT': return [3 /*break*/, 3];
157
- case 'COMPLETION': return [3 /*break*/, 5];
158
- case 'EMBEDDING': return [3 /*break*/, 7];
159
- }
160
- return [3 /*break*/, 9];
264
+ if (!!_b.done) return [3 /*break*/, 14];
265
+ llmExecutionTools = _b.value;
266
+ _e.label = 3;
161
267
  case 3:
162
- if (executionToolsForClient.callChatModel === undefined) {
163
- // Note: [0] This check should not be a thing
164
- throw new PipelineExecutionError("Chat model is not available");
268
+ _e.trys.push([3, 12, , 13]);
269
+ _c = prompt.modelRequirements.modelVariant;
270
+ switch (_c) {
271
+ case 'CHAT': return [3 /*break*/, 4];
272
+ case 'COMPLETION': return [3 /*break*/, 6];
273
+ case 'EMBEDDING': return [3 /*break*/, 8];
165
274
  }
166
- return [4 /*yield*/, executionToolsForClient.callChatModel(prompt)];
167
- case 4:
168
- promptResult = _b.sent();
169
275
  return [3 /*break*/, 10];
170
- case 5:
171
- if (executionToolsForClient.callCompletionModel === undefined) {
172
- // Note: [0] This check should not be a thing
173
- throw new PipelineExecutionError("Completion model is not available");
276
+ case 4:
277
+ if (llmExecutionTools.callChatModel === undefined) {
278
+ return [3 /*break*/, 13];
174
279
  }
175
- return [4 /*yield*/, executionToolsForClient.callCompletionModel(prompt)];
280
+ return [4 /*yield*/, llmExecutionTools.callChatModel(prompt)];
281
+ case 5: return [2 /*return*/, _e.sent()];
176
282
  case 6:
177
- promptResult = _b.sent();
178
- return [3 /*break*/, 10];
179
- case 7:
180
- if (executionToolsForClient.callEmbeddingModel === undefined) {
181
- // Note: [0] This check should not be a thing
182
- throw new PipelineExecutionError("Embedding model is not available");
283
+ if (llmExecutionTools.callCompletionModel === undefined) {
284
+ return [3 /*break*/, 13];
183
285
  }
184
- return [4 /*yield*/, executionToolsForClient.callEmbeddingModel(prompt)];
286
+ return [4 /*yield*/, llmExecutionTools.callCompletionModel(prompt)];
287
+ case 7: return [2 /*return*/, _e.sent()];
185
288
  case 8:
186
- promptResult = _b.sent();
187
- return [3 /*break*/, 10];
188
- case 9: throw new PipelineExecutionError("Unknown model variant \"".concat(prompt.modelRequirements.modelVariant, "\""));
189
- case 10:
190
- if (isVerbose) {
191
- console.info(colors__default["default"].bgGreen("PromptResult:"), colors__default["default"].green(JSON.stringify(promptResult, null, 4)));
289
+ if (llmExecutionTools.callEmbeddingModel === undefined) {
290
+ return [3 /*break*/, 13];
192
291
  }
193
- socket.emit('response', { promptResult: promptResult });
194
- return [3 /*break*/, 13];
195
- case 11:
196
- error_1 = _b.sent();
197
- if (!(error_1 instanceof Error)) {
292
+ return [4 /*yield*/, llmExecutionTools.callEmbeddingModel(prompt)];
293
+ case 9: return [2 /*return*/, _e.sent()];
294
+ case 10: throw new UnexpectedError("Unknown model variant \"".concat(prompt.modelRequirements.modelVariant, "\""));
295
+ case 11: return [3 /*break*/, 13];
296
+ case 12:
297
+ error_1 = _e.sent();
298
+ if (!(error_1 instanceof Error) || error_1 instanceof UnexpectedError) {
198
299
  throw error_1;
199
300
  }
200
- socket.emit('error', { errorMessage: error_1.message });
301
+ errors.push(error_1);
201
302
  return [3 /*break*/, 13];
202
- case 12:
303
+ case 13:
304
+ _b = _a.next();
305
+ return [3 /*break*/, 2];
306
+ case 14: return [3 /*break*/, 17];
307
+ case 15:
308
+ e_1_1 = _e.sent();
309
+ e_1 = { error: e_1_1 };
310
+ return [3 /*break*/, 17];
311
+ case 16:
312
+ try {
313
+ if (_b && !_b.done && (_d = _a.return)) _d.call(_a);
314
+ }
315
+ finally { if (e_1) throw e_1.error; }
316
+ return [7 /*endfinally*/];
317
+ case 17:
318
+ if (errors.length === 1) {
319
+ throw errors[0];
320
+ }
321
+ else if (errors.length > 1) {
322
+ throw new PipelineExecutionError(
323
+ // TODO: Tell which execution tools failed like
324
+ // 1) OpenAI throw PipelineExecutionError: Parameter {knowledge} is not defined
325
+ // 2) AnthropicClaude throw PipelineExecutionError: Parameter {knowledge} is not defined
326
+ // 3) ...
327
+ spaceTrim__default["default"](function (block) { return "\n All execution tools failed:\n\n ".concat(block(errors
328
+ .map(function (error, i) { return "".concat(i + 1, ") **").concat(error.name || 'Error', ":** ").concat(error.message); })
329
+ .join('\n')), "\n\n "); }));
330
+ }
331
+ else if (this.llmExecutionTools.length === 0) {
332
+ throw new PipelineExecutionError("You have not provided any `LlmExecutionTools`");
333
+ }
334
+ else {
335
+ throw new PipelineExecutionError(spaceTrim__default["default"](function (block) { return "\n You have not provided any `LlmExecutionTools` that support model variant \"".concat(prompt.modelRequirements.modelVariant, "\n\n Available `LlmExecutionTools`:\n ").concat(block(_this.llmExecutionTools
336
+ .map(function (tools) { return "- ".concat(tools.title, " ").concat(tools.description || ''); })
337
+ .join('\n')), "\n\n "); }));
338
+ }
339
+ }
340
+ });
341
+ });
342
+ };
343
+ /**
344
+ * List all available models that can be used
345
+ * This lists is a combination of all available models from all execution tools
346
+ */
347
+ MultipleLlmExecutionTools.prototype.listModels = function () {
348
+ return __awaiter(this, void 0, void 0, function () {
349
+ var availableModels, _a, _b, llmExecutionTools, models, e_2_1;
350
+ var e_2, _c;
351
+ return __generator(this, function (_d) {
352
+ switch (_d.label) {
353
+ case 0:
354
+ availableModels = [];
355
+ _d.label = 1;
356
+ case 1:
357
+ _d.trys.push([1, 6, 7, 8]);
358
+ _a = __values(this.llmExecutionTools), _b = _a.next();
359
+ _d.label = 2;
360
+ case 2:
361
+ if (!!_b.done) return [3 /*break*/, 5];
362
+ llmExecutionTools = _b.value;
363
+ return [4 /*yield*/, llmExecutionTools.listModels()];
364
+ case 3:
365
+ models = _d.sent();
366
+ availableModels.push.apply(availableModels, __spreadArray([], __read(models), false));
367
+ _d.label = 4;
368
+ case 4:
369
+ _b = _a.next();
370
+ return [3 /*break*/, 2];
371
+ case 5: return [3 /*break*/, 8];
372
+ case 6:
373
+ e_2_1 = _d.sent();
374
+ e_2 = { error: e_2_1 };
375
+ return [3 /*break*/, 8];
376
+ case 7:
377
+ try {
378
+ if (_b && !_b.done && (_c = _a.return)) _c.call(_a);
379
+ }
380
+ finally { if (e_2) throw e_2.error; }
381
+ return [7 /*endfinally*/];
382
+ case 8: return [2 /*return*/, availableModels];
383
+ }
384
+ });
385
+ });
386
+ };
387
+ return MultipleLlmExecutionTools;
388
+ }());
389
+ /**
390
+ * TODO: [🧠][🎛] Aggregating multiple models - have result not only from one first aviable model BUT all of them
391
+ * TODO: [🏖] If no llmTools have for example not defined `callCompletionModel` this will still return object with defined `callCompletionModel` which just throws `PipelineExecutionError`, make it undefined instead
392
+ * Look how `countTotalUsage` (and `cacheLlmTools`) implements it
393
+ */
394
+
395
+ /**
396
+ * Joins multiple LLM Execution Tools into one
397
+ *
398
+ * @returns {LlmExecutionTools} Single wrapper for multiple LlmExecutionTools
399
+ *
400
+ * 0) If there is no LlmExecutionTools, it warns and returns valid but empty LlmExecutionTools
401
+ * 1) If there is only one LlmExecutionTools, it returns it wrapped in a proxy object
402
+ * 2) If there are multiple LlmExecutionTools, first will be used first, second will be used if the first hasn`t defined model variant or fails, etc.
403
+ * 3) When all LlmExecutionTools fail, it throws an error with a list of all errors merged into one
404
+ *
405
+ *
406
+ * Tip: You don't have to use this function directly, just pass an array of LlmExecutionTools to the `ExecutionTools`
407
+ *
408
+ * @public exported from `@promptbook/core`
409
+ */
410
+ function joinLlmExecutionTools() {
411
+ var llmExecutionTools = [];
412
+ for (var _i = 0; _i < arguments.length; _i++) {
413
+ llmExecutionTools[_i] = arguments[_i];
414
+ }
415
+ if (llmExecutionTools.length === 0) {
416
+ var warningMessage = spaceTrim__default["default"]("\n You have not provided any `LlmExecutionTools`\n This means that you won't be able to execute any prompts that require large language models like GPT-4 or Anthropic's Claude.\n\n Technically, it's not an error, but it's probably not what you want because it does not make sense to use Promptbook without language models.\n ");
417
+ // TODO: [🟥] Detect browser / node and make it colorfull
418
+ console.warn(warningMessage);
419
+ /*
420
+ return {
421
+ async listModels() {
422
+ // TODO: [🟥] Detect browser / node and make it colorfull
423
+ console.warn(
424
+ spaceTrim(
425
+ (block) => `
426
+
427
+ You can't list models because you have no LLM Execution Tools defined:
428
+
429
+ tl;dr
430
+
431
+ ${block(warningMessage)}
432
+ `,
433
+ ),
434
+ );
435
+ return [];
436
+ },
437
+ };
438
+ */
439
+ }
440
+ return new (MultipleLlmExecutionTools.bind.apply(MultipleLlmExecutionTools, __spreadArray([void 0], __read(llmExecutionTools), false)))();
441
+ }
442
+ /**
443
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
444
+ */
445
+
446
+ /**
447
+ * Remote server is a proxy server that uses its execution tools internally and exposes the executor interface externally.
448
+ *
449
+ * You can simply use `RemoteExecutionTools` on client-side javascript and connect to your remote server.
450
+ * This is useful to make all logic on browser side but not expose your API keys or no need to use customer's GPU.
451
+ *
452
+ * @see https://github.com/webgptorg/promptbook#remote-server
453
+ * @public exported from `@promptbook/remote-client`
454
+ */
455
+ var RemoteLlmExecutionTools = /** @class */ (function () {
456
+ function RemoteLlmExecutionTools(options) {
457
+ this.options = options;
458
+ }
459
+ Object.defineProperty(RemoteLlmExecutionTools.prototype, "title", {
460
+ get: function () {
461
+ // TODO: [🧠] Maybe fetch title+description from the remote server (as well as if model methods are defined)
462
+ return 'Remote server';
463
+ },
464
+ enumerable: false,
465
+ configurable: true
466
+ });
467
+ Object.defineProperty(RemoteLlmExecutionTools.prototype, "description", {
468
+ get: function () {
469
+ return 'Use all models by your remote server';
470
+ },
471
+ enumerable: false,
472
+ configurable: true
473
+ });
474
+ /**
475
+ * Creates a connection to the remote proxy server.
476
+ */
477
+ RemoteLlmExecutionTools.prototype.makeConnection = function () {
478
+ var _this = this;
479
+ return new Promise(function (resolve, reject) {
480
+ var socket = socket_ioClient.io(_this.options.remoteUrl, {
481
+ path: _this.options.path,
482
+ // path: `${this.remoteUrl.pathname}/socket.io`,
483
+ transports: [/*'websocket', <- TODO: [🌬] Make websocket transport work */ 'polling'],
484
+ });
485
+ // console.log('Connecting to', this.options.remoteUrl.href, { socket });
486
+ socket.on('connect', function () {
487
+ resolve(socket);
488
+ });
489
+ setTimeout(function () {
490
+ reject(new Error("Timeout while connecting to ".concat(_this.options.remoteUrl)));
491
+ }, 60000 /* <- TODO: Timeout to config */);
492
+ });
493
+ };
494
+ /**
495
+ * Calls remote proxy server to use a chat model
496
+ */
497
+ RemoteLlmExecutionTools.prototype.callChatModel = function (prompt) {
498
+ if (this.options.isVerbose) {
499
+ console.info("\uD83D\uDD8B Remote callChatModel call");
500
+ }
501
+ return /* not await */ this.callCommonModel(prompt);
502
+ };
503
+ /**
504
+ * Calls remote proxy server to use a completion model
505
+ */
506
+ RemoteLlmExecutionTools.prototype.callCompletionModel = function (prompt) {
507
+ if (this.options.isVerbose) {
508
+ console.info("\uD83D\uDCAC Remote callCompletionModel call");
509
+ }
510
+ return /* not await */ this.callCommonModel(prompt);
511
+ };
512
+ /**
513
+ * Calls remote proxy server to use a embedding model
514
+ */
515
+ RemoteLlmExecutionTools.prototype.callEmbeddingModel = function (prompt) {
516
+ if (this.options.isVerbose) {
517
+ console.info("\uD83D\uDCAC Remote callEmbeddingModel call");
518
+ }
519
+ return /* not await */ this.callCommonModel(prompt);
520
+ };
521
+ // <- Note: [🤖] callXxxModel
522
+ /**
523
+ * Calls remote proxy server to use both completion or chat model
524
+ */
525
+ RemoteLlmExecutionTools.prototype.callCommonModel = function (prompt) {
526
+ return __awaiter(this, void 0, void 0, function () {
527
+ var socket, promptResult;
528
+ return __generator(this, function (_a) {
529
+ switch (_a.label) {
530
+ case 0: return [4 /*yield*/, this.makeConnection()];
531
+ case 1:
532
+ socket = _a.sent();
533
+ if (this.options.isAnonymous) {
534
+ socket.emit('request', {
535
+ llmToolsConfiguration: this.options.llmToolsConfiguration,
536
+ prompt: prompt,
537
+ // <- TODO: [🛫] `prompt` is NOT fully serializable as JSON, it contains functions which are not serializable
538
+ });
539
+ }
540
+ else {
541
+ socket.emit('request', {
542
+ clientId: this.options.clientId,
543
+ prompt: prompt,
544
+ // <- TODO: [🛫] `prompt` is NOT fully serializable as JSON, it contains functions which are not serializable
545
+ });
546
+ }
547
+ return [4 /*yield*/, new Promise(function (resolve, reject) {
548
+ socket.on('response', function (response) {
549
+ resolve(response.promptResult);
550
+ socket.disconnect();
551
+ });
552
+ socket.on('error', function (error) {
553
+ reject(new PipelineExecutionError(error.errorMessage));
554
+ socket.disconnect();
555
+ });
556
+ })];
557
+ case 2:
558
+ promptResult = _a.sent();
559
+ socket.disconnect();
560
+ return [2 /*return*/, promptResult];
561
+ }
562
+ });
563
+ });
564
+ };
565
+ /**
566
+ * List all available models that can be used
567
+ */
568
+ RemoteLlmExecutionTools.prototype.listModels = function () {
569
+ return __awaiter(this, void 0, void 0, function () {
570
+ return __generator(this, function (_a) {
571
+ return [2 /*return*/, [
572
+ /* !!! */
573
+ ]];
574
+ });
575
+ });
576
+ };
577
+ return RemoteLlmExecutionTools;
578
+ }());
579
+ /**
580
+ * TODO: [🍜] !!!!!! Default remote remoteUrl and path for anonymous server
581
+ * TODO: [🍓] Allow to list compatible models with each variant
582
+ * TODO: [🗯] RemoteLlmExecutionTools should extend Destroyable and implement IDestroyable
583
+ * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
584
+ * TODO: [🧠] Maybe remove `@promptbook/remote-client` and just use `@promptbook/core`
585
+ */
586
+
587
+ /**
588
+ * Counts number of characters in the text
589
+ *
590
+ * @public exported from `@promptbook/utils`
591
+ */
592
+ function countCharacters(text) {
593
+ // Remove null characters
594
+ text = text.replace(/\0/g, '');
595
+ // Replace emojis (and also ZWJ sequence) with hyphens
596
+ text = text.replace(/(\p{Extended_Pictographic})\p{Modifier_Symbol}/gu, '$1');
597
+ text = text.replace(/(\p{Extended_Pictographic})[\u{FE00}-\u{FE0F}]/gu, '$1');
598
+ text = text.replace(/\p{Extended_Pictographic}(\u{200D}\p{Extended_Pictographic})*/gu, '-');
599
+ return text.length;
600
+ }
601
+
602
+ /**
603
+ * Counts number of lines in the text
604
+ *
605
+ * @public exported from `@promptbook/utils`
606
+ */
607
+ function countLines(text) {
608
+ if (text === '') {
609
+ return 0;
610
+ }
611
+ return text.split('\n').length;
612
+ }
613
+
614
+ /**
615
+ * Counts number of pages in the text
616
+ *
617
+ * @public exported from `@promptbook/utils`
618
+ */
619
+ function countPages(text) {
620
+ var sentencesPerPage = 5; // Assuming each page has 5 sentences
621
+ var sentences = text.split(/[.!?]+/).filter(function (sentence) { return sentence.trim() !== ''; });
622
+ var pageCount = Math.ceil(sentences.length / sentencesPerPage);
623
+ return pageCount;
624
+ }
625
+
626
+ /**
627
+ * Counts number of paragraphs in the text
628
+ *
629
+ * @public exported from `@promptbook/utils`
630
+ */
631
+ function countParagraphs(text) {
632
+ return text.split(/\n\s*\n/).filter(function (paragraph) { return paragraph.trim() !== ''; }).length;
633
+ }
634
+
635
+ /**
636
+ * Split text into sentences
637
+ *
638
+ * @public exported from `@promptbook/utils`
639
+ */
640
+ function splitIntoSentences(text) {
641
+ return text.split(/[.!?]+/).filter(function (sentence) { return sentence.trim() !== ''; });
642
+ }
643
+ /**
644
+ * Counts number of sentences in the text
645
+ *
646
+ * @public exported from `@promptbook/utils`
647
+ */
648
+ function countSentences(text) {
649
+ return splitIntoSentences(text).length;
650
+ }
651
+
652
+ var defaultDiacriticsRemovalMap = [
653
+ {
654
+ base: 'A',
655
+ letters: '\u0041\u24B6\uFF21\u00C0\u00C1\u00C2\u1EA6\u1EA4\u1EAA\u1EA8\u00C3\u0100\u0102\u1EB0\u1EAE\u1EB4\u1EB2\u0226\u01E0\u00C4\u01DE\u1EA2\u00C5\u01FA\u01CD\u0200\u0202\u1EA0\u1EAC\u1EB6\u1E00\u0104\u023A\u2C6F',
656
+ },
657
+ { base: 'AA', letters: '\uA732' },
658
+ { base: 'AE', letters: '\u00C6\u01FC\u01E2' },
659
+ { base: 'AO', letters: '\uA734' },
660
+ { base: 'AU', letters: '\uA736' },
661
+ { base: 'AV', letters: '\uA738\uA73A' },
662
+ { base: 'AY', letters: '\uA73C' },
663
+ {
664
+ base: 'B',
665
+ letters: '\u0042\u24B7\uFF22\u1E02\u1E04\u1E06\u0243\u0182\u0181',
666
+ },
667
+ {
668
+ base: 'C',
669
+ letters: '\u0043\u24B8\uFF23\u0106\u0108\u010A\u010C\u00C7\u1E08\u0187\u023B\uA73E',
670
+ },
671
+ {
672
+ base: 'D',
673
+ letters: '\u0044\u24B9\uFF24\u1E0A\u010E\u1E0C\u1E10\u1E12\u1E0E\u0110\u018B\u018A\u0189\uA779\u00D0',
674
+ },
675
+ { base: 'DZ', letters: '\u01F1\u01C4' },
676
+ { base: 'Dz', letters: '\u01F2\u01C5' },
677
+ {
678
+ base: 'E',
679
+ letters: '\u0045\u24BA\uFF25\u00C8\u00C9\u00CA\u1EC0\u1EBE\u1EC4\u1EC2\u1EBC\u0112\u1E14\u1E16\u0114\u0116\u00CB\u1EBA\u011A\u0204\u0206\u1EB8\u1EC6\u0228\u1E1C\u0118\u1E18\u1E1A\u0190\u018E',
680
+ },
681
+ { base: 'F', letters: '\u0046\u24BB\uFF26\u1E1E\u0191\uA77B' },
682
+ {
683
+ base: 'G',
684
+ letters: '\u0047\u24BC\uFF27\u01F4\u011C\u1E20\u011E\u0120\u01E6\u0122\u01E4\u0193\uA7A0\uA77D\uA77E',
685
+ },
686
+ {
687
+ base: 'H',
688
+ letters: '\u0048\u24BD\uFF28\u0124\u1E22\u1E26\u021E\u1E24\u1E28\u1E2A\u0126\u2C67\u2C75\uA78D',
689
+ },
690
+ {
691
+ base: 'I',
692
+ letters: '\u0049\u24BE\uFF29\u00CC\u00CD\u00CE\u0128\u012A\u012C\u0130\u00CF\u1E2E\u1EC8\u01CF\u0208\u020A\u1ECA\u012E\u1E2C\u0197',
693
+ },
694
+ { base: 'J', letters: '\u004A\u24BF\uFF2A\u0134\u0248' },
695
+ {
696
+ base: 'K',
697
+ letters: '\u004B\u24C0\uFF2B\u1E30\u01E8\u1E32\u0136\u1E34\u0198\u2C69\uA740\uA742\uA744\uA7A2',
698
+ },
699
+ {
700
+ base: 'L',
701
+ letters: '\u004C\u24C1\uFF2C\u013F\u0139\u013D\u1E36\u1E38\u013B\u1E3C\u1E3A\u0141\u023D\u2C62\u2C60\uA748\uA746\uA780',
702
+ },
703
+ { base: 'LJ', letters: '\u01C7' },
704
+ { base: 'Lj', letters: '\u01C8' },
705
+ { base: 'M', letters: '\u004D\u24C2\uFF2D\u1E3E\u1E40\u1E42\u2C6E\u019C' },
706
+ {
707
+ base: 'N',
708
+ letters: '\u004E\u24C3\uFF2E\u01F8\u0143\u00D1\u1E44\u0147\u1E46\u0145\u1E4A\u1E48\u0220\u019D\uA790\uA7A4',
709
+ },
710
+ { base: 'NJ', letters: '\u01CA' },
711
+ { base: 'Nj', letters: '\u01CB' },
712
+ {
713
+ base: 'O',
714
+ letters: '\u004F\u24C4\uFF2F\u00D2\u00D3\u00D4\u1ED2\u1ED0\u1ED6\u1ED4\u00D5\u1E4C\u022C\u1E4E\u014C\u1E50\u1E52\u014E\u022E\u0230\u00D6\u022A\u1ECE\u0150\u01D1\u020C\u020E\u01A0\u1EDC\u1EDA\u1EE0\u1EDE\u1EE2\u1ECC\u1ED8\u01EA\u01EC\u00D8\u01FE\u0186\u019F\uA74A\uA74C',
715
+ },
716
+ { base: 'OI', letters: '\u01A2' },
717
+ { base: 'OO', letters: '\uA74E' },
718
+ { base: 'OU', letters: '\u0222' },
719
+ { base: 'OE', letters: '\u008C\u0152' },
720
+ { base: 'oe', letters: '\u009C\u0153' },
721
+ {
722
+ base: 'P',
723
+ letters: '\u0050\u24C5\uFF30\u1E54\u1E56\u01A4\u2C63\uA750\uA752\uA754',
724
+ },
725
+ { base: 'Q', letters: '\u0051\u24C6\uFF31\uA756\uA758\u024A' },
726
+ {
727
+ base: 'R',
728
+ letters: '\u0052\u24C7\uFF32\u0154\u1E58\u0158\u0210\u0212\u1E5A\u1E5C\u0156\u1E5E\u024C\u2C64\uA75A\uA7A6\uA782',
729
+ },
730
+ {
731
+ base: 'S',
732
+ letters: '\u0053\u24C8\uFF33\u1E9E\u015A\u1E64\u015C\u1E60\u0160\u1E66\u1E62\u1E68\u0218\u015E\u2C7E\uA7A8\uA784',
733
+ },
734
+ {
735
+ base: 'T',
736
+ letters: '\u0054\u24C9\uFF34\u1E6A\u0164\u1E6C\u021A\u0162\u1E70\u1E6E\u0166\u01AC\u01AE\u023E\uA786',
737
+ },
738
+ { base: 'TZ', letters: '\uA728' },
739
+ {
740
+ base: 'U',
741
+ letters: '\u0055\u24CA\uFF35\u00D9\u00DA\u00DB\u0168\u1E78\u016A\u1E7A\u016C\u00DC\u01DB\u01D7\u01D5\u01D9\u1EE6\u016E\u0170\u01D3\u0214\u0216\u01AF\u1EEA\u1EE8\u1EEE\u1EEC\u1EF0\u1EE4\u1E72\u0172\u1E76\u1E74\u0244',
742
+ },
743
+ { base: 'V', letters: '\u0056\u24CB\uFF36\u1E7C\u1E7E\u01B2\uA75E\u0245' },
744
+ { base: 'VY', letters: '\uA760' },
745
+ {
746
+ base: 'W',
747
+ letters: '\u0057\u24CC\uFF37\u1E80\u1E82\u0174\u1E86\u1E84\u1E88\u2C72',
748
+ },
749
+ { base: 'X', letters: '\u0058\u24CD\uFF38\u1E8A\u1E8C' },
750
+ {
751
+ base: 'Y',
752
+ letters: '\u0059\u24CE\uFF39\u1EF2\u00DD\u0176\u1EF8\u0232\u1E8E\u0178\u1EF6\u1EF4\u01B3\u024E\u1EFE',
753
+ },
754
+ {
755
+ base: 'Z',
756
+ letters: '\u005A\u24CF\uFF3A\u0179\u1E90\u017B\u017D\u1E92\u1E94\u01B5\u0224\u2C7F\u2C6B\uA762',
757
+ },
758
+ {
759
+ base: 'a',
760
+ letters: '\u0061\u24D0\uFF41\u1E9A\u00E0\u00E1\u00E2\u1EA7\u1EA5\u1EAB\u1EA9\u00E3\u0101\u0103\u1EB1\u1EAF\u1EB5\u1EB3\u0227\u01E1\u00E4\u01DF\u1EA3\u00E5\u01FB\u01CE\u0201\u0203\u1EA1\u1EAD\u1EB7\u1E01\u0105\u2C65\u0250',
761
+ },
762
+ { base: 'aa', letters: '\uA733' },
763
+ { base: 'ae', letters: '\u00E6\u01FD\u01E3' },
764
+ { base: 'ao', letters: '\uA735' },
765
+ { base: 'au', letters: '\uA737' },
766
+ { base: 'av', letters: '\uA739\uA73B' },
767
+ { base: 'ay', letters: '\uA73D' },
768
+ {
769
+ base: 'b',
770
+ letters: '\u0062\u24D1\uFF42\u1E03\u1E05\u1E07\u0180\u0183\u0253',
771
+ },
772
+ {
773
+ base: 'c',
774
+ letters: '\u0063\u24D2\uFF43\u0107\u0109\u010B\u010D\u00E7\u1E09\u0188\u023C\uA73F\u2184',
775
+ },
776
+ {
777
+ base: 'd',
778
+ letters: '\u0064\u24D3\uFF44\u1E0B\u010F\u1E0D\u1E11\u1E13\u1E0F\u0111\u018C\u0256\u0257\uA77A',
779
+ },
780
+ { base: 'dz', letters: '\u01F3\u01C6' },
781
+ {
782
+ base: 'e',
783
+ letters: '\u0065\u24D4\uFF45\u00E8\u00E9\u00EA\u1EC1\u1EBF\u1EC5\u1EC3\u1EBD\u0113\u1E15\u1E17\u0115\u0117\u00EB\u1EBB\u011B\u0205\u0207\u1EB9\u1EC7\u0229\u1E1D\u0119\u1E19\u1E1B\u0247\u025B\u01DD',
784
+ },
785
+ { base: 'f', letters: '\u0066\u24D5\uFF46\u1E1F\u0192\uA77C' },
786
+ {
787
+ base: 'g',
788
+ letters: '\u0067\u24D6\uFF47\u01F5\u011D\u1E21\u011F\u0121\u01E7\u0123\u01E5\u0260\uA7A1\u1D79\uA77F',
789
+ },
790
+ {
791
+ base: 'h',
792
+ letters: '\u0068\u24D7\uFF48\u0125\u1E23\u1E27\u021F\u1E25\u1E29\u1E2B\u1E96\u0127\u2C68\u2C76\u0265',
793
+ },
794
+ { base: 'hv', letters: '\u0195' },
795
+ {
796
+ base: 'i',
797
+ letters: '\u0069\u24D8\uFF49\u00EC\u00ED\u00EE\u0129\u012B\u012D\u00EF\u1E2F\u1EC9\u01D0\u0209\u020B\u1ECB\u012F\u1E2D\u0268\u0131',
798
+ },
799
+ { base: 'j', letters: '\u006A\u24D9\uFF4A\u0135\u01F0\u0249' },
800
+ {
801
+ base: 'k',
802
+ letters: '\u006B\u24DA\uFF4B\u1E31\u01E9\u1E33\u0137\u1E35\u0199\u2C6A\uA741\uA743\uA745\uA7A3',
803
+ },
804
+ {
805
+ base: 'l',
806
+ letters: '\u006C\u24DB\uFF4C\u0140\u013A\u013E\u1E37\u1E39\u013C\u1E3D\u1E3B\u017F\u0142\u019A\u026B\u2C61\uA749\uA781\uA747',
807
+ },
808
+ { base: 'lj', letters: '\u01C9' },
809
+ { base: 'm', letters: '\u006D\u24DC\uFF4D\u1E3F\u1E41\u1E43\u0271\u026F' },
810
+ {
811
+ base: 'n',
812
+ letters: '\u006E\u24DD\uFF4E\u01F9\u0144\u00F1\u1E45\u0148\u1E47\u0146\u1E4B\u1E49\u019E\u0272\u0149\uA791\uA7A5',
813
+ },
814
+ { base: 'nj', letters: '\u01CC' },
815
+ {
816
+ base: 'o',
817
+ letters: '\u006F\u24DE\uFF4F\u00F2\u00F3\u00F4\u1ED3\u1ED1\u1ED7\u1ED5\u00F5\u1E4D\u022D\u1E4F\u014D\u1E51\u1E53\u014F\u022F\u0231\u00F6\u022B\u1ECF\u0151\u01D2\u020D\u020F\u01A1\u1EDD\u1EDB\u1EE1\u1EDF\u1EE3\u1ECD\u1ED9\u01EB\u01ED\u00F8\u01FF\u0254\uA74B\uA74D\u0275',
818
+ },
819
+ { base: 'oi', letters: '\u01A3' },
820
+ { base: 'ou', letters: '\u0223' },
821
+ { base: 'oo', letters: '\uA74F' },
822
+ {
823
+ base: 'p',
824
+ letters: '\u0070\u24DF\uFF50\u1E55\u1E57\u01A5\u1D7D\uA751\uA753\uA755',
825
+ },
826
+ { base: 'q', letters: '\u0071\u24E0\uFF51\u024B\uA757\uA759' },
827
+ {
828
+ base: 'r',
829
+ letters: '\u0072\u24E1\uFF52\u0155\u1E59\u0159\u0211\u0213\u1E5B\u1E5D\u0157\u1E5F\u024D\u027D\uA75B\uA7A7\uA783',
830
+ },
831
+ {
832
+ base: 's',
833
+ letters: '\u0073\u24E2\uFF53\u00DF\u015B\u1E65\u015D\u1E61\u0161\u1E67\u1E63\u1E69\u0219\u015F\u023F\uA7A9\uA785\u1E9B',
834
+ },
835
+ {
836
+ base: 't',
837
+ letters: '\u0074\u24E3\uFF54\u1E6B\u1E97\u0165\u1E6D\u021B\u0163\u1E71\u1E6F\u0167\u01AD\u0288\u2C66\uA787',
838
+ },
839
+ { base: 'tz', letters: '\uA729' },
840
+ {
841
+ base: 'u',
842
+ letters: '\u0075\u24E4\uFF55\u00F9\u00FA\u00FB\u0169\u1E79\u016B\u1E7B\u016D\u00FC\u01DC\u01D8\u01D6\u01DA\u1EE7\u016F\u0171\u01D4\u0215\u0217\u01B0\u1EEB\u1EE9\u1EEF\u1EED\u1EF1\u1EE5\u1E73\u0173\u1E77\u1E75\u0289',
843
+ },
844
+ { base: 'v', letters: '\u0076\u24E5\uFF56\u1E7D\u1E7F\u028B\uA75F\u028C' },
845
+ { base: 'vy', letters: '\uA761' },
846
+ {
847
+ base: 'w',
848
+ letters: '\u0077\u24E6\uFF57\u1E81\u1E83\u0175\u1E87\u1E85\u1E98\u1E89\u2C73',
849
+ },
850
+ { base: 'x', letters: '\u0078\u24E7\uFF58\u1E8B\u1E8D' },
851
+ {
852
+ base: 'y',
853
+ letters: '\u0079\u24E8\uFF59\u1EF3\u00FD\u0177\u1EF9\u0233\u1E8F\u00FF\u1EF7\u1E99\u1EF5\u01B4\u024F\u1EFF',
854
+ },
855
+ {
856
+ base: 'z',
857
+ letters: '\u007A\u24E9\uFF5A\u017A\u1E91\u017C\u017E\u1E93\u1E95\u01B6\u0225\u0240\u2C6C\uA763',
858
+ },
859
+ ];
860
+ /**
861
+ * Map of letters from diacritic variant to diacritless variant
862
+ * Contains lowercase and uppercase separatelly
863
+ *
864
+ * > "á" => "a"
865
+ * > "ě" => "e"
866
+ * > "Ă" => "A"
867
+ * > ...
868
+ *
869
+ * @public exported from `@promptbook/utils`
870
+ */
871
+ var DIACRITIC_VARIANTS_LETTERS = {};
872
+ // tslint:disable-next-line: prefer-for-of
873
+ for (var i = 0; i < defaultDiacriticsRemovalMap.length; i++) {
874
+ var letters = defaultDiacriticsRemovalMap[i].letters;
875
+ // tslint:disable-next-line: prefer-for-of
876
+ for (var j = 0; j < letters.length; j++) {
877
+ DIACRITIC_VARIANTS_LETTERS[letters[j]] = defaultDiacriticsRemovalMap[i].base;
878
+ }
879
+ }
880
+ // <- TODO: [🍓] Put to maker function to save execution time if not needed
881
+ /*
882
+ @see https://stackoverflow.com/questions/990904/remove-accents-diacritics-in-a-string-in-javascript
883
+ Licensed under the Apache License, Version 2.0 (the "License");
884
+ you may not use this file except in compliance with the License.
885
+ You may obtain a copy of the License at
886
+
887
+ http://www.apache.org/licenses/LICENSE-2.0
888
+
889
+ Unless required by applicable law or agreed to in writing, software
890
+ distributed under the License is distributed on an "AS IS" BASIS,
891
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
892
+ See the License for the specific language governing permissions and
893
+ limitations under the License.
894
+ */
895
+
896
+ /**
897
+ * @@@
898
+ *
899
+ * @param input @@@
900
+ * @returns @@@
901
+ * @public exported from `@promptbook/utils`
902
+ */
903
+ function removeDiacritics(input) {
904
+ /*eslint no-control-regex: "off"*/
905
+ return input.replace(/[^\u0000-\u007E]/g, function (a) {
906
+ return DIACRITIC_VARIANTS_LETTERS[a] || a;
907
+ });
908
+ }
909
+ /**
910
+ * TODO: [Ж] Variant for cyrillic (and in general non-latin) letters
911
+ */
912
+
913
+ /**
914
+ * Counts number of words in the text
915
+ *
916
+ * @public exported from `@promptbook/utils`
917
+ */
918
+ function countWords(text) {
919
+ text = text.replace(/[\p{Extended_Pictographic}]/gu, 'a');
920
+ text = removeDiacritics(text);
921
+ return text.split(/[^a-zа-я0-9]+/i).filter(function (word) { return word.length > 0; }).length;
922
+ }
923
+
924
+ /**
925
+ * Helper of usage compute
926
+ *
927
+ * @param content the content of prompt or response
928
+ * @returns part of PromptResultUsageCounts
929
+ *
930
+ * @private internal utility of LlmExecutionTools
931
+ */
932
+ function computeUsageCounts(content) {
933
+ return {
934
+ charactersCount: { value: countCharacters(content) },
935
+ wordsCount: { value: countWords(content) },
936
+ sentencesCount: { value: countSentences(content) },
937
+ linesCount: { value: countLines(content) },
938
+ paragraphsCount: { value: countParagraphs(content) },
939
+ pagesCount: { value: countPages(content) },
940
+ };
941
+ }
942
+
943
+ /**
944
+ * Make UncertainNumber
945
+ *
946
+ * @param value
947
+ *
948
+ * @private utility for initializating UncertainNumber
949
+ */
950
+ function uncertainNumber(value) {
951
+ if (value === null || value === undefined || Number.isNaN(value)) {
952
+ return { value: 0, isUncertain: true };
953
+ }
954
+ return { value: value };
955
+ }
956
+
957
+ /**
958
+ * Get current date in ISO 8601 format
959
+ *
960
+ * @private internal utility
961
+ */
962
+ function getCurrentIsoDate() {
963
+ return new Date().toISOString();
964
+ }
965
+
966
+ /**
967
+ * @@@
968
+ *
969
+ * Note: This function mutates the object and returns the original (but mutated-deep-freezed) object
970
+ *
971
+ * @returns The same object as the input, but deeply frozen
972
+ * @public exported from `@promptbook/utils`
973
+ */
974
+ function deepFreeze(objectValue) {
975
+ var e_1, _a;
976
+ var propertyNames = Object.getOwnPropertyNames(objectValue);
977
+ try {
978
+ for (var propertyNames_1 = __values(propertyNames), propertyNames_1_1 = propertyNames_1.next(); !propertyNames_1_1.done; propertyNames_1_1 = propertyNames_1.next()) {
979
+ var propertyName = propertyNames_1_1.value;
980
+ var value = objectValue[propertyName];
981
+ if (value && typeof value === 'object') {
982
+ deepFreeze(value);
983
+ }
984
+ }
985
+ }
986
+ catch (e_1_1) { e_1 = { error: e_1_1 }; }
987
+ finally {
988
+ try {
989
+ if (propertyNames_1_1 && !propertyNames_1_1.done && (_a = propertyNames_1.return)) _a.call(propertyNames_1);
990
+ }
991
+ finally { if (e_1) throw e_1.error; }
992
+ }
993
+ return Object.freeze(objectValue);
994
+ }
995
+ /**
996
+ * TODO: [🧠] Is there a way how to meaningfully test this utility
997
+ */
998
+
999
+ // <- TODO: [🧠] Better system for generator warnings - not always "code" and "by `@promptbook/cli`"
1000
+ /**
1001
+ * The maximum number of iterations for a loops
1002
+ *
1003
+ * @private within the repository - too low-level in comparison with other `MAX_...`
1004
+ */
1005
+ var LOOP_LIMIT = 1000;
1006
+ /**
1007
+ * Nonce which is used for replacing things in strings
1008
+ *
1009
+ * @private within the repository
1010
+ */
1011
+ var REPLACING_NONCE = 'u$k42k%!V2zo34w7Fu#@QUHYPW';
1012
+ /**
1013
+ * The names of the parameters that are reserved for special purposes
1014
+ *
1015
+ * @public exported from `@promptbook/core`
1016
+ */
1017
+ deepFreeze([
1018
+ 'content',
1019
+ 'context',
1020
+ 'knowledge',
1021
+ 'samples',
1022
+ 'modelName',
1023
+ 'currentDate',
1024
+ // <- TODO: Add more like 'date', 'modelName',...
1025
+ // <- TODO: Add [emoji] + instructions ACRY when adding new reserved parameter
1026
+ ]);
1027
+ /**
1028
+ * @@@
1029
+ *
1030
+ * @private within the repository
1031
+ */
1032
+ var RESERVED_PARAMETER_MISSING_VALUE = 'MISSING-' + REPLACING_NONCE;
1033
+ /**
1034
+ * @@@
1035
+ *
1036
+ * @private within the repository
1037
+ */
1038
+ var RESERVED_PARAMETER_RESTRICTED = 'RESTRICTED-' + REPLACING_NONCE;
1039
+
1040
+ /**
1041
+ * This error type indicates that some limit was reached
1042
+ *
1043
+ * @public exported from `@promptbook/core`
1044
+ */
1045
+ var LimitReachedError = /** @class */ (function (_super) {
1046
+ __extends(LimitReachedError, _super);
1047
+ function LimitReachedError(message) {
1048
+ var _this = _super.call(this, message) || this;
1049
+ _this.name = 'LimitReachedError';
1050
+ Object.setPrototypeOf(_this, LimitReachedError.prototype);
1051
+ return _this;
1052
+ }
1053
+ return LimitReachedError;
1054
+ }(Error));
1055
+
1056
+ /**
1057
+ * Replaces parameters in template with values from parameters object
1058
+ *
1059
+ * @param template the template with parameters in {curly} braces
1060
+ * @param parameters the object with parameters
1061
+ * @returns the template with replaced parameters
1062
+ * @throws {PipelineExecutionError} if parameter is not defined, not closed, or not opened
1063
+ * @public exported from `@promptbook/utils`
1064
+ */
1065
+ function replaceParameters(template, parameters) {
1066
+ var e_1, _a;
1067
+ try {
1068
+ for (var _b = __values(Object.entries(parameters)), _c = _b.next(); !_c.done; _c = _b.next()) {
1069
+ var _d = __read(_c.value, 2), parameterName = _d[0], parameterValue = _d[1];
1070
+ if (parameterValue === RESERVED_PARAMETER_MISSING_VALUE) {
1071
+ throw new UnexpectedError("Parameter {".concat(parameterName, "} has missing value"));
1072
+ }
1073
+ else if (parameterValue === RESERVED_PARAMETER_RESTRICTED) {
1074
+ // TODO: [🍵]
1075
+ throw new UnexpectedError("Parameter {".concat(parameterName, "} is restricted to use"));
1076
+ }
1077
+ }
1078
+ }
1079
+ catch (e_1_1) { e_1 = { error: e_1_1 }; }
1080
+ finally {
1081
+ try {
1082
+ if (_c && !_c.done && (_a = _b.return)) _a.call(_b);
1083
+ }
1084
+ finally { if (e_1) throw e_1.error; }
1085
+ }
1086
+ var replacedTemplate = template;
1087
+ var match;
1088
+ var loopLimit = LOOP_LIMIT;
1089
+ var _loop_1 = function () {
1090
+ if (loopLimit-- < 0) {
1091
+ throw new LimitReachedError('Loop limit reached during parameters replacement in `replaceParameters`');
1092
+ }
1093
+ var precol = match.groups.precol;
1094
+ var parameterName = match.groups.parameterName;
1095
+ if (parameterName === '') {
1096
+ return "continue";
1097
+ }
1098
+ if (parameterName.indexOf('{') !== -1 || parameterName.indexOf('}') !== -1) {
1099
+ throw new PipelineExecutionError('Parameter is already opened or not closed');
1100
+ }
1101
+ if (parameters[parameterName] === undefined) {
1102
+ throw new PipelineExecutionError("Parameter {".concat(parameterName, "} is not defined"));
1103
+ }
1104
+ var parameterValue = parameters[parameterName];
1105
+ if (parameterValue === undefined) {
1106
+ throw new PipelineExecutionError("Parameter {".concat(parameterName, "} is not defined"));
1107
+ }
1108
+ parameterValue = parameterValue.toString();
1109
+ if (parameterValue.includes('\n') && /^\s*\W{0,3}\s*$/.test(precol)) {
1110
+ parameterValue = parameterValue
1111
+ .split('\n')
1112
+ .map(function (line, index) { return (index === 0 ? line : "".concat(precol).concat(line)); })
1113
+ .join('\n');
1114
+ }
1115
+ replacedTemplate =
1116
+ replacedTemplate.substring(0, match.index + precol.length) +
1117
+ parameterValue +
1118
+ replacedTemplate.substring(match.index + precol.length + parameterName.length + 2);
1119
+ };
1120
+ while ((match = /^(?<precol>.*){(?<parameterName>\w+)}(.*)/m /* <- Not global */
1121
+ .exec(replacedTemplate))) {
1122
+ _loop_1();
1123
+ }
1124
+ // [💫] Check if there are parameters that are not closed properly
1125
+ if (/{\w+$/.test(replacedTemplate)) {
1126
+ throw new PipelineExecutionError('Parameter is not closed');
1127
+ }
1128
+ // [💫] Check if there are parameters that are not opened properly
1129
+ if (/^\w+}/.test(replacedTemplate)) {
1130
+ throw new PipelineExecutionError('Parameter is not opened');
1131
+ }
1132
+ return replacedTemplate;
1133
+ }
1134
+
1135
+ /**
1136
+ * Function computeUsage will create price per one token based on the string value found on openai page
1137
+ *
1138
+ * @private within the repository, used only as internal helper for `OPENAI_MODELS`
1139
+ */
1140
+ function computeUsage(value) {
1141
+ var _a = __read(value.split(' / '), 2), price = _a[0], tokens = _a[1];
1142
+ return parseFloat(price.replace('$', '')) / parseFloat(tokens.replace('M tokens', '')) / 1000000;
1143
+ }
1144
+
1145
+ /**
1146
+ * List of available Anthropic Claude models with pricing
1147
+ *
1148
+ * Note: Done at 2024-05-25
1149
+ *
1150
+ * @see https://docs.anthropic.com/en/docs/models-overview
1151
+ * @public exported from `@promptbook/anthropic-claude`
1152
+ */
1153
+ var ANTHROPIC_CLAUDE_MODELS = [
1154
+ {
1155
+ modelVariant: 'CHAT',
1156
+ modelTitle: 'Claude 3 Opus',
1157
+ modelName: 'claude-3-opus-20240229',
1158
+ pricing: {
1159
+ prompt: computeUsage("$15.00 / 1M tokens"),
1160
+ output: computeUsage("$75.00 / 1M tokens"),
1161
+ },
1162
+ },
1163
+ {
1164
+ modelVariant: 'CHAT',
1165
+ modelTitle: 'Claude 3 Sonnet',
1166
+ modelName: 'claude-3-sonnet-20240229',
1167
+ pricing: {
1168
+ prompt: computeUsage("$3.00 / 1M tokens"),
1169
+ output: computeUsage("$15.00 / 1M tokens"),
1170
+ },
1171
+ },
1172
+ {
1173
+ modelVariant: 'CHAT',
1174
+ modelTitle: 'Claude 3 Haiku',
1175
+ modelName: ' claude-3-haiku-20240307',
1176
+ pricing: {
1177
+ prompt: computeUsage("$0.25 / 1M tokens"),
1178
+ output: computeUsage("$1.25 / 1M tokens"),
1179
+ },
1180
+ },
1181
+ {
1182
+ modelVariant: 'CHAT',
1183
+ modelTitle: 'Claude 2.1',
1184
+ modelName: 'claude-2.1',
1185
+ pricing: {
1186
+ prompt: computeUsage("$8.00 / 1M tokens"),
1187
+ output: computeUsage("$24.00 / 1M tokens"),
1188
+ },
1189
+ },
1190
+ {
1191
+ modelVariant: 'CHAT',
1192
+ modelTitle: 'Claude 2',
1193
+ modelName: 'claude-2.0',
1194
+ pricing: {
1195
+ prompt: computeUsage("$8.00 / 1M tokens"),
1196
+ output: computeUsage("$24.00 / 1M tokens"),
1197
+ },
1198
+ },
1199
+ {
1200
+ modelVariant: 'CHAT',
1201
+ modelTitle: ' Claude Instant 1.2',
1202
+ modelName: 'claude-instant-1.2',
1203
+ pricing: {
1204
+ prompt: computeUsage("$0.80 / 1M tokens"),
1205
+ output: computeUsage("$2.40 / 1M tokens"),
1206
+ },
1207
+ },
1208
+ // TODO: !!! Claude 1 and 2 has also completion versions - ask Hoagy
1209
+ ];
1210
+ /**
1211
+ * Note: [🤖] Add models of new variant
1212
+ * TODO: [🧠] !!! Add embedding models OR Anthropic has only chat+completion models?
1213
+ * TODO: [🧠] Some mechanism to propagate unsureness
1214
+ * TODO: [🧠][👮‍♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...
1215
+ * TODO: [🕚] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing
1216
+ */
1217
+
1218
+ /**
1219
+ * Execution Tools for calling Anthropic Claude API.
1220
+ *
1221
+ * @public exported from `@promptbook/anthropic-claude`
1222
+ * @deprecated use `createAnthropicClaudeExecutionTools` instead
1223
+ */
1224
+ var AnthropicClaudeExecutionTools = /** @class */ (function () {
1225
+ /**
1226
+ * Creates Anthropic Claude Execution Tools.
1227
+ *
1228
+ * @param options which are relevant are directly passed to the Anthropic Claude client
1229
+ */
1230
+ function AnthropicClaudeExecutionTools(options) {
1231
+ if (options === void 0) { options = { isProxied: false }; }
1232
+ this.options = options;
1233
+ // Note: Passing only Anthropic Claude relevant options to Anthropic constructor
1234
+ var anthropicOptions = __assign({}, options);
1235
+ delete anthropicOptions.isVerbose;
1236
+ delete anthropicOptions.isProxied;
1237
+ this.client = new Anthropic__default["default"](anthropicOptions);
1238
+ }
1239
+ Object.defineProperty(AnthropicClaudeExecutionTools.prototype, "title", {
1240
+ get: function () {
1241
+ return 'Anthropic Claude';
1242
+ },
1243
+ enumerable: false,
1244
+ configurable: true
1245
+ });
1246
+ Object.defineProperty(AnthropicClaudeExecutionTools.prototype, "description", {
1247
+ get: function () {
1248
+ return 'Use all models provided by Anthropic Claude';
1249
+ },
1250
+ enumerable: false,
1251
+ configurable: true
1252
+ });
1253
+ /**
1254
+ * Calls Anthropic Claude API to use a chat model.
1255
+ */
1256
+ AnthropicClaudeExecutionTools.prototype.callChatModel = function (prompt) {
1257
+ return __awaiter(this, void 0, void 0, function () {
1258
+ var content, parameters, modelRequirements, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
1259
+ return __generator(this, function (_a) {
1260
+ switch (_a.label) {
1261
+ case 0:
1262
+ if (this.options.isVerbose) {
1263
+ console.info('💬 Anthropic Claude callChatModel call');
1264
+ }
1265
+ content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
1266
+ // TODO: [☂] Use here more modelRequirements
1267
+ if (modelRequirements.modelVariant !== 'CHAT') {
1268
+ throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
1269
+ }
1270
+ modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
1271
+ rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
1272
+ rawRequest = {
1273
+ model: modelRequirements.modelName || this.getDefaultChatModel().modelName,
1274
+ max_tokens: modelRequirements.maxTokens || 4096,
1275
+ // <- TODO: [🌾] Make some global max cap for maxTokens
1276
+ temperature: modelRequirements.temperature,
1277
+ system: modelRequirements.systemMessage,
1278
+ // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
1279
+ // <- Note: [🧆]
1280
+ messages: [
1281
+ {
1282
+ role: 'user',
1283
+ content: rawPromptContent,
1284
+ },
1285
+ ],
1286
+ // TODO: Is here some equivalent of user identification?> user: this.options.user,
1287
+ };
1288
+ start = getCurrentIsoDate();
1289
+ if (this.options.isVerbose) {
1290
+ console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
1291
+ }
1292
+ return [4 /*yield*/, this.client.messages.create(rawRequest)];
1293
+ case 1:
1294
+ rawResponse = _a.sent();
1295
+ if (this.options.isVerbose) {
1296
+ console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
1297
+ }
1298
+ if (!rawResponse.content[0]) {
1299
+ throw new PipelineExecutionError('No content from Anthropic Claude');
1300
+ }
1301
+ if (rawResponse.content.length > 1) {
1302
+ throw new PipelineExecutionError('More than one content blocks from Anthropic Claude');
1303
+ }
1304
+ resultContent = rawResponse.content[0].text;
1305
+ // eslint-disable-next-line prefer-const
1306
+ complete = getCurrentIsoDate();
1307
+ usage = {
1308
+ price: { value: 0, isUncertain: true } /* <- TODO: [🐞] Compute usage */,
1309
+ input: __assign({ tokensCount: uncertainNumber(rawResponse.usage.input_tokens) }, computeUsageCounts(prompt.content)),
1310
+ output: __assign({ tokensCount: uncertainNumber(rawResponse.usage.output_tokens) }, computeUsageCounts(prompt.content)),
1311
+ };
1312
+ return [2 /*return*/, {
1313
+ content: resultContent,
1314
+ modelName: rawResponse.model,
1315
+ timing: {
1316
+ start: start,
1317
+ complete: complete,
1318
+ },
1319
+ usage: usage,
1320
+ rawPromptContent: rawPromptContent,
1321
+ rawRequest: rawRequest,
1322
+ rawResponse: rawResponse,
1323
+ // <- [🗯]
1324
+ }];
1325
+ }
1326
+ });
1327
+ });
1328
+ };
1329
+ /*
1330
+ TODO: [👏]
1331
+ public async callCompletionModel(
1332
+ prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements'>,
1333
+ ): Promise<PromptCompletionResult> {
1334
+
1335
+ if (this.options.isVerbose) {
1336
+ console.info('🖋 Anthropic Claude callCompletionModel call');
1337
+ }
1338
+
1339
+ const { content, parameters, modelRequirements } = prompt;
1340
+
1341
+ // TODO: [☂] Use here more modelRequirements
1342
+ if (modelRequirements.modelVariant !== 'COMPLETION') {
1343
+ throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
1344
+ }
1345
+
1346
+ const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
1347
+ const modelSettings = {
1348
+ model: modelName,
1349
+ max_tokens: modelRequirements.maxTokens || 2000, // <- Note: 2000 is for legacy reasons
1350
+ // <- TODO: [🌾] Make some global max cap for maxTokens
1351
+ // <- TODO: Use here `systemMessage`, `temperature` and `seed`
1352
+ };
1353
+
1354
+ const rawRequest: xxxx.Completions.CompletionCreateParamsNonStreaming = {
1355
+ ...modelSettings,
1356
+ prompt: rawPromptContent,
1357
+ user: this.options.user,
1358
+ };
1359
+ const start: string_date_iso8601 = getCurrentIsoDate();
1360
+ let complete: string_date_iso8601;
1361
+
1362
+ if (this.options.isVerbose) {
1363
+ console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
1364
+ }
1365
+ const rawResponse = await this.client.completions.create(rawRequest);
1366
+ if (this.options.isVerbose) {
1367
+ console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
1368
+ }
1369
+
1370
+ if (!rawResponse.choices[0]) {
1371
+ throw new PipelineExecutionError('No choices from Anthropic Claude');
1372
+ }
1373
+
1374
+ if (rawResponse.choices.length > 1) {
1375
+ // TODO: This should maybe be only a warning
1376
+ throw new PipelineExecutionError('More than one choice from Anthropic Claude');
1377
+ }
1378
+
1379
+ const resultContent = rawResponse.choices[0].text;
1380
+ // eslint-disable-next-line prefer-const
1381
+ complete = getCurrentIsoDate();
1382
+ const usage = { price: 'UNKNOWN', inputTokens: 0, outputTokens: 0 /* <- TODO: [🐞] Compute usage * / } satisfies PromptResultUsage;
1383
+
1384
+
1385
+
1386
+ return {
1387
+ content: resultContent,
1388
+ modelName: rawResponse.model || model,
1389
+ timing: {
1390
+ start,
1391
+ complete,
1392
+ },
1393
+ usage,
1394
+ rawResponse,
1395
+ // <- [🗯]
1396
+ };
1397
+ }
1398
+ */
1399
+ // <- Note: [🤖] callXxxModel
1400
+ /**
1401
+ * Get the model that should be used as default
1402
+ */
1403
+ AnthropicClaudeExecutionTools.prototype.getDefaultModel = function (defaultModelName) {
1404
+ var model = ANTHROPIC_CLAUDE_MODELS.find(function (_a) {
1405
+ var modelName = _a.modelName;
1406
+ return modelName.startsWith(defaultModelName);
1407
+ });
1408
+ if (model === undefined) {
1409
+ throw new UnexpectedError(spaceTrim__default["default"](function (block) {
1410
+ return "\n Cannot find model in Anthropic Claude models with name \"".concat(defaultModelName, "\" which should be used as default.\n\n Available models:\n ").concat(block(ANTHROPIC_CLAUDE_MODELS.map(function (_a) {
1411
+ var modelName = _a.modelName;
1412
+ return "- \"".concat(modelName, "\"");
1413
+ }).join('\n')), "\n\n ");
1414
+ }));
1415
+ }
1416
+ return model;
1417
+ };
1418
+ /**
1419
+ * Default model for chat variant.
1420
+ */
1421
+ AnthropicClaudeExecutionTools.prototype.getDefaultChatModel = function () {
1422
+ return this.getDefaultModel('claude-3-opus');
1423
+ };
1424
+ // <- Note: [🤖] getDefaultXxxModel
1425
+ /**
1426
+ * List all available Anthropic Claude models that can be used
1427
+ */
1428
+ AnthropicClaudeExecutionTools.prototype.listModels = function () {
1429
+ return ANTHROPIC_CLAUDE_MODELS;
1430
+ };
1431
+ return AnthropicClaudeExecutionTools;
1432
+ }());
1433
+ /**
1434
+ * TODO: [🍆] JSON mode
1435
+ * TODO: [🧠] Maybe handle errors via transformAnthropicError (like transformAzureError)
1436
+ * TODO: Maybe Create some common util for callChatModel and callCompletionModel
1437
+ * TODO: Maybe make custom AnthropicClaudeError
1438
+ * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
1439
+ * TODO: [🍜] !!!!!! Auto use anonymous server in browser
1440
+ * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
1441
+ * TODO: [📅] Maybe instead of `RemoteLlmExecutionToolsOptions` use `proxyWithAnonymousRemoteServer` (if implemented)
1442
+ */
1443
+
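For orientation, a minimal sketch of the prompt object that `callChatModel` above consumes. Only the fields actually destructured in this bundle (`content`, `parameters`, `modelRequirements`) are shown; the `{topic}` placeholder syntax for `replaceParameters` and the exact `Prompt` typing are assumptions, not confirmed by this diff.

    // Hypothetical prompt for AnthropicClaudeExecutionTools.callChatModel (TypeScript sketch)
    const prompt = {
        content: 'Write a haiku about {topic}',  // placeholders are filled by replaceParameters
        parameters: { topic: 'the sea' },
        modelRequirements: {
            modelVariant: 'CHAT',                 // anything else throws PipelineExecutionError
            modelName: 'claude-3-opus-20240229',  // omitted -> getDefaultChatModel(), which picks the first 'claude-3-opus*' entry
            maxTokens: 1024,                      // omitted -> 4096
            systemMessage: 'You are a poet.',
            temperature: 0.7,
        },
    };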
1444
+ /**
1445
+ * Execution Tools for calling Anthropic Claude API.
1446
+ *
1447
+ * @public exported from `@promptbook/anthropic-claude`
1448
+ */
1449
+ function createAnthropicClaudeExecutionTools(options) {
1450
+ if (options.isProxied) {
1451
+ return new RemoteLlmExecutionTools(__assign(__assign({}, options), { isAnonymous: true, llmToolsConfiguration: [
1452
+ {
1453
+ title: 'Anthropic Claude (proxied)',
1454
+ packageName: '@promptbook/anthropic-claude',
1455
+ className: 'AnthropicClaudeExecutionTools',
1456
+ options: __assign(__assign({}, options), { isProxied: false }),
1457
+ },
1458
+ ] }));
1459
+ }
1460
+ return new AnthropicClaudeExecutionTools(options);
1461
+ }
1462
+ /**
1463
+ * TODO: !!!!!! Make this with all LLM providers
1464
+ * TODO: !!!!!! Maybe change all `new AnthropicClaudeExecutionTools` -> `createAnthropicClaudeExecutionTools` in manual
1465
+ */
1466
+
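A hedged usage sketch of the factory above. With `isProxied: false` (or omitted) it constructs `AnthropicClaudeExecutionTools` directly; with `isProxied: true` it wraps the same options into an anonymous `RemoteLlmExecutionTools` configuration that is sent to the remote server. The `apiKey` and `remoteUrl` fields are assumptions based on the Anthropic SDK and the remote-client options, not confirmed by this diff.

    import { createAnthropicClaudeExecutionTools } from '@promptbook/anthropic-claude';

    // Direct mode - this process talks to the Anthropic API itself
    const directTools = createAnthropicClaudeExecutionTools({
        isProxied: false,
        apiKey: 'sk-ant-xxxxx', // <- assumed to be forwarded to the Anthropic SDK client
        isVerbose: true,
    });

    // Proxied mode - the options become the llmToolsConfiguration of an anonymous remote request
    const proxiedTools = createAnthropicClaudeExecutionTools({
        isProxied: true,
        apiKey: 'sk-ant-xxxxx',
        remoteUrl: 'https://example.com/', // <- assumed field of RemoteLlmExecutionToolsOptions
    });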
1467
+ /**
1468
+ * List of available OpenAI models with pricing
1469
+ *
1470
+ * Note: Done at 2024-05-20
1471
+ *
1472
+ * @see https://platform.openai.com/docs/models/
1473
+ * @see https://openai.com/api/pricing/
1474
+ * @public exported from `@promptbook/openai`
1475
+ */
1476
+ var OPENAI_MODELS = [
1477
+ /*/
1478
+ {
1479
+ modelTitle: 'dall-e-3',
1480
+ modelName: 'dall-e-3',
1481
+ },
1482
+ /**/
1483
+ /*/
1484
+ {
1485
+ modelTitle: 'whisper-1',
1486
+ modelName: 'whisper-1',
1487
+ },
1488
+ /**/
1489
+ /**/
1490
+ {
1491
+ modelVariant: 'COMPLETION',
1492
+ modelTitle: 'davinci-002',
1493
+ modelName: 'davinci-002',
1494
+ pricing: {
1495
+ prompt: computeUsage("$2.00 / 1M tokens"),
1496
+ output: computeUsage("$2.00 / 1M tokens"), // <- not sure
1497
+ },
1498
+ },
1499
+ /**/
1500
+ /*/
1501
+ {
1502
+ modelTitle: 'dall-e-2',
1503
+ modelName: 'dall-e-2',
1504
+ },
1505
+ /**/
1506
+ /**/
1507
+ {
1508
+ modelVariant: 'CHAT',
1509
+ modelTitle: 'gpt-3.5-turbo-16k',
1510
+ modelName: 'gpt-3.5-turbo-16k',
1511
+ pricing: {
1512
+ prompt: computeUsage("$3.00 / 1M tokens"),
1513
+ output: computeUsage("$4.00 / 1M tokens"),
1514
+ },
1515
+ },
1516
+ /**/
1517
+ /*/
1518
+ {
1519
+ modelTitle: 'tts-1-hd-1106',
1520
+ modelName: 'tts-1-hd-1106',
1521
+ },
1522
+ /**/
1523
+ /*/
1524
+ {
1525
+ modelTitle: 'tts-1-hd',
1526
+ modelName: 'tts-1-hd',
1527
+ },
1528
+ /**/
1529
+ /**/
1530
+ {
1531
+ modelVariant: 'CHAT',
1532
+ modelTitle: 'gpt-4',
1533
+ modelName: 'gpt-4',
1534
+ pricing: {
1535
+ prompt: computeUsage("$30.00 / 1M tokens"),
1536
+ output: computeUsage("$60.00 / 1M tokens"),
1537
+ },
1538
+ },
1539
+ /**/
1540
+ /**/
1541
+ {
1542
+ modelVariant: 'CHAT',
1543
+ modelTitle: 'gpt-4-32k',
1544
+ modelName: 'gpt-4-32k',
1545
+ pricing: {
1546
+ prompt: computeUsage("$60.00 / 1M tokens"),
1547
+ output: computeUsage("$120.00 / 1M tokens"),
1548
+ },
1549
+ },
1550
+ /**/
1551
+ /*/
1552
+ {
1553
+ modelVariant: 'CHAT',
1554
+ modelTitle: 'gpt-4-0613',
1555
+ modelName: 'gpt-4-0613',
1556
+ pricing: {
1557
+ prompt: computeUsage(` / 1M tokens`),
1558
+ output: computeUsage(` / 1M tokens`),
1559
+ },
1560
+ },
1561
+ /**/
1562
+ /**/
1563
+ {
1564
+ modelVariant: 'CHAT',
1565
+ modelTitle: 'gpt-4-turbo-2024-04-09',
1566
+ modelName: 'gpt-4-turbo-2024-04-09',
1567
+ pricing: {
1568
+ prompt: computeUsage("$10.00 / 1M tokens"),
1569
+ output: computeUsage("$30.00 / 1M tokens"),
1570
+ },
1571
+ },
1572
+ /**/
1573
+ /**/
1574
+ {
1575
+ modelVariant: 'CHAT',
1576
+ modelTitle: 'gpt-3.5-turbo-1106',
1577
+ modelName: 'gpt-3.5-turbo-1106',
1578
+ pricing: {
1579
+ prompt: computeUsage("$1.00 / 1M tokens"),
1580
+ output: computeUsage("$2.00 / 1M tokens"),
1581
+ },
1582
+ },
1583
+ /**/
1584
+ /**/
1585
+ {
1586
+ modelVariant: 'CHAT',
1587
+ modelTitle: 'gpt-4-turbo',
1588
+ modelName: 'gpt-4-turbo',
1589
+ pricing: {
1590
+ prompt: computeUsage("$10.00 / 1M tokens"),
1591
+ output: computeUsage("$30.00 / 1M tokens"),
1592
+ },
1593
+ },
1594
+ /**/
1595
+ /**/
1596
+ {
1597
+ modelVariant: 'COMPLETION',
1598
+ modelTitle: 'gpt-3.5-turbo-instruct-0914',
1599
+ modelName: 'gpt-3.5-turbo-instruct-0914',
1600
+ pricing: {
1601
+ prompt: computeUsage("$1.50 / 1M tokens"),
1602
+ output: computeUsage("$2.00 / 1M tokens"), // <- For gpt-3.5-turbo-instruct
1603
+ },
1604
+ },
1605
+ /**/
1606
+ /**/
1607
+ {
1608
+ modelVariant: 'COMPLETION',
1609
+ modelTitle: 'gpt-3.5-turbo-instruct',
1610
+ modelName: 'gpt-3.5-turbo-instruct',
1611
+ pricing: {
1612
+ prompt: computeUsage("$1.50 / 1M tokens"),
1613
+ output: computeUsage("$2.00 / 1M tokens"),
1614
+ },
1615
+ },
1616
+ /**/
1617
+ /*/
1618
+ {
1619
+ modelTitle: 'tts-1',
1620
+ modelName: 'tts-1',
1621
+ },
1622
+ /**/
1623
+ /**/
1624
+ {
1625
+ modelVariant: 'CHAT',
1626
+ modelTitle: 'gpt-3.5-turbo',
1627
+ modelName: 'gpt-3.5-turbo',
1628
+ pricing: {
1629
+ prompt: computeUsage("$3.00 / 1M tokens"),
1630
+ output: computeUsage("$6.00 / 1M tokens"), // <- Not sure, refer to gpt-3.5-turbo in Fine-tuning models
1631
+ },
1632
+ },
1633
+ /**/
1634
+ /**/
1635
+ {
1636
+ modelVariant: 'CHAT',
1637
+ modelTitle: 'gpt-3.5-turbo-0301',
1638
+ modelName: 'gpt-3.5-turbo-0301',
1639
+ pricing: {
1640
+ prompt: computeUsage("$1.50 / 1M tokens"),
1641
+ output: computeUsage("$2.00 / 1M tokens"),
1642
+ },
1643
+ },
1644
+ /**/
1645
+ /**/
1646
+ {
1647
+ modelVariant: 'COMPLETION',
1648
+ modelTitle: 'babbage-002',
1649
+ modelName: 'babbage-002',
1650
+ pricing: {
1651
+ prompt: computeUsage("$0.40 / 1M tokens"),
1652
+ output: computeUsage("$0.40 / 1M tokens"), // <- Not sure
1653
+ },
1654
+ },
1655
+ /**/
1656
+ /**/
1657
+ {
1658
+ modelVariant: 'CHAT',
1659
+ modelTitle: 'gpt-4-1106-preview',
1660
+ modelName: 'gpt-4-1106-preview',
1661
+ pricing: {
1662
+ prompt: computeUsage("$10.00 / 1M tokens"),
1663
+ output: computeUsage("$30.00 / 1M tokens"),
1664
+ },
1665
+ },
1666
+ /**/
1667
+ /**/
1668
+ {
1669
+ modelVariant: 'CHAT',
1670
+ modelTitle: 'gpt-4-0125-preview',
1671
+ modelName: 'gpt-4-0125-preview',
1672
+ pricing: {
1673
+ prompt: computeUsage("$10.00 / 1M tokens"),
1674
+ output: computeUsage("$30.00 / 1M tokens"),
1675
+ },
1676
+ },
1677
+ /**/
1678
+ /*/
1679
+ {
1680
+ modelTitle: 'tts-1-1106',
1681
+ modelName: 'tts-1-1106',
1682
+ },
1683
+ /**/
1684
+ /**/
1685
+ {
1686
+ modelVariant: 'CHAT',
1687
+ modelTitle: 'gpt-3.5-turbo-0125',
1688
+ modelName: 'gpt-3.5-turbo-0125',
1689
+ pricing: {
1690
+ prompt: computeUsage("$0.50 / 1M tokens"),
1691
+ output: computeUsage("$1.50 / 1M tokens"),
1692
+ },
1693
+ },
1694
+ /**/
1695
+ /**/
1696
+ {
1697
+ modelVariant: 'CHAT',
1698
+ modelTitle: 'gpt-4-turbo-preview',
1699
+ modelName: 'gpt-4-turbo-preview',
1700
+ pricing: {
1701
+ prompt: computeUsage("$10.00 / 1M tokens"),
1702
+ output: computeUsage("$30.00 / 1M tokens"), // <- Not sure, just for gpt-4-turbo
1703
+ },
1704
+ },
1705
+ /**/
1706
+ /**/
1707
+ {
1708
+ modelVariant: 'EMBEDDING',
1709
+ modelTitle: 'text-embedding-3-large',
1710
+ modelName: 'text-embedding-3-large',
1711
+ pricing: {
1712
+ prompt: computeUsage("$0.13 / 1M tokens"),
1713
+ // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
1714
+ output: 0, // <- Note: [🆖] In Embedding models you don't pay for output
1715
+ },
1716
+ },
1717
+ /**/
1718
+ /**/
1719
+ {
1720
+ modelVariant: 'EMBEDDING',
1721
+ modelTitle: 'text-embedding-3-small',
1722
+ modelName: 'text-embedding-3-small',
1723
+ pricing: {
1724
+ prompt: computeUsage("$0.02 / 1M tokens"),
1725
+ // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
1726
+ output: 0, // <- Note: [🆖] In Embedding models you don't pay for output
1727
+ },
1728
+ },
1729
+ /**/
1730
+ /**/
1731
+ {
1732
+ modelVariant: 'CHAT',
1733
+ modelTitle: 'gpt-3.5-turbo-0613',
1734
+ modelName: 'gpt-3.5-turbo-0613',
1735
+ pricing: {
1736
+ prompt: computeUsage("$1.50 / 1M tokens"),
1737
+ output: computeUsage("$2.00 / 1M tokens"),
1738
+ },
1739
+ },
1740
+ /**/
1741
+ /**/
1742
+ {
1743
+ modelVariant: 'EMBEDDING',
1744
+ modelTitle: 'text-embedding-ada-002',
1745
+ modelName: 'text-embedding-ada-002',
1746
+ pricing: {
1747
+ prompt: computeUsage("$0.1 / 1M tokens"),
1748
+ // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
1749
+ output: 0, // <- Note: [🆖] In Embedding models you don't pay for output
1750
+ },
1751
+ },
1752
+ /**/
1753
+ /*/
1754
+ {
1755
+ modelVariant: 'CHAT',
1756
+ modelTitle: 'gpt-4-1106-vision-preview',
1757
+ modelName: 'gpt-4-1106-vision-preview',
1758
+ },
1759
+ /**/
1760
+ /*/
1761
+ {
1762
+ modelVariant: 'CHAT',
1763
+ modelTitle: 'gpt-4-vision-preview',
1764
+ modelName: 'gpt-4-vision-preview',
1765
+ pricing: {
1766
+ prompt: computeUsage(`$10.00 / 1M tokens`),
1767
+ output: computeUsage(`$30.00 / 1M tokens`),
1768
+ },
1769
+ },
1770
+ /**/
1771
+ /**/
1772
+ {
1773
+ modelVariant: 'CHAT',
1774
+ modelTitle: 'gpt-4o-2024-05-13',
1775
+ modelName: 'gpt-4o-2024-05-13',
1776
+ pricing: {
1777
+ prompt: computeUsage("$5.00 / 1M tokens"),
1778
+ output: computeUsage("$15.00 / 1M tokens"),
1779
+ },
1780
+ },
1781
+ /**/
1782
+ /**/
1783
+ {
1784
+ modelVariant: 'CHAT',
1785
+ modelTitle: 'gpt-4o',
1786
+ modelName: 'gpt-4o',
1787
+ pricing: {
1788
+ prompt: computeUsage("$5.00 / 1M tokens"),
1789
+ output: computeUsage("$15.00 / 1M tokens"),
1790
+ },
1791
+ },
1792
+ /**/
1793
+ /**/
1794
+ {
1795
+ modelVariant: 'CHAT',
1796
+ modelTitle: 'gpt-3.5-turbo-16k-0613',
1797
+ modelName: 'gpt-3.5-turbo-16k-0613',
1798
+ pricing: {
1799
+ prompt: computeUsage("$3.00 / 1M tokens"),
1800
+ output: computeUsage("$4.00 / 1M tokens"),
1801
+ },
1802
+ },
1803
+ /**/
1804
+ ];
1805
+ /**
1806
+ * Note: [🤖] Add models of new variant
1807
+ * TODO: [🧠] Some mechanism to propagate uncertainty
1808
+ * TODO: [🕚][👮‍♀️] Make this list dynamic - modelNames can be listed dynamically, but not modelVariant, legacy status, context length and pricing
1809
+ * TODO: [🧠][👮‍♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths (top-level performance, intelligence, fluency, and understanding), contextWindow,...
1810
+ * @see https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4
1811
+ * @see https://openai.com/api/pricing/
1812
+ * @see /other/playground/playground.ts
1813
+ * TODO: [🍓] Make better
1814
+ * TODO: Change model titles to human eg: "gpt-4-turbo-2024-04-09" -> "GPT-4 Turbo (2024-04-09)"
1815
+ * TODO: [🚸] Not all models are compatible with JSON mode, add this information here and use it
1816
+ */
1817
+
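The `pricing` values above come from `computeUsage('$X / 1M tokens')` and are later multiplied by raw token counts in `computeOpenaiUsage` below, so they are evidently stored as per-token prices. A small sketch of consuming the list (the per-token interpretation is inferred from that usage, since `computeUsage` itself is defined elsewhere in the bundle):

    import { OPENAI_MODELS } from '@promptbook/openai';

    // List every chat-capable model together with its per-token prompt price
    const chatModels = OPENAI_MODELS.filter(({ modelVariant }) => modelVariant === 'CHAT');
    for (const { modelName, pricing } of chatModels) {
        console.info(modelName, pricing?.prompt); // e.g. gpt-4o -> 0.000005, assuming "$5.00 / 1M tokens" parses to a per-token number
    }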
1818
+ /**
1819
+ * Execution Tools for calling Azure OpenAI API.
1820
+ *
1821
+ * @public exported from `@promptbook/azure-openai`
1822
+ */
1823
+ var AzureOpenAiExecutionTools = /** @class */ (function () {
1824
+ /**
1825
+ * Creates OpenAI Execution Tools.
1826
+ *
1827
+ * @param options which are relevant are directly passed to the OpenAI client
1828
+ */
1829
+ function AzureOpenAiExecutionTools(options) {
1830
+ this.options = options;
1831
+ this.client = new openai.OpenAIClient("https://".concat(options.resourceName, ".openai.azure.com/"), new openai.AzureKeyCredential(options.apiKey));
1832
+ }
1833
+ Object.defineProperty(AzureOpenAiExecutionTools.prototype, "title", {
1834
+ get: function () {
1835
+ return 'Azure OpenAI';
1836
+ },
1837
+ enumerable: false,
1838
+ configurable: true
1839
+ });
1840
+ Object.defineProperty(AzureOpenAiExecutionTools.prototype, "description", {
1841
+ get: function () {
1842
+ return 'Use all models trained by OpenAI provided by Azure';
1843
+ },
1844
+ enumerable: false,
1845
+ configurable: true
1846
+ });
1847
+ /**
1848
+ * Calls OpenAI API to use a chat model.
1849
+ */
1850
+ AzureOpenAiExecutionTools.prototype.callChatModel = function (prompt) {
1851
+ var _a, _b;
1852
+ return __awaiter(this, void 0, void 0, function () {
1853
+ var content, parameters, modelRequirements, modelName, modelSettings, rawPromptContent, messages, start, complete, rawRequest, rawResponse, resultContent, usage, error_1;
1854
+ var _c;
1855
+ return __generator(this, function (_d) {
1856
+ switch (_d.label) {
1857
+ case 0:
1858
+ if (this.options.isVerbose) {
1859
+ console.info('💬 OpenAI callChatModel call');
1860
+ }
1861
+ content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
1862
+ // TODO: [☂] Use here more modelRequirements
1863
+ if (modelRequirements.modelVariant !== 'CHAT') {
1864
+ throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
1865
+ }
1866
+ _d.label = 1;
1867
+ case 1:
1868
+ _d.trys.push([1, 3, , 4]);
1869
+ modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
1870
+ modelSettings = {
1871
+ maxTokens: modelRequirements.maxTokens,
1872
+ // <- TODO: [🌾] Make some global max cap for maxTokens
1873
+ temperature: modelRequirements.temperature,
1874
+ user: this.options.user,
1875
+ // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
1876
+ // <- Note: [🧆]
1877
+ };
1878
+ rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
1879
+ messages = __spreadArray(__spreadArray([], __read((modelRequirements.systemMessage === undefined
1880
+ ? []
1881
+ : [
1882
+ {
1883
+ role: 'system',
1884
+ content: modelRequirements.systemMessage,
1885
+ },
1886
+ ])), false), [
1887
+ {
1888
+ role: 'user',
1889
+ content: rawPromptContent,
1890
+ },
1891
+ ], false);
1892
+ start = getCurrentIsoDate();
1893
+ complete = void 0;
1894
+ if (this.options.isVerbose) {
1895
+ console.info(colors__default["default"].bgWhite('messages'), JSON.stringify(messages, null, 4));
1896
+ }
1897
+ rawRequest = [modelName, messages, modelSettings];
1898
+ return [4 /*yield*/, (_c = this.client).getChatCompletions.apply(_c, __spreadArray([], __read(rawRequest), false))];
1899
+ case 2:
1900
+ rawResponse = _d.sent();
1901
+ if (this.options.isVerbose) {
1902
+ console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
1903
+ }
1904
+ if (!rawResponse.choices[0]) {
1905
+ throw new PipelineExecutionError('No choices from Azure OpenAI');
1906
+ }
1907
+ if (rawResponse.choices.length > 1) {
1908
+ // TODO: This should maybe be only a warning
1909
+ throw new PipelineExecutionError('More than one choice from Azure OpenAI');
1910
+ }
1911
+ if (!rawResponse.choices[0].message || !rawResponse.choices[0].message.content) {
1912
+ throw new PipelineExecutionError('Empty response from Azure OpenAI');
1913
+ }
1914
+ resultContent = rawResponse.choices[0].message.content;
1915
+ // eslint-disable-next-line prefer-const
1916
+ complete = getCurrentIsoDate();
1917
+ usage = {
1918
+ price: uncertainNumber() /* <- TODO: [🐞] Compute usage */,
1919
+ input: __assign({ tokensCount: uncertainNumber((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.promptTokens) }, computeUsageCounts(prompt.content)),
1920
+ output: __assign({ tokensCount: uncertainNumber((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completionTokens) }, computeUsageCounts(resultContent)),
1921
+ };
1922
+ return [2 /*return*/, {
1923
+ content: resultContent,
1924
+ modelName: modelName,
1925
+ timing: {
1926
+ start: start,
1927
+ complete: complete,
1928
+ },
1929
+ usage: usage,
1930
+ rawPromptContent: rawPromptContent,
1931
+ rawRequest: rawRequest,
1932
+ rawResponse: rawResponse,
1933
+ // <- [🗯]
1934
+ }];
1935
+ case 3:
1936
+ error_1 = _d.sent();
1937
+ throw this.transformAzureError(error_1);
1938
+ case 4: return [2 /*return*/];
1939
+ }
1940
+ });
1941
+ });
1942
+ };
1943
+ /**
1944
+ * Calls Azure OpenAI API to use a completion model.
1945
+ */
1946
+ AzureOpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
1947
+ var _a, _b;
1948
+ return __awaiter(this, void 0, void 0, function () {
1949
+ var content, parameters, modelRequirements, modelName, modelSettings, start, complete, rawPromptContent, rawRequest, rawResponse, resultContent, usage, error_2;
1950
+ var _c;
1951
+ return __generator(this, function (_d) {
1952
+ switch (_d.label) {
1953
+ case 0:
1954
+ if (this.options.isVerbose) {
1955
+ console.info('🖋 OpenAI callCompletionModel call');
1956
+ }
1957
+ content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
1958
+ // TODO: [☂] Use here more modelRequirements
1959
+ if (modelRequirements.modelVariant !== 'COMPLETION') {
1960
+ throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
1961
+ }
1962
+ _d.label = 1;
1963
+ case 1:
1964
+ _d.trys.push([1, 3, , 4]);
1965
+ modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
1966
+ modelSettings = {
1967
+ maxTokens: modelRequirements.maxTokens || 2000,
1968
+ // <- TODO: [🌾] Make some global max cap for maxTokens
1969
+ temperature: modelRequirements.temperature,
1970
+ user: this.options.user,
1971
+ // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
1972
+ // <- Note: [🧆]
1973
+ };
1974
+ start = getCurrentIsoDate();
1975
+ complete = void 0;
1976
+ if (this.options.isVerbose) {
1977
+ console.info(colors__default["default"].bgWhite('content'), JSON.stringify(content, null, 4));
1978
+ console.info(colors__default["default"].bgWhite('parameters'), JSON.stringify(parameters, null, 4));
1979
+ }
1980
+ rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
1981
+ rawRequest = [
1982
+ modelName,
1983
+ [rawPromptContent],
1984
+ modelSettings,
1985
+ ];
1986
+ return [4 /*yield*/, (_c = this.client).getCompletions.apply(_c, __spreadArray([], __read(rawRequest), false))];
1987
+ case 2:
1988
+ rawResponse = _d.sent();
1989
+ if (this.options.isVerbose) {
1990
+ console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
1991
+ }
1992
+ if (!rawResponse.choices[0]) {
1993
+ throw new PipelineExecutionError('No choices from OpenAI');
1994
+ }
1995
+ if (rawResponse.choices.length > 1) {
1996
+ // TODO: This should maybe be only a warning
1997
+ throw new PipelineExecutionError('More than one choice from OpenAI');
1998
+ }
1999
+ resultContent = rawResponse.choices[0].text;
2000
+ // eslint-disable-next-line prefer-const
2001
+ complete = getCurrentIsoDate();
2002
+ usage = {
2003
+ price: uncertainNumber() /* <- TODO: [🐞] Compute usage */,
2004
+ input: __assign({ tokensCount: uncertainNumber((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.promptTokens) }, computeUsageCounts(prompt.content)),
2005
+ output: __assign({ tokensCount: uncertainNumber((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completionTokens) }, computeUsageCounts(resultContent)),
2006
+ };
2007
+ return [2 /*return*/, {
2008
+ content: resultContent,
2009
+ modelName: modelName,
2010
+ timing: {
2011
+ start: start,
2012
+ complete: complete,
2013
+ },
2014
+ usage: usage,
2015
+ rawPromptContent: rawPromptContent,
2016
+ rawRequest: rawRequest,
2017
+ rawResponse: rawResponse,
2018
+ // <- [🗯]
2019
+ }];
2020
+ case 3:
2021
+ error_2 = _d.sent();
2022
+ throw this.transformAzureError(error_2);
2023
+ case 4: return [2 /*return*/];
2024
+ }
2025
+ });
2026
+ });
2027
+ };
2028
+ // <- Note: [🤖] callXxxModel
2029
+ /**
2030
+ * Changes Azure error (which is not a proper Error but a plain object) into a proper Error
2031
+ */
2032
+ AzureOpenAiExecutionTools.prototype.transformAzureError = function (azureError) {
2033
+ if (typeof azureError !== 'object' || azureError === null) {
2034
+ return new PipelineExecutionError("Unknown Azure OpenAI error");
2035
+ }
2036
+ var code = azureError.code, message = azureError.message;
2037
+ return new PipelineExecutionError("".concat(code, ": ").concat(message));
2038
+ };
2039
+ /**
2040
+ * List all available Azure OpenAI models that can be used
2041
+ */
2042
+ AzureOpenAiExecutionTools.prototype.listModels = function () {
2043
+ return __awaiter(this, void 0, void 0, function () {
2044
+ return __generator(this, function (_a) {
2045
+ // TODO: !!! Do some filtering here on which models are really available as deployments
2046
+ // @see https://management.azure.com/subscriptions/subscriptionId/resourceGroups/resourceGroupName/providers/Microsoft.CognitiveServices/accounts/accountName/deployments?api-version=2023-05-01
2047
+ return [2 /*return*/, OPENAI_MODELS.map(function (_a) {
2048
+ var modelTitle = _a.modelTitle, modelName = _a.modelName, modelVariant = _a.modelVariant;
2049
+ return ({
2050
+ modelTitle: "Azure ".concat(modelTitle),
2051
+ modelName: modelName,
2052
+ modelVariant: modelVariant,
2053
+ });
2054
+ })];
2055
+ });
2056
+ });
2057
+ };
2058
+ return AzureOpenAiExecutionTools;
2059
+ }());
2060
+ /**
2061
+ * TODO: Maybe Create some common util for callChatModel and callCompletionModel
2062
+ * TODO: Maybe make custom AzureOpenaiError
2063
+ * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
2064
+ * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
2065
+ */
2066
+
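A construction sketch for the class above. The option names (`resourceName`, `deploymentName`, `apiKey`, `user`, `isVerbose`) are exactly the ones read in this bundle; the values are illustrative only.

    import { AzureOpenAiExecutionTools } from '@promptbook/azure-openai';

    const azureTools = new AzureOpenAiExecutionTools({
        resourceName: 'my-resource',  // -> https://my-resource.openai.azure.com/
        deploymentName: 'gpt-4o',     // used as the model name when the prompt does not specify one
        apiKey: 'azure-key-xxxxx',    // wrapped in AzureKeyCredential
        user: 'user-1234',            // optional, forwarded to the Azure OpenAI API
        isVerbose: true,
    });

    const models = await azureTools.listModels(); // OPENAI_MODELS re-labelled as "Azure <modelTitle>"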
2067
+ /**
2068
+ * Computes the usage of the OpenAI API based on the response from OpenAI
2069
+ *
2070
+ * @param promptContent The content of the prompt
2071
+ * @param resultContent The content of the result (for embedding prompts or failed prompts pass empty string)
2072
+ * @param rawResponse The raw response from OpenAI API
2073
+ * @throws {PipelineExecutionError} If the usage is not defined in the response from OpenAI
2074
+ * @private internal utility of `OpenAiExecutionTools`
2075
+ */
2076
+ function computeOpenaiUsage(promptContent, // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
2077
+ resultContent, rawResponse) {
2078
+ var _a, _b;
2079
+ if (rawResponse.usage === undefined) {
2080
+ throw new PipelineExecutionError('The usage is not defined in the response from OpenAI');
2081
+ }
2082
+ if (((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.prompt_tokens) === undefined) {
2083
+ throw new PipelineExecutionError('In OpenAI response `usage.prompt_tokens` not defined');
2084
+ }
2085
+ var inputTokens = rawResponse.usage.prompt_tokens;
2086
+ var outputTokens = ((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completion_tokens) || 0;
2087
+ var modelInfo = OPENAI_MODELS.find(function (model) { return model.modelName === rawResponse.model; });
2088
+ var price;
2089
+ if (modelInfo === undefined || modelInfo.pricing === undefined) {
2090
+ price = uncertainNumber();
2091
+ }
2092
+ else {
2093
+ price = uncertainNumber(inputTokens * modelInfo.pricing.prompt + outputTokens * modelInfo.pricing.output);
2094
+ }
2095
+ return {
2096
+ price: price,
2097
+ input: __assign({ tokensCount: uncertainNumber(rawResponse.usage.prompt_tokens) }, computeUsageCounts(promptContent)),
2098
+ output: __assign({ tokensCount: uncertainNumber(outputTokens) }, computeUsageCounts(resultContent)),
2099
+ };
2100
+ }
2101
+
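A worked example of the price formula above, assuming the prompt used `gpt-4o` (per-token prices taken from its OPENAI_MODELS entry, i.e. $5.00 and $15.00 per 1M tokens):

    // Worked example: 1000 prompt tokens + 500 completion tokens of gpt-4o
    const price = 1000 * 0.000005 + 500 * 0.000015; // = 0.005 + 0.0075 = 0.0125 USD
    // A model missing from OPENAI_MODELS (or one without pricing) falls back to uncertainNumber()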
2102
+ /**
2103
+ * Execution Tools for calling OpenAI API.
2104
+ *
2105
+ * @public exported from `@promptbook/openai`
2106
+ */
2107
+ var OpenAiExecutionTools = /** @class */ (function () {
2108
+ /**
2109
+ * Creates OpenAI Execution Tools.
2110
+ *
2111
+ * @param options which are relevant are directly passed to the OpenAI client
2112
+ */
2113
+ function OpenAiExecutionTools(options) {
2114
+ if (options === void 0) { options = {}; }
2115
+ this.options = options;
2116
+ // Note: Passing only OpenAI relevant options to OpenAI constructor
2117
+ var openAiOptions = __assign({}, options);
2118
+ delete openAiOptions.isVerbose;
2119
+ delete openAiOptions.user;
2120
+ this.client = new OpenAI__default["default"](__assign({}, openAiOptions));
2121
+ }
2122
+ Object.defineProperty(OpenAiExecutionTools.prototype, "title", {
2123
+ get: function () {
2124
+ return 'OpenAI';
2125
+ },
2126
+ enumerable: false,
2127
+ configurable: true
2128
+ });
2129
+ Object.defineProperty(OpenAiExecutionTools.prototype, "description", {
2130
+ get: function () {
2131
+ return 'Use all models provided by OpenAI';
2132
+ },
2133
+ enumerable: false,
2134
+ configurable: true
2135
+ });
2136
+ /**
2137
+ * Calls OpenAI API to use a chat model.
2138
+ */
2139
+ OpenAiExecutionTools.prototype.callChatModel = function (prompt) {
2140
+ return __awaiter(this, void 0, void 0, function () {
2141
+ var content, parameters, modelRequirements, expectFormat, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
2142
+ return __generator(this, function (_a) {
2143
+ switch (_a.label) {
2144
+ case 0:
2145
+ if (this.options.isVerbose) {
2146
+ console.info('💬 OpenAI callChatModel call', { prompt: prompt });
2147
+ }
2148
+ content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements, expectFormat = prompt.expectFormat;
2149
+ // TODO: [☂] Use here more modelRequirements
2150
+ if (modelRequirements.modelVariant !== 'CHAT') {
2151
+ throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
2152
+ }
2153
+ modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
2154
+ modelSettings = {
2155
+ model: modelName,
2156
+ max_tokens: modelRequirements.maxTokens,
2157
+ // <- TODO: [🌾] Make some global max cap for maxTokens
2158
+ temperature: modelRequirements.temperature,
2159
+ // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
2160
+ // <- Note: [🧆]
2161
+ };
2162
+ if (expectFormat === 'JSON') {
2163
+ modelSettings.response_format = {
2164
+ type: 'json_object',
2165
+ };
2166
+ }
2167
+ rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
2168
+ rawRequest = __assign(__assign({}, modelSettings), { messages: __spreadArray(__spreadArray([], __read((modelRequirements.systemMessage === undefined
2169
+ ? []
2170
+ : [
2171
+ {
2172
+ role: 'system',
2173
+ content: modelRequirements.systemMessage,
2174
+ },
2175
+ ])), false), [
2176
+ {
2177
+ role: 'user',
2178
+ content: rawPromptContent,
2179
+ },
2180
+ ], false), user: this.options.user });
2181
+ start = getCurrentIsoDate();
2182
+ if (this.options.isVerbose) {
2183
+ console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
2184
+ }
2185
+ return [4 /*yield*/, this.client.chat.completions.create(rawRequest)];
2186
+ case 1:
2187
+ rawResponse = _a.sent();
2188
+ if (this.options.isVerbose) {
2189
+ console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
2190
+ }
2191
+ if (!rawResponse.choices[0]) {
2192
+ throw new PipelineExecutionError('No choices from OpenAI');
2193
+ }
2194
+ if (rawResponse.choices.length > 1) {
2195
+ // TODO: This should maybe be only a warning
2196
+ throw new PipelineExecutionError('More than one choice from OpenAI');
2197
+ }
2198
+ resultContent = rawResponse.choices[0].message.content;
2199
+ // eslint-disable-next-line prefer-const
2200
+ complete = getCurrentIsoDate();
2201
+ usage = computeOpenaiUsage(content, resultContent || '', rawResponse);
2202
+ if (resultContent === null) {
2203
+ throw new PipelineExecutionError('No response message from OpenAI');
2204
+ }
2205
+ return [2 /*return*/, {
2206
+ content: resultContent,
2207
+ modelName: rawResponse.model || modelName,
2208
+ timing: {
2209
+ start: start,
2210
+ complete: complete,
2211
+ },
2212
+ usage: usage,
2213
+ rawPromptContent: rawPromptContent,
2214
+ rawRequest: rawRequest,
2215
+ rawResponse: rawResponse,
2216
+ // <- [🗯]
2217
+ }];
2218
+ }
2219
+ });
2220
+ });
2221
+ };
2222
+ /**
2223
+ * Calls OpenAI API to use a completion model.
2224
+ */
2225
+ OpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
2226
+ return __awaiter(this, void 0, void 0, function () {
2227
+ var content, parameters, modelRequirements, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
2228
+ return __generator(this, function (_a) {
2229
+ switch (_a.label) {
2230
+ case 0:
2231
+ if (this.options.isVerbose) {
2232
+ console.info('🖋 OpenAI callCompletionModel call', { prompt: prompt });
2233
+ }
2234
+ content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
2235
+ // TODO: [☂] Use here more modelRequirements
2236
+ if (modelRequirements.modelVariant !== 'COMPLETION') {
2237
+ throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
2238
+ }
2239
+ modelName = modelRequirements.modelName || this.getDefaultCompletionModel().modelName;
2240
+ modelSettings = {
2241
+ model: modelName,
2242
+ max_tokens: modelRequirements.maxTokens || 2000,
2243
+ // <- TODO: [🌾] Make some global max cap for maxTokens
2244
+ temperature: modelRequirements.temperature,
2245
+ // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
2246
+ // <- Note: [🧆]
2247
+ };
2248
+ rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
2249
+ rawRequest = __assign(__assign({}, modelSettings), { prompt: rawPromptContent, user: this.options.user });
2250
+ start = getCurrentIsoDate();
2251
+ if (this.options.isVerbose) {
2252
+ console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
2253
+ }
2254
+ return [4 /*yield*/, this.client.completions.create(rawRequest)];
2255
+ case 1:
2256
+ rawResponse = _a.sent();
2257
+ if (this.options.isVerbose) {
2258
+ console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
2259
+ }
2260
+ if (!rawResponse.choices[0]) {
2261
+ throw new PipelineExecutionError('No choices from OpenAI');
2262
+ }
2263
+ if (rawResponse.choices.length > 1) {
2264
+ // TODO: This should maybe be only a warning
2265
+ throw new PipelineExecutionError('More than one choice from OpenAI');
2266
+ }
2267
+ resultContent = rawResponse.choices[0].text;
2268
+ // eslint-disable-next-line prefer-const
2269
+ complete = getCurrentIsoDate();
2270
+ usage = computeOpenaiUsage(content, resultContent || '', rawResponse);
2271
+ return [2 /*return*/, {
2272
+ content: resultContent,
2273
+ modelName: rawResponse.model || modelName,
2274
+ timing: {
2275
+ start: start,
2276
+ complete: complete,
2277
+ },
2278
+ usage: usage,
2279
+ rawPromptContent: rawPromptContent,
2280
+ rawRequest: rawRequest,
2281
+ rawResponse: rawResponse,
2282
+ // <- [🗯]
2283
+ }];
2284
+ }
2285
+ });
2286
+ });
2287
+ };
2288
+ /**
2289
+ * Calls OpenAI API to use an embedding model
2290
+ */
2291
+ OpenAiExecutionTools.prototype.callEmbeddingModel = function (prompt) {
2292
+ return __awaiter(this, void 0, void 0, function () {
2293
+ var content, parameters, modelRequirements, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
2294
+ return __generator(this, function (_a) {
2295
+ switch (_a.label) {
2296
+ case 0:
2297
+ if (this.options.isVerbose) {
2298
+ console.info('🖋 OpenAI embedding call', { prompt: prompt });
2299
+ }
2300
+ content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
2301
+ // TODO: [☂] Use here more modelRequirements
2302
+ if (modelRequirements.modelVariant !== 'EMBEDDING') {
2303
+ throw new PipelineExecutionError('Use embed only for EMBEDDING variant');
2304
+ }
2305
+ modelName = modelRequirements.modelName || this.getDefaultEmbeddingModel().modelName;
2306
+ rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
2307
+ rawRequest = {
2308
+ input: rawPromptContent,
2309
+ model: modelName,
2310
+ };
2311
+ start = getCurrentIsoDate();
2312
+ if (this.options.isVerbose) {
2313
+ console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
2314
+ }
2315
+ return [4 /*yield*/, this.client.embeddings.create(rawRequest)];
2316
+ case 1:
2317
+ rawResponse = _a.sent();
2318
+ if (this.options.isVerbose) {
2319
+ console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
2320
+ }
2321
+ if (rawResponse.data.length !== 1) {
2322
+ throw new PipelineExecutionError("Expected exactly 1 data item in response, got ".concat(rawResponse.data.length));
2323
+ }
2324
+ resultContent = rawResponse.data[0].embedding;
2325
+ // eslint-disable-next-line prefer-const
2326
+ complete = getCurrentIsoDate();
2327
+ usage = computeOpenaiUsage(content, '', rawResponse);
2328
+ return [2 /*return*/, {
2329
+ content: resultContent,
2330
+ modelName: rawResponse.model || modelName,
2331
+ timing: {
2332
+ start: start,
2333
+ complete: complete,
2334
+ },
2335
+ usage: usage,
2336
+ rawPromptContent: rawPromptContent,
2337
+ rawRequest: rawRequest,
2338
+ rawResponse: rawResponse,
2339
+ // <- [🗯]
2340
+ }];
2341
+ }
2342
+ });
2343
+ });
2344
+ };
2345
+ // <- Note: [🤖] callXxxModel
2346
+ /**
2347
+ * Get the model that should be used as default
2348
+ */
2349
+ OpenAiExecutionTools.prototype.getDefaultModel = function (defaultModelName) {
2350
+ var model = OPENAI_MODELS.find(function (_a) {
2351
+ var modelName = _a.modelName;
2352
+ return modelName === defaultModelName;
2353
+ });
2354
+ if (model === undefined) {
2355
+ throw new UnexpectedError(spaceTrim__default["default"](function (block) {
2356
+ return "\n Cannot find model in OpenAI models with name \"".concat(defaultModelName, "\" which should be used as default.\n\n Available models:\n ").concat(block(OPENAI_MODELS.map(function (_a) {
2357
+ var modelName = _a.modelName;
2358
+ return "- \"".concat(modelName, "\"");
2359
+ }).join('\n')), "\n\n ");
2360
+ }));
2361
+ }
2362
+ return model;
2363
+ };
2364
+ /**
2365
+ * Default model for chat variant.
2366
+ */
2367
+ OpenAiExecutionTools.prototype.getDefaultChatModel = function () {
2368
+ return this.getDefaultModel('gpt-4o');
2369
+ };
2370
+ /**
2371
+ * Default model for completion variant.
2372
+ */
2373
+ OpenAiExecutionTools.prototype.getDefaultCompletionModel = function () {
2374
+ return this.getDefaultModel('gpt-3.5-turbo-instruct');
2375
+ };
2376
+ /**
2377
+ * Default model for embedding variant.
2378
+ */
2379
+ OpenAiExecutionTools.prototype.getDefaultEmbeddingModel = function () {
2380
+ return this.getDefaultModel('text-embedding-3-large');
2381
+ };
2382
+ // <- Note: [🤖] getDefaultXxxModel
2383
+ /**
2384
+ * List all available OpenAI models that can be used
2385
+ */
2386
+ OpenAiExecutionTools.prototype.listModels = function () {
2387
+ /*
2388
+ Note: Dynamic listing of the models
2389
+ const models = await this.openai.models.list({});
2390
+
2391
+ console.log({ models });
2392
+ console.log(models.data);
2393
+ */
2394
+ return OPENAI_MODELS;
2395
+ };
2396
+ return OpenAiExecutionTools;
2397
+ }());
2398
+ /**
2399
+ * TODO: [🧠][🧙‍♂️] Maybe there can be some wizard for those who want to use just OpenAI
2400
+ * TODO: Maybe Create some common util for callChatModel and callCompletionModel
2401
+ * TODO: Maybe make custom OpenaiError
2402
+ * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
2403
+ * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
2404
+ */
2405
+
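A hedged usage sketch of the class above. Apart from `isVerbose` and `user`, the options are passed straight to the `openai` SDK constructor, so `apiKey` here is the standard OpenAI SDK field rather than something defined in this bundle; the `{text}` placeholder syntax is the same assumption as in the Anthropic sketch earlier.

    import { OpenAiExecutionTools } from '@promptbook/openai';

    const openAiTools = new OpenAiExecutionTools({
        apiKey: 'sk-xxxxx', // forwarded to the OpenAI SDK client
        user: 'user-1234',
        isVerbose: true,
    });

    const result = await openAiTools.callChatModel({
        content: 'Summarize {text} in one sentence.',
        parameters: { text: 'Promptbook is a library for building LLM pipelines.' },
        modelRequirements: { modelVariant: 'CHAT', modelName: 'gpt-4o' },
    });
    console.info(result.content, result.usage.price);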
2406
+ /**
2407
+ * @private internal type for `createLlmToolsFromConfiguration`
2408
+ */
2409
+ var EXECUTION_TOOLS_CLASSES = {
2410
+ createOpenAiExecutionTools: function (options) {
2411
+ return new OpenAiExecutionTools(__assign(__assign({}, options), { dangerouslyAllowBrowser: true /* <- TODO: [🧠] !!! Some mechanism for auto-detection of browser, maybe hide in `OpenAiExecutionTools` */ }));
2412
+ },
2413
+ createAnthropicClaudeExecutionTools: createAnthropicClaudeExecutionTools,
2414
+ createAzureOpenAiExecutionTools: function (options) { return new AzureOpenAiExecutionTools(options); },
2415
+ // <- Note: [🦑] Add here new LLM provider
2416
+ };
2417
+ /**
2418
+ * TODO: [🧠] Better file name than `config.ts` + maybe move to two separate files
2419
+ * TODO: [🧠][🎌] Adding this should be responsibility of each provider package NOT this one central place
2420
+ */
2421
+
2422
+ /**
2423
+ * @@@
2424
+ *
2425
+ * Note: This function is not cached, every call creates a new instance of `MultipleLlmExecutionTools`
2426
+ *
2427
+ * @returns @@@
2428
+ * @public exported from `@promptbook/core`
2429
+ */
2430
+ function createLlmToolsFromConfiguration(configuration, options) {
2431
+ if (options === void 0) { options = {}; }
2432
+ var _a = options.isVerbose, isVerbose = _a === void 0 ? false : _a;
2433
+ dotenv__namespace.config();
2434
+ var llmTools = configuration.map(function (llmConfiguration) {
2435
+ return EXECUTION_TOOLS_CLASSES["create".concat(llmConfiguration.className)](__assign({ isVerbose: isVerbose }, llmConfiguration.options));
2436
+ });
2437
+ return joinLlmExecutionTools.apply(void 0, __spreadArray([], __read(llmTools), false));
2438
+ }
2439
+ /**
2440
+ * TODO: [🎌] Together with `createLlmToolsFromConfiguration` + 'EXECUTION_TOOLS_CLASSES', ALL model providers get pulled into `@promptbook/core`; make this more efficient
2441
+ * TODO: [🧠][🎌] Dynamically install required providers
2442
+ * TODO: @@@ write discussion about this - wizard
2443
+ * TODO: [🧠][🍛] Which name is better `createLlmToolsFromConfig` or `createLlmToolsFromConfiguration`?
2444
+ * TODO: [🧠] Is there some meaningful way to test this util
2445
+ * TODO: This should be maybe not under `_common` but under `utils`
2446
+ */
2447
+
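The dispatch above resolves `'create' + className` against `EXECUTION_TOOLS_CLASSES`, so each configuration entry needs a `className` matching one of the registered providers plus that provider's `options`; `title` and `packageName` ride along for bookkeeping, mirroring the proxied Anthropic configuration earlier in this bundle. A sketch (API keys are placeholders):

    import { createLlmToolsFromConfiguration } from '@promptbook/core';

    const llmTools = createLlmToolsFromConfiguration(
        [
            {
                title: 'OpenAI',
                packageName: '@promptbook/openai',
                className: 'OpenAiExecutionTools', // -> EXECUTION_TOOLS_CLASSES.createOpenAiExecutionTools
                options: { apiKey: 'sk-xxxxx' },
            },
            {
                title: 'Anthropic Claude',
                packageName: '@promptbook/anthropic-claude',
                className: 'AnthropicClaudeExecutionTools', // -> createAnthropicClaudeExecutionTools
                options: { apiKey: 'sk-ant-xxxxx', isProxied: false },
            },
        ],
        { isVerbose: true },
    );
    // llmTools joins both providers via joinLlmExecutionTools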
2448
+ /**
2449
+ * Remote server is a proxy server that uses its execution tools internally and exposes the executor interface externally.
2450
+ *
2451
+ * You can simply use `RemoteExecutionTools` on client-side javascript and connect to your remote server.
2452
+ * This is useful when you want to keep all the logic on the browser side without exposing your API keys, or when you don't want to rely on the customer's GPU.
2453
+ *
2454
+ * @see https://github.com/webgptorg/promptbook#remote-server
2455
+ * @public exported from `@promptbook/remote-server`
2456
+ */
2457
+ function startRemoteServer(options) {
2458
+ var _this = this;
2459
+ var _a = __assign({ isAnonymousModeAllowed: false, isCollectionModeAllowed: false, collection: null, createLlmExecutionTools: null }, options), port = _a.port, path = _a.path, collection = _a.collection, createLlmExecutionTools = _a.createLlmExecutionTools,
2460
+ // <- TODO: [🧠][🤺] Remove `createLlmExecutionTools`, pass just `llmExecutionTools`
2461
+ isAnonymousModeAllowed = _a.isAnonymousModeAllowed, isCollectionModeAllowed = _a.isCollectionModeAllowed, _b = _a.isVerbose, isVerbose = _b === void 0 ? false : _b;
2462
+ // <- TODO: [🦪] Some helper type to be able to use discriminant union types with destructuring
2463
+ var httpServer = http__default["default"].createServer({}, function (request, response) { return __awaiter(_this, void 0, void 0, function () {
2464
+ var _a, _b;
2465
+ var _this = this;
2466
+ var _c;
2467
+ return __generator(this, function (_d) {
2468
+ switch (_d.label) {
2469
+ case 0:
2470
+ if ((_c = request.url) === null || _c === void 0 ? void 0 : _c.includes('socket.io')) {
2471
+ return [2 /*return*/];
2472
+ }
2473
+ _b = (_a = response).write;
2474
+ return [4 /*yield*/, spaceTrim.spaceTrim(function (block) { return __awaiter(_this, void 0, void 0, function () {
2475
+ var _a, _b, _c, _d, _e;
2476
+ return __generator(this, function (_f) {
2477
+ switch (_f.label) {
2478
+ case 0:
2479
+ _b = (_a = "\n Server for processing promptbook remote requests is running.\n\n Version: ".concat(PROMPTBOOK_VERSION, "\n Anonymous mode: ").concat(isAnonymousModeAllowed ? 'enabled' : 'disabled', "\n Collection mode: ").concat(isCollectionModeAllowed ? 'enabled' : 'disabled', "\n ")).concat;
2480
+ _c = block;
2481
+ if (!!isCollectionModeAllowed) return [3 /*break*/, 1];
2482
+ _d = '';
2483
+ return [3 /*break*/, 3];
2484
+ case 1:
2485
+ _e = 'Pipelines in collection:\n';
2486
+ return [4 /*yield*/, collection.listPipelines()];
2487
+ case 2:
2488
+ _d = _e +
2489
+ (_f.sent())
2490
+ .map(function (pipelineUrl) { return "- ".concat(pipelineUrl); })
2491
+ .join('\n');
2492
+ _f.label = 3;
2493
+ case 3: return [2 /*return*/, _b.apply(_a, [_c.apply(void 0, [_d]), "\n\n For more information look at:\n https://github.com/webgptorg/promptbook\n "])];
2494
+ }
2495
+ });
2496
+ }); })];
2497
+ case 1:
2498
+ _b.apply(_a, [_d.sent()]);
2499
+ response.end();
2500
+ return [2 /*return*/];
2501
+ }
2502
+ });
2503
+ }); });
2504
+ var server = new socket_io.Server(httpServer, {
2505
+ path: path,
2506
+ transports: [/*'websocket', <- TODO: [🌬] Make websocket transport work */ 'polling'],
2507
+ cors: {
2508
+ origin: '*',
2509
+ methods: ['GET', 'POST'],
2510
+ },
2511
+ });
2512
+ server.on('connection', function (socket) {
2513
+ console.info(colors__default["default"].gray("Client connected"), socket.id);
2514
+ socket.on('request', function (request) { return __awaiter(_this, void 0, void 0, function () {
2515
+ var _a, prompt, clientId, llmToolsConfiguration, llmExecutionTools, promptResult, _b, error_1;
2516
+ return __generator(this, function (_c) {
2517
+ switch (_c.label) {
2518
+ case 0:
2519
+ _a = __assign({ clientId: null, llmToolsConfiguration: null }, request), prompt = _a.prompt, clientId = _a.clientId, llmToolsConfiguration = _a.llmToolsConfiguration;
2520
+ // <- TODO: [🦪] Some helper type to be able to use discriminant union types with destructuring
2521
+ if (isVerbose) {
2522
+ console.info(colors__default["default"].bgWhite("Prompt:"), colors__default["default"].gray(JSON.stringify(request, null, 4)));
2523
+ }
2524
+ _c.label = 1;
2525
+ case 1:
2526
+ _c.trys.push([1, 14, 15, 16]);
2527
+ if (llmToolsConfiguration !== null && !isAnonymousModeAllowed) {
2528
+ throw new PipelineExecutionError("Anonymous mode is not allowed"); // <- TODO: !!!!!! Test
2529
+ }
2530
+ if (clientId !== null && !isCollectionModeAllowed) {
2531
+ throw new PipelineExecutionError("Collection mode is not allowed"); // <- TODO: !!!!!! Test
2532
+ }
2533
+ llmExecutionTools = void 0;
2534
+ if (!(llmToolsConfiguration !== null)) return [3 /*break*/, 2];
2535
+ // Note: Anonymous mode
2536
+ // TODO: Maybe check that configuration is not empty
2537
+ llmExecutionTools = createLlmToolsFromConfiguration(llmToolsConfiguration);
2538
+ return [3 /*break*/, 5];
2539
+ case 2:
2540
+ if (!(createLlmExecutionTools !== null)) return [3 /*break*/, 4];
2541
+ // Note: Collection mode
2542
+ llmExecutionTools = createLlmExecutionTools(clientId);
2543
+ return [4 /*yield*/, collection.isResponsibleForPrompt(prompt)];
2544
+ case 3:
2545
+ if (!(_c.sent())) {
2546
+ throw new PipelineExecutionError("Pipeline is not in the collection of this server");
2547
+ }
2548
+ return [3 /*break*/, 5];
2549
+ case 4: throw new PipelineExecutionError("You must provide either llmToolsConfiguration or createLlmExecutionTools");
2550
+ case 5:
2551
+ promptResult = void 0;
2552
+ _b = prompt.modelRequirements.modelVariant;
2553
+ switch (_b) {
2554
+ case 'CHAT': return [3 /*break*/, 6];
2555
+ case 'COMPLETION': return [3 /*break*/, 8];
2556
+ case 'EMBEDDING': return [3 /*break*/, 10];
2557
+ }
2558
+ return [3 /*break*/, 12];
2559
+ case 6:
2560
+ if (llmExecutionTools.callChatModel === undefined) {
2561
+ // Note: [0] This check should not be a thing
2562
+ throw new PipelineExecutionError("Chat model is not available");
2563
+ }
2564
+ return [4 /*yield*/, llmExecutionTools.callChatModel(prompt)];
2565
+ case 7:
2566
+ promptResult = _c.sent();
2567
+ return [3 /*break*/, 13];
2568
+ case 8:
2569
+ if (llmExecutionTools.callCompletionModel === undefined) {
2570
+ // Note: [0] This check should not be a thing
2571
+ throw new PipelineExecutionError("Completion model is not available");
2572
+ }
2573
+ return [4 /*yield*/, llmExecutionTools.callCompletionModel(prompt)];
2574
+ case 9:
2575
+ promptResult = _c.sent();
2576
+ return [3 /*break*/, 13];
2577
+ case 10:
2578
+ if (llmExecutionTools.callEmbeddingModel === undefined) {
2579
+ // Note: [0] This check should not be a thing
2580
+ throw new PipelineExecutionError("Embedding model is not available");
2581
+ }
2582
+ return [4 /*yield*/, llmExecutionTools.callEmbeddingModel(prompt)];
2583
+ case 11:
2584
+ promptResult = _c.sent();
2585
+ return [3 /*break*/, 13];
2586
+ case 12: throw new PipelineExecutionError("Unknown model variant \"".concat(prompt.modelRequirements.modelVariant, "\""));
2587
+ case 13:
2588
+ if (isVerbose) {
2589
+ console.info(colors__default["default"].bgGreen("PromptResult:"), colors__default["default"].green(JSON.stringify(promptResult, null, 4)));
2590
+ }
2591
+ socket.emit('response', { promptResult: promptResult });
2592
+ return [3 /*break*/, 16];
2593
+ case 14:
2594
+ error_1 = _c.sent();
2595
+ if (!(error_1 instanceof Error)) {
2596
+ throw error_1;
2597
+ }
2598
+ socket.emit('error', { errorMessage: error_1.message });
2599
+ return [3 /*break*/, 16];
2600
+ case 15:
203
2601
  socket.disconnect();
204
2602
  return [7 /*endfinally*/];
205
- case 13: return [2 /*return*/];
2603
+ case 16: return [2 /*return*/];
206
2604
  }
207
2605
  });
208
2606
  }); });
@@ -235,13 +2633,14 @@
235
2633
  };
236
2634
  }
237
2635
  /**
238
- * TODO: [🍜] Add anonymous option
2636
+ * TODO: [🍜] !!!!!! Add anonymous option
239
2637
  * TODO: [⚖] Expose the collection to be able to connect to same collection via createCollectionFromUrl
240
2638
  * TODO: Handle progress - support streaming
241
2639
  * TODO: [🗯] Do not hang up immediately but wait until client closes OR timeout
242
2640
  * TODO: [🗯] Timeout on chat to free up resources
243
2641
  * TODO: [🃏] Pass here some security token to prevent malicious usage and/or DDoS
244
2642
  * TODO: [0] Set unavailable models as undefined in `RemoteLlmExecutionTools` NOT throw error here
2643
+ * TODO: Constrain anonymous mode for specific models / providers
245
2644
  */
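A hedged sketch of starting the server in anonymous mode; the option names are exactly those destructured in `startRemoteServer` above, while the port and path are illustrative.

    import { startRemoteServer } from '@promptbook/remote-server';

    // Anonymous mode: each request carries its own llmToolsConfiguration, so no keys live on the server
    startRemoteServer({
        port: 4460,
        path: '/promptbook',
        isAnonymousModeAllowed: true,
        isVerbose: true,
    });

Collection mode would instead set `isCollectionModeAllowed: true` and supply a `collection` plus a `createLlmExecutionTools(clientId)` callback, as handled in the request handler above.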
246
2645
 
247
2646
  exports.PROMPTBOOK_VERSION = PROMPTBOOK_VERSION;