@promptbook/openai 0.86.8 → 0.86.22

This diff compares the contents of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions exactly as they appear in their public registry.
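Most of the churn below comes from a build-target change rather than new behavior: the 0.86.8 bundle was down-leveled to ES5 and inlined the tslib helpers (`__extends`, `__assign`, `__awaiter`, `__generator`, `__values`, `__read`, `__spreadArray`), while 0.86.22 emits native ES2015+ syntax, so those helpers disappear, `var` becomes `const`/`let`, constructor functions become classes, and generator state machines become `async`/`await`. As a rough illustration (hypothetical `MyError`, not taken from the diff), the same source compiles both ways like this:

    // ES5 down-leveled output (the 0.86.8 style), relying on the inlined __extends helper:
    var MyError = /** @class */ (function (_super) {
        __extends(MyError, _super);
        function MyError(message) { return _super.call(this, message) || this; }
        return MyError;
    }(Error));

    // Native ES2015+ output (the 0.86.22 style):
    class MyError extends Error {
        constructor(message) { super(message); }
    }

Apart from the engine version constant (0.86.8 → 0.86.22) and a camelCase-aware `countWords`, the visible changes are essentially this mechanical modernization.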
package/umd/index.umd.js CHANGED
@@ -17,138 +17,19 @@
  * @generated
  * @see https://github.com/webgptorg/book
  */
- var BOOK_LANGUAGE_VERSION = '1.0.0';
+ const BOOK_LANGUAGE_VERSION = '1.0.0';
  /**
  * The version of the Promptbook engine
  *
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
- var PROMPTBOOK_ENGINE_VERSION = '0.86.8';
+ const PROMPTBOOK_ENGINE_VERSION = '0.86.22';
  /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
  */
 
- /*! *****************************************************************************
- Copyright (c) Microsoft Corporation.
-
- Permission to use, copy, modify, and/or distribute this software for any
- purpose with or without fee is hereby granted.
-
- THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
- REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
- AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
- INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
- OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- PERFORMANCE OF THIS SOFTWARE.
- ***************************************************************************** */
- /* global Reflect, Promise */
-
- var extendStatics = function(d, b) {
- extendStatics = Object.setPrototypeOf ||
- ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
- function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };
- return extendStatics(d, b);
- };
-
- function __extends(d, b) {
- if (typeof b !== "function" && b !== null)
- throw new TypeError("Class extends value " + String(b) + " is not a constructor or null");
- extendStatics(d, b);
- function __() { this.constructor = d; }
- d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
- }
-
- var __assign = function() {
- __assign = Object.assign || function __assign(t) {
- for (var s, i = 1, n = arguments.length; i < n; i++) {
- s = arguments[i];
- for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p)) t[p] = s[p];
- }
- return t;
- };
- return __assign.apply(this, arguments);
- };
-
- function __awaiter(thisArg, _arguments, P, generator) {
- function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
- return new (P || (P = Promise))(function (resolve, reject) {
- function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
- function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
- function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
- step((generator = generator.apply(thisArg, _arguments || [])).next());
- });
- }
-
- function __generator(thisArg, body) {
- var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;
- return g = { next: verb(0), "throw": verb(1), "return": verb(2) }, typeof Symbol === "function" && (g[Symbol.iterator] = function() { return this; }), g;
- function verb(n) { return function (v) { return step([n, v]); }; }
- function step(op) {
- if (f) throw new TypeError("Generator is already executing.");
- while (_) try {
- if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;
- if (y = 0, t) op = [op[0] & 2, t.value];
- switch (op[0]) {
- case 0: case 1: t = op; break;
- case 4: _.label++; return { value: op[1], done: false };
- case 5: _.label++; y = op[1]; op = [0]; continue;
- case 7: op = _.ops.pop(); _.trys.pop(); continue;
- default:
- if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }
- if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }
- if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }
- if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }
- if (t[2]) _.ops.pop();
- _.trys.pop(); continue;
- }
- op = body.call(thisArg, _);
- } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }
- if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };
- }
- }
-
- function __values(o) {
- var s = typeof Symbol === "function" && Symbol.iterator, m = s && o[s], i = 0;
- if (m) return m.call(o);
- if (o && typeof o.length === "number") return {
- next: function () {
- if (o && i >= o.length) o = void 0;
- return { value: o && o[i++], done: !o };
- }
- };
- throw new TypeError(s ? "Object is not iterable." : "Symbol.iterator is not defined.");
- }
-
- function __read(o, n) {
- var m = typeof Symbol === "function" && o[Symbol.iterator];
- if (!m) return o;
- var i = m.call(o), r, ar = [], e;
- try {
- while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);
- }
- catch (error) { e = { error: error }; }
- finally {
- try {
- if (r && !r.done && (m = i["return"])) m.call(i);
- }
- finally { if (e) throw e.error; }
- }
- return ar;
- }
-
- function __spreadArray(to, from, pack) {
- if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {
- if (ar || !(i in from)) {
- if (!ar) ar = Array.prototype.slice.call(from, 0, i);
- ar[i] = from[i];
- }
- }
- return to.concat(ar || Array.prototype.slice.call(from));
- }
-
  /**
  * Detects if the code is running in a browser environment in main thread (Not in a web worker)
  *
@@ -156,7 +37,13 @@
  *
  * @public exported from `@promptbook/utils`
  */
- var $isRunningInBrowser = new Function("\n try {\n return this === window;\n } catch (e) {\n return false;\n }\n");
+ const $isRunningInBrowser = new Function(`
+ try {
+ return this === window;
+ } catch (e) {
+ return false;
+ }
+ `);
  /**
  * TODO: [🎺]
  */
@@ -168,7 +55,17 @@
  *
  * @public exported from `@promptbook/utils`
  */
- var $isRunningInWebWorker = new Function("\n try {\n if (typeof WorkerGlobalScope !== 'undefined' && self instanceof WorkerGlobalScope) {\n return true;\n } else {\n return false;\n }\n } catch (e) {\n return false;\n }\n");
+ const $isRunningInWebWorker = new Function(`
+ try {
+ if (typeof WorkerGlobalScope !== 'undefined' && self instanceof WorkerGlobalScope) {
+ return true;
+ } else {
+ return false;
+ }
+ } catch (e) {
+ return false;
+ }
+ `);
  /**
  * TODO: [🎺]
  */
@@ -178,32 +75,36 @@
  *
  * @public exported from `@promptbook/core`
  */
- var NotYetImplementedError = /** @class */ (function (_super) {
- __extends(NotYetImplementedError, _super);
- function NotYetImplementedError(message) {
- var _this = _super.call(this, spaceTrim.spaceTrim(function (block) { return "\n ".concat(block(message), "\n\n Note: This feature is not implemented yet but it will be soon.\n\n If you want speed up the implementation or just read more, look here:\n https://github.com/webgptorg/promptbook\n\n Or contact us on pavol@ptbk.io\n\n "); })) || this;
- _this.name = 'NotYetImplementedError';
- Object.setPrototypeOf(_this, NotYetImplementedError.prototype);
- return _this;
+ class NotYetImplementedError extends Error {
+ constructor(message) {
+ super(spaceTrim.spaceTrim((block) => `
+ ${block(message)}
+
+ Note: This feature is not implemented yet but it will be soon.
+
+ If you want speed up the implementation or just read more, look here:
+ https://github.com/webgptorg/promptbook
+
+ Or contact us on pavol@ptbk.io
+
+ `));
+ this.name = 'NotYetImplementedError';
+ Object.setPrototypeOf(this, NotYetImplementedError.prototype);
  }
- return NotYetImplementedError;
- }(Error));
+ }
 
  /**
  * This error indicates errors during the execution of the pipeline
  *
  * @public exported from `@promptbook/core`
  */
- var PipelineExecutionError = /** @class */ (function (_super) {
- __extends(PipelineExecutionError, _super);
- function PipelineExecutionError(message) {
- var _this = _super.call(this, message) || this;
- _this.name = 'PipelineExecutionError';
- Object.setPrototypeOf(_this, PipelineExecutionError.prototype);
- return _this;
+ class PipelineExecutionError extends Error {
+ constructor(message) {
+ super(message);
+ this.name = 'PipelineExecutionError';
+ Object.setPrototypeOf(this, PipelineExecutionError.prototype);
  }
- return PipelineExecutionError;
- }(Error));
+ }
 
  /**
  * Freezes the given object and all its nested objects recursively
@@ -215,26 +116,15 @@
  *
  * @public exported from `@promptbook/utils`
  */
  function $deepFreeze(objectValue) {
- var e_1, _a;
  if (Array.isArray(objectValue)) {
- return Object.freeze(objectValue.map(function (item) { return $deepFreeze(item); }));
+ return Object.freeze(objectValue.map((item) => $deepFreeze(item)));
  }
- var propertyNames = Object.getOwnPropertyNames(objectValue);
- try {
- for (var propertyNames_1 = __values(propertyNames), propertyNames_1_1 = propertyNames_1.next(); !propertyNames_1_1.done; propertyNames_1_1 = propertyNames_1.next()) {
- var propertyName = propertyNames_1_1.value;
- var value = objectValue[propertyName];
- if (value && typeof value === 'object') {
- $deepFreeze(value);
- }
- }
- }
- catch (e_1_1) { e_1 = { error: e_1_1 }; }
- finally {
- try {
- if (propertyNames_1_1 && !propertyNames_1_1.done && (_a = propertyNames_1.return)) _a.call(propertyNames_1);
+ const propertyNames = Object.getOwnPropertyNames(objectValue);
+ for (const propertyName of propertyNames) {
+ const value = objectValue[propertyName];
+ if (value && typeof value === 'object') {
+ $deepFreeze(value);
  }
- finally { if (e_1) throw e_1.error; }
  }
  Object.freeze(objectValue);
  return objectValue;
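Note (illustrative usage, not part of the published diff): `$deepFreeze` freezes the whole object graph, so later mutation attempts are rejected:

    const config = $deepFreeze({ limits: { loop: 1000 } });
    config.limits.loop = 1; // TypeError in strict mode, silently ignored otherwise
    console.log(config.limits.loop); // -> 1000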
@@ -274,7 +164,7 @@
  *
  * @public exported from `@promptbook/core`
  */
- var UNCERTAIN_USAGE = $deepFreeze({
+ const UNCERTAIN_USAGE = $deepFreeze({
  price: { value: 0, isUncertain: true },
  input: {
  tokensCount: { value: 0, isUncertain: true },
@@ -318,32 +208,32 @@
  *
  * @public exported from `@promptbook/core`
  */
- var NAME = "Promptbook";
+ const NAME = `Promptbook`;
  /**
  * Email of the responsible person
  *
  * @public exported from `@promptbook/core`
  */
- var ADMIN_EMAIL = 'pavol@ptbk.io';
+ const ADMIN_EMAIL = 'pavol@ptbk.io';
  /**
  * Name of the responsible person for the Promptbook on GitHub
  *
  * @public exported from `@promptbook/core`
  */
- var ADMIN_GITHUB_NAME = 'hejny';
+ const ADMIN_GITHUB_NAME = 'hejny';
  // <- TODO: [🧠] Better system for generator warnings - not always "code" and "by `@promptbook/cli`"
  /**
  * The maximum number of iterations for a loops
  *
  * @private within the repository - too low-level in comparison with other `MAX_...`
  */
- var LOOP_LIMIT = 1000;
+ const LOOP_LIMIT = 1000;
  /**
  * Strings to represent various values in the context of parameter values
  *
  * @public exported from `@promptbook/utils`
  */
- var VALUE_STRINGS = {
+ const VALUE_STRINGS = {
  empty: '(nothing; empty string)',
  null: '(no value; null)',
  undefined: '(unknown value; undefined)',
@@ -357,7 +247,7 @@
  *
  * @public exported from `@promptbook/utils`
  */
- var SMALL_NUMBER = 0.001;
+ const SMALL_NUMBER = 0.001;
  // <- TODO: [🧜‍♂️]
  /**
  * @@@
@@ -382,8 +272,11 @@
  * @public exported from `@promptbook/utils`
  */
  function orderJson(options) {
- var value = options.value, order = options.order;
- var orderedValue = __assign(__assign({}, (order === undefined ? {} : Object.fromEntries(order.map(function (key) { return [key, undefined]; })))), value);
+ const { value, order } = options;
+ const orderedValue = {
+ ...(order === undefined ? {} : Object.fromEntries(order.map((key) => [key, undefined]))),
+ ...value,
+ };
  return orderedValue;
  }
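Note (illustrative, not part of the published diff): `orderJson` only reorders keys, it does not change values:

    orderJson({ value: { b: 2, a: 1 }, order: ['a', 'b'] }); // -> { a: 1, b: 2 } with keys in 'a', 'b' order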
 
@@ -393,11 +286,37 @@
  * @private private within the repository
  */
  function getErrorReportUrl(error) {
- var report = {
- title: "\uD83D\uDC1C Error report from ".concat(NAME),
- body: spaceTrim__default["default"](function (block) { return "\n\n\n `".concat(error.name || 'Error', "` has occurred in the [").concat(NAME, "], please look into it @").concat(ADMIN_GITHUB_NAME, ".\n\n ```\n ").concat(block(error.message || '(no error message)'), "\n ```\n\n\n ## More info:\n\n - **Promptbook engine version:** ").concat(PROMPTBOOK_ENGINE_VERSION, "\n - **Book language version:** ").concat(BOOK_LANGUAGE_VERSION, "\n - **Time:** ").concat(new Date().toISOString(), "\n\n <details>\n <summary>Stack trace:</summary>\n\n ## Stack trace:\n\n ```stacktrace\n ").concat(block(error.stack || '(empty)'), "\n ```\n </details>\n\n "); }),
+ const report = {
+ title: `🐜 Error report from ${NAME}`,
+ body: spaceTrim__default["default"]((block) => `
+
+
+ \`${error.name || 'Error'}\` has occurred in the [${NAME}], please look into it @${ADMIN_GITHUB_NAME}.
+
+ \`\`\`
+ ${block(error.message || '(no error message)')}
+ \`\`\`
+
+
+ ## More info:
+
+ - **Promptbook engine version:** ${PROMPTBOOK_ENGINE_VERSION}
+ - **Book language version:** ${BOOK_LANGUAGE_VERSION}
+ - **Time:** ${new Date().toISOString()}
+
+ <details>
+ <summary>Stack trace:</summary>
+
+ ## Stack trace:
+
+ \`\`\`stacktrace
+ ${block(error.stack || '(empty)')}
+ \`\`\`
+ </details>
+
+ `),
  };
- var reportUrl = new URL("https://github.com/webgptorg/promptbook/issues/new");
+ const reportUrl = new URL(`https://github.com/webgptorg/promptbook/issues/new`);
  reportUrl.searchParams.set('labels', 'bug');
  reportUrl.searchParams.set('assignees', ADMIN_GITHUB_NAME);
  reportUrl.searchParams.set('title', report.title);
@@ -410,16 +329,24 @@
  *
  * @public exported from `@promptbook/core`
  */
- var UnexpectedError = /** @class */ (function (_super) {
- __extends(UnexpectedError, _super);
- function UnexpectedError(message) {
- var _this = _super.call(this, spaceTrim.spaceTrim(function (block) { return "\n ".concat(block(message), "\n\n Note: This error should not happen.\n It's probbably a bug in the pipeline collection\n\n Please report issue:\n ").concat(block(getErrorReportUrl(new Error(message)).href), "\n\n Or contact us on ").concat(ADMIN_EMAIL, "\n\n "); })) || this;
- _this.name = 'UnexpectedError';
- Object.setPrototypeOf(_this, UnexpectedError.prototype);
- return _this;
+ class UnexpectedError extends Error {
+ constructor(message) {
+ super(spaceTrim.spaceTrim((block) => `
+ ${block(message)}
+
+ Note: This error should not happen.
+ It's probbably a bug in the pipeline collection
+
+ Please report issue:
+ ${block(getErrorReportUrl(new Error(message)).href)}
+
+ Or contact us on ${ADMIN_EMAIL}
+
+ `));
+ this.name = 'UnexpectedError';
+ Object.setPrototypeOf(this, UnexpectedError.prototype);
  }
- return UnexpectedError;
- }(Error));
+ }
 
  /**
  * Checks if the value is [🚉] serializable as JSON
@@ -442,10 +369,9 @@
  * @public exported from `@promptbook/utils`
  */
  function checkSerializableAsJson(options) {
- var e_1, _a;
- var value = options.value, name = options.name, message = options.message;
+ const { value, name, message } = options;
  if (value === undefined) {
- throw new UnexpectedError("".concat(name, " is undefined"));
+ throw new UnexpectedError(`${name} is undefined`);
  }
  else if (value === null) {
  return;
@@ -460,49 +386,54 @@
  return;
  }
  else if (typeof value === 'symbol') {
- throw new UnexpectedError("".concat(name, " is symbol"));
+ throw new UnexpectedError(`${name} is symbol`);
  }
  else if (typeof value === 'function') {
- throw new UnexpectedError("".concat(name, " is function"));
+ throw new UnexpectedError(`${name} is function`);
  }
  else if (typeof value === 'object' && Array.isArray(value)) {
- for (var i = 0; i < value.length; i++) {
- checkSerializableAsJson({ name: "".concat(name, "[").concat(i, "]"), value: value[i], message: message });
+ for (let i = 0; i < value.length; i++) {
+ checkSerializableAsJson({ name: `${name}[${i}]`, value: value[i], message });
  }
  }
  else if (typeof value === 'object') {
  if (value instanceof Date) {
- throw new UnexpectedError(spaceTrim__default["default"](function (block) { return "\n `".concat(name, "` is Date\n\n Use `string_date_iso8601` instead\n\n Additional message for `").concat(name, "`:\n ").concat(block(message || '(nothing)'), "\n "); }));
+ throw new UnexpectedError(spaceTrim__default["default"]((block) => `
+ \`${name}\` is Date
+
+ Use \`string_date_iso8601\` instead
+
+ Additional message for \`${name}\`:
+ ${block(message || '(nothing)')}
+ `));
  }
  else if (value instanceof Map) {
- throw new UnexpectedError("".concat(name, " is Map"));
+ throw new UnexpectedError(`${name} is Map`);
  }
  else if (value instanceof Set) {
- throw new UnexpectedError("".concat(name, " is Set"));
+ throw new UnexpectedError(`${name} is Set`);
  }
  else if (value instanceof RegExp) {
- throw new UnexpectedError("".concat(name, " is RegExp"));
+ throw new UnexpectedError(`${name} is RegExp`);
  }
  else if (value instanceof Error) {
- throw new UnexpectedError(spaceTrim__default["default"](function (block) { return "\n `".concat(name, "` is unserialized Error\n\n Use function `serializeError`\n\n Additional message for `").concat(name, "`:\n ").concat(block(message || '(nothing)'), "\n\n "); }));
+ throw new UnexpectedError(spaceTrim__default["default"]((block) => `
+ \`${name}\` is unserialized Error
+
+ Use function \`serializeError\`
+
+ Additional message for \`${name}\`:
+ ${block(message || '(nothing)')}
+
+ `));
  }
  else {
- try {
- for (var _b = __values(Object.entries(value)), _c = _b.next(); !_c.done; _c = _b.next()) {
- var _d = __read(_c.value, 2), subName = _d[0], subValue = _d[1];
- if (subValue === undefined) {
- // Note: undefined in object is serializable - it is just omited
- continue;
- }
- checkSerializableAsJson({ name: "".concat(name, ".").concat(subName), value: subValue, message: message });
+ for (const [subName, subValue] of Object.entries(value)) {
+ if (subValue === undefined) {
+ // Note: undefined in object is serializable - it is just omited
+ continue;
  }
- }
- catch (e_1_1) { e_1 = { error: e_1_1 }; }
- finally {
- try {
- if (_c && !_c.done && (_a = _b.return)) _a.call(_b);
- }
- finally { if (e_1) throw e_1.error; }
+ checkSerializableAsJson({ name: `${name}.${subName}`, value: subValue, message });
  }
  try {
  JSON.stringify(value); // <- TODO: [0]
@@ -511,7 +442,14 @@
  if (!(error instanceof Error)) {
  throw error;
  }
- throw new UnexpectedError(spaceTrim__default["default"](function (block) { return "\n `".concat(name, "` is not serializable\n\n ").concat(block(error.stack || error.message), "\n\n Additional message for `").concat(name, "`:\n ").concat(block(message || '(nothing)'), "\n "); }));
+ throw new UnexpectedError(spaceTrim__default["default"]((block) => `
+ \`${name}\` is not serializable
+
+ ${block(error.stack || error.message)}
+
+ Additional message for \`${name}\`:
+ ${block(message || '(nothing)')}
+ `));
  }
  /*
  TODO: [0] Is there some more elegant way to check circular references?
@@ -536,7 +474,12 @@
  }
  }
  else {
- throw new UnexpectedError(spaceTrim__default["default"](function (block) { return "\n `".concat(name, "` is unknown type\n\n Additional message for `").concat(name, "`:\n ").concat(block(message || '(nothing)'), "\n "); }));
+ throw new UnexpectedError(spaceTrim__default["default"]((block) => `
+ \`${name}\` is unknown type
+
+ Additional message for \`${name}\`:
+ ${block(message || '(nothing)')}
+ `));
  }
  }
  /**
@@ -582,9 +525,9 @@
  * @public exported from `@promptbook/utils`
  */
  function exportJson(options) {
- var name = options.name, value = options.value, order = options.order, message = options.message;
- checkSerializableAsJson({ name: name, value: value, message: message });
- var orderedValue =
+ const { name, value, order, message } = options;
+ checkSerializableAsJson({ name, value, message });
+ const orderedValue =
  // TODO: Fix error "Type instantiation is excessively deep and possibly infinite."
  // eslint-disable-next-line @typescript-eslint/ban-ts-comment
  // @ts-ignore
@@ -607,19 +550,19 @@
  *
  * @private within the repository
  */
- var REPLACING_NONCE = 'ptbkauk42kV2dzao34faw7FudQUHYPtW';
+ const REPLACING_NONCE = 'ptbkauk42kV2dzao34faw7FudQUHYPtW';
  /**
  * @@@
  *
  * @private within the repository
  */
- var RESERVED_PARAMETER_MISSING_VALUE = 'MISSING-' + REPLACING_NONCE;
+ const RESERVED_PARAMETER_MISSING_VALUE = 'MISSING-' + REPLACING_NONCE;
  /**
  * @@@
  *
  * @private within the repository
  */
- var RESERVED_PARAMETER_RESTRICTED = 'RESTRICTED-' + REPLACING_NONCE;
+ const RESERVED_PARAMETER_RESTRICTED = 'RESTRICTED-' + REPLACING_NONCE;
  /**
  * The names of the parameters that are reserved for special purposes
  *
@@ -627,7 +570,7 @@
  */
  exportJson({
  name: 'RESERVED_PARAMETER_NAMES',
- message: "The names of the parameters that are reserved for special purposes",
+ message: `The names of the parameters that are reserved for special purposes`,
  value: [
  'content',
  'context',
@@ -649,16 +592,13 @@
  *
  * @public exported from `@promptbook/core`
  */
- var LimitReachedError = /** @class */ (function (_super) {
- __extends(LimitReachedError, _super);
- function LimitReachedError(message) {
- var _this = _super.call(this, message) || this;
- _this.name = 'LimitReachedError';
- Object.setPrototypeOf(_this, LimitReachedError.prototype);
- return _this;
+ class LimitReachedError extends Error {
+ constructor(message) {
+ super(message);
+ this.name = 'LimitReachedError';
+ Object.setPrototypeOf(this, LimitReachedError.prototype);
  }
- return LimitReachedError;
- }(Error));
+ }
 
  /**
  * Format either small or big number
@@ -678,9 +618,9 @@
  else if (value === -Infinity) {
  return VALUE_STRINGS.negativeInfinity;
  }
- for (var exponent = 0; exponent < 15; exponent++) {
- var factor = Math.pow(10, exponent);
- var valueRounded = Math.round(value * factor) / factor;
+ for (let exponent = 0; exponent < 15; exponent++) {
+ const factor = 10 ** exponent;
+ const valueRounded = Math.round(value * factor) / factor;
  if (Math.abs(value - valueRounded) / value < SMALL_NUMBER) {
  return valueRounded.toFixed(exponent);
  }
@@ -748,47 +688,38 @@
  * @public exported from `@promptbook/utils`
  */
  function templateParameters(template, parameters) {
- var e_1, _a;
- try {
- for (var _b = __values(Object.entries(parameters)), _c = _b.next(); !_c.done; _c = _b.next()) {
- var _d = __read(_c.value, 2), parameterName = _d[0], parameterValue = _d[1];
- if (parameterValue === RESERVED_PARAMETER_MISSING_VALUE) {
- throw new UnexpectedError("Parameter `{".concat(parameterName, "}` has missing value"));
- }
- else if (parameterValue === RESERVED_PARAMETER_RESTRICTED) {
- // TODO: [🍵]
- throw new UnexpectedError("Parameter `{".concat(parameterName, "}` is restricted to use"));
- }
+ for (const [parameterName, parameterValue] of Object.entries(parameters)) {
+ if (parameterValue === RESERVED_PARAMETER_MISSING_VALUE) {
+ throw new UnexpectedError(`Parameter \`{${parameterName}}\` has missing value`);
  }
- }
- catch (e_1_1) { e_1 = { error: e_1_1 }; }
- finally {
- try {
- if (_c && !_c.done && (_a = _b.return)) _a.call(_b);
+ else if (parameterValue === RESERVED_PARAMETER_RESTRICTED) {
+ // TODO: [🍵]
+ throw new UnexpectedError(`Parameter \`{${parameterName}}\` is restricted to use`);
  }
- finally { if (e_1) throw e_1.error; }
  }
- var replacedTemplates = template;
- var match;
- var loopLimit = LOOP_LIMIT;
- var _loop_1 = function () {
+ let replacedTemplates = template;
+ let match;
+ let loopLimit = LOOP_LIMIT;
+ while ((match = /^(?<precol>.*){(?<parameterName>\w+)}(.*)/m /* <- Not global */
+ .exec(replacedTemplates))) {
  if (loopLimit-- < 0) {
  throw new LimitReachedError('Loop limit reached during parameters replacement in `templateParameters`');
  }
- var precol = match.groups.precol;
- var parameterName = match.groups.parameterName;
+ const precol = match.groups.precol;
+ const parameterName = match.groups.parameterName;
  if (parameterName === '') {
- return "continue";
+ // Note: Skip empty placeholders. It's used to avoid confusion with JSON-like strings
+ continue;
  }
  if (parameterName.indexOf('{') !== -1 || parameterName.indexOf('}') !== -1) {
  throw new PipelineExecutionError('Parameter is already opened or not closed');
  }
  if (parameters[parameterName] === undefined) {
- throw new PipelineExecutionError("Parameter `{".concat(parameterName, "}` is not defined"));
+ throw new PipelineExecutionError(`Parameter \`{${parameterName}}\` is not defined`);
  }
- var parameterValue = parameters[parameterName];
+ let parameterValue = parameters[parameterName];
  if (parameterValue === undefined) {
- throw new PipelineExecutionError("Parameter `{".concat(parameterName, "}` is not defined"));
+ throw new PipelineExecutionError(`Parameter \`{${parameterName}}\` is not defined`);
  }
  parameterValue = valueToString(parameterValue);
  // Escape curly braces in parameter values to prevent prompt-injection
@@ -796,17 +727,13 @@
  if (parameterValue.includes('\n') && /^\s*\W{0,3}\s*$/.test(precol)) {
  parameterValue = parameterValue
  .split('\n')
- .map(function (line, index) { return (index === 0 ? line : "".concat(precol).concat(line)); })
+ .map((line, index) => (index === 0 ? line : `${precol}${line}`))
  .join('\n');
  }
  replacedTemplates =
  replacedTemplates.substring(0, match.index + precol.length) +
  parameterValue +
  replacedTemplates.substring(match.index + precol.length + parameterName.length + 2);
- };
- while ((match = /^(?<precol>.*){(?<parameterName>\w+)}(.*)/m /* <- Not global */
- .exec(replacedTemplates))) {
- _loop_1();
  }
  // [💫] Check if there are parameters that are not closed properly
  if (/{\w+$/.test(replacedTemplates)) {
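Note (illustrative, not part of the published diff): `templateParameters` substitutes `{parameterName}` placeholders and throws on parameters it cannot resolve:

    templateParameters('Hello {name}!', { name: 'Alice' }); // -> 'Hello Alice!'
    templateParameters('Hello {name}!', {}); // throws PipelineExecutionError: Parameter `{name}` is not defined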
@@ -839,13 +766,13 @@
  *
  * @public exported from `@promptbook/utils`
  */
- var CHARACTERS_PER_STANDARD_LINE = 63;
+ const CHARACTERS_PER_STANDARD_LINE = 63;
  /**
  * Number of lines per standard A4 page with 11pt Arial font size and standard margins and spacing.
  *
  * @public exported from `@promptbook/utils`
  */
- var LINES_PER_STANDARD_PAGE = 44;
+ const LINES_PER_STANDARD_PAGE = 44;
  /**
  * TODO: [🧠] Should be this `constants.ts` or `config.ts`?
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -861,8 +788,8 @@
  function countLines(text) {
  text = text.replace('\r\n', '\n');
  text = text.replace('\r', '\n');
- var lines = text.split('\n');
- return lines.reduce(function (count, line) { return count + Math.ceil(line.length / CHARACTERS_PER_STANDARD_LINE); }, 0);
+ const lines = text.split('\n');
+ return lines.reduce((count, line) => count + Math.ceil(line.length / CHARACTERS_PER_STANDARD_LINE), 0);
  }
 
  /**
@@ -882,7 +809,7 @@
  * @public exported from `@promptbook/utils`
  */
  function countParagraphs(text) {
- return text.split(/\n\s*\n/).filter(function (paragraph) { return paragraph.trim() !== ''; }).length;
+ return text.split(/\n\s*\n/).filter((paragraph) => paragraph.trim() !== '').length;
  }
 
  /**
@@ -891,7 +818,7 @@
  * @public exported from `@promptbook/utils`
  */
  function splitIntoSentences(text) {
- return text.split(/[.!?]+/).filter(function (sentence) { return sentence.trim() !== ''; });
+ return text.split(/[.!?]+/).filter((sentence) => sentence.trim() !== '');
  }
  /**
  * Counts number of sentences in the text
@@ -902,7 +829,7 @@
  return splitIntoSentences(text).length;
  }
 
- var defaultDiacriticsRemovalMap = [
+ const defaultDiacriticsRemovalMap = [
  {
  base: 'A',
  letters: '\u0041\u24B6\uFF21\u00C0\u00C1\u00C2\u1EA6\u1EA4\u1EAA\u1EA8\u00C3\u0100\u0102\u1EB0\u1EAE\u1EB4\u1EB2\u0226\u01E0\u00C4\u01DE\u1EA2\u00C5\u01FA\u01CD\u0200\u0202\u1EA0\u1EAC\u1EB6\u1E00\u0104\u023A\u2C6F',
@@ -1121,12 +1048,12 @@
  *
  * @public exported from `@promptbook/utils`
  */
- var DIACRITIC_VARIANTS_LETTERS = {};
+ const DIACRITIC_VARIANTS_LETTERS = {};
  // tslint:disable-next-line: prefer-for-of
- for (var i = 0; i < defaultDiacriticsRemovalMap.length; i++) {
- var letters = defaultDiacriticsRemovalMap[i].letters;
+ for (let i = 0; i < defaultDiacriticsRemovalMap.length; i++) {
+ const letters = defaultDiacriticsRemovalMap[i].letters;
  // tslint:disable-next-line: prefer-for-of
- for (var j = 0; j < letters.length; j++) {
+ for (let j = 0; j < letters.length; j++) {
  DIACRITIC_VARIANTS_LETTERS[letters[j]] = defaultDiacriticsRemovalMap[i].base;
  }
  }
@@ -1155,7 +1082,7 @@
  */
  function removeDiacritics(input) {
  /*eslint no-control-regex: "off"*/
- return input.replace(/[^\u0000-\u007E]/g, function (a) {
+ return input.replace(/[^\u0000-\u007E]/g, (a) => {
  return DIACRITIC_VARIANTS_LETTERS[a] || a;
  });
  }
@@ -1171,7 +1098,9 @@
  function countWords(text) {
  text = text.replace(/[\p{Extended_Pictographic}]/gu, 'a');
  text = removeDiacritics(text);
- return text.split(/[^a-zа-я0-9]+/i).filter(function (word) { return word.length > 0; }).length;
+ // Add spaces before uppercase letters preceded by lowercase letters (for camelCase)
+ text = text.replace(/([a-z])([A-Z])/g, '$1 $2');
+ return text.split(/[^a-zа-я0-9]+/i).filter((word) => word.length > 0).length;
  }
 
  /**
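Note (illustrative, not part of the published diff): the new camelCase splitting changes what `countWords` reports for identifiers:

    countWords('helloWorld foo'); // 2 in 0.86.8 ('helloWorld', 'foo'); 3 in 0.86.22 ('hello', 'World', 'foo')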
@@ -1204,7 +1133,7 @@
  if (value === null || value === undefined || Number.isNaN(value)) {
  return { value: 0, isUncertain: true };
  }
- return { value: value };
+ return { value };
  }
 
  /**
@@ -1213,7 +1142,7 @@
  * @private within the repository, used only as internal helper for `OPENAI_MODELS`
  */
  function computeUsage(value) {
- var _a = __read(value.split(' / '), 2), price = _a[0], tokens = _a[1];
+ const [price, tokens] = value.split(' / ');
  return parseFloat(price.replace('$', '')) / parseFloat(tokens.replace('M tokens', '')) / 1000000;
  }
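Note (worked example, not part of the published diff): `computeUsage` converts a human-readable price string into a USD-per-token rate:

    computeUsage(`$2.00 / 1M tokens`);
    // price: parseFloat('2.00') -> 2, tokens: parseFloat('1') -> 1
    // 2 / 1 / 1000000 = 0.000002 USD per token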
1219
1148
 
@@ -1226,7 +1155,7 @@
1226
1155
  * @see https://openai.com/api/pricing/
1227
1156
  * @public exported from `@promptbook/openai`
1228
1157
  */
1229
- var OPENAI_MODELS = exportJson({
1158
+ const OPENAI_MODELS = exportJson({
1230
1159
  name: 'OPENAI_MODELS',
1231
1160
  value: [
1232
1161
  /*/
@@ -1247,8 +1176,8 @@
1247
1176
  modelTitle: 'davinci-002',
1248
1177
  modelName: 'davinci-002',
1249
1178
  pricing: {
1250
- prompt: computeUsage("$2.00 / 1M tokens"),
1251
- output: computeUsage("$2.00 / 1M tokens"), // <- not sure
1179
+ prompt: computeUsage(`$2.00 / 1M tokens`),
1180
+ output: computeUsage(`$2.00 / 1M tokens`), // <- not sure
1252
1181
  },
1253
1182
  },
1254
1183
  /**/
@@ -1264,8 +1193,8 @@
1264
1193
  modelTitle: 'gpt-3.5-turbo-16k',
1265
1194
  modelName: 'gpt-3.5-turbo-16k',
1266
1195
  pricing: {
1267
- prompt: computeUsage("$3.00 / 1M tokens"),
1268
- output: computeUsage("$4.00 / 1M tokens"),
1196
+ prompt: computeUsage(`$3.00 / 1M tokens`),
1197
+ output: computeUsage(`$4.00 / 1M tokens`),
1269
1198
  },
1270
1199
  },
1271
1200
  /**/
@@ -1287,8 +1216,8 @@
1287
1216
  modelTitle: 'gpt-4',
1288
1217
  modelName: 'gpt-4',
1289
1218
  pricing: {
1290
- prompt: computeUsage("$30.00 / 1M tokens"),
1291
- output: computeUsage("$60.00 / 1M tokens"),
1219
+ prompt: computeUsage(`$30.00 / 1M tokens`),
1220
+ output: computeUsage(`$60.00 / 1M tokens`),
1292
1221
  },
1293
1222
  },
1294
1223
  /**/
@@ -1298,8 +1227,8 @@
1298
1227
  modelTitle: 'gpt-4-32k',
1299
1228
  modelName: 'gpt-4-32k',
1300
1229
  pricing: {
1301
- prompt: computeUsage("$60.00 / 1M tokens"),
1302
- output: computeUsage("$120.00 / 1M tokens"),
1230
+ prompt: computeUsage(`$60.00 / 1M tokens`),
1231
+ output: computeUsage(`$120.00 / 1M tokens`),
1303
1232
  },
1304
1233
  },
1305
1234
  /**/
@@ -1320,8 +1249,8 @@
1320
1249
  modelTitle: 'gpt-4-turbo-2024-04-09',
1321
1250
  modelName: 'gpt-4-turbo-2024-04-09',
1322
1251
  pricing: {
1323
- prompt: computeUsage("$10.00 / 1M tokens"),
1324
- output: computeUsage("$30.00 / 1M tokens"),
1252
+ prompt: computeUsage(`$10.00 / 1M tokens`),
1253
+ output: computeUsage(`$30.00 / 1M tokens`),
1325
1254
  },
1326
1255
  },
1327
1256
  /**/
@@ -1331,8 +1260,8 @@
1331
1260
  modelTitle: 'gpt-3.5-turbo-1106',
1332
1261
  modelName: 'gpt-3.5-turbo-1106',
1333
1262
  pricing: {
1334
- prompt: computeUsage("$1.00 / 1M tokens"),
1335
- output: computeUsage("$2.00 / 1M tokens"),
1263
+ prompt: computeUsage(`$1.00 / 1M tokens`),
1264
+ output: computeUsage(`$2.00 / 1M tokens`),
1336
1265
  },
1337
1266
  },
1338
1267
  /**/
@@ -1342,8 +1271,8 @@
1342
1271
  modelTitle: 'gpt-4-turbo',
1343
1272
  modelName: 'gpt-4-turbo',
1344
1273
  pricing: {
1345
- prompt: computeUsage("$10.00 / 1M tokens"),
1346
- output: computeUsage("$30.00 / 1M tokens"),
1274
+ prompt: computeUsage(`$10.00 / 1M tokens`),
1275
+ output: computeUsage(`$30.00 / 1M tokens`),
1347
1276
  },
1348
1277
  },
1349
1278
  /**/
@@ -1353,8 +1282,8 @@
1353
1282
  modelTitle: 'gpt-3.5-turbo-instruct-0914',
1354
1283
  modelName: 'gpt-3.5-turbo-instruct-0914',
1355
1284
  pricing: {
1356
- prompt: computeUsage("$1.50 / 1M tokens"),
1357
- output: computeUsage("$2.00 / 1M tokens"), // <- For gpt-3.5-turbo-instruct
1285
+ prompt: computeUsage(`$1.50 / 1M tokens`),
1286
+ output: computeUsage(`$2.00 / 1M tokens`), // <- For gpt-3.5-turbo-instruct
1358
1287
  },
1359
1288
  },
1360
1289
  /**/
@@ -1364,8 +1293,8 @@
1364
1293
  modelTitle: 'gpt-3.5-turbo-instruct',
1365
1294
  modelName: 'gpt-3.5-turbo-instruct',
1366
1295
  pricing: {
1367
- prompt: computeUsage("$1.50 / 1M tokens"),
1368
- output: computeUsage("$2.00 / 1M tokens"),
1296
+ prompt: computeUsage(`$1.50 / 1M tokens`),
1297
+ output: computeUsage(`$2.00 / 1M tokens`),
1369
1298
  },
1370
1299
  },
1371
1300
  /**/
@@ -1381,8 +1310,8 @@
1381
1310
  modelTitle: 'gpt-3.5-turbo',
1382
1311
  modelName: 'gpt-3.5-turbo',
1383
1312
  pricing: {
1384
- prompt: computeUsage("$3.00 / 1M tokens"),
1385
- output: computeUsage("$6.00 / 1M tokens"), // <- Not sure, refer to gpt-3.5-turbo in Fine-tuning models
1313
+ prompt: computeUsage(`$3.00 / 1M tokens`),
1314
+ output: computeUsage(`$6.00 / 1M tokens`), // <- Not sure, refer to gpt-3.5-turbo in Fine-tuning models
1386
1315
  },
1387
1316
  },
1388
1317
  /**/
@@ -1392,8 +1321,8 @@
1392
1321
  modelTitle: 'gpt-3.5-turbo-0301',
1393
1322
  modelName: 'gpt-3.5-turbo-0301',
1394
1323
  pricing: {
1395
- prompt: computeUsage("$1.50 / 1M tokens"),
1396
- output: computeUsage("$2.00 / 1M tokens"),
1324
+ prompt: computeUsage(`$1.50 / 1M tokens`),
1325
+ output: computeUsage(`$2.00 / 1M tokens`),
1397
1326
  },
1398
1327
  },
1399
1328
  /**/
@@ -1403,8 +1332,8 @@
1403
1332
  modelTitle: 'babbage-002',
1404
1333
  modelName: 'babbage-002',
1405
1334
  pricing: {
1406
- prompt: computeUsage("$0.40 / 1M tokens"),
1407
- output: computeUsage("$0.40 / 1M tokens"), // <- Not sure
1335
+ prompt: computeUsage(`$0.40 / 1M tokens`),
1336
+ output: computeUsage(`$0.40 / 1M tokens`), // <- Not sure
1408
1337
  },
1409
1338
  },
1410
1339
  /**/
@@ -1414,8 +1343,8 @@
1414
1343
  modelTitle: 'gpt-4-1106-preview',
1415
1344
  modelName: 'gpt-4-1106-preview',
1416
1345
  pricing: {
1417
- prompt: computeUsage("$10.00 / 1M tokens"),
1418
- output: computeUsage("$30.00 / 1M tokens"),
1346
+ prompt: computeUsage(`$10.00 / 1M tokens`),
1347
+ output: computeUsage(`$30.00 / 1M tokens`),
1419
1348
  },
1420
1349
  },
1421
1350
  /**/
@@ -1425,8 +1354,8 @@
1425
1354
  modelTitle: 'gpt-4-0125-preview',
1426
1355
  modelName: 'gpt-4-0125-preview',
1427
1356
  pricing: {
1428
- prompt: computeUsage("$10.00 / 1M tokens"),
1429
- output: computeUsage("$30.00 / 1M tokens"),
1357
+ prompt: computeUsage(`$10.00 / 1M tokens`),
1358
+ output: computeUsage(`$30.00 / 1M tokens`),
1430
1359
  },
1431
1360
  },
1432
1361
  /**/
@@ -1442,8 +1371,8 @@
1442
1371
  modelTitle: 'gpt-3.5-turbo-0125',
1443
1372
  modelName: 'gpt-3.5-turbo-0125',
1444
1373
  pricing: {
1445
- prompt: computeUsage("$0.50 / 1M tokens"),
1446
- output: computeUsage("$1.50 / 1M tokens"),
1374
+ prompt: computeUsage(`$0.50 / 1M tokens`),
1375
+ output: computeUsage(`$1.50 / 1M tokens`),
1447
1376
  },
1448
1377
  },
1449
1378
  /**/
@@ -1453,8 +1382,8 @@
1453
1382
  modelTitle: 'gpt-4-turbo-preview',
1454
1383
  modelName: 'gpt-4-turbo-preview',
1455
1384
  pricing: {
1456
- prompt: computeUsage("$10.00 / 1M tokens"),
1457
- output: computeUsage("$30.00 / 1M tokens"), // <- Not sure, just for gpt-4-turbo
1385
+ prompt: computeUsage(`$10.00 / 1M tokens`),
1386
+ output: computeUsage(`$30.00 / 1M tokens`), // <- Not sure, just for gpt-4-turbo
1458
1387
  },
1459
1388
  },
1460
1389
  /**/
@@ -1464,7 +1393,7 @@
1464
1393
  modelTitle: 'text-embedding-3-large',
1465
1394
  modelName: 'text-embedding-3-large',
1466
1395
  pricing: {
1467
- prompt: computeUsage("$0.13 / 1M tokens"),
1396
+ prompt: computeUsage(`$0.13 / 1M tokens`),
1468
1397
  // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
1469
1398
  output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
1470
1399
  },
@@ -1476,7 +1405,7 @@
1476
1405
  modelTitle: 'text-embedding-3-small',
1477
1406
  modelName: 'text-embedding-3-small',
1478
1407
  pricing: {
1479
- prompt: computeUsage("$0.02 / 1M tokens"),
1408
+ prompt: computeUsage(`$0.02 / 1M tokens`),
1480
1409
  // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
1481
1410
  output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
1482
1411
  },
@@ -1488,8 +1417,8 @@
1488
1417
  modelTitle: 'gpt-3.5-turbo-0613',
1489
1418
  modelName: 'gpt-3.5-turbo-0613',
1490
1419
  pricing: {
1491
- prompt: computeUsage("$1.50 / 1M tokens"),
1492
- output: computeUsage("$2.00 / 1M tokens"),
1420
+ prompt: computeUsage(`$1.50 / 1M tokens`),
1421
+ output: computeUsage(`$2.00 / 1M tokens`),
1493
1422
  },
1494
1423
  },
1495
1424
  /**/
@@ -1499,7 +1428,7 @@
1499
1428
  modelTitle: 'text-embedding-ada-002',
1500
1429
  modelName: 'text-embedding-ada-002',
1501
1430
  pricing: {
1502
- prompt: computeUsage("$0.1 / 1M tokens"),
1431
+ prompt: computeUsage(`$0.1 / 1M tokens`),
1503
1432
  // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
1504
1433
  output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
1505
1434
  },
@@ -1529,8 +1458,8 @@
1529
1458
  modelTitle: 'gpt-4o-2024-05-13',
1530
1459
  modelName: 'gpt-4o-2024-05-13',
1531
1460
  pricing: {
1532
- prompt: computeUsage("$5.00 / 1M tokens"),
1533
- output: computeUsage("$15.00 / 1M tokens"),
1461
+ prompt: computeUsage(`$5.00 / 1M tokens`),
1462
+ output: computeUsage(`$15.00 / 1M tokens`),
1534
1463
  },
1535
1464
  //TODO: [main] !!3 Add gpt-4o-mini-2024-07-18 and all others to be up to date
1536
1465
  },
@@ -1541,8 +1470,8 @@
1541
1470
  modelTitle: 'gpt-4o',
1542
1471
  modelName: 'gpt-4o',
1543
1472
  pricing: {
1544
- prompt: computeUsage("$5.00 / 1M tokens"),
1545
- output: computeUsage("$15.00 / 1M tokens"),
1473
+ prompt: computeUsage(`$5.00 / 1M tokens`),
1474
+ output: computeUsage(`$15.00 / 1M tokens`),
1546
1475
  },
1547
1476
  },
1548
1477
  /**/
@@ -1552,8 +1481,8 @@
1552
1481
  modelTitle: 'o1-preview',
1553
1482
  modelName: 'o1-preview',
1554
1483
  pricing: {
1555
- prompt: computeUsage("$15.00 / 1M tokens"),
1556
- output: computeUsage("$60.00 / 1M tokens"),
1484
+ prompt: computeUsage(`$15.00 / 1M tokens`),
1485
+ output: computeUsage(`$60.00 / 1M tokens`),
1557
1486
  },
1558
1487
  },
1559
1488
  /**/
@@ -1564,8 +1493,8 @@
1564
1493
  modelName: 'o1-preview-2024-09-12',
1565
1494
  // <- TODO: [💩] Some better system to organize theese date suffixes and versions
1566
1495
  pricing: {
1567
- prompt: computeUsage("$15.00 / 1M tokens"),
1568
- output: computeUsage("$60.00 / 1M tokens"),
1496
+ prompt: computeUsage(`$15.00 / 1M tokens`),
1497
+ output: computeUsage(`$60.00 / 1M tokens`),
1569
1498
  },
1570
1499
  },
1571
1500
  /**/
@@ -1575,8 +1504,8 @@
1575
1504
  modelTitle: 'o1-mini',
1576
1505
  modelName: 'o1-mini',
1577
1506
  pricing: {
1578
- prompt: computeUsage("$3.00 / 1M tokens"),
1579
- output: computeUsage("$12.00 / 1M tokens"),
1507
+ prompt: computeUsage(`$3.00 / 1M tokens`),
1508
+ output: computeUsage(`$12.00 / 1M tokens`),
1580
1509
  },
1581
1510
  },
1582
1511
  /**/
@@ -1586,8 +1515,8 @@
1586
1515
  modelTitle: 'o1',
1587
1516
  modelName: 'o1',
1588
1517
  pricing: {
1589
- prompt: computeUsage("$3.00 / 1M tokens"),
1590
- output: computeUsage("$12.00 / 1M tokens"),
1518
+ prompt: computeUsage(`$3.00 / 1M tokens`),
1519
+ output: computeUsage(`$12.00 / 1M tokens`),
1591
1520
  // <- TODO: !! Unsure, check the pricing
1592
1521
  },
1593
1522
  },
@@ -1598,8 +1527,8 @@
1598
1527
  modelTitle: 'o3-mini',
1599
1528
  modelName: 'o3-mini',
1600
1529
  pricing: {
1601
- prompt: computeUsage("$3.00 / 1M tokens"),
1602
- output: computeUsage("$12.00 / 1M tokens"),
1530
+ prompt: computeUsage(`$3.00 / 1M tokens`),
1531
+ output: computeUsage(`$12.00 / 1M tokens`),
1603
1532
  // <- TODO: !! Unsure, check the pricing
1604
1533
  },
1605
1534
  },
@@ -1610,8 +1539,8 @@
1610
1539
  modelTitle: 'o1-mini-2024-09-12',
1611
1540
  modelName: 'o1-mini-2024-09-12',
1612
1541
  pricing: {
1613
- prompt: computeUsage("$3.00 / 1M tokens"),
1614
- output: computeUsage("$12.00 / 1M tokens"),
1542
+ prompt: computeUsage(`$3.00 / 1M tokens`),
1543
+ output: computeUsage(`$12.00 / 1M tokens`),
1615
1544
  },
1616
1545
  },
1617
1546
  /**/
@@ -1621,8 +1550,8 @@
1621
1550
  modelTitle: 'gpt-3.5-turbo-16k-0613',
1622
1551
  modelName: 'gpt-3.5-turbo-16k-0613',
1623
1552
  pricing: {
1624
- prompt: computeUsage("$3.00 / 1M tokens"),
1625
- output: computeUsage("$4.00 / 1M tokens"),
1553
+ prompt: computeUsage(`$3.00 / 1M tokens`),
1554
+ output: computeUsage(`$4.00 / 1M tokens`),
1626
1555
  },
1627
1556
  },
1628
1557
  /**/
@@ -1661,10 +1590,10 @@
1661
1590
  if (((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.prompt_tokens) === undefined) {
1662
1591
  throw new PipelineExecutionError('In OpenAI response `usage.prompt_tokens` not defined');
1663
1592
  }
1664
- var inputTokens = rawResponse.usage.prompt_tokens;
1665
- var outputTokens = ((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completion_tokens) || 0;
1666
- var modelInfo = OPENAI_MODELS.find(function (model) { return model.modelName === rawResponse.model; });
1667
- var price;
1593
+ const inputTokens = rawResponse.usage.prompt_tokens;
1594
+ const outputTokens = ((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completion_tokens) || 0;
1595
+ const modelInfo = OPENAI_MODELS.find((model) => model.modelName === rawResponse.model);
1596
+ let price;
1668
1597
  if (modelInfo === undefined || modelInfo.pricing === undefined) {
1669
1598
  price = uncertainNumber();
1670
1599
  }
@@ -1672,9 +1601,15 @@
1672
1601
  price = uncertainNumber(inputTokens * modelInfo.pricing.prompt + outputTokens * modelInfo.pricing.output);
1673
1602
  }
1674
1603
  return {
1675
- price: price,
1676
- input: __assign({ tokensCount: uncertainNumber(rawResponse.usage.prompt_tokens) }, computeUsageCounts(promptContent)),
1677
- output: __assign({ tokensCount: uncertainNumber(outputTokens) }, computeUsageCounts(resultContent)),
1604
+ price,
1605
+ input: {
1606
+ tokensCount: uncertainNumber(rawResponse.usage.prompt_tokens),
1607
+ ...computeUsageCounts(promptContent),
1608
+ },
1609
+ output: {
1610
+ tokensCount: uncertainNumber(outputTokens),
1611
+ ...computeUsageCounts(resultContent),
1612
+ },
1678
1613
  };
1679
1614
  }
1680
1615
  /**
@@ -1686,75 +1621,55 @@
1686
1621
  *
1687
1622
  * @public exported from `@promptbook/openai`
1688
1623
  */
1689
- var OpenAiExecutionTools = /** @class */ (function () {
1624
+ class OpenAiExecutionTools {
1690
1625
  /**
1691
1626
  * Creates OpenAI Execution Tools.
1692
1627
  *
1693
1628
  * @param options which are relevant are directly passed to the OpenAI client
1694
1629
  */
1695
- function OpenAiExecutionTools(options) {
1630
+ constructor(options) {
1696
1631
  this.options = options;
1697
1632
  /**
1698
1633
  * OpenAI API client.
1699
1634
  */
1700
1635
  this.client = null;
1701
1636
  }
1702
- Object.defineProperty(OpenAiExecutionTools.prototype, "title", {
1703
- get: function () {
1704
- return 'OpenAI';
1705
- },
1706
- enumerable: false,
1707
- configurable: true
1708
- });
1709
- Object.defineProperty(OpenAiExecutionTools.prototype, "description", {
1710
- get: function () {
1711
- return 'Use all models provided by OpenAI';
1712
- },
1713
- enumerable: false,
1714
- configurable: true
1715
- });
1716
- OpenAiExecutionTools.prototype.getClient = function () {
1717
- return __awaiter(this, void 0, void 0, function () {
1718
- var openAiOptions;
1719
- return __generator(this, function (_a) {
1720
- if (this.client === null) {
1721
- openAiOptions = __assign({}, this.options);
1722
- delete openAiOptions.isVerbose;
1723
- delete openAiOptions.userId;
1724
- this.client = new OpenAI__default["default"](openAiOptions);
1725
- }
1726
- return [2 /*return*/, this.client];
1727
- });
1728
- });
1729
- };
1637
+ get title() {
1638
+ return 'OpenAI';
1639
+ }
1640
+ get description() {
1641
+ return 'Use all models provided by OpenAI';
1642
+ }
1643
+ async getClient() {
1644
+ if (this.client === null) {
1645
+ // Note: Passing only OpenAI relevant options to OpenAI constructor
1646
+ const openAiOptions = { ...this.options };
1647
+ delete openAiOptions.isVerbose;
1648
+ delete openAiOptions.userId;
1649
+ this.client = new OpenAI__default["default"](openAiOptions);
1650
+ }
1651
+ return this.client;
1652
+ }
1730
1653
  /**
1731
1654
  * Create (sub)tools for calling OpenAI API Assistants
1732
1655
  *
1733
1656
  * @param assistantId Which assistant to use
1734
1657
  * @returns Tools for calling OpenAI API Assistants with same token
1735
1658
  */
1736
- OpenAiExecutionTools.prototype.createAssistantSubtools = function (assistantId) {
1737
- return new OpenAiAssistantExecutionTools(__assign(__assign({}, this.options), { assistantId: assistantId }));
1738
- };
1659
+ createAssistantSubtools(assistantId) {
1660
+ return new OpenAiAssistantExecutionTools({ ...this.options, assistantId });
1661
+ }
1739
1662
  /**
1740
1663
  * Check the `options` passed to `constructor`
1741
1664
  */
1742
- OpenAiExecutionTools.prototype.checkConfiguration = function () {
1743
- return __awaiter(this, void 0, void 0, function () {
1744
- return __generator(this, function (_a) {
1745
- switch (_a.label) {
1746
- case 0: return [4 /*yield*/, this.getClient()];
1747
- case 1:
1748
- _a.sent();
1749
- return [2 /*return*/];
1750
- }
1751
- });
1752
- });
1753
- };
1665
+ async checkConfiguration() {
1666
+ await this.getClient();
1667
+ // TODO: [🎍] Do here a real check that API is online, working and API key is correct
1668
+ }
1754
1669
  /**
1755
1670
  * List all available OpenAI models that can be used
1756
1671
  */
1757
- OpenAiExecutionTools.prototype.listModels = function () {
1672
+ listModels() {
1758
1673
  /*
1759
1674
  Note: Dynamic lising of the models
1760
1675
  const models = await this.openai.models.list({});
@@ -1763,301 +1678,273 @@
1763
1678
  console.log(models.data);
1764
1679
  */
1765
1680
  return OPENAI_MODELS;
1766
- };
1681
+ }
1767
1682
  /**
1768
1683
  * Calls OpenAI API to use a chat model.
1769
1684
  */
1770
- OpenAiExecutionTools.prototype.callChatModel = function (prompt) {
1685
+ async callChatModel(prompt) {
1771
1686
  var _a;
1772
- return __awaiter(this, void 0, void 0, function () {
1773
- var content, parameters, modelRequirements, format, client, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
1774
- var _this = this;
1775
- return __generator(this, function (_b) {
1776
- switch (_b.label) {
1777
- case 0:
1778
- if (this.options.isVerbose) {
1779
- console.info('💬 OpenAI callChatModel call', { prompt: prompt });
1780
- }
1781
- content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements, format = prompt.format;
1782
- return [4 /*yield*/, this.getClient()];
1783
- case 1:
1784
- client = _b.sent();
1785
- // TODO: [] Use here more modelRequirements
1786
- if (modelRequirements.modelVariant !== 'CHAT') {
1787
- throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
1788
- }
1789
- modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
1790
- modelSettings = {
1791
- model: modelName,
1792
- max_tokens: modelRequirements.maxTokens,
1793
- // <- TODO: [🌾] Make some global max cap for maxTokens
1794
- temperature: modelRequirements.temperature,
1795
- // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
1796
- // <- Note: [🧆]
1797
- };
1798
- if (format === 'JSON') {
1799
- modelSettings.response_format = {
1800
- type: 'json_object',
1801
- };
1802
- }
1803
- rawPromptContent = templateParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
1804
- rawRequest = __assign(__assign({}, modelSettings), { messages: __spreadArray(__spreadArray([], __read((modelRequirements.systemMessage === undefined
1805
- ? []
1806
- : [
1807
- {
1808
- role: 'system',
1809
- content: modelRequirements.systemMessage,
1810
- },
1811
- ])), false), [
1812
- {
1813
- role: 'user',
1814
- content: rawPromptContent,
1815
- },
1816
- ], false), user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString() });
1817
- start = $getCurrentDate();
1818
- if (this.options.isVerbose) {
1819
- console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
1820
- }
1821
- return [4 /*yield*/, client.chat.completions.create(rawRequest).catch(function (error) {
1822
- if (_this.options.isVerbose) {
1823
- console.info(colors__default["default"].bgRed('error'), error);
1824
- }
1825
- throw error;
1826
- })];
1827
- case 2:
1828
- rawResponse = _b.sent();
1829
- if (this.options.isVerbose) {
1830
- console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
1831
- }
1832
- if (!rawResponse.choices[0]) {
1833
- throw new PipelineExecutionError('No choises from OpenAI');
1834
- }
1835
- if (rawResponse.choices.length > 1) {
1836
- // TODO: This should be maybe only warning
1837
- throw new PipelineExecutionError('More than one choise from OpenAI');
1838
- }
1839
- resultContent = rawResponse.choices[0].message.content;
1840
- // eslint-disable-next-line prefer-const
1841
- complete = $getCurrentDate();
1842
- usage = computeOpenAiUsage(content || '', resultContent || '', rawResponse);
1843
- if (resultContent === null) {
1844
- throw new PipelineExecutionError('No response message from OpenAI');
1845
- }
1846
- return [2 /*return*/, exportJson({
1847
- name: 'promptResult',
1848
- message: "Result of `OpenAiExecutionTools.callChatModel`",
1849
- order: [],
1850
- value: {
1851
- content: resultContent,
1852
- modelName: rawResponse.model || modelName,
1853
- timing: {
1854
- start: start,
1855
- complete: complete,
1856
- },
1857
- usage: usage,
1858
- rawPromptContent: rawPromptContent,
1859
- rawRequest: rawRequest,
1860
- rawResponse: rawResponse,
1861
- // <- [🗯]
1862
- },
1863
- })];
1864
- }
1865
- });
1687
+ if (this.options.isVerbose) {
1688
+ console.info('💬 OpenAI callChatModel call', { prompt });
1689
+ }
1690
+ const { content, parameters, modelRequirements, format } = prompt;
1691
+ const client = await this.getClient();
1692
+ // TODO: [☂] Use here more modelRequirements
1693
+ if (modelRequirements.modelVariant !== 'CHAT') {
1694
+ throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
1695
+ }
1696
+ const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
1697
+ const modelSettings = {
1698
+ model: modelName,
1699
+ max_tokens: modelRequirements.maxTokens,
1700
+ // <- TODO: [🌾] Make some global max cap for maxTokens
1701
+ temperature: modelRequirements.temperature,
1702
+ // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
1703
+ // <- Note: [🧆]
1704
+ }; // <- TODO: [💩] Guard here types better
1705
+ if (format === 'JSON') {
1706
+ modelSettings.response_format = {
1707
+ type: 'json_object',
1708
+ };
1709
+ }
1710
+ // <- TODO: [🚸] Not all models are compatible with JSON mode
1711
+ // > 'response_format' of type 'json_object' is not supported with this model.
1712
+ const rawPromptContent = templateParameters(content, { ...parameters, modelName });
1713
+ const rawRequest = {
1714
+ ...modelSettings,
1715
+ messages: [
1716
+ ...(modelRequirements.systemMessage === undefined
1717
+ ? []
1718
+ : [
1719
+ {
1720
+ role: 'system',
1721
+ content: modelRequirements.systemMessage,
1722
+ },
1723
+ ]),
1724
+ {
1725
+ role: 'user',
1726
+ content: rawPromptContent,
1727
+ },
1728
+ ],
1729
+ user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString(),
1730
+ };
1731
+ const start = $getCurrentDate();
1732
+ let complete;
1733
+ if (this.options.isVerbose) {
1734
+ console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
1735
+ }
1736
+ const rawResponse = await client.chat.completions.create(rawRequest).catch((error) => {
1737
+ if (this.options.isVerbose) {
1738
+ console.info(colors__default["default"].bgRed('error'), error);
1739
+ }
1740
+ throw error;
1866
1741
  });
1867
- };
1742
+ if (this.options.isVerbose) {
1743
+ console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
1744
+ }
1745
+ if (!rawResponse.choices[0]) {
1746
+ throw new PipelineExecutionError('No choices from OpenAI');
1747
+ }
1748
+ if (rawResponse.choices.length > 1) {
1749
+ // TODO: This should maybe be only a warning
1750
+ throw new PipelineExecutionError('More than one choice from OpenAI');
1751
+ }
1752
+ const resultContent = rawResponse.choices[0].message.content;
1753
+ // eslint-disable-next-line prefer-const
1754
+ complete = $getCurrentDate();
1755
+ const usage = computeOpenAiUsage(content || '', resultContent || '', rawResponse);
1756
+ if (resultContent === null) {
1757
+ throw new PipelineExecutionError('No response message from OpenAI');
1758
+ }
1759
+ return exportJson({
1760
+ name: 'promptResult',
1761
+ message: `Result of \`OpenAiExecutionTools.callChatModel\``,
1762
+ order: [],
1763
+ value: {
1764
+ content: resultContent,
1765
+ modelName: rawResponse.model || modelName,
1766
+ timing: {
1767
+ start,
1768
+ complete,
1769
+ },
1770
+ usage,
1771
+ rawPromptContent,
1772
+ rawRequest,
1773
+ rawResponse,
1774
+ // <- [🗯]
1775
+ },
1776
+ });
1777
+ }
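For orientation, a minimal usage sketch of the modernized `callChatModel` above, assuming a hypothetical `apiKey`, the `{parameter}` template syntax, and an enclosing async context; none of these values come from this diff:

    const tools = createOpenAiExecutionTools({ apiKey: 'sk-...', isVerbose: false }); // <- apiKey is hypothetical
    const chatResult = await tools.callChatModel({
        content: 'Summarize {topic} in one sentence.', // <- interpolated by templateParameters
        parameters: { topic: 'prompt engineering' },
        modelRequirements: { modelVariant: 'CHAT', systemMessage: 'You are terse.' },
        // format: 'JSON', // <- optional; switches response_format to json_object
    });
    console.info(chatResult); // <- exportJson result carrying content, usage, timing and the raw request/response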
1868
1778
  /**
1869
1779
  * Calls OpenAI API to use a completion model.
1870
1780
  */
1871
- OpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
1781
+ async callCompletionModel(prompt) {
1872
1782
  var _a;
1873
- return __awaiter(this, void 0, void 0, function () {
1874
- var content, parameters, modelRequirements, client, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
1875
- var _this = this;
1876
- return __generator(this, function (_b) {
1877
- switch (_b.label) {
1878
- case 0:
1879
- if (this.options.isVerbose) {
1880
- console.info('🖋 OpenAI callCompletionModel call', { prompt: prompt });
1881
- }
1882
- content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
1883
- return [4 /*yield*/, this.getClient()];
1884
- case 1:
1885
- client = _b.sent();
1886
- // TODO: [☂] Use here more modelRequirements
1887
- if (modelRequirements.modelVariant !== 'COMPLETION') {
1888
- throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
1889
- }
1890
- modelName = modelRequirements.modelName || this.getDefaultCompletionModel().modelName;
1891
- modelSettings = {
1892
- model: modelName,
1893
- max_tokens: modelRequirements.maxTokens || 2000,
1894
- // <- TODO: [🌾] Make some global max cap for maxTokens
1895
- temperature: modelRequirements.temperature,
1896
- // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
1897
- // <- Note: [🧆]
1898
- };
1899
- rawPromptContent = templateParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
1900
- rawRequest = __assign(__assign({}, modelSettings), { prompt: rawPromptContent, user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString() });
1901
- start = $getCurrentDate();
1902
- if (this.options.isVerbose) {
1903
- console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
1904
- }
1905
- return [4 /*yield*/, client.completions.create(rawRequest).catch(function (error) {
1906
- if (_this.options.isVerbose) {
1907
- console.info(colors__default["default"].bgRed('error'), error);
1908
- }
1909
- throw error;
1910
- })];
1911
- case 2:
1912
- rawResponse = _b.sent();
1913
- if (this.options.isVerbose) {
1914
- console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
1915
- }
1916
- if (!rawResponse.choices[0]) {
1917
- throw new PipelineExecutionError('No choises from OpenAI');
1918
- }
1919
- if (rawResponse.choices.length > 1) {
1920
- // TODO: This should be maybe only warning
1921
- throw new PipelineExecutionError('More than one choise from OpenAI');
1922
- }
1923
- resultContent = rawResponse.choices[0].text;
1924
- // eslint-disable-next-line prefer-const
1925
- complete = $getCurrentDate();
1926
- usage = computeOpenAiUsage(content || '', resultContent || '', rawResponse);
1927
- return [2 /*return*/, exportJson({
1928
- name: 'promptResult',
1929
- message: "Result of `OpenAiExecutionTools.callCompletionModel`",
1930
- order: [],
1931
- value: {
1932
- content: resultContent,
1933
- modelName: rawResponse.model || modelName,
1934
- timing: {
1935
- start: start,
1936
- complete: complete,
1937
- },
1938
- usage: usage,
1939
- rawPromptContent: rawPromptContent,
1940
- rawRequest: rawRequest,
1941
- rawResponse: rawResponse,
1942
- // <- [🗯]
1943
- },
1944
- })];
1945
- }
1946
- });
1783
+ if (this.options.isVerbose) {
1784
+ console.info('🖋 OpenAI callCompletionModel call', { prompt });
1785
+ }
1786
+ const { content, parameters, modelRequirements } = prompt;
1787
+ const client = await this.getClient();
1788
+ // TODO: [☂] Use here more modelRequirements
1789
+ if (modelRequirements.modelVariant !== 'COMPLETION') {
1790
+ throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
1791
+ }
1792
+ const modelName = modelRequirements.modelName || this.getDefaultCompletionModel().modelName;
1793
+ const modelSettings = {
1794
+ model: modelName,
1795
+ max_tokens: modelRequirements.maxTokens || 2000,
1796
+ // <- TODO: [🌾] Make some global max cap for maxTokens
1797
+ temperature: modelRequirements.temperature,
1798
+ // <- TODO: [🈁] Use `seed` here AND/OR use `isDeterministic` for the entire execution tools
1799
+ // <- Note: [🧆]
1800
+ };
1801
+ const rawPromptContent = templateParameters(content, { ...parameters, modelName });
1802
+ const rawRequest = {
1803
+ ...modelSettings,
1804
+ prompt: rawPromptContent,
1805
+ user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString(),
1806
+ };
1807
+ const start = $getCurrentDate();
1808
+ let complete;
1809
+ if (this.options.isVerbose) {
1810
+ console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
1811
+ }
1812
+ const rawResponse = await client.completions.create(rawRequest).catch((error) => {
1813
+ if (this.options.isVerbose) {
1814
+ console.info(colors__default["default"].bgRed('error'), error);
1815
+ }
1816
+ throw error;
1947
1817
  });
1948
- };
1818
+ if (this.options.isVerbose) {
1819
+ console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
1820
+ }
1821
+ if (!rawResponse.choices[0]) {
1822
+ throw new PipelineExecutionError('No choices from OpenAI');
1823
+ }
1824
+ if (rawResponse.choices.length > 1) {
1825
+ // TODO: This should maybe be only a warning
1826
+ throw new PipelineExecutionError('More than one choice from OpenAI');
1827
+ }
1828
+ const resultContent = rawResponse.choices[0].text;
1829
+ // eslint-disable-next-line prefer-const
1830
+ complete = $getCurrentDate();
1831
+ const usage = computeOpenAiUsage(content || '', resultContent || '', rawResponse);
1832
+ return exportJson({
1833
+ name: 'promptResult',
1834
+ message: `Result of \`OpenAiExecutionTools.callCompletionModel\``,
1835
+ order: [],
1836
+ value: {
1837
+ content: resultContent,
1838
+ modelName: rawResponse.model || modelName,
1839
+ timing: {
1840
+ start,
1841
+ complete,
1842
+ },
1843
+ usage,
1844
+ rawPromptContent,
1845
+ rawRequest,
1846
+ rawResponse,
1847
+ // <- [🗯]
1848
+ },
1849
+ });
1850
+ }
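The completion variant mirrors the chat shape; a sketch under the same assumptions, noting the `COMPLETION` guard and the 2000-token fallback visible above:

    const completionResult = await tools.callCompletionModel({
        content: 'Once upon a time',
        parameters: {},
        modelRequirements: { modelVariant: 'COMPLETION', maxTokens: 64 }, // <- omitting maxTokens falls back to 2000
    });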
1949
1851
  /**
1950
1852
  * Calls OpenAI API to use an embedding model
1951
1853
  */
1952
- OpenAiExecutionTools.prototype.callEmbeddingModel = function (prompt) {
1953
- return __awaiter(this, void 0, void 0, function () {
1954
- var content, parameters, modelRequirements, client, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
1955
- var _this = this;
1956
- return __generator(this, function (_a) {
1957
- switch (_a.label) {
1958
- case 0:
1959
- if (this.options.isVerbose) {
1960
- console.info('🖋 OpenAI embedding call', { prompt: prompt });
1961
- }
1962
- content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
1963
- return [4 /*yield*/, this.getClient()];
1964
- case 1:
1965
- client = _a.sent();
1966
- // TODO: [☂] Use here more modelRequirements
1967
- if (modelRequirements.modelVariant !== 'EMBEDDING') {
1968
- throw new PipelineExecutionError('Use embed only for EMBEDDING variant');
1969
- }
1970
- modelName = modelRequirements.modelName || this.getDefaultEmbeddingModel().modelName;
1971
- rawPromptContent = templateParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
1972
- rawRequest = {
1973
- input: rawPromptContent,
1974
- model: modelName,
1975
- };
1976
- start = $getCurrentDate();
1977
- if (this.options.isVerbose) {
1978
- console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
1979
- }
1980
- return [4 /*yield*/, client.embeddings.create(rawRequest).catch(function (error) {
1981
- if (_this.options.isVerbose) {
1982
- console.info(colors__default["default"].bgRed('error'), error);
1983
- }
1984
- throw error;
1985
- })];
1986
- case 2:
1987
- rawResponse = _a.sent();
1988
- if (this.options.isVerbose) {
1989
- console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
1990
- }
1991
- if (rawResponse.data.length !== 1) {
1992
- throw new PipelineExecutionError("Expected exactly 1 data item in response, got ".concat(rawResponse.data.length));
1993
- }
1994
- resultContent = rawResponse.data[0].embedding;
1995
- // eslint-disable-next-line prefer-const
1996
- complete = $getCurrentDate();
1997
- usage = computeOpenAiUsage(content || '', '',
1998
- // <- Note: Embedding does not have result content
1999
- rawResponse);
2000
- return [2 /*return*/, exportJson({
2001
- name: 'promptResult',
2002
- message: "Result of `OpenAiExecutionTools.callEmbeddingModel`",
2003
- order: [],
2004
- value: {
2005
- content: resultContent,
2006
- modelName: rawResponse.model || modelName,
2007
- timing: {
2008
- start: start,
2009
- complete: complete,
2010
- },
2011
- usage: usage,
2012
- rawPromptContent: rawPromptContent,
2013
- rawRequest: rawRequest,
2014
- rawResponse: rawResponse,
2015
- // <- [🗯]
2016
- },
2017
- })];
2018
- }
2019
- });
1854
+ async callEmbeddingModel(prompt) {
1855
+ if (this.options.isVerbose) {
1856
+ console.info('🖋 OpenAI embedding call', { prompt });
1857
+ }
1858
+ const { content, parameters, modelRequirements } = prompt;
1859
+ const client = await this.getClient();
1860
+ // TODO: [☂] Use here more modelRequirements
1861
+ if (modelRequirements.modelVariant !== 'EMBEDDING') {
1862
+ throw new PipelineExecutionError('Use embed only for EMBEDDING variant');
1863
+ }
1864
+ const modelName = modelRequirements.modelName || this.getDefaultEmbeddingModel().modelName;
1865
+ const rawPromptContent = templateParameters(content, { ...parameters, modelName });
1866
+ const rawRequest = {
1867
+ input: rawPromptContent,
1868
+ model: modelName,
1869
+ };
1870
+ const start = $getCurrentDate();
1871
+ let complete;
1872
+ if (this.options.isVerbose) {
1873
+ console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
1874
+ }
1875
+ const rawResponse = await client.embeddings.create(rawRequest).catch((error) => {
1876
+ if (this.options.isVerbose) {
1877
+ console.info(colors__default["default"].bgRed('error'), error);
1878
+ }
1879
+ throw error;
2020
1880
  });
2021
- };
1881
+ if (this.options.isVerbose) {
1882
+ console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
1883
+ }
1884
+ if (rawResponse.data.length !== 1) {
1885
+ throw new PipelineExecutionError(`Expected exactly 1 data item in response, got ${rawResponse.data.length}`);
1886
+ }
1887
+ const resultContent = rawResponse.data[0].embedding;
1888
+ // eslint-disable-next-line prefer-const
1889
+ complete = $getCurrentDate();
1890
+ const usage = computeOpenAiUsage(content || '', '',
1891
+ // <- Note: Embedding does not have result content
1892
+ rawResponse);
1893
+ return exportJson({
1894
+ name: 'promptResult',
1895
+ message: `Result of \`OpenAiExecutionTools.callEmbeddingModel\``,
1896
+ order: [],
1897
+ value: {
1898
+ content: resultContent,
1899
+ modelName: rawResponse.model || modelName,
1900
+ timing: {
1901
+ start,
1902
+ complete,
1903
+ },
1904
+ usage,
1905
+ rawPromptContent,
1906
+ rawRequest,
1907
+ rawResponse,
1908
+ // <- [🗯]
1909
+ },
1910
+ });
1911
+ }
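The embedding variant returns the raw vector as its result content; a sketch with a hypothetical input:

    const embeddingResult = await tools.callEmbeddingModel({
        content: 'text to embed',
        parameters: {},
        modelRequirements: { modelVariant: 'EMBEDDING' },
    });
    // The result content is the number[] taken from rawResponse.data[0].embedding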
2022
1912
  // <- Note: [🤖] callXxxModel
2023
1913
  /**
2024
1914
  * Get the model that should be used as default
2025
1915
  */
2026
- OpenAiExecutionTools.prototype.getDefaultModel = function (defaultModelName) {
2027
- var model = OPENAI_MODELS.find(function (_a) {
2028
- var modelName = _a.modelName;
2029
- return modelName === defaultModelName;
2030
- });
1916
+ getDefaultModel(defaultModelName) {
1917
+ const model = OPENAI_MODELS.find(({ modelName }) => modelName === defaultModelName);
2031
1918
  if (model === undefined) {
2032
- throw new UnexpectedError(spaceTrim__default["default"](function (block) {
2033
- return "\n Cannot find model in OpenAI models with name \"".concat(defaultModelName, "\" which should be used as default.\n\n Available models:\n ").concat(block(OPENAI_MODELS.map(function (_a) {
2034
- var modelName = _a.modelName;
2035
- return "- \"".concat(modelName, "\"");
2036
- }).join('\n')), "\n\n ");
2037
- }));
1919
+ throw new UnexpectedError(spaceTrim__default["default"]((block) => `
1920
+ Cannot find model in OpenAI models with name "${defaultModelName}" which should be used as default.
1921
+
1922
+ Available models:
1923
+ ${block(OPENAI_MODELS.map(({ modelName }) => `- "${modelName}"`).join('\n'))}
1924
+
1925
+ `));
2038
1926
  }
2039
1927
  return model;
2040
- };
1928
+ }
2041
1929
  /**
2042
1930
  * Default model for chat variant.
2043
1931
  */
2044
- OpenAiExecutionTools.prototype.getDefaultChatModel = function () {
1932
+ getDefaultChatModel() {
2045
1933
  return this.getDefaultModel('gpt-4o');
2046
- };
1934
+ }
2047
1935
  /**
2048
1936
  * Default model for completion variant.
2049
1937
  */
2050
- OpenAiExecutionTools.prototype.getDefaultCompletionModel = function () {
1938
+ getDefaultCompletionModel() {
2051
1939
  return this.getDefaultModel('gpt-3.5-turbo-instruct');
2052
- };
1940
+ }
2053
1941
  /**
2054
1942
  * Default model for embedding variant.
2055
1943
  */
2056
- OpenAiExecutionTools.prototype.getDefaultEmbeddingModel = function () {
1944
+ getDefaultEmbeddingModel() {
2057
1945
  return this.getDefaultModel('text-embedding-3-large');
2058
- };
2059
- return OpenAiExecutionTools;
2060
- }());
1946
+ }
1947
+ }
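As the getters above encode, omitting `modelRequirements.modelName` resolves to a per-variant default from `OPENAI_MODELS` in this version:

    // Fallback applied as: modelRequirements.modelName || this.getDefaultXxxModel().modelName
    // CHAT       -> 'gpt-4o'
    // COMPLETION -> 'gpt-3.5-turbo-instruct'
    // EMBEDDING  -> 'text-embedding-3-large'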
2061
1948
  /**
2062
1949
  * TODO: [🧠][🧙‍♂️] Maybe there can be some wizzard for those who want to use just OpenAI
2063
1950
  * TODO: Maybe create some common util for callChatModel and callCompletionModel
@@ -2073,168 +1960,158 @@
2073
1960
  *
2074
1961
  * @public exported from `@promptbook/openai`
2075
1962
  */
2076
- var OpenAiAssistantExecutionTools = /** @class */ (function (_super) {
2077
- __extends(OpenAiAssistantExecutionTools, _super);
1963
+ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
2078
1964
  /**
2079
1965
  * Creates OpenAI Execution Tools.
2080
1966
  *
2081
1967
  * @param options which are relevant are directly passed to the OpenAI client
2082
1968
  */
2083
- function OpenAiAssistantExecutionTools(options) {
2084
- var _this = _super.call(this, options) || this;
2085
- _this.assistantId = options.assistantId;
2086
- return _this;
1969
+ constructor(options) {
1970
+ super(options);
1971
+ this.assistantId = options.assistantId;
1972
+ }
1973
+ get title() {
1974
+ return 'OpenAI Assistant';
1975
+ }
1976
+ get description() {
1977
+ return 'Use a single assistant provided by OpenAI';
2087
1978
  }
2088
- Object.defineProperty(OpenAiAssistantExecutionTools.prototype, "title", {
2089
- get: function () {
2090
- return 'OpenAI Assistant';
2091
- },
2092
- enumerable: false,
2093
- configurable: true
2094
- });
2095
- Object.defineProperty(OpenAiAssistantExecutionTools.prototype, "description", {
2096
- get: function () {
2097
- return 'Use single assistant provided by OpenAI';
2098
- },
2099
- enumerable: false,
2100
- configurable: true
2101
- });
2102
1979
  /**
2103
1980
  * Calls OpenAI API to use a chat model.
2104
1981
  */
2105
- OpenAiAssistantExecutionTools.prototype.callChatModel = function (prompt) {
1982
+ async callChatModel(prompt) {
2106
1983
  var _a, _b, _c;
2107
- return __awaiter(this, void 0, void 0, function () {
2108
- var content, parameters, modelRequirements /*, format*/, client, _d, _e, key, rawPromptContent, rawRequest, start, complete, stream, rawResponse, resultContent, usage;
2109
- var e_1, _f;
2110
- var _this = this;
2111
- return __generator(this, function (_g) {
2112
- switch (_g.label) {
2113
- case 0:
2114
- if (this.options.isVerbose) {
2115
- console.info('💬 OpenAI callChatModel call', { prompt: prompt });
2116
- }
2117
- content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
2118
- return [4 /*yield*/, this.getClient()];
2119
- case 1:
2120
- client = _g.sent();
2121
- // TODO: [☂] Use here more modelRequirements
2122
- if (modelRequirements.modelVariant !== 'CHAT') {
2123
- throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
2124
- }
2125
- try {
2126
- // TODO: [👨‍👨‍👧‍👧] Remove:
2127
- for (_d = __values(['maxTokens', 'modelName', 'seed', 'temperature']), _e = _d.next(); !_e.done; _e = _d.next()) {
2128
- key = _e.value;
2129
- if (modelRequirements[key] !== undefined) {
2130
- throw new NotYetImplementedError("In `OpenAiAssistantExecutionTools` you cannot specify `".concat(key, "`"));
2131
- }
2132
- }
2133
- }
2134
- catch (e_1_1) { e_1 = { error: e_1_1 }; }
2135
- finally {
2136
- try {
2137
- if (_e && !_e.done && (_f = _d.return)) _f.call(_d);
2138
- }
2139
- finally { if (e_1) throw e_1.error; }
2140
- }
2141
- rawPromptContent = templateParameters(content, __assign(__assign({}, parameters), { modelName: 'assistant' }));
2142
- rawRequest = {
2143
- // TODO: [👨‍👨‍👧‍👧] ...modelSettings,
2144
- // TODO: [👨‍👨‍👧‍👧][🧠] What about system message for assistants, does it make sence - combination of OpenAI assistants with Promptbook Personas
2145
- assistant_id: this.assistantId,
2146
- thread: {
2147
- messages: [
2148
- // TODO: [🗯] Allow threads to be passed
2149
- { role: 'user', content: rawPromptContent },
2150
- ],
2151
- },
2152
- // <- TODO: Add user identification here> user: this.options.user,
2153
- };
2154
- start = $getCurrentDate();
2155
- if (this.options.isVerbose) {
2156
- console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
2157
- }
2158
- return [4 /*yield*/, client.beta.threads.createAndRunStream(rawRequest)];
2159
- case 2:
2160
- stream = _g.sent();
2161
- stream.on('connect', function () {
2162
- if (_this.options.isVerbose) {
2163
- console.info('connect', stream.currentEvent);
2164
- }
2165
- });
2166
- stream.on('messageDelta', function (messageDelta) {
2167
- var _a;
2168
- if (_this.options.isVerbose &&
2169
- messageDelta &&
2170
- messageDelta.content &&
2171
- messageDelta.content[0] &&
2172
- messageDelta.content[0].type === 'text') {
2173
- console.info('messageDelta', (_a = messageDelta.content[0].text) === null || _a === void 0 ? void 0 : _a.value);
2174
- }
2175
- // <- TODO: [🐚] Make streaming and running tasks working
2176
- });
2177
- stream.on('messageCreated', function (message) {
2178
- if (_this.options.isVerbose) {
2179
- console.info('messageCreated', message);
2180
- }
2181
- });
2182
- stream.on('messageDone', function (message) {
2183
- if (_this.options.isVerbose) {
2184
- console.info('messageDone', message);
2185
- }
2186
- });
2187
- return [4 /*yield*/, stream.finalMessages()];
2188
- case 3:
2189
- rawResponse = _g.sent();
2190
- if (this.options.isVerbose) {
2191
- console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
2192
- }
2193
- if (rawResponse.length !== 1) {
2194
- throw new PipelineExecutionError("There is NOT 1 BUT ".concat(rawResponse.length, " finalMessages from OpenAI"));
2195
- }
2196
- if (rawResponse[0].content.length !== 1) {
2197
- throw new PipelineExecutionError("There is NOT 1 BUT ".concat(rawResponse[0].content.length, " finalMessages content from OpenAI"));
2198
- }
2199
- if (((_a = rawResponse[0].content[0]) === null || _a === void 0 ? void 0 : _a.type) !== 'text') {
2200
- throw new PipelineExecutionError("There is NOT 'text' BUT ".concat((_b = rawResponse[0].content[0]) === null || _b === void 0 ? void 0 : _b.type, " finalMessages content type from OpenAI"));
2201
- }
2202
- resultContent = (_c = rawResponse[0].content[0]) === null || _c === void 0 ? void 0 : _c.text.value;
2203
- // <- TODO: [🧠] There are also annotations, maybe use them
2204
- // eslint-disable-next-line prefer-const
2205
- complete = $getCurrentDate();
2206
- usage = UNCERTAIN_USAGE;
2207
- // <- TODO: [🥘] Compute real usage for assistant
2208
- // ?> const usage = computeOpenAiUsage(content, resultContent || '', rawResponse);
2209
- if (resultContent === null) {
2210
- throw new PipelineExecutionError('No response message from OpenAI');
2211
- }
2212
- return [2 /*return*/, exportJson({
2213
- name: 'promptResult',
2214
- message: "Result of `OpenAiAssistantExecutionTools.callChatModel`",
2215
- order: [],
2216
- value: {
2217
- content: resultContent,
2218
- modelName: 'assistant',
2219
- // <- TODO: [🥘] Detect used model in assistant
2220
- // ?> model: rawResponse.model || modelName,
2221
- timing: {
2222
- start: start,
2223
- complete: complete,
2224
- },
2225
- usage: usage,
2226
- rawPromptContent: rawPromptContent,
2227
- rawRequest: rawRequest,
2228
- rawResponse: rawResponse,
2229
- // <- [🗯]
2230
- },
2231
- })];
2232
- }
2233
- });
1984
+ if (this.options.isVerbose) {
1985
+ console.info('💬 OpenAI callChatModel call', { prompt });
1986
+ }
1987
+ const { content, parameters, modelRequirements /*, format*/ } = prompt;
1988
+ const client = await this.getClient();
1989
+ // TODO: [☂] Use here more modelRequirements
1990
+ if (modelRequirements.modelVariant !== 'CHAT') {
1991
+ throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
1992
+ }
1993
+ // TODO: [👨‍👨‍👧‍👧] Remove:
1994
+ for (const key of ['maxTokens', 'modelName', 'seed', 'temperature']) {
1995
+ if (modelRequirements[key] !== undefined) {
1996
+ throw new NotYetImplementedError(`In \`OpenAiAssistantExecutionTools\` you cannot specify \`${key}\``);
1997
+ }
1998
+ }
1999
+ /*
2000
+ TODO: [👨‍👨‍👧‍👧] Implement all of this for Assistants
2001
+ const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
2002
+ const modelSettings = {
2003
+ model: modelName,
2004
+ max_tokens: modelRequirements.maxTokens,
2005
+ // <- TODO: [🌾] Make some global max cap for maxTokens
2006
+
2007
+ temperature: modelRequirements.temperature,
2008
+
2009
+ // <- TODO: [🈁] Use `seed` here AND/OR use `isDeterministic` for the entire execution tools
2010
+ // <- Note: [🧆]
2011
+ } as OpenAI.Chat.Completions.CompletionCreateParamsNonStreaming; // <- TODO: Guard here types better
2012
+
2013
+ if (format === 'JSON') {
2014
+ modelSettings.response_format = {
2015
+ type: 'json_object',
2016
+ };
2017
+ }
2018
+ */
2019
+ // <- TODO: [🚸] Not all models are compatible with JSON mode
2020
+ // > 'response_format' of type 'json_object' is not supported with this model.
2021
+ const rawPromptContent = templateParameters(content, {
2022
+ ...parameters,
2023
+ modelName: 'assistant',
2024
+ // <- [🧠] What is the best value here
2234
2025
  });
2235
- };
2236
- return OpenAiAssistantExecutionTools;
2237
- }(OpenAiExecutionTools));
2026
+ const rawRequest = {
2027
+ // TODO: [👨‍👨‍👧‍👧] ...modelSettings,
2028
+ // TODO: [👨‍👨‍👧‍👧][🧠] What about a system message for assistants: does combining OpenAI assistants with Promptbook Personas make sense?
2029
+ assistant_id: this.assistantId,
2030
+ thread: {
2031
+ messages: [
2032
+ // TODO: [🗯] Allow threads to be passed
2033
+ { role: 'user', content: rawPromptContent },
2034
+ ],
2035
+ },
2036
+ // <- TODO: Add user identification here> user: this.options.user,
2037
+ };
2038
+ const start = $getCurrentDate();
2039
+ let complete;
2040
+ if (this.options.isVerbose) {
2041
+ console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
2042
+ }
2043
+ const stream = await client.beta.threads.createAndRunStream(rawRequest);
2044
+ stream.on('connect', () => {
2045
+ if (this.options.isVerbose) {
2046
+ console.info('connect', stream.currentEvent);
2047
+ }
2048
+ });
2049
+ stream.on('messageDelta', (messageDelta) => {
2050
+ var _a;
2051
+ if (this.options.isVerbose &&
2052
+ messageDelta &&
2053
+ messageDelta.content &&
2054
+ messageDelta.content[0] &&
2055
+ messageDelta.content[0].type === 'text') {
2056
+ console.info('messageDelta', (_a = messageDelta.content[0].text) === null || _a === void 0 ? void 0 : _a.value);
2057
+ }
2058
+ // <- TODO: [🐚] Make streaming and running tasks working
2059
+ });
2060
+ stream.on('messageCreated', (message) => {
2061
+ if (this.options.isVerbose) {
2062
+ console.info('messageCreated', message);
2063
+ }
2064
+ });
2065
+ stream.on('messageDone', (message) => {
2066
+ if (this.options.isVerbose) {
2067
+ console.info('messageDone', message);
2068
+ }
2069
+ });
2070
+ const rawResponse = await stream.finalMessages();
2071
+ if (this.options.isVerbose) {
2072
+ console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
2073
+ }
2074
+ if (rawResponse.length !== 1) {
2075
+ throw new PipelineExecutionError(`There is NOT 1 BUT ${rawResponse.length} finalMessages from OpenAI`);
2076
+ }
2077
+ if (rawResponse[0].content.length !== 1) {
2078
+ throw new PipelineExecutionError(`There is NOT 1 BUT ${rawResponse[0].content.length} finalMessages content from OpenAI`);
2079
+ }
2080
+ if (((_a = rawResponse[0].content[0]) === null || _a === void 0 ? void 0 : _a.type) !== 'text') {
2081
+ throw new PipelineExecutionError(`There is NOT 'text' BUT ${(_b = rawResponse[0].content[0]) === null || _b === void 0 ? void 0 : _b.type} finalMessages content type from OpenAI`);
2082
+ }
2083
+ const resultContent = (_c = rawResponse[0].content[0]) === null || _c === void 0 ? void 0 : _c.text.value;
2084
+ // <- TODO: [🧠] There are also annotations, maybe use them
2085
+ // eslint-disable-next-line prefer-const
2086
+ complete = $getCurrentDate();
2087
+ const usage = UNCERTAIN_USAGE;
2088
+ // <- TODO: [🥘] Compute real usage for assistant
2089
+ // ?> const usage = computeOpenAiUsage(content, resultContent || '', rawResponse);
2090
+ if (resultContent === null) {
2091
+ throw new PipelineExecutionError('No response message from OpenAI');
2092
+ }
2093
+ return exportJson({
2094
+ name: 'promptResult',
2095
+ message: `Result of \`OpenAiAssistantExecutionTools.callChatModel\``,
2096
+ order: [],
2097
+ value: {
2098
+ content: resultContent,
2099
+ modelName: 'assistant',
2100
+ // <- TODO: [🥘] Detect used model in assistant
2101
+ // ?> model: rawResponse.model || modelName,
2102
+ timing: {
2103
+ start,
2104
+ complete,
2105
+ },
2106
+ usage,
2107
+ rawPromptContent,
2108
+ rawRequest,
2109
+ rawResponse,
2110
+ // <- [🗯]
2111
+ },
2112
+ });
2113
+ }
2114
+ }
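A construction sketch for the assistant flavour; both identifiers are hypothetical, and per the [👨‍👨‍👧‍👧] guard above, `maxTokens`, `modelName`, `seed` and `temperature` must stay undefined on the prompt:

    const assistantTools = createOpenAiAssistantExecutionTools({
        apiKey: 'sk-...',        // <- hypothetical
        assistantId: 'asst_...', // <- hypothetical; each call opens a fresh thread, see TODO [🗯]
    });
    const reply = await assistantTools.callChatModel({
        content: 'Hello!',
        parameters: {},
        modelRequirements: { modelVariant: 'CHAT' },
    });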
2238
2115
  /**
2239
2116
  * TODO: [🧠][🧙‍♂️] Maybe there can be some wizzard for those who want to use just OpenAI
2240
2117
  * TODO: Maybe make custom OpenAiError
@@ -2247,10 +2124,10 @@
2247
2124
  *
2248
2125
  * @public exported from `@promptbook/openai`
2249
2126
  */
2250
- var createOpenAiAssistantExecutionTools = Object.assign(function (options) {
2127
+ const createOpenAiAssistantExecutionTools = Object.assign((options) => {
2251
2128
  // TODO: [🧠][main] !!4 If browser, auto add `dangerouslyAllowBrowser`
2252
2129
  if (($isRunningInBrowser() || $isRunningInWebWorker()) && !options.dangerouslyAllowBrowser) {
2253
- options = __assign(__assign({}, options), { dangerouslyAllowBrowser: true });
2130
+ options = { ...options, dangerouslyAllowBrowser: true };
2254
2131
  }
2255
2132
  return new OpenAiAssistantExecutionTools(options);
2256
2133
  }, {
@@ -2267,10 +2144,10 @@
2267
2144
  *
2268
2145
  * @public exported from `@promptbook/openai`
2269
2146
  */
2270
- var createOpenAiExecutionTools = Object.assign(function (options) {
2147
+ const createOpenAiExecutionTools = Object.assign((options) => {
2271
2148
  // TODO: [🧠][main] !!4 If browser, auto add `dangerouslyAllowBrowser`
2272
2149
  if (($isRunningInBrowser() || $isRunningInWebWorker()) && !options.dangerouslyAllowBrowser) {
2273
- options = __assign(__assign({}, options), { dangerouslyAllowBrowser: true });
2150
+ options = { ...options, dangerouslyAllowBrowser: true };
2274
2151
  }
2275
2152
  return new OpenAiExecutionTools(options);
2276
2153
  }, {
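Both factories share the browser guard shown above, so inside a browser or web worker the flag is implied; a sketch of the resulting equivalence (apiKey hypothetical):

    const a = createOpenAiExecutionTools({ apiKey: 'sk-...' });
    const b = createOpenAiExecutionTools({ apiKey: 'sk-...', dangerouslyAllowBrowser: true });
    // In a browser bundle, a and b end up configured identically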
@@ -2303,46 +2180,35 @@
2303
2180
  * @public exported from `@promptbook/utils`
2304
2181
  */
2305
2182
  function normalizeTo_SCREAMING_CASE(text) {
2306
- var e_1, _a;
2307
- var charType;
2308
- var lastCharType = 'OTHER';
2309
- var normalizedName = '';
2310
- try {
2311
- for (var text_1 = __values(text), text_1_1 = text_1.next(); !text_1_1.done; text_1_1 = text_1.next()) {
2312
- var char = text_1_1.value;
2313
- var normalizedChar = void 0;
2314
- if (/^[a-z]$/.test(char)) {
2315
- charType = 'LOWERCASE';
2316
- normalizedChar = char.toUpperCase();
2317
- }
2318
- else if (/^[A-Z]$/.test(char)) {
2319
- charType = 'UPPERCASE';
2320
- normalizedChar = char;
2321
- }
2322
- else if (/^[0-9]$/.test(char)) {
2323
- charType = 'NUMBER';
2324
- normalizedChar = char;
2325
- }
2326
- else {
2327
- charType = 'OTHER';
2328
- normalizedChar = '_';
2329
- }
2330
- if (charType !== lastCharType &&
2331
- !(lastCharType === 'UPPERCASE' && charType === 'LOWERCASE') &&
2332
- !(lastCharType === 'NUMBER') &&
2333
- !(charType === 'NUMBER')) {
2334
- normalizedName += '_';
2335
- }
2336
- normalizedName += normalizedChar;
2337
- lastCharType = charType;
2183
+ let charType;
2184
+ let lastCharType = 'OTHER';
2185
+ let normalizedName = '';
2186
+ for (const char of text) {
2187
+ let normalizedChar;
2188
+ if (/^[a-z]$/.test(char)) {
2189
+ charType = 'LOWERCASE';
2190
+ normalizedChar = char.toUpperCase();
2338
2191
  }
2339
- }
2340
- catch (e_1_1) { e_1 = { error: e_1_1 }; }
2341
- finally {
2342
- try {
2343
- if (text_1_1 && !text_1_1.done && (_a = text_1.return)) _a.call(text_1);
2192
+ else if (/^[A-Z]$/.test(char)) {
2193
+ charType = 'UPPERCASE';
2194
+ normalizedChar = char;
2195
+ }
2196
+ else if (/^[0-9]$/.test(char)) {
2197
+ charType = 'NUMBER';
2198
+ normalizedChar = char;
2199
+ }
2200
+ else {
2201
+ charType = 'OTHER';
2202
+ normalizedChar = '_';
2203
+ }
2204
+ if (charType !== lastCharType &&
2205
+ !(lastCharType === 'UPPERCASE' && charType === 'LOWERCASE') &&
2206
+ !(lastCharType === 'NUMBER') &&
2207
+ !(charType === 'NUMBER')) {
2208
+ normalizedName += '_';
2344
2209
  }
2345
- finally { if (e_1) throw e_1.error; }
2210
+ normalizedName += normalizedChar;
2211
+ lastCharType = charType;
2346
2212
  }
2347
2213
  normalizedName = normalizedName.replace(/_+/g, '_');
2348
2214
  normalizedName = normalizedName.replace(/_?\/_?/g, '/');
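A quick sanity check of the de-sugared loop, with outputs traced from the algorithm itself rather than taken from package tests:

    console.info(normalizeTo_SCREAMING_CASE('helloWorld'));    // -> 'HELLO_WORLD'
    console.info(normalizeTo_SCREAMING_CASE('hello world 2')); // -> 'HELLO_WORLD_2' (runs of '_' collapse)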
@@ -2379,27 +2245,27 @@
2379
2245
  *
2380
2246
  * @private internal utility, only singleton instances of this class are exported
2381
2247
  */
2382
- var $Register = /** @class */ (function () {
2383
- function $Register(registerName) {
2248
+ class $Register {
2249
+ constructor(registerName) {
2384
2250
  this.registerName = registerName;
2385
- var storageName = "_promptbook_".concat(normalizeTo_snake_case(registerName));
2386
- var globalScope = $getGlobalScope();
2251
+ const storageName = `_promptbook_${normalizeTo_snake_case(registerName)}`;
2252
+ const globalScope = $getGlobalScope();
2387
2253
  if (globalScope[storageName] === undefined) {
2388
2254
  globalScope[storageName] = [];
2389
2255
  }
2390
2256
  else if (!Array.isArray(globalScope[storageName])) {
2391
- throw new UnexpectedError("Expected (global) ".concat(storageName, " to be an array, but got ").concat(typeof globalScope[storageName]));
2257
+ throw new UnexpectedError(`Expected (global) ${storageName} to be an array, but got ${typeof globalScope[storageName]}`);
2392
2258
  }
2393
2259
  this.storage = globalScope[storageName];
2394
2260
  }
2395
- $Register.prototype.list = function () {
2261
+ list() {
2396
2262
  // <- TODO: ReadonlyDeep<ReadonlyArray<TRegistered>>
2397
2263
  return this.storage;
2398
- };
2399
- $Register.prototype.register = function (registered) {
2400
- var packageName = registered.packageName, className = registered.className;
2401
- var existingRegistrationIndex = this.storage.findIndex(function (item) { return item.packageName === packageName && item.className === className; });
2402
- var existingRegistration = this.storage[existingRegistrationIndex];
2264
+ }
2265
+ register(registered) {
2266
+ const { packageName, className } = registered;
2267
+ const existingRegistrationIndex = this.storage.findIndex((item) => item.packageName === packageName && item.className === className);
2268
+ const existingRegistration = this.storage[existingRegistrationIndex];
2403
2269
  if (!existingRegistration) {
2404
2270
  this.storage.push(registered);
2405
2271
  }
@@ -2408,18 +2274,17 @@
2408
2274
  }
2409
2275
  return {
2410
2276
  registerName: this.registerName,
2411
- packageName: packageName,
2412
- className: className,
2277
+ packageName,
2278
+ className,
2413
2279
  get isDestroyed() {
2414
2280
  return false;
2415
2281
  },
2416
- destroy: function () {
2417
- throw new NotYetImplementedError("Registration to ".concat(this.registerName, " is permanent in this version of Promptbook"));
2282
+ destroy() {
2283
+ throw new NotYetImplementedError(`Registration to ${this.registerName} is permanent in this version of Promptbook`);
2418
2284
  },
2419
2285
  };
2420
- };
2421
- return $Register;
2422
- }());
2286
+ }
2287
+ }
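A registration sketch against the class above; the package and class names are invented for illustration, mirroring how the factories below carry their `packageName` and `className` metadata:

    const $myRegister = new $Register('llm_execution_tools_constructors');
    const registration = $myRegister.register(
        Object.assign((options) => {/* construct tools */}, {
            packageName: '@example/my-llm', // <- hypothetical
            className: 'MyExecutionTools',  // <- hypothetical
        }),
    );
    console.info($myRegister.list().length, registration.isDestroyed); // destroy() still throws NotYetImplementedError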
2423
2288
 
2424
2289
  /**
2425
2290
  * @@@
@@ -2428,7 +2293,7 @@
2428
2293
  * @singleton Only one instance of each register is created per build, but there can be more @@@
2429
2294
  * @public exported from `@promptbook/core`
2430
2295
  */
2431
- var $llmToolsRegister = new $Register('llm_execution_tools_constructors');
2296
+ const $llmToolsRegister = new $Register('llm_execution_tools_constructors');
2432
2297
  /**
2433
2298
  * TODO: [®] DRY Register logic
2434
2299
  */
@@ -2442,7 +2307,7 @@
2442
2307
  * @public exported from `@promptbook/wizzard`
2443
2308
  * @public exported from `@promptbook/cli`
2444
2309
  */
2445
- var _OpenAiRegistration = $llmToolsRegister.register(createOpenAiExecutionTools);
2310
+ const _OpenAiRegistration = $llmToolsRegister.register(createOpenAiExecutionTools);
2446
2311
  /**
2447
2312
  * @@@ registration2
2448
2313
  *
@@ -2452,7 +2317,7 @@
2452
2317
  * @public exported from `@promptbook/wizzard`
2453
2318
  * @public exported from `@promptbook/cli`
2454
2319
  */
2455
- var _OpenAiAssistantRegistration = $llmToolsRegister.register(createOpenAiAssistantExecutionTools);
2320
+ const _OpenAiAssistantRegistration = $llmToolsRegister.register(createOpenAiAssistantExecutionTools);
2456
2321
  /**
2457
2322
  * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
2458
2323
  * Note: [💞] Ignore a discrepancy between file name and entity name