@promptbook/openai 0.86.8 → 0.86.22

This diff shows the published contents of two publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.
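A diff equivalent to this one can be reproduced locally with the npm CLI (npm 7 or newer). This is a general sketch of the workflow, not part of the registry output, and the CLI's formatting may differ slightly from the rendering below:

    npm diff --diff=@promptbook/openai@0.86.8 --diff=@promptbook/openai@0.86.22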
package/esm/index.es.js CHANGED
@@ -9,138 +9,19 @@ import OpenAI from 'openai';
  * @generated
  * @see https://github.com/webgptorg/book
  */
- var BOOK_LANGUAGE_VERSION = '1.0.0';
+ const BOOK_LANGUAGE_VERSION = '1.0.0';
  /**
  * The version of the Promptbook engine
  *
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
- var PROMPTBOOK_ENGINE_VERSION = '0.86.8';
+ const PROMPTBOOK_ENGINE_VERSION = '0.86.22';
  /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
  */

- /*! *****************************************************************************
- Copyright (c) Microsoft Corporation.
-
- Permission to use, copy, modify, and/or distribute this software for any
- purpose with or without fee is hereby granted.
-
- THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
- REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
- AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
- INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
- OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- PERFORMANCE OF THIS SOFTWARE.
- ***************************************************************************** */
- /* global Reflect, Promise */
-
- var extendStatics = function(d, b) {
- extendStatics = Object.setPrototypeOf ||
- ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||
- function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };
- return extendStatics(d, b);
- };
-
- function __extends(d, b) {
- if (typeof b !== "function" && b !== null)
- throw new TypeError("Class extends value " + String(b) + " is not a constructor or null");
- extendStatics(d, b);
- function __() { this.constructor = d; }
- d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __());
- }
-
- var __assign = function() {
- __assign = Object.assign || function __assign(t) {
- for (var s, i = 1, n = arguments.length; i < n; i++) {
- s = arguments[i];
- for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p)) t[p] = s[p];
- }
- return t;
- };
- return __assign.apply(this, arguments);
- };
-
- function __awaiter(thisArg, _arguments, P, generator) {
- function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
- return new (P || (P = Promise))(function (resolve, reject) {
- function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
- function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
- function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
- step((generator = generator.apply(thisArg, _arguments || [])).next());
- });
- }
-
- function __generator(thisArg, body) {
- var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;
- return g = { next: verb(0), "throw": verb(1), "return": verb(2) }, typeof Symbol === "function" && (g[Symbol.iterator] = function() { return this; }), g;
- function verb(n) { return function (v) { return step([n, v]); }; }
- function step(op) {
- if (f) throw new TypeError("Generator is already executing.");
- while (_) try {
- if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;
- if (y = 0, t) op = [op[0] & 2, t.value];
- switch (op[0]) {
- case 0: case 1: t = op; break;
- case 4: _.label++; return { value: op[1], done: false };
- case 5: _.label++; y = op[1]; op = [0]; continue;
- case 7: op = _.ops.pop(); _.trys.pop(); continue;
- default:
- if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }
- if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }
- if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }
- if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }
- if (t[2]) _.ops.pop();
- _.trys.pop(); continue;
- }
- op = body.call(thisArg, _);
- } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }
- if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };
- }
- }
-
- function __values(o) {
- var s = typeof Symbol === "function" && Symbol.iterator, m = s && o[s], i = 0;
- if (m) return m.call(o);
- if (o && typeof o.length === "number") return {
- next: function () {
- if (o && i >= o.length) o = void 0;
- return { value: o && o[i++], done: !o };
- }
- };
- throw new TypeError(s ? "Object is not iterable." : "Symbol.iterator is not defined.");
- }
-
- function __read(o, n) {
- var m = typeof Symbol === "function" && o[Symbol.iterator];
- if (!m) return o;
- var i = m.call(o), r, ar = [], e;
- try {
- while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);
- }
- catch (error) { e = { error: error }; }
- finally {
- try {
- if (r && !r.done && (m = i["return"])) m.call(i);
- }
- finally { if (e) throw e.error; }
- }
- return ar;
- }
-
- function __spreadArray(to, from, pack) {
- if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {
- if (ar || !(i in from)) {
- if (!ar) ar = Array.prototype.slice.call(from, 0, i);
- ar[i] = from[i];
- }
- }
- return to.concat(ar || Array.prototype.slice.call(from));
- }
-
  /**
  * Detects if the code is running in a browser environment in main thread (Not in a web worker)
  *
@@ -148,7 +29,13 @@ function __spreadArray(to, from, pack) {
  *
  * @public exported from `@promptbook/utils`
  */
- var $isRunningInBrowser = new Function("\n try {\n return this === window;\n } catch (e) {\n return false;\n }\n");
+ const $isRunningInBrowser = new Function(`
+ try {
+ return this === window;
+ } catch (e) {
+ return false;
+ }
+ `);
  /**
  * TODO: [🎺]
  */
@@ -160,7 +47,17 @@ var $isRunningInBrowser = new Function("\n try {\n return this === win
  *
  * @public exported from `@promptbook/utils`
  */
- var $isRunningInWebWorker = new Function("\n try {\n if (typeof WorkerGlobalScope !== 'undefined' && self instanceof WorkerGlobalScope) {\n return true;\n } else {\n return false;\n }\n } catch (e) {\n return false;\n }\n");
+ const $isRunningInWebWorker = new Function(`
+ try {
+ if (typeof WorkerGlobalScope !== 'undefined' && self instanceof WorkerGlobalScope) {
+ return true;
+ } else {
+ return false;
+ }
+ } catch (e) {
+ return false;
+ }
+ `);
  /**
  * TODO: [🎺]
  */
@@ -170,32 +67,36 @@ var $isRunningInWebWorker = new Function("\n try {\n if (typeof Worker
  *
  * @public exported from `@promptbook/core`
  */
- var NotYetImplementedError = /** @class */ (function (_super) {
- __extends(NotYetImplementedError, _super);
- function NotYetImplementedError(message) {
- var _this = _super.call(this, spaceTrim(function (block) { return "\n ".concat(block(message), "\n\n Note: This feature is not implemented yet but it will be soon.\n\n If you want speed up the implementation or just read more, look here:\n https://github.com/webgptorg/promptbook\n\n Or contact us on pavol@ptbk.io\n\n "); })) || this;
- _this.name = 'NotYetImplementedError';
- Object.setPrototypeOf(_this, NotYetImplementedError.prototype);
- return _this;
+ class NotYetImplementedError extends Error {
+ constructor(message) {
+ super(spaceTrim((block) => `
+ ${block(message)}
+
+ Note: This feature is not implemented yet but it will be soon.
+
+ If you want speed up the implementation or just read more, look here:
+ https://github.com/webgptorg/promptbook
+
+ Or contact us on pavol@ptbk.io
+
+ `));
+ this.name = 'NotYetImplementedError';
+ Object.setPrototypeOf(this, NotYetImplementedError.prototype);
  }
- return NotYetImplementedError;
- }(Error));
+ }

  /**
  * This error indicates errors during the execution of the pipeline
  *
  * @public exported from `@promptbook/core`
  */
- var PipelineExecutionError = /** @class */ (function (_super) {
- __extends(PipelineExecutionError, _super);
- function PipelineExecutionError(message) {
- var _this = _super.call(this, message) || this;
- _this.name = 'PipelineExecutionError';
- Object.setPrototypeOf(_this, PipelineExecutionError.prototype);
- return _this;
+ class PipelineExecutionError extends Error {
+ constructor(message) {
+ super(message);
+ this.name = 'PipelineExecutionError';
+ Object.setPrototypeOf(this, PipelineExecutionError.prototype);
  }
- return PipelineExecutionError;
- }(Error));
+ }

  /**
  * Freezes the given object and all its nested objects recursively
@@ -207,26 +108,15 @@ var PipelineExecutionError = /** @class */ (function (_super) {
  * @public exported from `@promptbook/utils`
  */
  function $deepFreeze(objectValue) {
- var e_1, _a;
  if (Array.isArray(objectValue)) {
- return Object.freeze(objectValue.map(function (item) { return $deepFreeze(item); }));
- }
- var propertyNames = Object.getOwnPropertyNames(objectValue);
- try {
- for (var propertyNames_1 = __values(propertyNames), propertyNames_1_1 = propertyNames_1.next(); !propertyNames_1_1.done; propertyNames_1_1 = propertyNames_1.next()) {
- var propertyName = propertyNames_1_1.value;
- var value = objectValue[propertyName];
- if (value && typeof value === 'object') {
- $deepFreeze(value);
- }
- }
+ return Object.freeze(objectValue.map((item) => $deepFreeze(item)));
  }
- catch (e_1_1) { e_1 = { error: e_1_1 }; }
- finally {
- try {
- if (propertyNames_1_1 && !propertyNames_1_1.done && (_a = propertyNames_1.return)) _a.call(propertyNames_1);
+ const propertyNames = Object.getOwnPropertyNames(objectValue);
+ for (const propertyName of propertyNames) {
+ const value = objectValue[propertyName];
+ if (value && typeof value === 'object') {
+ $deepFreeze(value);
  }
- finally { if (e_1) throw e_1.error; }
  }
  Object.freeze(objectValue);
  return objectValue;
@@ -266,7 +156,7 @@ $deepFreeze({
  *
  * @public exported from `@promptbook/core`
  */
- var UNCERTAIN_USAGE = $deepFreeze({
+ const UNCERTAIN_USAGE = $deepFreeze({
  price: { value: 0, isUncertain: true },
  input: {
  tokensCount: { value: 0, isUncertain: true },
@@ -310,32 +200,32 @@ function $getCurrentDate() {
  *
  * @public exported from `@promptbook/core`
  */
- var NAME = "Promptbook";
+ const NAME = `Promptbook`;
  /**
  * Email of the responsible person
  *
  * @public exported from `@promptbook/core`
  */
- var ADMIN_EMAIL = 'pavol@ptbk.io';
+ const ADMIN_EMAIL = 'pavol@ptbk.io';
  /**
  * Name of the responsible person for the Promptbook on GitHub
  *
  * @public exported from `@promptbook/core`
  */
- var ADMIN_GITHUB_NAME = 'hejny';
+ const ADMIN_GITHUB_NAME = 'hejny';
  // <- TODO: [🧠] Better system for generator warnings - not always "code" and "by `@promptbook/cli`"
  /**
  * The maximum number of iterations for a loops
  *
  * @private within the repository - too low-level in comparison with other `MAX_...`
  */
- var LOOP_LIMIT = 1000;
+ const LOOP_LIMIT = 1000;
  /**
  * Strings to represent various values in the context of parameter values
  *
  * @public exported from `@promptbook/utils`
  */
- var VALUE_STRINGS = {
+ const VALUE_STRINGS = {
  empty: '(nothing; empty string)',
  null: '(no value; null)',
  undefined: '(unknown value; undefined)',
@@ -349,7 +239,7 @@ var VALUE_STRINGS = {
  *
  * @public exported from `@promptbook/utils`
  */
- var SMALL_NUMBER = 0.001;
+ const SMALL_NUMBER = 0.001;
  // <- TODO: [🧜‍♂️]
  /**
  * @@@
@@ -374,8 +264,11 @@ Object.freeze({
  * @public exported from `@promptbook/utils`
  */
  function orderJson(options) {
- var value = options.value, order = options.order;
- var orderedValue = __assign(__assign({}, (order === undefined ? {} : Object.fromEntries(order.map(function (key) { return [key, undefined]; })))), value);
+ const { value, order } = options;
+ const orderedValue = {
+ ...(order === undefined ? {} : Object.fromEntries(order.map((key) => [key, undefined]))),
+ ...value,
+ };
  return orderedValue;
  }

@@ -385,11 +278,37 @@ function orderJson(options) {
  * @private private within the repository
  */
  function getErrorReportUrl(error) {
- var report = {
- title: "\uD83D\uDC1C Error report from ".concat(NAME),
- body: spaceTrim$1(function (block) { return "\n\n\n `".concat(error.name || 'Error', "` has occurred in the [").concat(NAME, "], please look into it @").concat(ADMIN_GITHUB_NAME, ".\n\n ```\n ").concat(block(error.message || '(no error message)'), "\n ```\n\n\n ## More info:\n\n - **Promptbook engine version:** ").concat(PROMPTBOOK_ENGINE_VERSION, "\n - **Book language version:** ").concat(BOOK_LANGUAGE_VERSION, "\n - **Time:** ").concat(new Date().toISOString(), "\n\n <details>\n <summary>Stack trace:</summary>\n\n ## Stack trace:\n\n ```stacktrace\n ").concat(block(error.stack || '(empty)'), "\n ```\n </details>\n\n "); }),
+ const report = {
+ title: `🐜 Error report from ${NAME}`,
+ body: spaceTrim$1((block) => `
+
+
+ \`${error.name || 'Error'}\` has occurred in the [${NAME}], please look into it @${ADMIN_GITHUB_NAME}.
+
+ \`\`\`
+ ${block(error.message || '(no error message)')}
+ \`\`\`
+
+
+ ## More info:
+
+ - **Promptbook engine version:** ${PROMPTBOOK_ENGINE_VERSION}
+ - **Book language version:** ${BOOK_LANGUAGE_VERSION}
+ - **Time:** ${new Date().toISOString()}
+
+ <details>
+ <summary>Stack trace:</summary>
+
+ ## Stack trace:
+
+ \`\`\`stacktrace
+ ${block(error.stack || '(empty)')}
+ \`\`\`
+ </details>
+
+ `),
  };
- var reportUrl = new URL("https://github.com/webgptorg/promptbook/issues/new");
+ const reportUrl = new URL(`https://github.com/webgptorg/promptbook/issues/new`);
  reportUrl.searchParams.set('labels', 'bug');
  reportUrl.searchParams.set('assignees', ADMIN_GITHUB_NAME);
  reportUrl.searchParams.set('title', report.title);
@@ -402,16 +321,24 @@ function getErrorReportUrl(error) {
  *
  * @public exported from `@promptbook/core`
  */
- var UnexpectedError = /** @class */ (function (_super) {
- __extends(UnexpectedError, _super);
- function UnexpectedError(message) {
- var _this = _super.call(this, spaceTrim(function (block) { return "\n ".concat(block(message), "\n\n Note: This error should not happen.\n It's probbably a bug in the pipeline collection\n\n Please report issue:\n ").concat(block(getErrorReportUrl(new Error(message)).href), "\n\n Or contact us on ").concat(ADMIN_EMAIL, "\n\n "); })) || this;
- _this.name = 'UnexpectedError';
- Object.setPrototypeOf(_this, UnexpectedError.prototype);
- return _this;
+ class UnexpectedError extends Error {
+ constructor(message) {
+ super(spaceTrim((block) => `
+ ${block(message)}
+
+ Note: This error should not happen.
+ It's probbably a bug in the pipeline collection
+
+ Please report issue:
+ ${block(getErrorReportUrl(new Error(message)).href)}
+
+ Or contact us on ${ADMIN_EMAIL}
+
+ `));
+ this.name = 'UnexpectedError';
+ Object.setPrototypeOf(this, UnexpectedError.prototype);
  }
- return UnexpectedError;
- }(Error));
+ }

  /**
  * Checks if the value is [🚉] serializable as JSON
@@ -434,10 +361,9 @@ var UnexpectedError = /** @class */ (function (_super) {
  * @public exported from `@promptbook/utils`
  */
  function checkSerializableAsJson(options) {
- var e_1, _a;
- var value = options.value, name = options.name, message = options.message;
+ const { value, name, message } = options;
  if (value === undefined) {
- throw new UnexpectedError("".concat(name, " is undefined"));
+ throw new UnexpectedError(`${name} is undefined`);
  }
  else if (value === null) {
  return;
@@ -452,49 +378,54 @@ function checkSerializableAsJson(options) {
  return;
  }
  else if (typeof value === 'symbol') {
- throw new UnexpectedError("".concat(name, " is symbol"));
+ throw new UnexpectedError(`${name} is symbol`);
  }
  else if (typeof value === 'function') {
- throw new UnexpectedError("".concat(name, " is function"));
+ throw new UnexpectedError(`${name} is function`);
  }
  else if (typeof value === 'object' && Array.isArray(value)) {
- for (var i = 0; i < value.length; i++) {
- checkSerializableAsJson({ name: "".concat(name, "[").concat(i, "]"), value: value[i], message: message });
+ for (let i = 0; i < value.length; i++) {
+ checkSerializableAsJson({ name: `${name}[${i}]`, value: value[i], message });
  }
  }
  else if (typeof value === 'object') {
  if (value instanceof Date) {
- throw new UnexpectedError(spaceTrim$1(function (block) { return "\n `".concat(name, "` is Date\n\n Use `string_date_iso8601` instead\n\n Additional message for `").concat(name, "`:\n ").concat(block(message || '(nothing)'), "\n "); }));
+ throw new UnexpectedError(spaceTrim$1((block) => `
+ \`${name}\` is Date
+
+ Use \`string_date_iso8601\` instead
+
+ Additional message for \`${name}\`:
+ ${block(message || '(nothing)')}
+ `));
  }
  else if (value instanceof Map) {
- throw new UnexpectedError("".concat(name, " is Map"));
+ throw new UnexpectedError(`${name} is Map`);
  }
  else if (value instanceof Set) {
- throw new UnexpectedError("".concat(name, " is Set"));
+ throw new UnexpectedError(`${name} is Set`);
  }
  else if (value instanceof RegExp) {
- throw new UnexpectedError("".concat(name, " is RegExp"));
+ throw new UnexpectedError(`${name} is RegExp`);
  }
  else if (value instanceof Error) {
- throw new UnexpectedError(spaceTrim$1(function (block) { return "\n `".concat(name, "` is unserialized Error\n\n Use function `serializeError`\n\n Additional message for `").concat(name, "`:\n ").concat(block(message || '(nothing)'), "\n\n "); }));
+ throw new UnexpectedError(spaceTrim$1((block) => `
+ \`${name}\` is unserialized Error
+
+ Use function \`serializeError\`
+
+ Additional message for \`${name}\`:
+ ${block(message || '(nothing)')}
+
+ `));
  }
  else {
- try {
- for (var _b = __values(Object.entries(value)), _c = _b.next(); !_c.done; _c = _b.next()) {
- var _d = __read(_c.value, 2), subName = _d[0], subValue = _d[1];
- if (subValue === undefined) {
- // Note: undefined in object is serializable - it is just omited
- continue;
- }
- checkSerializableAsJson({ name: "".concat(name, ".").concat(subName), value: subValue, message: message });
- }
- }
- catch (e_1_1) { e_1 = { error: e_1_1 }; }
- finally {
- try {
- if (_c && !_c.done && (_a = _b.return)) _a.call(_b);
+ for (const [subName, subValue] of Object.entries(value)) {
+ if (subValue === undefined) {
+ // Note: undefined in object is serializable - it is just omited
+ continue;
  }
- finally { if (e_1) throw e_1.error; }
+ checkSerializableAsJson({ name: `${name}.${subName}`, value: subValue, message });
  }
  try {
  JSON.stringify(value); // <- TODO: [0]
@@ -503,7 +434,14 @@ function checkSerializableAsJson(options) {
  if (!(error instanceof Error)) {
  throw error;
  }
- throw new UnexpectedError(spaceTrim$1(function (block) { return "\n `".concat(name, "` is not serializable\n\n ").concat(block(error.stack || error.message), "\n\n Additional message for `").concat(name, "`:\n ").concat(block(message || '(nothing)'), "\n "); }));
+ throw new UnexpectedError(spaceTrim$1((block) => `
+ \`${name}\` is not serializable
+
+ ${block(error.stack || error.message)}
+
+ Additional message for \`${name}\`:
+ ${block(message || '(nothing)')}
+ `));
  }
  /*
  TODO: [0] Is there some more elegant way to check circular references?
@@ -528,7 +466,12 @@ function checkSerializableAsJson(options) {
  }
  }
  else {
- throw new UnexpectedError(spaceTrim$1(function (block) { return "\n `".concat(name, "` is unknown type\n\n Additional message for `").concat(name, "`:\n ").concat(block(message || '(nothing)'), "\n "); }));
+ throw new UnexpectedError(spaceTrim$1((block) => `
+ \`${name}\` is unknown type
+
+ Additional message for \`${name}\`:
+ ${block(message || '(nothing)')}
+ `));
  }
  }
  /**
@@ -574,9 +517,9 @@ function deepClone(objectValue) {
  * @public exported from `@promptbook/utils`
  */
  function exportJson(options) {
- var name = options.name, value = options.value, order = options.order, message = options.message;
- checkSerializableAsJson({ name: name, value: value, message: message });
- var orderedValue =
+ const { name, value, order, message } = options;
+ checkSerializableAsJson({ name, value, message });
+ const orderedValue =
  // TODO: Fix error "Type instantiation is excessively deep and possibly infinite."
  // eslint-disable-next-line @typescript-eslint/ban-ts-comment
  // @ts-ignore
@@ -599,19 +542,19 @@ function exportJson(options) {
  *
  * @private within the repository
  */
- var REPLACING_NONCE = 'ptbkauk42kV2dzao34faw7FudQUHYPtW';
+ const REPLACING_NONCE = 'ptbkauk42kV2dzao34faw7FudQUHYPtW';
  /**
  * @@@
  *
  * @private within the repository
  */
- var RESERVED_PARAMETER_MISSING_VALUE = 'MISSING-' + REPLACING_NONCE;
+ const RESERVED_PARAMETER_MISSING_VALUE = 'MISSING-' + REPLACING_NONCE;
  /**
  * @@@
  *
  * @private within the repository
  */
- var RESERVED_PARAMETER_RESTRICTED = 'RESTRICTED-' + REPLACING_NONCE;
+ const RESERVED_PARAMETER_RESTRICTED = 'RESTRICTED-' + REPLACING_NONCE;
  /**
  * The names of the parameters that are reserved for special purposes
  *
@@ -619,7 +562,7 @@ var RESERVED_PARAMETER_RESTRICTED = 'RESTRICTED-' + REPLACING_NONCE;
  */
  exportJson({
  name: 'RESERVED_PARAMETER_NAMES',
- message: "The names of the parameters that are reserved for special purposes",
+ message: `The names of the parameters that are reserved for special purposes`,
  value: [
  'content',
  'context',
@@ -641,16 +584,13 @@ exportJson({
  *
  * @public exported from `@promptbook/core`
  */
- var LimitReachedError = /** @class */ (function (_super) {
- __extends(LimitReachedError, _super);
- function LimitReachedError(message) {
- var _this = _super.call(this, message) || this;
- _this.name = 'LimitReachedError';
- Object.setPrototypeOf(_this, LimitReachedError.prototype);
- return _this;
+ class LimitReachedError extends Error {
+ constructor(message) {
+ super(message);
+ this.name = 'LimitReachedError';
+ Object.setPrototypeOf(this, LimitReachedError.prototype);
  }
- return LimitReachedError;
- }(Error));
+ }

  /**
  * Format either small or big number
@@ -670,9 +610,9 @@ function numberToString(value) {
  else if (value === -Infinity) {
  return VALUE_STRINGS.negativeInfinity;
  }
- for (var exponent = 0; exponent < 15; exponent++) {
- var factor = Math.pow(10, exponent);
- var valueRounded = Math.round(value * factor) / factor;
+ for (let exponent = 0; exponent < 15; exponent++) {
+ const factor = 10 ** exponent;
+ const valueRounded = Math.round(value * factor) / factor;
  if (Math.abs(value - valueRounded) / value < SMALL_NUMBER) {
  return valueRounded.toFixed(exponent);
  }
@@ -740,47 +680,38 @@ function valueToString(value) {
  * @public exported from `@promptbook/utils`
  */
  function templateParameters(template, parameters) {
- var e_1, _a;
- try {
- for (var _b = __values(Object.entries(parameters)), _c = _b.next(); !_c.done; _c = _b.next()) {
- var _d = __read(_c.value, 2), parameterName = _d[0], parameterValue = _d[1];
- if (parameterValue === RESERVED_PARAMETER_MISSING_VALUE) {
- throw new UnexpectedError("Parameter `{".concat(parameterName, "}` has missing value"));
- }
- else if (parameterValue === RESERVED_PARAMETER_RESTRICTED) {
- // TODO: [🍵]
- throw new UnexpectedError("Parameter `{".concat(parameterName, "}` is restricted to use"));
- }
+ for (const [parameterName, parameterValue] of Object.entries(parameters)) {
+ if (parameterValue === RESERVED_PARAMETER_MISSING_VALUE) {
+ throw new UnexpectedError(`Parameter \`{${parameterName}}\` has missing value`);
  }
- }
- catch (e_1_1) { e_1 = { error: e_1_1 }; }
- finally {
- try {
- if (_c && !_c.done && (_a = _b.return)) _a.call(_b);
+ else if (parameterValue === RESERVED_PARAMETER_RESTRICTED) {
+ // TODO: [🍵]
+ throw new UnexpectedError(`Parameter \`{${parameterName}}\` is restricted to use`);
  }
- finally { if (e_1) throw e_1.error; }
  }
- var replacedTemplates = template;
- var match;
- var loopLimit = LOOP_LIMIT;
- var _loop_1 = function () {
+ let replacedTemplates = template;
+ let match;
+ let loopLimit = LOOP_LIMIT;
+ while ((match = /^(?<precol>.*){(?<parameterName>\w+)}(.*)/m /* <- Not global */
+ .exec(replacedTemplates))) {
  if (loopLimit-- < 0) {
  throw new LimitReachedError('Loop limit reached during parameters replacement in `templateParameters`');
  }
- var precol = match.groups.precol;
- var parameterName = match.groups.parameterName;
+ const precol = match.groups.precol;
+ const parameterName = match.groups.parameterName;
  if (parameterName === '') {
- return "continue";
+ // Note: Skip empty placeholders. It's used to avoid confusion with JSON-like strings
+ continue;
  }
  if (parameterName.indexOf('{') !== -1 || parameterName.indexOf('}') !== -1) {
  throw new PipelineExecutionError('Parameter is already opened or not closed');
  }
  if (parameters[parameterName] === undefined) {
- throw new PipelineExecutionError("Parameter `{".concat(parameterName, "}` is not defined"));
+ throw new PipelineExecutionError(`Parameter \`{${parameterName}}\` is not defined`);
  }
- var parameterValue = parameters[parameterName];
+ let parameterValue = parameters[parameterName];
  if (parameterValue === undefined) {
- throw new PipelineExecutionError("Parameter `{".concat(parameterName, "}` is not defined"));
+ throw new PipelineExecutionError(`Parameter \`{${parameterName}}\` is not defined`);
  }
  parameterValue = valueToString(parameterValue);
  // Escape curly braces in parameter values to prevent prompt-injection
@@ -788,17 +719,13 @@ function templateParameters(template, parameters) {
  if (parameterValue.includes('\n') && /^\s*\W{0,3}\s*$/.test(precol)) {
  parameterValue = parameterValue
  .split('\n')
- .map(function (line, index) { return (index === 0 ? line : "".concat(precol).concat(line)); })
+ .map((line, index) => (index === 0 ? line : `${precol}${line}`))
  .join('\n');
  }
  replacedTemplates =
  replacedTemplates.substring(0, match.index + precol.length) +
  parameterValue +
  replacedTemplates.substring(match.index + precol.length + parameterName.length + 2);
- };
- while ((match = /^(?<precol>.*){(?<parameterName>\w+)}(.*)/m /* <- Not global */
- .exec(replacedTemplates))) {
- _loop_1();
  }
  // [💫] Check if there are parameters that are not closed properly
  if (/{\w+$/.test(replacedTemplates)) {
@@ -831,13 +758,13 @@ function countCharacters(text) {
  *
  * @public exported from `@promptbook/utils`
  */
- var CHARACTERS_PER_STANDARD_LINE = 63;
+ const CHARACTERS_PER_STANDARD_LINE = 63;
  /**
  * Number of lines per standard A4 page with 11pt Arial font size and standard margins and spacing.
  *
  * @public exported from `@promptbook/utils`
  */
- var LINES_PER_STANDARD_PAGE = 44;
+ const LINES_PER_STANDARD_PAGE = 44;
  /**
  * TODO: [🧠] Should be this `constants.ts` or `config.ts`?
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -853,8 +780,8 @@ var LINES_PER_STANDARD_PAGE = 44;
  function countLines(text) {
  text = text.replace('\r\n', '\n');
  text = text.replace('\r', '\n');
- var lines = text.split('\n');
- return lines.reduce(function (count, line) { return count + Math.ceil(line.length / CHARACTERS_PER_STANDARD_LINE); }, 0);
+ const lines = text.split('\n');
+ return lines.reduce((count, line) => count + Math.ceil(line.length / CHARACTERS_PER_STANDARD_LINE), 0);
  }

  /**
@@ -874,7 +801,7 @@ function countPages(text) {
  * @public exported from `@promptbook/utils`
  */
  function countParagraphs(text) {
- return text.split(/\n\s*\n/).filter(function (paragraph) { return paragraph.trim() !== ''; }).length;
+ return text.split(/\n\s*\n/).filter((paragraph) => paragraph.trim() !== '').length;
  }

  /**
@@ -883,7 +810,7 @@ function countParagraphs(text) {
  *
  * @public exported from `@promptbook/utils`
  */
  function splitIntoSentences(text) {
- return text.split(/[.!?]+/).filter(function (sentence) { return sentence.trim() !== ''; });
+ return text.split(/[.!?]+/).filter((sentence) => sentence.trim() !== '');
  }
  /**
  * Counts number of sentences in the text
@@ -894,7 +821,7 @@ function countSentences(text) {
  return splitIntoSentences(text).length;
  }

- var defaultDiacriticsRemovalMap = [
+ const defaultDiacriticsRemovalMap = [
  {
  base: 'A',
  letters: '\u0041\u24B6\uFF21\u00C0\u00C1\u00C2\u1EA6\u1EA4\u1EAA\u1EA8\u00C3\u0100\u0102\u1EB0\u1EAE\u1EB4\u1EB2\u0226\u01E0\u00C4\u01DE\u1EA2\u00C5\u01FA\u01CD\u0200\u0202\u1EA0\u1EAC\u1EB6\u1E00\u0104\u023A\u2C6F',
@@ -1113,12 +1040,12 @@ var defaultDiacriticsRemovalMap = [
  *
  * @public exported from `@promptbook/utils`
  */
- var DIACRITIC_VARIANTS_LETTERS = {};
+ const DIACRITIC_VARIANTS_LETTERS = {};
  // tslint:disable-next-line: prefer-for-of
- for (var i = 0; i < defaultDiacriticsRemovalMap.length; i++) {
- var letters = defaultDiacriticsRemovalMap[i].letters;
+ for (let i = 0; i < defaultDiacriticsRemovalMap.length; i++) {
+ const letters = defaultDiacriticsRemovalMap[i].letters;
  // tslint:disable-next-line: prefer-for-of
- for (var j = 0; j < letters.length; j++) {
+ for (let j = 0; j < letters.length; j++) {
  DIACRITIC_VARIANTS_LETTERS[letters[j]] = defaultDiacriticsRemovalMap[i].base;
  }
  }
@@ -1147,7 +1074,7 @@ for (var i = 0; i < defaultDiacriticsRemovalMap.length; i++) {
  */
  function removeDiacritics(input) {
  /*eslint no-control-regex: "off"*/
- return input.replace(/[^\u0000-\u007E]/g, function (a) {
+ return input.replace(/[^\u0000-\u007E]/g, (a) => {
  return DIACRITIC_VARIANTS_LETTERS[a] || a;
  });
  }
@@ -1163,7 +1090,9 @@ function removeDiacritics(input) {
  function countWords(text) {
  text = text.replace(/[\p{Extended_Pictographic}]/gu, 'a');
  text = removeDiacritics(text);
- return text.split(/[^a-zа-я0-9]+/i).filter(function (word) { return word.length > 0; }).length;
+ // Add spaces before uppercase letters preceded by lowercase letters (for camelCase)
+ text = text.replace(/([a-z])([A-Z])/g, '$1 $2');
+ return text.split(/[^a-zа-я0-9]+/i).filter((word) => word.length > 0).length;
  }

  /**
@@ -1196,7 +1125,7 @@ function uncertainNumber(value) {
  if (value === null || value === undefined || Number.isNaN(value)) {
  return { value: 0, isUncertain: true };
  }
- return { value: value };
+ return { value };
  }

  /**
@@ -1205,7 +1134,7 @@ function uncertainNumber(value) {
  * @private within the repository, used only as internal helper for `OPENAI_MODELS`
  */
  function computeUsage(value) {
- var _a = __read(value.split(' / '), 2), price = _a[0], tokens = _a[1];
+ const [price, tokens] = value.split(' / ');
  return parseFloat(price.replace('$', '')) / parseFloat(tokens.replace('M tokens', '')) / 1000000;
  }

@@ -1218,7 +1147,7 @@ function computeUsage(value) {
  * @see https://openai.com/api/pricing/
  * @public exported from `@promptbook/openai`
  */
- var OPENAI_MODELS = exportJson({
+ const OPENAI_MODELS = exportJson({
  name: 'OPENAI_MODELS',
  value: [
  /*/
@@ -1239,8 +1168,8 @@ var OPENAI_MODELS = exportJson({
  modelTitle: 'davinci-002',
  modelName: 'davinci-002',
  pricing: {
- prompt: computeUsage("$2.00 / 1M tokens"),
- output: computeUsage("$2.00 / 1M tokens"), // <- not sure
+ prompt: computeUsage(`$2.00 / 1M tokens`),
+ output: computeUsage(`$2.00 / 1M tokens`), // <- not sure
  },
  },
  /**/
@@ -1256,8 +1185,8 @@ var OPENAI_MODELS = exportJson({
  modelTitle: 'gpt-3.5-turbo-16k',
  modelName: 'gpt-3.5-turbo-16k',
  pricing: {
- prompt: computeUsage("$3.00 / 1M tokens"),
- output: computeUsage("$4.00 / 1M tokens"),
+ prompt: computeUsage(`$3.00 / 1M tokens`),
+ output: computeUsage(`$4.00 / 1M tokens`),
  },
  },
  /**/
@@ -1279,8 +1208,8 @@ var OPENAI_MODELS = exportJson({
  modelTitle: 'gpt-4',
  modelName: 'gpt-4',
  pricing: {
- prompt: computeUsage("$30.00 / 1M tokens"),
- output: computeUsage("$60.00 / 1M tokens"),
+ prompt: computeUsage(`$30.00 / 1M tokens`),
+ output: computeUsage(`$60.00 / 1M tokens`),
  },
  },
  /**/
@@ -1290,8 +1219,8 @@ var OPENAI_MODELS = exportJson({
  modelTitle: 'gpt-4-32k',
  modelName: 'gpt-4-32k',
  pricing: {
- prompt: computeUsage("$60.00 / 1M tokens"),
- output: computeUsage("$120.00 / 1M tokens"),
+ prompt: computeUsage(`$60.00 / 1M tokens`),
+ output: computeUsage(`$120.00 / 1M tokens`),
  },
  },
  /**/
@@ -1312,8 +1241,8 @@ var OPENAI_MODELS = exportJson({
  modelTitle: 'gpt-4-turbo-2024-04-09',
  modelName: 'gpt-4-turbo-2024-04-09',
  pricing: {
- prompt: computeUsage("$10.00 / 1M tokens"),
- output: computeUsage("$30.00 / 1M tokens"),
+ prompt: computeUsage(`$10.00 / 1M tokens`),
+ output: computeUsage(`$30.00 / 1M tokens`),
  },
  },
  /**/
@@ -1323,8 +1252,8 @@ var OPENAI_MODELS = exportJson({
  modelTitle: 'gpt-3.5-turbo-1106',
  modelName: 'gpt-3.5-turbo-1106',
  pricing: {
- prompt: computeUsage("$1.00 / 1M tokens"),
- output: computeUsage("$2.00 / 1M tokens"),
+ prompt: computeUsage(`$1.00 / 1M tokens`),
+ output: computeUsage(`$2.00 / 1M tokens`),
  },
  },
  /**/
@@ -1334,8 +1263,8 @@ var OPENAI_MODELS = exportJson({
  modelTitle: 'gpt-4-turbo',
  modelName: 'gpt-4-turbo',
  pricing: {
- prompt: computeUsage("$10.00 / 1M tokens"),
- output: computeUsage("$30.00 / 1M tokens"),
+ prompt: computeUsage(`$10.00 / 1M tokens`),
+ output: computeUsage(`$30.00 / 1M tokens`),
  },
  },
  /**/
@@ -1345,8 +1274,8 @@ var OPENAI_MODELS = exportJson({
  modelTitle: 'gpt-3.5-turbo-instruct-0914',
  modelName: 'gpt-3.5-turbo-instruct-0914',
  pricing: {
- prompt: computeUsage("$1.50 / 1M tokens"),
- output: computeUsage("$2.00 / 1M tokens"), // <- For gpt-3.5-turbo-instruct
+ prompt: computeUsage(`$1.50 / 1M tokens`),
+ output: computeUsage(`$2.00 / 1M tokens`), // <- For gpt-3.5-turbo-instruct
  },
  },
  /**/
@@ -1356,8 +1285,8 @@ var OPENAI_MODELS = exportJson({
  modelTitle: 'gpt-3.5-turbo-instruct',
  modelName: 'gpt-3.5-turbo-instruct',
  pricing: {
- prompt: computeUsage("$1.50 / 1M tokens"),
- output: computeUsage("$2.00 / 1M tokens"),
+ prompt: computeUsage(`$1.50 / 1M tokens`),
+ output: computeUsage(`$2.00 / 1M tokens`),
  },
  },
  /**/
@@ -1373,8 +1302,8 @@ var OPENAI_MODELS = exportJson({
  modelTitle: 'gpt-3.5-turbo',
  modelName: 'gpt-3.5-turbo',
  pricing: {
- prompt: computeUsage("$3.00 / 1M tokens"),
- output: computeUsage("$6.00 / 1M tokens"), // <- Not sure, refer to gpt-3.5-turbo in Fine-tuning models
+ prompt: computeUsage(`$3.00 / 1M tokens`),
+ output: computeUsage(`$6.00 / 1M tokens`), // <- Not sure, refer to gpt-3.5-turbo in Fine-tuning models
  },
  },
  /**/
@@ -1384,8 +1313,8 @@ var OPENAI_MODELS = exportJson({
  modelTitle: 'gpt-3.5-turbo-0301',
  modelName: 'gpt-3.5-turbo-0301',
  pricing: {
- prompt: computeUsage("$1.50 / 1M tokens"),
- output: computeUsage("$2.00 / 1M tokens"),
+ prompt: computeUsage(`$1.50 / 1M tokens`),
+ output: computeUsage(`$2.00 / 1M tokens`),
  },
  },
  /**/
@@ -1395,8 +1324,8 @@ var OPENAI_MODELS = exportJson({
  modelTitle: 'babbage-002',
  modelName: 'babbage-002',
  pricing: {
- prompt: computeUsage("$0.40 / 1M tokens"),
- output: computeUsage("$0.40 / 1M tokens"), // <- Not sure
+ prompt: computeUsage(`$0.40 / 1M tokens`),
+ output: computeUsage(`$0.40 / 1M tokens`), // <- Not sure
  },
  },
  /**/
@@ -1406,8 +1335,8 @@ var OPENAI_MODELS = exportJson({
  modelTitle: 'gpt-4-1106-preview',
  modelName: 'gpt-4-1106-preview',
  pricing: {
- prompt: computeUsage("$10.00 / 1M tokens"),
- output: computeUsage("$30.00 / 1M tokens"),
+ prompt: computeUsage(`$10.00 / 1M tokens`),
+ output: computeUsage(`$30.00 / 1M tokens`),
  },
  },
  /**/
@@ -1417,8 +1346,8 @@ var OPENAI_MODELS = exportJson({
  modelTitle: 'gpt-4-0125-preview',
  modelName: 'gpt-4-0125-preview',
  pricing: {
- prompt: computeUsage("$10.00 / 1M tokens"),
- output: computeUsage("$30.00 / 1M tokens"),
+ prompt: computeUsage(`$10.00 / 1M tokens`),
+ output: computeUsage(`$30.00 / 1M tokens`),
  },
  },
  /**/
@@ -1434,8 +1363,8 @@ var OPENAI_MODELS = exportJson({
  modelTitle: 'gpt-3.5-turbo-0125',
  modelName: 'gpt-3.5-turbo-0125',
  pricing: {
- prompt: computeUsage("$0.50 / 1M tokens"),
- output: computeUsage("$1.50 / 1M tokens"),
+ prompt: computeUsage(`$0.50 / 1M tokens`),
+ output: computeUsage(`$1.50 / 1M tokens`),
  },
  },
  /**/
@@ -1445,8 +1374,8 @@ var OPENAI_MODELS = exportJson({
  modelTitle: 'gpt-4-turbo-preview',
  modelName: 'gpt-4-turbo-preview',
  pricing: {
- prompt: computeUsage("$10.00 / 1M tokens"),
- output: computeUsage("$30.00 / 1M tokens"), // <- Not sure, just for gpt-4-turbo
+ prompt: computeUsage(`$10.00 / 1M tokens`),
+ output: computeUsage(`$30.00 / 1M tokens`), // <- Not sure, just for gpt-4-turbo
  },
  },
  /**/
@@ -1456,7 +1385,7 @@ var OPENAI_MODELS = exportJson({
  modelTitle: 'text-embedding-3-large',
  modelName: 'text-embedding-3-large',
  pricing: {
- prompt: computeUsage("$0.13 / 1M tokens"),
+ prompt: computeUsage(`$0.13 / 1M tokens`),
  // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
  output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
  },
@@ -1468,7 +1397,7 @@ var OPENAI_MODELS = exportJson({
  modelTitle: 'text-embedding-3-small',
  modelName: 'text-embedding-3-small',
  pricing: {
- prompt: computeUsage("$0.02 / 1M tokens"),
+ prompt: computeUsage(`$0.02 / 1M tokens`),
  // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
  output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
  },
@@ -1480,8 +1409,8 @@ var OPENAI_MODELS = exportJson({
  modelTitle: 'gpt-3.5-turbo-0613',
  modelName: 'gpt-3.5-turbo-0613',
  pricing: {
- prompt: computeUsage("$1.50 / 1M tokens"),
- output: computeUsage("$2.00 / 1M tokens"),
+ prompt: computeUsage(`$1.50 / 1M tokens`),
+ output: computeUsage(`$2.00 / 1M tokens`),
  },
  },
  /**/
@@ -1491,7 +1420,7 @@ var OPENAI_MODELS = exportJson({
  modelTitle: 'text-embedding-ada-002',
  modelName: 'text-embedding-ada-002',
  pricing: {
- prompt: computeUsage("$0.1 / 1M tokens"),
+ prompt: computeUsage(`$0.1 / 1M tokens`),
  // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
  output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
  },
@@ -1521,8 +1450,8 @@ var OPENAI_MODELS = exportJson({
  modelTitle: 'gpt-4o-2024-05-13',
  modelName: 'gpt-4o-2024-05-13',
  pricing: {
- prompt: computeUsage("$5.00 / 1M tokens"),
- output: computeUsage("$15.00 / 1M tokens"),
+ prompt: computeUsage(`$5.00 / 1M tokens`),
+ output: computeUsage(`$15.00 / 1M tokens`),
  },
  //TODO: [main] !!3 Add gpt-4o-mini-2024-07-18 and all others to be up to date
  },
@@ -1533,8 +1462,8 @@ var OPENAI_MODELS = exportJson({
  modelTitle: 'gpt-4o',
  modelName: 'gpt-4o',
  pricing: {
- prompt: computeUsage("$5.00 / 1M tokens"),
- output: computeUsage("$15.00 / 1M tokens"),
+ prompt: computeUsage(`$5.00 / 1M tokens`),
+ output: computeUsage(`$15.00 / 1M tokens`),
  },
  },
  /**/
@@ -1544,8 +1473,8 @@ var OPENAI_MODELS = exportJson({
  modelTitle: 'o1-preview',
  modelName: 'o1-preview',
  pricing: {
- prompt: computeUsage("$15.00 / 1M tokens"),
- output: computeUsage("$60.00 / 1M tokens"),
+ prompt: computeUsage(`$15.00 / 1M tokens`),
+ output: computeUsage(`$60.00 / 1M tokens`),
  },
  },
  /**/
@@ -1556,8 +1485,8 @@ var OPENAI_MODELS = exportJson({
  modelName: 'o1-preview-2024-09-12',
  // <- TODO: [💩] Some better system to organize theese date suffixes and versions
  pricing: {
- prompt: computeUsage("$15.00 / 1M tokens"),
- output: computeUsage("$60.00 / 1M tokens"),
+ prompt: computeUsage(`$15.00 / 1M tokens`),
+ output: computeUsage(`$60.00 / 1M tokens`),
  },
  },
  /**/
@@ -1567,8 +1496,8 @@ var OPENAI_MODELS = exportJson({
  modelTitle: 'o1-mini',
  modelName: 'o1-mini',
  pricing: {
- prompt: computeUsage("$3.00 / 1M tokens"),
- output: computeUsage("$12.00 / 1M tokens"),
+ prompt: computeUsage(`$3.00 / 1M tokens`),
+ output: computeUsage(`$12.00 / 1M tokens`),
  },
  },
  /**/
@@ -1578,8 +1507,8 @@ var OPENAI_MODELS = exportJson({
  modelTitle: 'o1',
  modelName: 'o1',
  pricing: {
- prompt: computeUsage("$3.00 / 1M tokens"),
- output: computeUsage("$12.00 / 1M tokens"),
+ prompt: computeUsage(`$3.00 / 1M tokens`),
+ output: computeUsage(`$12.00 / 1M tokens`),
  // <- TODO: !! Unsure, check the pricing
  },
  },
@@ -1590,8 +1519,8 @@ var OPENAI_MODELS = exportJson({
  modelTitle: 'o3-mini',
  modelName: 'o3-mini',
  pricing: {
- prompt: computeUsage("$3.00 / 1M tokens"),
- output: computeUsage("$12.00 / 1M tokens"),
+ prompt: computeUsage(`$3.00 / 1M tokens`),
+ output: computeUsage(`$12.00 / 1M tokens`),
  // <- TODO: !! Unsure, check the pricing
  },
  },
@@ -1602,8 +1531,8 @@ var OPENAI_MODELS = exportJson({
  modelTitle: 'o1-mini-2024-09-12',
  modelName: 'o1-mini-2024-09-12',
  pricing: {
- prompt: computeUsage("$3.00 / 1M tokens"),
- output: computeUsage("$12.00 / 1M tokens"),
+ prompt: computeUsage(`$3.00 / 1M tokens`),
+ output: computeUsage(`$12.00 / 1M tokens`),
  },
  },
  /**/
@@ -1613,8 +1542,8 @@ var OPENAI_MODELS = exportJson({
  modelTitle: 'gpt-3.5-turbo-16k-0613',
  modelName: 'gpt-3.5-turbo-16k-0613',
  pricing: {
- prompt: computeUsage("$3.00 / 1M tokens"),
- output: computeUsage("$4.00 / 1M tokens"),
+ prompt: computeUsage(`$3.00 / 1M tokens`),
+ output: computeUsage(`$4.00 / 1M tokens`),
  },
  },
  /**/
@@ -1653,10 +1582,10 @@ resultContent, rawResponse) {
  if (((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.prompt_tokens) === undefined) {
  throw new PipelineExecutionError('In OpenAI response `usage.prompt_tokens` not defined');
  }
- var inputTokens = rawResponse.usage.prompt_tokens;
- var outputTokens = ((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completion_tokens) || 0;
- var modelInfo = OPENAI_MODELS.find(function (model) { return model.modelName === rawResponse.model; });
- var price;
+ const inputTokens = rawResponse.usage.prompt_tokens;
+ const outputTokens = ((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completion_tokens) || 0;
+ const modelInfo = OPENAI_MODELS.find((model) => model.modelName === rawResponse.model);
+ let price;
  if (modelInfo === undefined || modelInfo.pricing === undefined) {
  price = uncertainNumber();
  }
@@ -1664,9 +1593,15 @@ resultContent, rawResponse) {
  price = uncertainNumber(inputTokens * modelInfo.pricing.prompt + outputTokens * modelInfo.pricing.output);
  }
  return {
- price: price,
- input: __assign({ tokensCount: uncertainNumber(rawResponse.usage.prompt_tokens) }, computeUsageCounts(promptContent)),
- output: __assign({ tokensCount: uncertainNumber(outputTokens) }, computeUsageCounts(resultContent)),
+ price,
+ input: {
+ tokensCount: uncertainNumber(rawResponse.usage.prompt_tokens),
+ ...computeUsageCounts(promptContent),
+ },
+ output: {
+ tokensCount: uncertainNumber(outputTokens),
+ ...computeUsageCounts(resultContent),
+ },
  };
  }
  /**
@@ -1678,75 +1613,55 @@ resultContent, rawResponse) {
  *
  * @public exported from `@promptbook/openai`
  */
- var OpenAiExecutionTools = /** @class */ (function () {
+ class OpenAiExecutionTools {
  /**
  * Creates OpenAI Execution Tools.
  *
  * @param options which are relevant are directly passed to the OpenAI client
  */
- function OpenAiExecutionTools(options) {
+ constructor(options) {
  this.options = options;
  /**
  * OpenAI API client.
  */
  this.client = null;
  }
- Object.defineProperty(OpenAiExecutionTools.prototype, "title", {
- get: function () {
- return 'OpenAI';
- },
- enumerable: false,
- configurable: true
- });
- Object.defineProperty(OpenAiExecutionTools.prototype, "description", {
- get: function () {
- return 'Use all models provided by OpenAI';
- },
- enumerable: false,
- configurable: true
- });
- OpenAiExecutionTools.prototype.getClient = function () {
- return __awaiter(this, void 0, void 0, function () {
- var openAiOptions;
- return __generator(this, function (_a) {
- if (this.client === null) {
- openAiOptions = __assign({}, this.options);
- delete openAiOptions.isVerbose;
- delete openAiOptions.userId;
- this.client = new OpenAI(openAiOptions);
- }
- return [2 /*return*/, this.client];
- });
- });
- };
+ get title() {
+ return 'OpenAI';
+ }
+ get description() {
+ return 'Use all models provided by OpenAI';
+ }
+ async getClient() {
+ if (this.client === null) {
+ // Note: Passing only OpenAI relevant options to OpenAI constructor
+ const openAiOptions = { ...this.options };
+ delete openAiOptions.isVerbose;
+ delete openAiOptions.userId;
+ this.client = new OpenAI(openAiOptions);
+ }
+ return this.client;
+ }
  /**
  * Create (sub)tools for calling OpenAI API Assistants
  *
  * @param assistantId Which assistant to use
  * @returns Tools for calling OpenAI API Assistants with same token
  */
- OpenAiExecutionTools.prototype.createAssistantSubtools = function (assistantId) {
- return new OpenAiAssistantExecutionTools(__assign(__assign({}, this.options), { assistantId: assistantId }));
- };
+ createAssistantSubtools(assistantId) {
+ return new OpenAiAssistantExecutionTools({ ...this.options, assistantId });
+ }
  /**
  * Check the `options` passed to `constructor`
  */
- OpenAiExecutionTools.prototype.checkConfiguration = function () {
- return __awaiter(this, void 0, void 0, function () {
- return __generator(this, function (_a) {
- switch (_a.label) {
- case 0: return [4 /*yield*/, this.getClient()];
- case 1:
- _a.sent();
- return [2 /*return*/];
- }
- });
- });
- };
+ async checkConfiguration() {
+ await this.getClient();
+ // TODO: [🎍] Do here a real check that API is online, working and API key is correct
+ }
  /**
  * List all available OpenAI models that can be used
  */
- OpenAiExecutionTools.prototype.listModels = function () {
+ listModels() {
  /*
  Note: Dynamic lising of the models
  const models = await this.openai.models.list({});
@@ -1755,301 +1670,273 @@ var OpenAiExecutionTools = /** @class */ (function () {
1755
1670
  console.log(models.data);
1756
1671
  */
1757
1672
  return OPENAI_MODELS;
1758
- };
1673
+ }
1759
1674
  /**
1760
1675
  * Calls OpenAI API to use a chat model.
1761
1676
  */
1762
- OpenAiExecutionTools.prototype.callChatModel = function (prompt) {
1677
+ async callChatModel(prompt) {
1763
1678
  var _a;
1764
- return __awaiter(this, void 0, void 0, function () {
1765
- var content, parameters, modelRequirements, format, client, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
1766
- var _this = this;
1767
- return __generator(this, function (_b) {
1768
- switch (_b.label) {
1769
- case 0:
1770
- if (this.options.isVerbose) {
1771
- console.info('💬 OpenAI callChatModel call', { prompt: prompt });
1772
- }
1773
- content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements, format = prompt.format;
1774
- return [4 /*yield*/, this.getClient()];
1775
- case 1:
1776
- client = _b.sent();
1777
- // TODO: [] Use here more modelRequirements
1778
- if (modelRequirements.modelVariant !== 'CHAT') {
1779
- throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
1780
- }
1781
- modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
1782
- modelSettings = {
1783
- model: modelName,
1784
- max_tokens: modelRequirements.maxTokens,
1785
- // <- TODO: [🌾] Make some global max cap for maxTokens
1786
- temperature: modelRequirements.temperature,
1787
- // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
1788
- // <- Note: [🧆]
1789
- };
1790
- if (format === 'JSON') {
1791
- modelSettings.response_format = {
1792
- type: 'json_object',
1793
- };
1794
- }
1795
- rawPromptContent = templateParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
1796
- rawRequest = __assign(__assign({}, modelSettings), { messages: __spreadArray(__spreadArray([], __read((modelRequirements.systemMessage === undefined
1797
- ? []
1798
- : [
1799
- {
1800
- role: 'system',
1801
- content: modelRequirements.systemMessage,
1802
- },
1803
- ])), false), [
1804
- {
1805
- role: 'user',
1806
- content: rawPromptContent,
1807
- },
1808
- ], false), user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString() });
1809
- start = $getCurrentDate();
1810
- if (this.options.isVerbose) {
1811
- console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
1812
- }
1813
- return [4 /*yield*/, client.chat.completions.create(rawRequest).catch(function (error) {
1814
- if (_this.options.isVerbose) {
1815
- console.info(colors.bgRed('error'), error);
1816
- }
1817
- throw error;
1818
- })];
1819
- case 2:
1820
- rawResponse = _b.sent();
1821
- if (this.options.isVerbose) {
1822
- console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
1823
- }
1824
- if (!rawResponse.choices[0]) {
1825
- throw new PipelineExecutionError('No choises from OpenAI');
1826
- }
1827
- if (rawResponse.choices.length > 1) {
1828
- // TODO: This should maybe be only a warning
1829
- throw new PipelineExecutionError('More than one choice from OpenAI');
1830
- }
1831
- resultContent = rawResponse.choices[0].message.content;
1832
- // eslint-disable-next-line prefer-const
1833
- complete = $getCurrentDate();
1834
- usage = computeOpenAiUsage(content || '', resultContent || '', rawResponse);
1835
- if (resultContent === null) {
1836
- throw new PipelineExecutionError('No response message from OpenAI');
1837
- }
1838
- return [2 /*return*/, exportJson({
1839
- name: 'promptResult',
1840
- message: "Result of `OpenAiExecutionTools.callChatModel`",
1841
- order: [],
1842
- value: {
1843
- content: resultContent,
1844
- modelName: rawResponse.model || modelName,
1845
- timing: {
1846
- start: start,
1847
- complete: complete,
1848
- },
1849
- usage: usage,
1850
- rawPromptContent: rawPromptContent,
1851
- rawRequest: rawRequest,
1852
- rawResponse: rawResponse,
1853
- // <- [🗯]
1854
- },
1855
- })];
1856
- }
1857
- });
1679
+ if (this.options.isVerbose) {
1680
+ console.info('💬 OpenAI callChatModel call', { prompt });
1681
+ }
1682
+ const { content, parameters, modelRequirements, format } = prompt;
1683
+ const client = await this.getClient();
1684
+ // TODO: [☂] Use here more modelRequirements
1685
+ if (modelRequirements.modelVariant !== 'CHAT') {
1686
+ throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
1687
+ }
1688
+ const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
1689
+ const modelSettings = {
1690
+ model: modelName,
1691
+ max_tokens: modelRequirements.maxTokens,
1692
+ // <- TODO: [🌾] Make some global max cap for maxTokens
1693
+ temperature: modelRequirements.temperature,
1694
+ // <- TODO: [🈁] Use `seed` here AND/OR use `isDeterministic` for entire execution tools
1695
+ // <- Note: [🧆]
1696
+ }; // <- TODO: [💩] Guard types better here
1697
+ if (format === 'JSON') {
1698
+ modelSettings.response_format = {
1699
+ type: 'json_object',
1700
+ };
1701
+ }
1702
+ // <- TODO: [🚸] Not all models are compatible with JSON mode
1703
+ // > 'response_format' of type 'json_object' is not supported with this model.
1704
+ const rawPromptContent = templateParameters(content, { ...parameters, modelName });
1705
+ const rawRequest = {
1706
+ ...modelSettings,
1707
+ messages: [
1708
+ ...(modelRequirements.systemMessage === undefined
1709
+ ? []
1710
+ : [
1711
+ {
1712
+ role: 'system',
1713
+ content: modelRequirements.systemMessage,
1714
+ },
1715
+ ]),
1716
+ {
1717
+ role: 'user',
1718
+ content: rawPromptContent,
1719
+ },
1720
+ ],
1721
+ user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString(),
1722
+ };
1723
+ const start = $getCurrentDate();
1724
+ let complete;
1725
+ if (this.options.isVerbose) {
1726
+ console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
1727
+ }
1728
+ const rawResponse = await client.chat.completions.create(rawRequest).catch((error) => {
1729
+ if (this.options.isVerbose) {
1730
+ console.info(colors.bgRed('error'), error);
1731
+ }
1732
+ throw error;
1858
1733
  });
1859
- };
1734
+ if (this.options.isVerbose) {
1735
+ console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
1736
+ }
1737
+ if (!rawResponse.choices[0]) {
1738
+ throw new PipelineExecutionError('No choices from OpenAI');
1739
+ }
1740
+ if (rawResponse.choices.length > 1) {
1741
+ // TODO: This should maybe be only a warning
1742
+ throw new PipelineExecutionError('More than one choice from OpenAI');
1743
+ }
1744
+ const resultContent = rawResponse.choices[0].message.content;
1745
+ // eslint-disable-next-line prefer-const
1746
+ complete = $getCurrentDate();
1747
+ const usage = computeOpenAiUsage(content || '', resultContent || '', rawResponse);
1748
+ if (resultContent === null) {
1749
+ throw new PipelineExecutionError('No response message from OpenAI');
1750
+ }
1751
+ return exportJson({
1752
+ name: 'promptResult',
1753
+ message: `Result of \`OpenAiExecutionTools.callChatModel\``,
1754
+ order: [],
1755
+ value: {
1756
+ content: resultContent,
1757
+ modelName: rawResponse.model || modelName,
1758
+ timing: {
1759
+ start,
1760
+ complete,
1761
+ },
1762
+ usage,
1763
+ rawPromptContent,
1764
+ rawRequest,
1765
+ rawResponse,
1766
+ // <- [🗯]
1767
+ },
1768
+ });
1769
+ }
1860
1770
  /**
1861
1771
  * Calls OpenAI API to use a completion model.
1862
1772
  */
1863
- OpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
1773
+ async callCompletionModel(prompt) {
1864
1774
  var _a;
1865
- return __awaiter(this, void 0, void 0, function () {
1866
- var content, parameters, modelRequirements, client, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
1867
- var _this = this;
1868
- return __generator(this, function (_b) {
1869
- switch (_b.label) {
1870
- case 0:
1871
- if (this.options.isVerbose) {
1872
- console.info('🖋 OpenAI callCompletionModel call', { prompt: prompt });
1873
- }
1874
- content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
1875
- return [4 /*yield*/, this.getClient()];
1876
- case 1:
1877
- client = _b.sent();
1878
- // TODO: [☂] Use here more modelRequirements
1879
- if (modelRequirements.modelVariant !== 'COMPLETION') {
1880
- throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
1881
- }
1882
- modelName = modelRequirements.modelName || this.getDefaultCompletionModel().modelName;
1883
- modelSettings = {
1884
- model: modelName,
1885
- max_tokens: modelRequirements.maxTokens || 2000,
1886
- // <- TODO: [🌾] Make some global max cap for maxTokens
1887
- temperature: modelRequirements.temperature,
1888
- // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
1889
- // <- Note: [🧆]
1890
- };
1891
- rawPromptContent = templateParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
1892
- rawRequest = __assign(__assign({}, modelSettings), { prompt: rawPromptContent, user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString() });
1893
- start = $getCurrentDate();
1894
- if (this.options.isVerbose) {
1895
- console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
1896
- }
1897
- return [4 /*yield*/, client.completions.create(rawRequest).catch(function (error) {
1898
- if (_this.options.isVerbose) {
1899
- console.info(colors.bgRed('error'), error);
1900
- }
1901
- throw error;
1902
- })];
1903
- case 2:
1904
- rawResponse = _b.sent();
1905
- if (this.options.isVerbose) {
1906
- console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
1907
- }
1908
- if (!rawResponse.choices[0]) {
1909
- throw new PipelineExecutionError('No choices from OpenAI');
1910
- }
1911
- if (rawResponse.choices.length > 1) {
1912
- // TODO: This should maybe be only a warning
1913
- throw new PipelineExecutionError('More than one choice from OpenAI');
1914
- }
1915
- resultContent = rawResponse.choices[0].text;
1916
- // eslint-disable-next-line prefer-const
1917
- complete = $getCurrentDate();
1918
- usage = computeOpenAiUsage(content || '', resultContent || '', rawResponse);
1919
- return [2 /*return*/, exportJson({
1920
- name: 'promptResult',
1921
- message: "Result of `OpenAiExecutionTools.callCompletionModel`",
1922
- order: [],
1923
- value: {
1924
- content: resultContent,
1925
- modelName: rawResponse.model || modelName,
1926
- timing: {
1927
- start: start,
1928
- complete: complete,
1929
- },
1930
- usage: usage,
1931
- rawPromptContent: rawPromptContent,
1932
- rawRequest: rawRequest,
1933
- rawResponse: rawResponse,
1934
- // <- [🗯]
1935
- },
1936
- })];
1937
- }
1938
- });
1775
+ if (this.options.isVerbose) {
1776
+ console.info('🖋 OpenAI callCompletionModel call', { prompt });
1777
+ }
1778
+ const { content, parameters, modelRequirements } = prompt;
1779
+ const client = await this.getClient();
1780
+ // TODO: [☂] Use here more modelRequirements
1781
+ if (modelRequirements.modelVariant !== 'COMPLETION') {
1782
+ throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
1783
+ }
1784
+ const modelName = modelRequirements.modelName || this.getDefaultCompletionModel().modelName;
1785
+ const modelSettings = {
1786
+ model: modelName,
1787
+ max_tokens: modelRequirements.maxTokens || 2000,
1788
+ // <- TODO: [🌾] Make some global max cap for maxTokens
1789
+ temperature: modelRequirements.temperature,
1790
+ // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
1791
+ // <- Note: [🧆]
1792
+ };
1793
+ const rawPromptContent = templateParameters(content, { ...parameters, modelName });
1794
+ const rawRequest = {
1795
+ ...modelSettings,
1796
+ prompt: rawPromptContent,
1797
+ user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString(),
1798
+ };
1799
+ const start = $getCurrentDate();
1800
+ let complete;
1801
+ if (this.options.isVerbose) {
1802
+ console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
1803
+ }
1804
+ const rawResponse = await client.completions.create(rawRequest).catch((error) => {
1805
+ if (this.options.isVerbose) {
1806
+ console.info(colors.bgRed('error'), error);
1807
+ }
1808
+ throw error;
1939
1809
  });
1940
- };
1810
+ if (this.options.isVerbose) {
1811
+ console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
1812
+ }
1813
+ if (!rawResponse.choices[0]) {
1814
+ throw new PipelineExecutionError('No choices from OpenAI');
1815
+ }
1816
+ if (rawResponse.choices.length > 1) {
1817
+ // TODO: This should maybe be only a warning
1818
+ throw new PipelineExecutionError('More than one choice from OpenAI');
1819
+ }
1820
+ const resultContent = rawResponse.choices[0].text;
1821
+ // eslint-disable-next-line prefer-const
1822
+ complete = $getCurrentDate();
1823
+ const usage = computeOpenAiUsage(content || '', resultContent || '', rawResponse);
1824
+ return exportJson({
1825
+ name: 'promptResult',
1826
+ message: `Result of \`OpenAiExecutionTools.callCompletionModel\``,
1827
+ order: [],
1828
+ value: {
1829
+ content: resultContent,
1830
+ modelName: rawResponse.model || modelName,
1831
+ timing: {
1832
+ start,
1833
+ complete,
1834
+ },
1835
+ usage,
1836
+ rawPromptContent,
1837
+ rawRequest,
1838
+ rawResponse,
1839
+ // <- [🗯]
1840
+ },
1841
+ });
1842
+ }
1941
1843
  /**
1942
1844
  * Calls OpenAI API to use an embedding model
1943
1845
  */
1944
- OpenAiExecutionTools.prototype.callEmbeddingModel = function (prompt) {
1945
- return __awaiter(this, void 0, void 0, function () {
1946
- var content, parameters, modelRequirements, client, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
1947
- var _this = this;
1948
- return __generator(this, function (_a) {
1949
- switch (_a.label) {
1950
- case 0:
1951
- if (this.options.isVerbose) {
1952
- console.info('🖋 OpenAI embedding call', { prompt: prompt });
1953
- }
1954
- content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
1955
- return [4 /*yield*/, this.getClient()];
1956
- case 1:
1957
- client = _a.sent();
1958
- // TODO: [☂] Use here more modelRequirements
1959
- if (modelRequirements.modelVariant !== 'EMBEDDING') {
1960
- throw new PipelineExecutionError('Use callEmbeddingModel only for EMBEDDING variant');
1961
- }
1962
- modelName = modelRequirements.modelName || this.getDefaultEmbeddingModel().modelName;
1963
- rawPromptContent = templateParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
1964
- rawRequest = {
1965
- input: rawPromptContent,
1966
- model: modelName,
1967
- };
1968
- start = $getCurrentDate();
1969
- if (this.options.isVerbose) {
1970
- console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
1971
- }
1972
- return [4 /*yield*/, client.embeddings.create(rawRequest).catch(function (error) {
1973
- if (_this.options.isVerbose) {
1974
- console.info(colors.bgRed('error'), error);
1975
- }
1976
- throw error;
1977
- })];
1978
- case 2:
1979
- rawResponse = _a.sent();
1980
- if (this.options.isVerbose) {
1981
- console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
1982
- }
1983
- if (rawResponse.data.length !== 1) {
1984
- throw new PipelineExecutionError("Expected exactly 1 data item in response, got ".concat(rawResponse.data.length));
1985
- }
1986
- resultContent = rawResponse.data[0].embedding;
1987
- // eslint-disable-next-line prefer-const
1988
- complete = $getCurrentDate();
1989
- usage = computeOpenAiUsage(content || '', '',
1990
- // <- Note: Embedding does not have result content
1991
- rawResponse);
1992
- return [2 /*return*/, exportJson({
1993
- name: 'promptResult',
1994
- message: "Result of `OpenAiExecutionTools.callEmbeddingModel`",
1995
- order: [],
1996
- value: {
1997
- content: resultContent,
1998
- modelName: rawResponse.model || modelName,
1999
- timing: {
2000
- start: start,
2001
- complete: complete,
2002
- },
2003
- usage: usage,
2004
- rawPromptContent: rawPromptContent,
2005
- rawRequest: rawRequest,
2006
- rawResponse: rawResponse,
2007
- // <- [🗯]
2008
- },
2009
- })];
2010
- }
2011
- });
1846
+ async callEmbeddingModel(prompt) {
1847
+ if (this.options.isVerbose) {
1848
+ console.info('🖋 OpenAI embedding call', { prompt });
1849
+ }
1850
+ const { content, parameters, modelRequirements } = prompt;
1851
+ const client = await this.getClient();
1852
+ // TODO: [☂] Use here more modelRequirements
1853
+ if (modelRequirements.modelVariant !== 'EMBEDDING') {
1854
+ throw new PipelineExecutionError('Use callEmbeddingModel only for EMBEDDING variant');
1855
+ }
1856
+ const modelName = modelRequirements.modelName || this.getDefaultEmbeddingModel().modelName;
1857
+ const rawPromptContent = templateParameters(content, { ...parameters, modelName });
1858
+ const rawRequest = {
1859
+ input: rawPromptContent,
1860
+ model: modelName,
1861
+ };
1862
+ const start = $getCurrentDate();
1863
+ let complete;
1864
+ if (this.options.isVerbose) {
1865
+ console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
1866
+ }
1867
+ const rawResponse = await client.embeddings.create(rawRequest).catch((error) => {
1868
+ if (this.options.isVerbose) {
1869
+ console.info(colors.bgRed('error'), error);
1870
+ }
1871
+ throw error;
2012
1872
  });
2013
- };
1873
+ if (this.options.isVerbose) {
1874
+ console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
1875
+ }
1876
+ if (rawResponse.data.length !== 1) {
1877
+ throw new PipelineExecutionError(`Expected exactly 1 data item in response, got ${rawResponse.data.length}`);
1878
+ }
1879
+ const resultContent = rawResponse.data[0].embedding;
1880
+ // eslint-disable-next-line prefer-const
1881
+ complete = $getCurrentDate();
1882
+ const usage = computeOpenAiUsage(content || '', '',
1883
+ // <- Note: Embedding does not have result content
1884
+ rawResponse);
1885
+ return exportJson({
1886
+ name: 'promptResult',
1887
+ message: `Result of \`OpenAiExecutionTools.callEmbeddingModel\``,
1888
+ order: [],
1889
+ value: {
1890
+ content: resultContent,
1891
+ modelName: rawResponse.model || modelName,
1892
+ timing: {
1893
+ start,
1894
+ complete,
1895
+ },
1896
+ usage,
1897
+ rawPromptContent,
1898
+ rawRequest,
1899
+ rawResponse,
1900
+ // <- [🗯]
1901
+ },
1902
+ });
1903
+ }
2014
1904
  // <- Note: [🤖] callXxxModel
2015
1905
  /**
2016
1906
  * Get the model that should be used as default
2017
1907
  */
2018
- OpenAiExecutionTools.prototype.getDefaultModel = function (defaultModelName) {
2019
- var model = OPENAI_MODELS.find(function (_a) {
2020
- var modelName = _a.modelName;
2021
- return modelName === defaultModelName;
2022
- });
1908
+ getDefaultModel(defaultModelName) {
1909
+ const model = OPENAI_MODELS.find(({ modelName }) => modelName === defaultModelName);
2023
1910
  if (model === undefined) {
2024
- throw new UnexpectedError(spaceTrim$1(function (block) {
2025
- return "\n Cannot find model in OpenAI models with name \"".concat(defaultModelName, "\" which should be used as default.\n\n Available models:\n ").concat(block(OPENAI_MODELS.map(function (_a) {
2026
- var modelName = _a.modelName;
2027
- return "- \"".concat(modelName, "\"");
2028
- }).join('\n')), "\n\n ");
2029
- }));
1911
+ throw new UnexpectedError(spaceTrim$1((block) => `
1912
+ Cannot find model in OpenAI models with name "${defaultModelName}" which should be used as default.
1913
+
1914
+ Available models:
1915
+ ${block(OPENAI_MODELS.map(({ modelName }) => `- "${modelName}"`).join('\n'))}
1916
+
1917
+ `));
2030
1918
  }
2031
1919
  return model;
2032
- };
1920
+ }
2033
1921
  /**
2034
1922
  * Default model for chat variant.
2035
1923
  */
2036
- OpenAiExecutionTools.prototype.getDefaultChatModel = function () {
1924
+ getDefaultChatModel() {
2037
1925
  return this.getDefaultModel('gpt-4o');
2038
- };
1926
+ }
2039
1927
  /**
2040
1928
  * Default model for completion variant.
2041
1929
  */
2042
- OpenAiExecutionTools.prototype.getDefaultCompletionModel = function () {
1930
+ getDefaultCompletionModel() {
2043
1931
  return this.getDefaultModel('gpt-3.5-turbo-instruct');
2044
- };
1932
+ }
2045
1933
  /**
2046
1934
  * Default model for embedding variant.
2047
1935
  */
2048
- OpenAiExecutionTools.prototype.getDefaultEmbeddingModel = function () {
1936
+ getDefaultEmbeddingModel() {
2049
1937
  return this.getDefaultModel('text-embedding-3-large');
2050
- };
2051
- return OpenAiExecutionTools;
2052
- }());
1938
+ }
1939
+ }
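
For orientation, a minimal usage sketch of the class rewritten above; this is illustrative only, not part of the published package content. It assumes Node.js with top-level await, an OPENAI_API_KEY environment variable, that the class is exported from `@promptbook/openai`, and that parameters are templated with the `{name}` syntax:

import { OpenAiExecutionTools } from '@promptbook/openai';

// Sketch: construct the tools and call a chat model, mirroring the fields
// that `callChatModel` destructures above.
const tools = new OpenAiExecutionTools({
    apiKey: process.env.OPENAI_API_KEY, // <- options are passed through to the OpenAI client
    isVerbose: true, // <- enables the rawRequest/rawResponse logging above
});

const result = await tools.callChatModel({
    content: 'Write a haiku about {topic}', // <- filled in via templateParameters
    parameters: { topic: 'the sea' },
    modelRequirements: {
        modelVariant: 'CHAT', // <- any other variant throws PipelineExecutionError
        systemMessage: 'You are a poet.',
        temperature: 0.7,
    },
    // format: 'JSON', // <- would request response_format json_object;
    //                      per the TODO above, not every model supports it
});

console.info(result.content);
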
2053
1940
  /**
2054
1941
  * TODO: [🧠][🧙‍♂️] Maybe there can be some wizzard for those who want to use just OpenAI
2055
1942
  * TODO: Maybe create some common util for callChatModel and callCompletionModel
@@ -2065,168 +1952,158 @@ var OpenAiExecutionTools = /** @class */ (function () {
2065
1952
  *
2066
1953
  * @public exported from `@promptbook/openai`
2067
1954
  */
2068
- var OpenAiAssistantExecutionTools = /** @class */ (function (_super) {
2069
- __extends(OpenAiAssistantExecutionTools, _super);
1955
+ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
2070
1956
  /**
2071
1957
  * Creates OpenAI Execution Tools.
2072
1958
  *
2073
1959
  * @param options which are relevant are directly passed to the OpenAI client
2074
1960
  */
2075
- function OpenAiAssistantExecutionTools(options) {
2076
- var _this = _super.call(this, options) || this;
2077
- _this.assistantId = options.assistantId;
2078
- return _this;
1961
+ constructor(options) {
1962
+ super(options);
1963
+ this.assistantId = options.assistantId;
1964
+ }
1965
+ get title() {
1966
+ return 'OpenAI Assistant';
1967
+ }
1968
+ get description() {
1969
+ return 'Use single assistant provided by OpenAI';
2079
1970
  }
2080
- Object.defineProperty(OpenAiAssistantExecutionTools.prototype, "title", {
2081
- get: function () {
2082
- return 'OpenAI Assistant';
2083
- },
2084
- enumerable: false,
2085
- configurable: true
2086
- });
2087
- Object.defineProperty(OpenAiAssistantExecutionTools.prototype, "description", {
2088
- get: function () {
2089
- return 'Use single assistant provided by OpenAI';
2090
- },
2091
- enumerable: false,
2092
- configurable: true
2093
- });
2094
1971
  /**
2095
1972
  * Calls OpenAI API to use a chat model.
2096
1973
  */
2097
- OpenAiAssistantExecutionTools.prototype.callChatModel = function (prompt) {
1974
+ async callChatModel(prompt) {
2098
1975
  var _a, _b, _c;
2099
- return __awaiter(this, void 0, void 0, function () {
2100
- var content, parameters, modelRequirements /*, format*/, client, _d, _e, key, rawPromptContent, rawRequest, start, complete, stream, rawResponse, resultContent, usage;
2101
- var e_1, _f;
2102
- var _this = this;
2103
- return __generator(this, function (_g) {
2104
- switch (_g.label) {
2105
- case 0:
2106
- if (this.options.isVerbose) {
2107
- console.info('💬 OpenAI callChatModel call', { prompt: prompt });
2108
- }
2109
- content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
2110
- return [4 /*yield*/, this.getClient()];
2111
- case 1:
2112
- client = _g.sent();
2113
- // TODO: [☂] Use here more modelRequirements
2114
- if (modelRequirements.modelVariant !== 'CHAT') {
2115
- throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
2116
- }
2117
- try {
2118
- // TODO: [👨‍👨‍👧‍👧] Remove:
2119
- for (_d = __values(['maxTokens', 'modelName', 'seed', 'temperature']), _e = _d.next(); !_e.done; _e = _d.next()) {
2120
- key = _e.value;
2121
- if (modelRequirements[key] !== undefined) {
2122
- throw new NotYetImplementedError("In `OpenAiAssistantExecutionTools` you cannot specify `".concat(key, "`"));
2123
- }
2124
- }
2125
- }
2126
- catch (e_1_1) { e_1 = { error: e_1_1 }; }
2127
- finally {
2128
- try {
2129
- if (_e && !_e.done && (_f = _d.return)) _f.call(_d);
2130
- }
2131
- finally { if (e_1) throw e_1.error; }
2132
- }
2133
- rawPromptContent = templateParameters(content, __assign(__assign({}, parameters), { modelName: 'assistant' }));
2134
- rawRequest = {
2135
- // TODO: [👨‍👨‍👧‍👧] ...modelSettings,
2136
- // TODO: [👨‍👨‍👧‍👧][🧠] What about system message for assistants, does it make sense - combination of OpenAI assistants with Promptbook Personas
2137
- assistant_id: this.assistantId,
2138
- thread: {
2139
- messages: [
2140
- // TODO: [🗯] Allow threads to be passed
2141
- { role: 'user', content: rawPromptContent },
2142
- ],
2143
- },
2144
- // <- TODO: Add user identification here> user: this.options.user,
2145
- };
2146
- start = $getCurrentDate();
2147
- if (this.options.isVerbose) {
2148
- console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
2149
- }
2150
- return [4 /*yield*/, client.beta.threads.createAndRunStream(rawRequest)];
2151
- case 2:
2152
- stream = _g.sent();
2153
- stream.on('connect', function () {
2154
- if (_this.options.isVerbose) {
2155
- console.info('connect', stream.currentEvent);
2156
- }
2157
- });
2158
- stream.on('messageDelta', function (messageDelta) {
2159
- var _a;
2160
- if (_this.options.isVerbose &&
2161
- messageDelta &&
2162
- messageDelta.content &&
2163
- messageDelta.content[0] &&
2164
- messageDelta.content[0].type === 'text') {
2165
- console.info('messageDelta', (_a = messageDelta.content[0].text) === null || _a === void 0 ? void 0 : _a.value);
2166
- }
2167
- // <- TODO: [🐚] Make streaming and running tasks working
2168
- });
2169
- stream.on('messageCreated', function (message) {
2170
- if (_this.options.isVerbose) {
2171
- console.info('messageCreated', message);
2172
- }
2173
- });
2174
- stream.on('messageDone', function (message) {
2175
- if (_this.options.isVerbose) {
2176
- console.info('messageDone', message);
2177
- }
2178
- });
2179
- return [4 /*yield*/, stream.finalMessages()];
2180
- case 3:
2181
- rawResponse = _g.sent();
2182
- if (this.options.isVerbose) {
2183
- console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
2184
- }
2185
- if (rawResponse.length !== 1) {
2186
- throw new PipelineExecutionError("There is NOT 1 BUT ".concat(rawResponse.length, " finalMessages from OpenAI"));
2187
- }
2188
- if (rawResponse[0].content.length !== 1) {
2189
- throw new PipelineExecutionError("There is NOT 1 BUT ".concat(rawResponse[0].content.length, " finalMessages content from OpenAI"));
2190
- }
2191
- if (((_a = rawResponse[0].content[0]) === null || _a === void 0 ? void 0 : _a.type) !== 'text') {
2192
- throw new PipelineExecutionError("There is NOT 'text' BUT ".concat((_b = rawResponse[0].content[0]) === null || _b === void 0 ? void 0 : _b.type, " finalMessages content type from OpenAI"));
2193
- }
2194
- resultContent = (_c = rawResponse[0].content[0]) === null || _c === void 0 ? void 0 : _c.text.value;
2195
- // <- TODO: [🧠] There are also annotations, maybe use them
2196
- // eslint-disable-next-line prefer-const
2197
- complete = $getCurrentDate();
2198
- usage = UNCERTAIN_USAGE;
2199
- // <- TODO: [🥘] Compute real usage for assistant
2200
- // ?> const usage = computeOpenAiUsage(content, resultContent || '', rawResponse);
2201
- if (resultContent === null) {
2202
- throw new PipelineExecutionError('No response message from OpenAI');
2203
- }
2204
- return [2 /*return*/, exportJson({
2205
- name: 'promptResult',
2206
- message: "Result of `OpenAiAssistantExecutionTools.callChatModel`",
2207
- order: [],
2208
- value: {
2209
- content: resultContent,
2210
- modelName: 'assistant',
2211
- // <- TODO: [🥘] Detect used model in assistant
2212
- // ?> model: rawResponse.model || modelName,
2213
- timing: {
2214
- start: start,
2215
- complete: complete,
2216
- },
2217
- usage: usage,
2218
- rawPromptContent: rawPromptContent,
2219
- rawRequest: rawRequest,
2220
- rawResponse: rawResponse,
2221
- // <- [🗯]
2222
- },
2223
- })];
2224
- }
2225
- });
1976
+ if (this.options.isVerbose) {
1977
+ console.info('💬 OpenAI callChatModel call', { prompt });
1978
+ }
1979
+ const { content, parameters, modelRequirements /*, format*/ } = prompt;
1980
+ const client = await this.getClient();
1981
+ // TODO: [☂] Use here more modelRequirements
1982
+ if (modelRequirements.modelVariant !== 'CHAT') {
1983
+ throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
1984
+ }
1985
+ // TODO: [👨‍👨‍👧‍👧] Remove:
1986
+ for (const key of ['maxTokens', 'modelName', 'seed', 'temperature']) {
1987
+ if (modelRequirements[key] !== undefined) {
1988
+ throw new NotYetImplementedError(`In \`OpenAiAssistantExecutionTools\` you cannot specify \`${key}\``);
1989
+ }
1990
+ }
1991
+ /*
1992
+ TODO: [👨‍👨‍👧‍👧] Implement all of this for Assistants
1993
+ const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
1994
+ const modelSettings = {
1995
+ model: modelName,
1996
+ max_tokens: modelRequirements.maxTokens,
1997
+ // <- TODO: [🌾] Make some global max cap for maxTokens
1998
+
1999
+ temperature: modelRequirements.temperature,
2000
+
2001
+ // <- TODO: [🈁] Use `seed` here AND/OR use `isDeterministic` for entire execution tools
2002
+ // <- Note: [🧆]
2003
+ } as OpenAI.Chat.Completions.CompletionCreateParamsNonStreaming; // <- TODO: Guard types better here
2004
+
2005
+ if (format === 'JSON') {
2006
+ modelSettings.response_format = {
2007
+ type: 'json_object',
2008
+ };
2009
+ }
2010
+ */
2011
+ // <- TODO: [🚸] Not all models are compatible with JSON mode
2012
+ // > 'response_format' of type 'json_object' is not supported with this model.
2013
+ const rawPromptContent = templateParameters(content, {
2014
+ ...parameters,
2015
+ modelName: 'assistant',
2016
+ // <- [🧠] What is the best value here
2226
2017
  });
2227
- };
2228
- return OpenAiAssistantExecutionTools;
2229
- }(OpenAiExecutionTools));
2018
+ const rawRequest = {
2019
+ // TODO: [👨‍👨‍👧‍👧] ...modelSettings,
2020
+ // TODO: [👨‍👨‍👧‍👧][🧠] What about system message for assistants, does it make sence - combination of OpenAI assistants with Promptbook Personas
2021
+ assistant_id: this.assistantId,
2022
+ thread: {
2023
+ messages: [
2024
+ // TODO: [🗯] Allow threads to be passed
2025
+ { role: 'user', content: rawPromptContent },
2026
+ ],
2027
+ },
2028
+ // <- TODO: Add user identification here> user: this.options.user,
2029
+ };
2030
+ const start = $getCurrentDate();
2031
+ let complete;
2032
+ if (this.options.isVerbose) {
2033
+ console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
2034
+ }
2035
+ const stream = await client.beta.threads.createAndRunStream(rawRequest);
2036
+ stream.on('connect', () => {
2037
+ if (this.options.isVerbose) {
2038
+ console.info('connect', stream.currentEvent);
2039
+ }
2040
+ });
2041
+ stream.on('messageDelta', (messageDelta) => {
2042
+ var _a;
2043
+ if (this.options.isVerbose &&
2044
+ messageDelta &&
2045
+ messageDelta.content &&
2046
+ messageDelta.content[0] &&
2047
+ messageDelta.content[0].type === 'text') {
2048
+ console.info('messageDelta', (_a = messageDelta.content[0].text) === null || _a === void 0 ? void 0 : _a.value);
2049
+ }
2050
+ // <- TODO: [🐚] Make streaming and running tasks working
2051
+ });
2052
+ stream.on('messageCreated', (message) => {
2053
+ if (this.options.isVerbose) {
2054
+ console.info('messageCreated', message);
2055
+ }
2056
+ });
2057
+ stream.on('messageDone', (message) => {
2058
+ if (this.options.isVerbose) {
2059
+ console.info('messageDone', message);
2060
+ }
2061
+ });
2062
+ const rawResponse = await stream.finalMessages();
2063
+ if (this.options.isVerbose) {
2064
+ console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
2065
+ }
2066
+ if (rawResponse.length !== 1) {
2067
+ throw new PipelineExecutionError(`There is NOT 1 BUT ${rawResponse.length} finalMessages from OpenAI`);
2068
+ }
2069
+ if (rawResponse[0].content.length !== 1) {
2070
+ throw new PipelineExecutionError(`There is NOT 1 BUT ${rawResponse[0].content.length} finalMessages content from OpenAI`);
2071
+ }
2072
+ if (((_a = rawResponse[0].content[0]) === null || _a === void 0 ? void 0 : _a.type) !== 'text') {
2073
+ throw new PipelineExecutionError(`There is NOT 'text' BUT ${(_b = rawResponse[0].content[0]) === null || _b === void 0 ? void 0 : _b.type} finalMessages content type from OpenAI`);
2074
+ }
2075
+ const resultContent = (_c = rawResponse[0].content[0]) === null || _c === void 0 ? void 0 : _c.text.value;
2076
+ // <- TODO: [🧠] There are also annotations, maybe use them
2077
+ // eslint-disable-next-line prefer-const
2078
+ complete = $getCurrentDate();
2079
+ const usage = UNCERTAIN_USAGE;
2080
+ // <- TODO: [🥘] Compute real usage for assistant
2081
+ // ?> const usage = computeOpenAiUsage(content, resultContent || '', rawResponse);
2082
+ if (resultContent === null) {
2083
+ throw new PipelineExecutionError('No response message from OpenAI');
2084
+ }
2085
+ return exportJson({
2086
+ name: 'promptResult',
2087
+ message: `Result of \`OpenAiAssistantExecutionTools.callChatModel\``,
2088
+ order: [],
2089
+ value: {
2090
+ content: resultContent,
2091
+ modelName: 'assistant',
2092
+ // <- TODO: [🥘] Detect used model in assistant
2093
+ // ?> model: rawResponse.model || modelName,
2094
+ timing: {
2095
+ start,
2096
+ complete,
2097
+ },
2098
+ usage,
2099
+ rawPromptContent,
2100
+ rawRequest,
2101
+ rawResponse,
2102
+ // <- [🗯]
2103
+ },
2104
+ });
2105
+ }
2106
+ }
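
A matching sketch for the assistant subclass (again illustrative, not package content), assuming an assistant already created on the OpenAI side; the 'asst_...' ID is a placeholder:

const assistantTools = new OpenAiAssistantExecutionTools({
    apiKey: process.env.OPENAI_API_KEY,
    assistantId: 'asst_...', // <- placeholder for a real assistant ID
});

const reply = await assistantTools.callChatModel({
    content: 'Summarize this: {document}',
    parameters: { document: '(some long text)' },
    modelRequirements: { modelVariant: 'CHAT' },
    // Note: maxTokens, modelName, seed or temperature here would throw
    // NotYetImplementedError, per the guard loop above.
});

console.info(reply.content);
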
2230
2107
  /**
2231
2108
  * TODO: [🧠][🧙‍♂️] Maybe there can be some wizzard for those who want to use just OpenAI
2232
2109
  * TODO: Maybe make custom OpenAiError
@@ -2239,10 +2116,10 @@ var OpenAiAssistantExecutionTools = /** @class */ (function (_super) {
2239
2116
  *
2240
2117
  * @public exported from `@promptbook/openai`
2241
2118
  */
2242
- var createOpenAiAssistantExecutionTools = Object.assign(function (options) {
2119
+ const createOpenAiAssistantExecutionTools = Object.assign((options) => {
2243
2120
  // TODO: [🧠][main] !!4 If browser, auto add `dangerouslyAllowBrowser`
2244
2121
  if (($isRunningInBrowser() || $isRunningInWebWorker()) && !options.dangerouslyAllowBrowser) {
2245
- options = __assign(__assign({}, options), { dangerouslyAllowBrowser: true });
2122
+ options = { ...options, dangerouslyAllowBrowser: true };
2246
2123
  }
2247
2124
  return new OpenAiAssistantExecutionTools(options);
2248
2125
  }, {
@@ -2259,10 +2136,10 @@ var createOpenAiAssistantExecutionTools = Object.assign(function (options) {
2259
2136
  *
2260
2137
  * @public exported from `@promptbook/openai`
2261
2138
  */
2262
- var createOpenAiExecutionTools = Object.assign(function (options) {
2139
+ const createOpenAiExecutionTools = Object.assign((options) => {
2263
2140
  // TODO: [🧠][main] !!4 If browser, auto add `dangerouslyAllowBrowser`
2264
2141
  if (($isRunningInBrowser() || $isRunningInWebWorker()) && !options.dangerouslyAllowBrowser) {
2265
- options = __assign(__assign({}, options), { dangerouslyAllowBrowser: true });
2142
+ options = { ...options, dangerouslyAllowBrowser: true };
2266
2143
  }
2267
2144
  return new OpenAiExecutionTools(options);
2268
2145
  }, {
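
Both factory rewrites in this diff are the same mechanical change; as an illustration (not package content), the browser guard above makes these two calls equivalent in a browser or web worker, `dangerouslyAllowBrowser` being the OpenAI SDK flag:

const viaFactory = createOpenAiExecutionTools({ apiKey: 'sk-...' });
// ...which, in a browser, constructs the same thing as:
const direct = new OpenAiExecutionTools({ apiKey: 'sk-...', dangerouslyAllowBrowser: true });
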
@@ -2295,46 +2172,35 @@ function $getGlobalScope() {
2295
2172
  * @public exported from `@promptbook/utils`
2296
2173
  */
2297
2174
  function normalizeTo_SCREAMING_CASE(text) {
2298
- var e_1, _a;
2299
- var charType;
2300
- var lastCharType = 'OTHER';
2301
- var normalizedName = '';
2302
- try {
2303
- for (var text_1 = __values(text), text_1_1 = text_1.next(); !text_1_1.done; text_1_1 = text_1.next()) {
2304
- var char = text_1_1.value;
2305
- var normalizedChar = void 0;
2306
- if (/^[a-z]$/.test(char)) {
2307
- charType = 'LOWERCASE';
2308
- normalizedChar = char.toUpperCase();
2309
- }
2310
- else if (/^[A-Z]$/.test(char)) {
2311
- charType = 'UPPERCASE';
2312
- normalizedChar = char;
2313
- }
2314
- else if (/^[0-9]$/.test(char)) {
2315
- charType = 'NUMBER';
2316
- normalizedChar = char;
2317
- }
2318
- else {
2319
- charType = 'OTHER';
2320
- normalizedChar = '_';
2321
- }
2322
- if (charType !== lastCharType &&
2323
- !(lastCharType === 'UPPERCASE' && charType === 'LOWERCASE') &&
2324
- !(lastCharType === 'NUMBER') &&
2325
- !(charType === 'NUMBER')) {
2326
- normalizedName += '_';
2327
- }
2328
- normalizedName += normalizedChar;
2329
- lastCharType = charType;
2175
+ let charType;
2176
+ let lastCharType = 'OTHER';
2177
+ let normalizedName = '';
2178
+ for (const char of text) {
2179
+ let normalizedChar;
2180
+ if (/^[a-z]$/.test(char)) {
2181
+ charType = 'LOWERCASE';
2182
+ normalizedChar = char.toUpperCase();
2330
2183
  }
2331
- }
2332
- catch (e_1_1) { e_1 = { error: e_1_1 }; }
2333
- finally {
2334
- try {
2335
- if (text_1_1 && !text_1_1.done && (_a = text_1.return)) _a.call(text_1);
2184
+ else if (/^[A-Z]$/.test(char)) {
2185
+ charType = 'UPPERCASE';
2186
+ normalizedChar = char;
2187
+ }
2188
+ else if (/^[0-9]$/.test(char)) {
2189
+ charType = 'NUMBER';
2190
+ normalizedChar = char;
2191
+ }
2192
+ else {
2193
+ charType = 'OTHER';
2194
+ normalizedChar = '_';
2195
+ }
2196
+ if (charType !== lastCharType &&
2197
+ !(lastCharType === 'UPPERCASE' && charType === 'LOWERCASE') &&
2198
+ !(lastCharType === 'NUMBER') &&
2199
+ !(charType === 'NUMBER')) {
2200
+ normalizedName += '_';
2336
2201
  }
2337
- finally { if (e_1) throw e_1.error; }
2202
+ normalizedName += normalizedChar;
2203
+ lastCharType = charType;
2338
2204
  }
2339
2205
  normalizedName = normalizedName.replace(/_+/g, '_');
2340
2206
  normalizedName = normalizedName.replace(/_?\/_?/g, '/');
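
A hand-traced illustration of the rewritten normalizer (not executed, and the tail of the function is cut off by the next hunk, so trimming of any leading separator is an assumption):

// Each character is classified LOWERCASE / UPPERCASE / NUMBER / OTHER;
// '_' is inserted on most class transitions (but not UPPERCASE -> LOWERCASE
// and never next to digits), lowercase is uppercased, and other characters
// become '_' before the '_+' collapse above.
// normalizeTo_SCREAMING_CASE('helloWorld')  // -> 'HELLO_WORLD' (assumed)
// normalizeTo_SCREAMING_CASE('hello world') // -> 'HELLO_WORLD' (assumed)
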
@@ -2371,27 +2237,27 @@ function normalizeTo_snake_case(text) {
2371
2237
  *
2372
2238
  * @private internal utility; only singleton instances of this class are exported
2373
2239
  */
2374
- var $Register = /** @class */ (function () {
2375
- function $Register(registerName) {
2240
+ class $Register {
2241
+ constructor(registerName) {
2376
2242
  this.registerName = registerName;
2377
- var storageName = "_promptbook_".concat(normalizeTo_snake_case(registerName));
2378
- var globalScope = $getGlobalScope();
2243
+ const storageName = `_promptbook_${normalizeTo_snake_case(registerName)}`;
2244
+ const globalScope = $getGlobalScope();
2379
2245
  if (globalScope[storageName] === undefined) {
2380
2246
  globalScope[storageName] = [];
2381
2247
  }
2382
2248
  else if (!Array.isArray(globalScope[storageName])) {
2383
- throw new UnexpectedError("Expected (global) ".concat(storageName, " to be an array, but got ").concat(typeof globalScope[storageName]));
2249
+ throw new UnexpectedError(`Expected (global) ${storageName} to be an array, but got ${typeof globalScope[storageName]}`);
2384
2250
  }
2385
2251
  this.storage = globalScope[storageName];
2386
2252
  }
2387
- $Register.prototype.list = function () {
2253
+ list() {
2388
2254
  // <- TODO: ReadonlyDeep<ReadonlyArray<TRegistered>>
2389
2255
  return this.storage;
2390
- };
2391
- $Register.prototype.register = function (registered) {
2392
- var packageName = registered.packageName, className = registered.className;
2393
- var existingRegistrationIndex = this.storage.findIndex(function (item) { return item.packageName === packageName && item.className === className; });
2394
- var existingRegistration = this.storage[existingRegistrationIndex];
2256
+ }
2257
+ register(registered) {
2258
+ const { packageName, className } = registered;
2259
+ const existingRegistrationIndex = this.storage.findIndex((item) => item.packageName === packageName && item.className === className);
2260
+ const existingRegistration = this.storage[existingRegistrationIndex];
2395
2261
  if (!existingRegistration) {
2396
2262
  this.storage.push(registered);
2397
2263
  }
@@ -2400,18 +2266,17 @@ var $Register = /** @class */ (function () {
2400
2266
  }
2401
2267
  return {
2402
2268
  registerName: this.registerName,
2403
- packageName: packageName,
2404
- className: className,
2269
+ packageName,
2270
+ className,
2405
2271
  get isDestroyed() {
2406
2272
  return false;
2407
2273
  },
2408
- destroy: function () {
2409
- throw new NotYetImplementedError("Registration to ".concat(this.registerName, " is permanent in this version of Promptbook"));
2274
+ destroy() {
2275
+ throw new NotYetImplementedError(`Registration to ${this.registerName} is permanent in this version of Promptbook`);
2410
2276
  },
2411
2277
  };
2412
- };
2413
- return $Register;
2414
- }());
2278
+ }
2279
+ }
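
A usage sketch for the register class (illustrative only, inferred from the methods shown above); the registered object needs at least `packageName` and `className`:

// Entries live on a global array, so every Promptbook build loaded into the
// process sees one shared list per register name.
const register = new $Register('llm_execution_tools_constructors');

const registration = register.register({
    packageName: '@promptbook/openai',
    className: 'OpenAiExecutionTools',
});

console.info(register.list()); // <- contains the entry just registered
// registration.destroy() throws NotYetImplementedError in this version.
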
2415
2280
 
2416
2281
  /**
2417
2282
  * @@@
@@ -2420,7 +2285,7 @@ var $Register = /** @class */ (function () {
2420
2285
  * @singleton Only one instance of each register is created per build, but there can be more @@@
2421
2286
  * @public exported from `@promptbook/core`
2422
2287
  */
2423
- var $llmToolsRegister = new $Register('llm_execution_tools_constructors');
2288
+ const $llmToolsRegister = new $Register('llm_execution_tools_constructors');
2424
2289
  /**
2425
2290
  * TODO: [®] DRY Register logic
2426
2291
  */
@@ -2434,7 +2299,7 @@ var $llmToolsRegister = new $Register('llm_execution_tools_constructors');
2434
2299
  * @public exported from `@promptbook/wizzard`
2435
2300
  * @public exported from `@promptbook/cli`
2436
2301
  */
2437
- var _OpenAiRegistration = $llmToolsRegister.register(createOpenAiExecutionTools);
2302
+ const _OpenAiRegistration = $llmToolsRegister.register(createOpenAiExecutionTools);
2438
2303
  /**
2439
2304
  * @@@ registration2
2440
2305
  *
@@ -2444,7 +2309,7 @@ var _OpenAiRegistration = $llmToolsRegister.register(createOpenAiExecutionTools)
2444
2309
  * @public exported from `@promptbook/wizzard`
2445
2310
  * @public exported from `@promptbook/cli`
2446
2311
  */
2447
- var _OpenAiAssistantRegistration = $llmToolsRegister.register(createOpenAiAssistantExecutionTools);
2312
+ const _OpenAiAssistantRegistration = $llmToolsRegister.register(createOpenAiAssistantExecutionTools);
2448
2313
  /**
2449
2314
  * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
2450
2315
  * Note: [💞] Ignore a discrepancy between file name and entity name