@promptbook/openai 0.79.0 → 0.80.0-1
This diff shows the contents of publicly available package versions as published to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions exactly as they appear in the public registries.
- package/README.md +4 -0
- package/esm/index.es.js +618 -520
- package/esm/index.es.js.map +1 -1
- package/esm/typings/books/index.d.ts +6 -6
- package/esm/typings/src/_packages/core.index.d.ts +8 -6
- package/esm/typings/src/_packages/types.index.d.ts +6 -0
- package/esm/typings/src/_packages/utils.index.d.ts +4 -0
- package/esm/typings/src/cli/cli-commands/runInteractiveChatbot.d.ts +32 -0
- package/esm/typings/src/commands/_common/getParserForCommand.d.ts +1 -1
- package/esm/typings/src/commands/_common/parseCommand.d.ts +1 -1
- package/esm/typings/src/commands/_common/stringifyCommand.d.ts +1 -1
- package/esm/typings/src/commands/_common/types/CommandParser.d.ts +3 -0
- package/esm/typings/src/config.d.ts +0 -25
- package/esm/typings/src/constants.d.ts +35 -0
- package/esm/typings/src/conversion/{pipelineStringToJson.d.ts → compilePipeline.d.ts} +3 -3
- package/esm/typings/src/conversion/pipelineJsonToString.d.ts +1 -0
- package/esm/typings/src/conversion/{pipelineStringToJsonSync.d.ts → precompilePipeline.d.ts} +4 -3
- package/esm/typings/src/high-level-abstractions/_common/HighLevelAbstraction.d.ts +20 -0
- package/esm/typings/src/high-level-abstractions/implicit-formfactor/ImplicitFormfactorHla.d.ts +10 -0
- package/esm/typings/src/high-level-abstractions/index.d.ts +44 -0
- package/esm/typings/src/high-level-abstractions/quick-chatbot/QuickChatbotHla.d.ts +10 -0
- package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +1 -1
- package/esm/typings/src/llm-providers/remote/startRemoteServer.d.ts +1 -1
- package/esm/typings/src/prepare/prepareTasks.d.ts +1 -0
- package/esm/typings/src/prepare/unpreparePipeline.d.ts +1 -0
- package/esm/typings/src/types/typeAliases.d.ts +1 -1
- package/esm/typings/src/utils/normalization/orderJson.d.ts +21 -0
- package/esm/typings/src/utils/normalization/orderJson.test.d.ts +4 -0
- package/esm/typings/src/utils/organization/keepTypeImported.d.ts +9 -0
- package/esm/typings/src/utils/serialization/$deepFreeze.d.ts +1 -1
- package/esm/typings/src/utils/serialization/checkSerializableAsJson.d.ts +20 -2
- package/esm/typings/src/utils/serialization/deepClone.test.d.ts +1 -0
- package/esm/typings/src/utils/serialization/exportJson.d.ts +29 -0
- package/esm/typings/src/utils/serialization/isSerializableAsJson.d.ts +2 -1
- package/package.json +2 -2
- package/umd/index.umd.js +618 -520
- package/umd/index.umd.js.map +1 -1
- package/esm/typings/src/utils/serialization/$asDeeplyFrozenSerializableJson.d.ts +0 -17
- /package/esm/typings/src/conversion/{pipelineStringToJson.test.d.ts → compilePipeline.test.d.ts} +0 -0
- /package/esm/typings/src/conversion/{pipelineStringToJsonSync.test.d.ts → precompilePipeline.test.d.ts} +0 -0
package/esm/index.es.js
CHANGED
@@ -14,7 +14,7 @@ var BOOK_LANGUAGE_VERSION = '1.0.0';
  *
  * @see https://github.com/webgptorg/promptbook
  */
-var PROMPTBOOK_ENGINE_VERSION = '0.
+var PROMPTBOOK_ENGINE_VERSION = '0.80.0-0';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -190,7 +190,7 @@ var PipelineExecutionError = /** @class */ (function (_super) {
 }(Error));
 
 /**
- *
+ * Freezes the given object and all its nested objects recursively
  *
  * Note: `$` is used to indicate that this function is not a pure function - it mutates given object
  * Note: This function mutates the object and returns the original (but mutated-deep-freezed) object
@@ -220,7 +220,8 @@ function $deepFreeze(objectValue) {
         }
         finally { if (e_1) throw e_1.error; }
     }
+    Object.freeze(objectValue);
+    return objectValue;
 }
 /**
  * TODO: [🧠] Is there a way how to meaningfully test this utility
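The hunk above changes `$deepFreeze` so that it explicitly freezes the top-level object and returns it. A minimal usage sketch, assuming `$deepFreeze` stays importable from `@promptbook/utils` (an assumption; it may be repository-private):

```ts
import { $deepFreeze } from '@promptbook/utils'; // <- assumption: public export

// $deepFreeze mutates its argument (freezes it and all nested objects) and
// now also returns the same reference, so it can be used inline:
const config = $deepFreeze({ retries: 3, nested: { timeoutMs: 1000 } });

console.log(Object.isFrozen(config)); // -> true
console.log(Object.isFrozen(config.nested)); // -> true
```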
@@ -321,24 +322,6 @@ var ADMIN_GITHUB_NAME = 'hejny';
  * @private within the repository - too low-level in comparison with other `MAX_...`
  */
 var LOOP_LIMIT = 1000;
-/**
- * Nonce which is used for replacing things in strings
- *
- * @private within the repository
- */
-var REPLACING_NONCE = 'u$k42k%!V2zo34w7Fu#@QUHYPW';
-/**
- * @@@
- *
- * @private within the repository
- */
-var RESERVED_PARAMETER_MISSING_VALUE = 'MISSING-' + REPLACING_NONCE;
-/**
- * @@@
- *
- * @private within the repository
- */
-var RESERVED_PARAMETER_RESTRICTED = 'RESTRICTED-' + REPLACING_NONCE;
 // <- TODO: [🧜♂️]
 /**
  * @@@
@@ -352,26 +335,21 @@ Object.freeze({
     skipEmptyLines: true,
 });
 /**
- * TODO: Extract `constants.ts` from `config.ts`
  * Note: [💞] Ignore a discrepancy between file name and entity name
  * TODO: [🧠][🧜♂️] Maybe join remoteUrl and path into single value
  */
 
 /**
- *
+ * Orders JSON object by keys
  *
- * @
+ * @returns The same type of object as the input re-ordered
+ * @public exported from `@promptbook/utils`
  */
-function
-        Object.setPrototypeOf(_this, LimitReachedError.prototype);
-        return _this;
-    }
-    return LimitReachedError;
-}(Error));
+function orderJson(options) {
+    var value = options.value, order = options.order;
+    var orderedValue = __assign(__assign({}, (order === undefined ? {} : Object.fromEntries(order.map(function (key) { return [key, undefined]; })))), value);
+    return orderedValue;
+}
 
 /**
  * Make error report URL for the given error
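`orderJson` is a new utility that re-orders an object's keys by an explicit key list. A small sketch of the options-object call shown above (the exact exported typings live in `orderJson.d.ts` and are not reproduced here):

```ts
import { orderJson } from '@promptbook/utils';

const ordered = orderJson({
    value: { b: 2, a: 1, c: 3 },
    order: ['a', 'b'],
});

// Keys listed in `order` come first; remaining keys keep their original order:
console.log(JSON.stringify(ordered)); // -> '{"a":1,"b":2,"c":3}'
```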
@@ -407,85 +385,6 @@ var UnexpectedError = /** @class */ (function (_super) {
     return UnexpectedError;
 }(Error));
 
-/**
- * Replaces parameters in template with values from parameters object
- *
- * @param template the template with parameters in {curly} braces
- * @param parameters the object with parameters
- * @returns the template with replaced parameters
- * @throws {PipelineExecutionError} if parameter is not defined, not closed, or not opened
- * @public exported from `@promptbook/utils`
- */
-function replaceParameters(template, parameters) {
-    var e_1, _a;
-    try {
-        for (var _b = __values(Object.entries(parameters)), _c = _b.next(); !_c.done; _c = _b.next()) {
-            var _d = __read(_c.value, 2), parameterName = _d[0], parameterValue = _d[1];
-            if (parameterValue === RESERVED_PARAMETER_MISSING_VALUE) {
-                throw new UnexpectedError("Parameter `{".concat(parameterName, "}` has missing value"));
-            }
-            else if (parameterValue === RESERVED_PARAMETER_RESTRICTED) {
-                // TODO: [🍵]
-                throw new UnexpectedError("Parameter `{".concat(parameterName, "}` is restricted to use"));
-            }
-        }
-    }
-    catch (e_1_1) { e_1 = { error: e_1_1 }; }
-    finally {
-        try {
-            if (_c && !_c.done && (_a = _b.return)) _a.call(_b);
-        }
-        finally { if (e_1) throw e_1.error; }
-    }
-    var replacedTemplates = template;
-    var match;
-    var loopLimit = LOOP_LIMIT;
-    var _loop_1 = function () {
-        if (loopLimit-- < 0) {
-            throw new LimitReachedError('Loop limit reached during parameters replacement in `replaceParameters`');
-        }
-        var precol = match.groups.precol;
-        var parameterName = match.groups.parameterName;
-        if (parameterName === '') {
-            return "continue";
-        }
-        if (parameterName.indexOf('{') !== -1 || parameterName.indexOf('}') !== -1) {
-            throw new PipelineExecutionError('Parameter is already opened or not closed');
-        }
-        if (parameters[parameterName] === undefined) {
-            throw new PipelineExecutionError("Parameter `{".concat(parameterName, "}` is not defined"));
-        }
-        var parameterValue = parameters[parameterName];
-        if (parameterValue === undefined) {
-            throw new PipelineExecutionError("Parameter `{".concat(parameterName, "}` is not defined"));
-        }
-        parameterValue = parameterValue.toString();
-        if (parameterValue.includes('\n') && /^\s*\W{0,3}\s*$/.test(precol)) {
-            parameterValue = parameterValue
-                .split('\n')
-                .map(function (line, index) { return (index === 0 ? line : "".concat(precol).concat(line)); })
-                .join('\n');
-        }
-        replacedTemplates =
-            replacedTemplates.substring(0, match.index + precol.length) +
-                parameterValue +
-                replacedTemplates.substring(match.index + precol.length + parameterName.length + 2);
-    };
-    while ((match = /^(?<precol>.*){(?<parameterName>\w+)}(.*)/m /* <- Not global */
-        .exec(replacedTemplates))) {
-        _loop_1();
-    }
-    // [💫] Check if there are parameters that are not closed properly
-    if (/{\w+$/.test(replacedTemplates)) {
-        throw new PipelineExecutionError('Parameter is not closed');
-    }
-    // [💫] Check if there are parameters that are not opened properly
-    if (/^\w+}/.test(replacedTemplates)) {
-        throw new PipelineExecutionError('Parameter is not opened');
-    }
-    return replacedTemplates;
-}
-
 /**
  * Checks if the value is [🚉] serializable as JSON
  * If not, throws an UnexpectedError with a rich error message and tracking
@@ -506,8 +405,9 @@ function replaceParameters(template, parameters) {
  * @throws UnexpectedError if the value is not serializable as JSON
  * @public exported from `@promptbook/utils`
  */
-function checkSerializableAsJson(
+function checkSerializableAsJson(options) {
     var e_1, _a;
+    var value = options.value, name = options.name, message = options.message;
     if (value === undefined) {
         throw new UnexpectedError("".concat(name, " is undefined"));
     }
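This hunk and the following ones change `checkSerializableAsJson` from positional `(name, value)` arguments to a single options object with an additional `message` that is echoed in error reports. A hedged sketch of the new call shape:

```ts
import { checkSerializableAsJson } from '@promptbook/utils';

// Throws UnexpectedError when `value` cannot survive a JSON round-trip
// (Date, Map, Set, RegExp, unserialized Error, circular references, ...):
checkSerializableAsJson({
    name: 'pipelineJson',
    value: { title: 'Example', createdAt: '2024-10-01' }, // <- ISO string instead of Date
    message: 'Checked before persisting the compiled pipeline', // <- assumption: optional
});
```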
@@ -531,12 +431,12 @@ function checkSerializableAsJson(name, value) {
     }
     else if (typeof value === 'object' && Array.isArray(value)) {
         for (var i = 0; i < value.length; i++) {
-            checkSerializableAsJson("".concat(name, "[").concat(i, "]"), value[i]);
+            checkSerializableAsJson({ name: "".concat(name, "[").concat(i, "]"), value: value[i], message: message });
         }
     }
     else if (typeof value === 'object') {
         if (value instanceof Date) {
-            throw new UnexpectedError(spaceTrim$1("\n
+            throw new UnexpectedError(spaceTrim$1(function (block) { return "\n `".concat(name, "` is Date\n\n Use `string_date_iso8601` instead\n\n Additional message for `").concat(name, "`:\n ").concat(block(message || '(nothing)'), "\n "); }));
         }
         else if (value instanceof Map) {
             throw new UnexpectedError("".concat(name, " is Map"));
@@ -548,7 +448,7 @@ function checkSerializableAsJson(name, value) {
             throw new UnexpectedError("".concat(name, " is RegExp"));
         }
         else if (value instanceof Error) {
-            throw new UnexpectedError(spaceTrim$1("\n
+            throw new UnexpectedError(spaceTrim$1(function (block) { return "\n `".concat(name, "` is unserialized Error\n\n Use function `serializeError`\n\n Additional message for `").concat(name, "`:\n ").concat(block(message || '(nothing)'), "\n\n "); }));
         }
         else {
             try {
@@ -558,7 +458,7 @@ function checkSerializableAsJson(name, value) {
                         // Note: undefined in object is serializable - it is just omited
                         continue;
                     }
-                    checkSerializableAsJson("".concat(name, ".").concat(subName), subValue);
+                    checkSerializableAsJson({ name: "".concat(name, ".").concat(subName), value: subValue, message: message });
                 }
             }
             catch (e_1_1) { e_1 = { error: e_1_1 }; }
@@ -575,7 +475,7 @@ function checkSerializableAsJson(name, value) {
                 if (!(error instanceof Error)) {
                     throw error;
                 }
-                throw new UnexpectedError(spaceTrim$1(function (block) { return "\n ".concat(name, " is not serializable\n\n ").concat(block(error.toString()), "\n "); }));
+                throw new UnexpectedError(spaceTrim$1(function (block) { return "\n `".concat(name, "` is not serializable\n\n ").concat(block(error.toString()), "\n\n Additional message for `").concat(name, "`:\n ").concat(block(message || '(nothing)'), "\n "); }));
             }
             /*
                 TODO: [0] Is there some more elegant way to check circular references?
@@ -600,35 +500,210 @@ function checkSerializableAsJson(name, value) {
         }
     }
     else {
-        throw new UnexpectedError("".concat(name, " is unknown"));
+        throw new UnexpectedError(spaceTrim$1(function (block) { return "\n `".concat(name, "` is unknown type\n\n Additional message for `").concat(name, "`:\n ").concat(block(message || '(nothing)'), "\n "); }));
     }
 }
 /**
- * TODO:
+ * TODO: Can be return type more type-safe? like `asserts options.value is JsonValue`
  * TODO: [🧠][main] !!! In-memory cache of same values to prevent multiple checks
  * Note: [🐠] This is how `checkSerializableAsJson` + `isSerializableAsJson` together can just retun true/false or rich error message
  */
 
 /**
- * @@@
  * @@@
  *
- *
+ * @public exported from `@promptbook/utils`
+ */
+function deepClone(objectValue) {
+    return JSON.parse(JSON.stringify(objectValue));
+    /*
+    !!!!!!!!
+    TODO: [🧠] Is there a better implementation?
+    > const propertyNames = Object.getOwnPropertyNames(objectValue);
+    > for (const propertyName of propertyNames) {
+    >     const value = (objectValue as really_any)[propertyName];
+    >     if (value && typeof value === 'object') {
+    >         deepClone(value);
+    >     }
+    > }
+    > return Object.assign({}, objectValue);
+    */
+}
+/**
+ * TODO: [🧠] Is there a way how to meaningfully test this utility
+ */
+
+/**
+ * Utility to export a JSON object from a function
  *
- *
- *
- *
- *
+ * 1) Checks if the value is serializable as JSON
+ * 2) Makes a deep clone of the object
+ * 2) Orders the object properties
+ * 2) Deeply freezes the cloned object
+ *
+ * Note: This function does not mutates the given object
+ *
+ * @returns The same type of object as the input but read-only and re-ordered
+ * @public exported from `@promptbook/utils`
  */
-function
+function exportJson(options) {
+    var name = options.name, value = options.value, order = options.order, message = options.message;
+    checkSerializableAsJson({ name: name, value: value, message: message });
+    var orderedValue =
+    // TODO: Fix error "Type instantiation is excessively deep and possibly infinite."
+    // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+    // @ts-ignore
+    order === undefined
+        ? deepClone(value)
+        : orderJson({
+            value: value,
+            // <- Note: checkSerializableAsJson asserts that the value is serializable as JSON
+            order: order,
+        });
+    $deepFreeze(orderedValue);
+    return orderedValue;
 }
 /**
- * TODO: [🧠][🛣] More elegant way to tracking than passing `name`
  * TODO: [🧠] Is there a way how to meaningfully test this utility
  */
 
+/**
+ * Nonce which is used for replacing things in strings
+ *
+ * @private within the repository
+ */
+var REPLACING_NONCE = 'u$k42k%!V2zo34w7Fu#@QUHYPW';
+/**
+ * @@@
+ *
+ * @private within the repository
+ */
+var RESERVED_PARAMETER_MISSING_VALUE = 'MISSING-' + REPLACING_NONCE;
+/**
+ * @@@
+ *
+ * @private within the repository
+ */
+var RESERVED_PARAMETER_RESTRICTED = 'RESTRICTED-' + REPLACING_NONCE;
+/**
+ * The names of the parameters that are reserved for special purposes
+ *
+ * @public exported from `@promptbook/core`
+ */
+exportJson({
+    name: 'RESERVED_PARAMETER_NAMES',
+    message: "The names of the parameters that are reserved for special purposes",
+    value: [
+        'content',
+        'context',
+        'knowledge',
+        'examples',
+        'modelName',
+        'currentDate',
+        // <- TODO: list here all command names
+        // <- TODO: Add more like 'date', 'modelName',...
+        // <- TODO: Add [emoji] + instructions ACRY when adding new reserved parameter
+    ],
+});
+/**
+ * Note: [💞] Ignore a discrepancy between file name and entity name
+ */
+
+/**
+ * This error type indicates that some limit was reached
+ *
+ * @public exported from `@promptbook/core`
+ */
+var LimitReachedError = /** @class */ (function (_super) {
+    __extends(LimitReachedError, _super);
+    function LimitReachedError(message) {
+        var _this = _super.call(this, message) || this;
+        _this.name = 'LimitReachedError';
+        Object.setPrototypeOf(_this, LimitReachedError.prototype);
+        return _this;
+    }
+    return LimitReachedError;
+}(Error));
+
+/**
+ * Replaces parameters in template with values from parameters object
+ *
+ * @param template the template with parameters in {curly} braces
+ * @param parameters the object with parameters
+ * @returns the template with replaced parameters
+ * @throws {PipelineExecutionError} if parameter is not defined, not closed, or not opened
+ * @public exported from `@promptbook/utils`
+ */
+function replaceParameters(template, parameters) {
+    var e_1, _a;
+    try {
+        for (var _b = __values(Object.entries(parameters)), _c = _b.next(); !_c.done; _c = _b.next()) {
+            var _d = __read(_c.value, 2), parameterName = _d[0], parameterValue = _d[1];
+            if (parameterValue === RESERVED_PARAMETER_MISSING_VALUE) {
+                throw new UnexpectedError("Parameter `{".concat(parameterName, "}` has missing value"));
+            }
+            else if (parameterValue === RESERVED_PARAMETER_RESTRICTED) {
+                // TODO: [🍵]
+                throw new UnexpectedError("Parameter `{".concat(parameterName, "}` is restricted to use"));
+            }
+        }
+    }
+    catch (e_1_1) { e_1 = { error: e_1_1 }; }
+    finally {
+        try {
+            if (_c && !_c.done && (_a = _b.return)) _a.call(_b);
+        }
+        finally { if (e_1) throw e_1.error; }
+    }
+    var replacedTemplates = template;
+    var match;
+    var loopLimit = LOOP_LIMIT;
+    var _loop_1 = function () {
+        if (loopLimit-- < 0) {
+            throw new LimitReachedError('Loop limit reached during parameters replacement in `replaceParameters`');
+        }
+        var precol = match.groups.precol;
+        var parameterName = match.groups.parameterName;
+        if (parameterName === '') {
+            return "continue";
+        }
+        if (parameterName.indexOf('{') !== -1 || parameterName.indexOf('}') !== -1) {
+            throw new PipelineExecutionError('Parameter is already opened or not closed');
+        }
+        if (parameters[parameterName] === undefined) {
+            throw new PipelineExecutionError("Parameter `{".concat(parameterName, "}` is not defined"));
+        }
+        var parameterValue = parameters[parameterName];
+        if (parameterValue === undefined) {
+            throw new PipelineExecutionError("Parameter `{".concat(parameterName, "}` is not defined"));
+        }
+        parameterValue = parameterValue.toString();
+        if (parameterValue.includes('\n') && /^\s*\W{0,3}\s*$/.test(precol)) {
+            parameterValue = parameterValue
+                .split('\n')
+                .map(function (line, index) { return (index === 0 ? line : "".concat(precol).concat(line)); })
+                .join('\n');
+        }
+        replacedTemplates =
+            replacedTemplates.substring(0, match.index + precol.length) +
+                parameterValue +
+                replacedTemplates.substring(match.index + precol.length + parameterName.length + 2);
+    };
+    while ((match = /^(?<precol>.*){(?<parameterName>\w+)}(.*)/m /* <- Not global */
+        .exec(replacedTemplates))) {
+        _loop_1();
+    }
+    // [💫] Check if there are parameters that are not closed properly
+    if (/{\w+$/.test(replacedTemplates)) {
+        throw new PipelineExecutionError('Parameter is not closed');
+    }
+    // [💫] Check if there are parameters that are not opened properly
+    if (/^\w+}/.test(replacedTemplates)) {
+        throw new PipelineExecutionError('Parameter is not opened');
+    }
+    return replacedTemplates;
+}
+
 /**
  * Counts number of characters in the text
  *
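`exportJson` ties the new utilities together: it runs `checkSerializableAsJson`, deep-clones via `deepClone`, optionally re-orders keys via `orderJson`, and freezes the clone with `$deepFreeze`. A sketch mirroring the `RESERVED_PARAMETER_NAMES` call above, using a hypothetical constant that is not part of the package:

```ts
import { exportJson } from '@promptbook/utils';

// Hypothetical constant for illustration only:
const DEFAULT_RETRY_POLICY = exportJson({
    name: 'DEFAULT_RETRY_POLICY',
    message: 'Retry policy used by example code',
    order: ['maxAttempts', 'backoffMs'],
    value: { backoffMs: 500, maxAttempts: 3 },
});

// The result is a read-only deep clone with keys ordered as requested;
// the original `value` object is left untouched.
console.log(Object.isFrozen(DEFAULT_RETRY_POLICY)); // -> true
```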
@@ -1036,381 +1111,384 @@ function computeUsage(value) {
  * @see https://openai.com/api/pricing/
  * @public exported from `@promptbook/openai`
  */
-var OPENAI_MODELS =
+var OPENAI_MODELS = exportJson({
+    name: 'OPENAI_MODELS',
+    value: [
+        /*/
+        {
+            modelTitle: 'dall-e-3',
+            modelName: 'dall-e-3',
+        },
+        /**/
+        /*/
+        {
+            modelTitle: 'whisper-1',
+            modelName: 'whisper-1',
+        },
+        /**/
+        /**/
+        {
+            modelVariant: 'COMPLETION',
+            modelTitle: 'davinci-002',
+            modelName: 'davinci-002',
+            pricing: {
+                prompt: computeUsage("$2.00 / 1M tokens"),
+                output: computeUsage("$2.00 / 1M tokens"), // <- not sure
+            },
+        },
+        /**/
+        /*/
         {
-            modelTitle: 'dall-e-
-            modelName: 'dall-e-
+            modelTitle: 'dall-e-2',
+            modelName: 'dall-e-2',
         },
         /**/
+        /**/
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'gpt-3.5-turbo-16k',
+            modelName: 'gpt-3.5-turbo-16k',
+            pricing: {
+                prompt: computeUsage("$3.00 / 1M tokens"),
+                output: computeUsage("$4.00 / 1M tokens"),
+            },
+        },
+        /**/
+        /*/
         {
-            modelTitle: '
-            modelName: '
+            modelTitle: 'tts-1-hd-1106',
+            modelName: 'tts-1-hd-1106',
         },
-        },
-        /**/
-        /**/
-        {
-            modelVariant: 'CHAT',
-            modelTitle: 'gpt-3.5-turbo-16k',
-            modelName: 'gpt-3.5-turbo-16k',
-            pricing: {
-                prompt: computeUsage("$3.00 / 1M tokens"),
-                output: computeUsage("$4.00 / 1M tokens"),
-            },
-        },
-        /**/
-        /*/
-        {
-            modelTitle: 'tts-1-hd-1106',
-            modelName: 'tts-1-hd-1106',
-        },
-        /**/
-        /*/
-        {
-            modelTitle: 'tts-1-hd',
-            modelName: 'tts-1-hd',
-        },
-        /**/
-        /**/
-        {
-            modelVariant: 'CHAT',
-            modelTitle: 'gpt-4',
-            modelName: 'gpt-4',
-            pricing: {
-                prompt: computeUsage("$30.00 / 1M tokens"),
-                output: computeUsage("$60.00 / 1M tokens"),
+        /**/
+        /*/
+        {
+            modelTitle: 'tts-1-hd',
+            modelName: 'tts-1-hd',
+        },
+        /**/
+        /**/
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'gpt-4',
+            modelName: 'gpt-4',
+            pricing: {
+                prompt: computeUsage("$30.00 / 1M tokens"),
+                output: computeUsage("$60.00 / 1M tokens"),
+            },
         },
+        /**/
+        /**/
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'gpt-4-32k',
+            modelName: 'gpt-4-32k',
+            pricing: {
+                prompt: computeUsage("$60.00 / 1M tokens"),
+                output: computeUsage("$120.00 / 1M tokens"),
+            },
         },
+        /**/
+        /*/
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'gpt-4-0613',
+            modelName: 'gpt-4-0613',
+            pricing: {
+                prompt: computeUsage(` / 1M tokens`),
+                output: computeUsage(` / 1M tokens`),
+            },
         },
+        /**/
+        /**/
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'gpt-4-turbo-2024-04-09',
+            modelName: 'gpt-4-turbo-2024-04-09',
+            pricing: {
+                prompt: computeUsage("$10.00 / 1M tokens"),
+                output: computeUsage("$30.00 / 1M tokens"),
+            },
         },
+        /**/
+        /**/
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'gpt-3.5-turbo-1106',
+            modelName: 'gpt-3.5-turbo-1106',
+            pricing: {
+                prompt: computeUsage("$1.00 / 1M tokens"),
+                output: computeUsage("$2.00 / 1M tokens"),
+            },
         },
+        /**/
+        /**/
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'gpt-4-turbo',
+            modelName: 'gpt-4-turbo',
+            pricing: {
+                prompt: computeUsage("$10.00 / 1M tokens"),
+                output: computeUsage("$30.00 / 1M tokens"),
+            },
         },
+        /**/
+        /**/
+        {
+            modelVariant: 'COMPLETION',
+            modelTitle: 'gpt-3.5-turbo-instruct-0914',
+            modelName: 'gpt-3.5-turbo-instruct-0914',
+            pricing: {
+                prompt: computeUsage("$1.50 / 1M tokens"),
+                output: computeUsage("$2.00 / 1M tokens"), // <- For gpt-3.5-turbo-instruct
+            },
         },
+        /**/
+        /**/
+        {
+            modelVariant: 'COMPLETION',
+            modelTitle: 'gpt-3.5-turbo-instruct',
+            modelName: 'gpt-3.5-turbo-instruct',
+            pricing: {
+                prompt: computeUsage("$1.50 / 1M tokens"),
+                output: computeUsage("$2.00 / 1M tokens"),
+            },
         },
+        /**/
+        /*/
+        {
+            modelTitle: 'tts-1',
+            modelName: 'tts-1',
+        },
+        /**/
+        /**/
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'gpt-3.5-turbo',
+            modelName: 'gpt-3.5-turbo',
+            pricing: {
+                prompt: computeUsage("$3.00 / 1M tokens"),
+                output: computeUsage("$6.00 / 1M tokens"), // <- Not sure, refer to gpt-3.5-turbo in Fine-tuning models
+            },
         },
+        /**/
+        /**/
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'gpt-3.5-turbo-0301',
+            modelName: 'gpt-3.5-turbo-0301',
+            pricing: {
+                prompt: computeUsage("$1.50 / 1M tokens"),
+                output: computeUsage("$2.00 / 1M tokens"),
+            },
         },
+        /**/
+        /**/
+        {
+            modelVariant: 'COMPLETION',
+            modelTitle: 'babbage-002',
+            modelName: 'babbage-002',
+            pricing: {
+                prompt: computeUsage("$0.40 / 1M tokens"),
+                output: computeUsage("$0.40 / 1M tokens"), // <- Not sure
+            },
         },
+        /**/
+        /**/
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'gpt-4-1106-preview',
+            modelName: 'gpt-4-1106-preview',
+            pricing: {
+                prompt: computeUsage("$10.00 / 1M tokens"),
+                output: computeUsage("$30.00 / 1M tokens"),
+            },
         },
+        /**/
+        /**/
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'gpt-4-0125-preview',
+            modelName: 'gpt-4-0125-preview',
+            pricing: {
+                prompt: computeUsage("$10.00 / 1M tokens"),
+                output: computeUsage("$30.00 / 1M tokens"),
+            },
         },
+        /**/
+        /*/
+        {
+            modelTitle: 'tts-1-1106',
+            modelName: 'tts-1-1106',
+        },
+        /**/
+        /**/
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'gpt-3.5-turbo-0125',
+            modelName: 'gpt-3.5-turbo-0125',
+            pricing: {
+                prompt: computeUsage("$0.50 / 1M tokens"),
+                output: computeUsage("$1.50 / 1M tokens"),
+            },
         },
+        /**/
+        /**/
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'gpt-4-turbo-preview',
+            modelName: 'gpt-4-turbo-preview',
+            pricing: {
+                prompt: computeUsage("$10.00 / 1M tokens"),
+                output: computeUsage("$30.00 / 1M tokens"), // <- Not sure, just for gpt-4-turbo
+            },
         },
+        /**/
+        /**/
+        {
+            modelVariant: 'EMBEDDING',
+            modelTitle: 'text-embedding-3-large',
+            modelName: 'text-embedding-3-large',
+            pricing: {
+                prompt: computeUsage("$0.13 / 1M tokens"),
+                // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
+                output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
+            },
         },
+        /**/
+        /**/
+        {
+            modelVariant: 'EMBEDDING',
+            modelTitle: 'text-embedding-3-small',
+            modelName: 'text-embedding-3-small',
+            pricing: {
+                prompt: computeUsage("$0.02 / 1M tokens"),
+                // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
+                output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
+            },
         },
+        /**/
+        /**/
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'gpt-3.5-turbo-0613',
+            modelName: 'gpt-3.5-turbo-0613',
+            pricing: {
+                prompt: computeUsage("$1.50 / 1M tokens"),
+                output: computeUsage("$2.00 / 1M tokens"),
+            },
         },
+        /**/
+        /**/
+        {
+            modelVariant: 'EMBEDDING',
+            modelTitle: 'text-embedding-ada-002',
+            modelName: 'text-embedding-ada-002',
+            pricing: {
+                prompt: computeUsage("$0.1 / 1M tokens"),
+                // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
+                output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
+            },
         },
-            modelName: 'gpt-4-1106-vision-preview',
-        },
-        /**/
-        /*/
-        {
-            modelVariant: 'CHAT',
-            modelTitle: 'gpt-4-vision-preview',
-            modelName: 'gpt-4-vision-preview',
-            pricing: {
-                prompt: computeUsage(`$10.00 / 1M tokens`),
-                output: computeUsage(`$30.00 / 1M tokens`),
+        /**/
+        /*/
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'gpt-4-1106-vision-preview',
+            modelName: 'gpt-4-1106-vision-preview',
         },
+        /**/
+        /*/
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'gpt-4-vision-preview',
+            modelName: 'gpt-4-vision-preview',
+            pricing: {
+                prompt: computeUsage(`$10.00 / 1M tokens`),
+                output: computeUsage(`$30.00 / 1M tokens`),
+            },
+        },
+        /**/
+        /**/
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'gpt-4o-2024-05-13',
+            modelName: 'gpt-4o-2024-05-13',
+            pricing: {
+                prompt: computeUsage("$5.00 / 1M tokens"),
+                output: computeUsage("$15.00 / 1M tokens"),
+            },
+            //TODO: [main] !!! Add gpt-4o-mini-2024-07-18 and all others to be up to date
         },
-            output: computeUsage("$15.00 / 1M tokens"),
+        /**/
+        /**/
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'gpt-4o',
+            modelName: 'gpt-4o',
+            pricing: {
+                prompt: computeUsage("$5.00 / 1M tokens"),
+                output: computeUsage("$15.00 / 1M tokens"),
+            },
         },
+        /**/
+        /**/
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'o1-preview',
+            modelName: 'o1-preview',
+            pricing: {
+                prompt: computeUsage("$15.00 / 1M tokens"),
+                output: computeUsage("$60.00 / 1M tokens"),
+            },
         },
+        /**/
+        /**/
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'o1-preview-2024-09-12',
+            modelName: 'o1-preview-2024-09-12',
+            // <- TODO: [💩] Some better system to organize theese date suffixes and versions
+            pricing: {
+                prompt: computeUsage("$15.00 / 1M tokens"),
+                output: computeUsage("$60.00 / 1M tokens"),
+            },
         },
+        /**/
+        /**/
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'o1-mini',
+            modelName: 'o1-mini',
+            pricing: {
+                prompt: computeUsage("$3.00 / 1M tokens"),
+                output: computeUsage("$12.00 / 1M tokens"),
+            },
         },
+        /**/
+        /**/
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'o1-mini-2024-09-12',
+            modelName: 'o1-mini-2024-09-12',
+            pricing: {
+                prompt: computeUsage("$3.00 / 1M tokens"),
+                output: computeUsage("$12.00 / 1M tokens"),
+            },
         },
+        /**/
+        /**/
+        {
+            modelVariant: 'CHAT',
+            modelTitle: 'gpt-3.5-turbo-16k-0613',
+            modelName: 'gpt-3.5-turbo-16k-0613',
+            pricing: {
+                prompt: computeUsage("$3.00 / 1M tokens"),
+                output: computeUsage("$4.00 / 1M tokens"),
+            },
         },
+        /**/
+    ],
+});
 /**
  * Note: [🤖] Add models of new variant
  * TODO: [🧠] Some mechanism to propagate unsureness
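`OPENAI_MODELS` is now a deep-frozen list produced by `exportJson` rather than a plain array literal; lookups work the same way. A hedged sketch of reading pricing from it, assuming `computeUsage('$X / 1M tokens')` normalizes to a per-token price (that normalization is not shown in this diff):

```ts
import { OPENAI_MODELS } from '@promptbook/openai';

const gpt4o = OPENAI_MODELS.find(({ modelName }) => modelName === 'gpt-4o');

if (gpt4o?.pricing) {
    // Rough cost estimate for 1200 prompt tokens and 300 output tokens:
    const estimatedCost = 1200 * gpt4o.pricing.prompt + 300 * gpt4o.pricing.output;
    console.log(`~$${estimatedCost.toFixed(6)}`);
}
```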
@@ -1626,18 +1704,23 @@ var OpenAiExecutionTools = /** @class */ (function () {
                         if (resultContent === null) {
                             throw new PipelineExecutionError('No response message from OpenAI');
                         }
-                        return [2 /*return*/,
+                        return [2 /*return*/, exportJson({
+                                name: 'promptResult',
+                                message: "Result of `OpenAiExecutionTools.callChatModel`",
+                                order: [],
+                                value: {
+                                    content: resultContent,
+                                    modelName: rawResponse.model || modelName,
+                                    timing: {
+                                        start: start,
+                                        complete: complete,
+                                    },
+                                    usage: usage,
+                                    rawPromptContent: rawPromptContent,
+                                    rawRequest: rawRequest,
+                                    rawResponse: rawResponse,
+                                    // <- [🗯]
                                 },
-                                usage: usage,
-                                rawPromptContent: rawPromptContent,
-                                rawRequest: rawRequest,
-                                rawResponse: rawResponse,
-                                // <- [🗯]
                             })];
                     }
                 });
@@ -1702,18 +1785,23 @@ var OpenAiExecutionTools = /** @class */ (function () {
                         // eslint-disable-next-line prefer-const
                         complete = $getCurrentDate();
                         usage = computeOpenAiUsage(content || '', resultContent || '', rawResponse);
-                        return [2 /*return*/,
+                        return [2 /*return*/, exportJson({
+                                name: 'promptResult',
+                                message: "Result of `OpenAiExecutionTools.callCompletionModel`",
+                                order: [],
+                                value: {
+                                    content: resultContent,
+                                    modelName: rawResponse.model || modelName,
+                                    timing: {
+                                        start: start,
+                                        complete: complete,
+                                    },
+                                    usage: usage,
+                                    rawPromptContent: rawPromptContent,
+                                    rawRequest: rawRequest,
+                                    rawResponse: rawResponse,
+                                    // <- [🗯]
                                 },
-                                usage: usage,
-                                rawPromptContent: rawPromptContent,
-                                rawRequest: rawRequest,
-                                rawResponse: rawResponse,
-                                // <- [🗯]
                             })];
                     }
                 });
@@ -1770,18 +1858,23 @@ var OpenAiExecutionTools = /** @class */ (function () {
                         usage = computeOpenAiUsage(content || '', '',
                         // <- Note: Embedding does not have result content
                         rawResponse);
-                        return [2 /*return*/,
+                        return [2 /*return*/, exportJson({
+                                name: 'promptResult',
+                                message: "Result of `OpenAiExecutionTools.callEmbeddingModel`",
+                                order: [],
+                                value: {
+                                    content: resultContent,
+                                    modelName: rawResponse.model || modelName,
+                                    timing: {
+                                        start: start,
+                                        complete: complete,
+                                    },
+                                    usage: usage,
+                                    rawPromptContent: rawPromptContent,
+                                    rawRequest: rawRequest,
+                                    rawResponse: rawResponse,
+                                    // <- [🗯]
                                 },
-                                usage: usage,
-                                rawPromptContent: rawPromptContent,
-                                rawRequest: rawRequest,
-                                rawResponse: rawResponse,
-                                // <- [🗯]
                             })];
                     }
                 });
@@ -1977,20 +2070,25 @@ var OpenAiAssistantExecutionTools = /** @class */ (function (_super) {
                         if (resultContent === null) {
                             throw new PipelineExecutionError('No response message from OpenAI');
                         }
-                        return [2 /*return*/,
+                        return [2 /*return*/, exportJson({
+                                name: 'promptResult',
+                                message: "Result of `OpenAiAssistantExecutionTools.callChatModel`",
+                                order: [],
+                                value: {
+                                    content: resultContent,
+                                    modelName: 'assistant',
+                                    // <- TODO: [🥘] Detect used model in assistant
+                                    // ?> model: rawResponse.model || modelName,
+                                    timing: {
+                                        start: start,
+                                        complete: complete,
+                                    },
+                                    usage: usage,
+                                    rawPromptContent: rawPromptContent,
+                                    rawRequest: rawRequest,
+                                    rawResponse: rawResponse,
+                                    // <- [🗯]
                                 },
-                                usage: usage,
-                                rawPromptContent: rawPromptContent,
-                                rawRequest: rawRequest,
-                                rawResponse: rawResponse,
-                                // <- [🗯]
                             })];
                     }
                 });