@promptbook/cli 0.79.0 → 0.80.0-1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. package/README.md +4 -0
  2. package/esm/index.es.js +1473 -1080
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/books/index.d.ts +6 -6
  5. package/esm/typings/src/_packages/core.index.d.ts +8 -6
  6. package/esm/typings/src/_packages/types.index.d.ts +6 -0
  7. package/esm/typings/src/_packages/utils.index.d.ts +4 -0
  8. package/esm/typings/src/cli/cli-commands/runInteractiveChatbot.d.ts +32 -0
  9. package/esm/typings/src/commands/_common/getParserForCommand.d.ts +1 -1
  10. package/esm/typings/src/commands/_common/parseCommand.d.ts +1 -1
  11. package/esm/typings/src/commands/_common/stringifyCommand.d.ts +1 -1
  12. package/esm/typings/src/commands/_common/types/CommandParser.d.ts +3 -0
  13. package/esm/typings/src/config.d.ts +0 -25
  14. package/esm/typings/src/constants.d.ts +35 -0
  15. package/esm/typings/src/conversion/{pipelineStringToJson.d.ts → compilePipeline.d.ts} +3 -3
  16. package/esm/typings/src/conversion/pipelineJsonToString.d.ts +1 -0
  17. package/esm/typings/src/conversion/{pipelineStringToJsonSync.d.ts → precompilePipeline.d.ts} +4 -3
  18. package/esm/typings/src/high-level-abstractions/_common/HighLevelAbstraction.d.ts +20 -0
  19. package/esm/typings/src/high-level-abstractions/implicit-formfactor/ImplicitFormfactorHla.d.ts +10 -0
  20. package/esm/typings/src/high-level-abstractions/index.d.ts +44 -0
  21. package/esm/typings/src/high-level-abstractions/quick-chatbot/QuickChatbotHla.d.ts +10 -0
  22. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +1 -1
  23. package/esm/typings/src/llm-providers/remote/startRemoteServer.d.ts +1 -1
  24. package/esm/typings/src/prepare/prepareTasks.d.ts +1 -0
  25. package/esm/typings/src/prepare/unpreparePipeline.d.ts +1 -0
  26. package/esm/typings/src/types/typeAliases.d.ts +1 -1
  27. package/esm/typings/src/utils/normalization/orderJson.d.ts +21 -0
  28. package/esm/typings/src/utils/normalization/orderJson.test.d.ts +4 -0
  29. package/esm/typings/src/utils/organization/keepTypeImported.d.ts +9 -0
  30. package/esm/typings/src/utils/serialization/$deepFreeze.d.ts +1 -1
  31. package/esm/typings/src/utils/serialization/checkSerializableAsJson.d.ts +20 -2
  32. package/esm/typings/src/utils/serialization/deepClone.test.d.ts +1 -0
  33. package/esm/typings/src/utils/serialization/exportJson.d.ts +29 -0
  34. package/esm/typings/src/utils/serialization/isSerializableAsJson.d.ts +2 -1
  35. package/package.json +2 -1
  36. package/umd/index.umd.js +1473 -1080
  37. package/umd/index.umd.js.map +1 -1
  38. package/esm/typings/src/utils/serialization/$asDeeplyFrozenSerializableJson.d.ts +0 -17
  39. /package/esm/typings/src/conversion/{pipelineStringToJson.test.d.ts → compilePipeline.test.d.ts} +0 -0
  40. /package/esm/typings/src/conversion/{pipelineStringToJsonSync.test.d.ts → precompilePipeline.test.d.ts} +0 -0
package/umd/index.umd.js CHANGED
@@ -49,7 +49,7 @@
  *
  * @see https://github.com/webgptorg/promptbook
  */
- var PROMPTBOOK_ENGINE_VERSION = '0.78.4';
+ var PROMPTBOOK_ENGINE_VERSION = '0.80.0-0';
  /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
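This first hunk only bumps the embedded engine version constant (note the 0.79.0 package shipped with an engine constant of '0.78.4', and this 0.80.0-1 package embeds '0.80.0-0'). A minimal sketch of reading the constant, assuming it stays exported from `@promptbook/core` as in previous releases:

```ts
import { PROMPTBOOK_ENGINE_VERSION } from '@promptbook/core';

// Warn when the bundled engine is still a prerelease build
if (PROMPTBOOK_ENGINE_VERSION.includes('-')) {
    console.warn(`Running a prerelease Promptbook engine: ${PROMPTBOOK_ENGINE_VERSION}`);
}
```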
@@ -320,41 +320,6 @@
  * @public exported from `@promptbook/core`
  */
  var DEFAULT_PIPELINE_COLLECTION_BASE_FILENAME = "index";
- /**
- * Nonce which is used for replacing things in strings
- *
- * @private within the repository
- */
- var REPLACING_NONCE = 'u$k42k%!V2zo34w7Fu#@QUHYPW';
- /**
- * The names of the parameters that are reserved for special purposes
- *
- * @public exported from `@promptbook/core`
- */
- var RESERVED_PARAMETER_NAMES =
- /* !!!!!! $asDeeplyFrozenSerializableJson('RESERVED_PARAMETER_NAMES', _____ as const); */ [
- 'content',
- 'context',
- 'knowledge',
- 'examples',
- 'modelName',
- 'currentDate',
- // <- TODO: list here all command names
- // <- TODO: Add more like 'date', 'modelName',...
- // <- TODO: Add [emoji] + instructions ACRY when adding new reserved parameter
- ];
- /**
- * @@@
- *
- * @private within the repository
- */
- var RESERVED_PARAMETER_MISSING_VALUE = 'MISSING-' + REPLACING_NONCE;
- /**
- * @@@
- *
- * @private within the repository
- */
- var RESERVED_PARAMETER_RESTRICTED = 'RESTRICTED-' + REPLACING_NONCE;
  /**
  * The thresholds for the relative time in the `moment` NPM package.
  *
@@ -410,7 +375,6 @@
  // Note: In normal situations, we check the pipeline logic:
  true);
  /**
- * TODO: Extract `constants.ts` from `config.ts`
  * Note: [💞] Ignore a discrepancy between file name and entity name
  * TODO: [🧠][🧜‍♂️] Maybe join remoteUrl and path into single value
  */
@@ -536,6 +500,56 @@
  * Note: [🟢] Code in this file should never be never released in packages that could be imported into browser environment
  */

+ /**
+ * Orders JSON object by keys
+ *
+ * @returns The same type of object as the input re-ordered
+ * @public exported from `@promptbook/utils`
+ */
+ function orderJson(options) {
+ var value = options.value, order = options.order;
+ var orderedValue = __assign(__assign({}, (order === undefined ? {} : Object.fromEntries(order.map(function (key) { return [key, undefined]; })))), value);
+ return orderedValue;
+ }
+
+ /**
+ * Freezes the given object and all its nested objects recursively
+ *
+ * Note: `$` is used to indicate that this function is not a pure function - it mutates given object
+ * Note: This function mutates the object and returns the original (but mutated-deep-freezed) object
+ *
+ * @returns The same object as the input, but deeply frozen
+ * @public exported from `@promptbook/utils`
+ */
+ function $deepFreeze(objectValue) {
+ var e_1, _a;
+ if (Array.isArray(objectValue)) {
+ return Object.freeze(objectValue.map(function (item) { return $deepFreeze(item); }));
+ }
+ var propertyNames = Object.getOwnPropertyNames(objectValue);
+ try {
+ for (var propertyNames_1 = __values(propertyNames), propertyNames_1_1 = propertyNames_1.next(); !propertyNames_1_1.done; propertyNames_1_1 = propertyNames_1.next()) {
+ var propertyName = propertyNames_1_1.value;
+ var value = objectValue[propertyName];
+ if (value && typeof value === 'object') {
+ $deepFreeze(value);
+ }
+ }
+ }
+ catch (e_1_1) { e_1 = { error: e_1_1 }; }
+ finally {
+ try {
+ if (propertyNames_1_1 && !propertyNames_1_1.done && (_a = propertyNames_1.return)) _a.call(propertyNames_1);
+ }
+ finally { if (e_1) throw e_1.error; }
+ }
+ Object.freeze(objectValue);
+ return objectValue;
+ }
+ /**
+ * TODO: [🧠] Is there a way how to meaningfully test this utility
+ */
+
  /**
  * Make error report URL for the given error
  *
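Both helpers added above are new `@promptbook/utils` exports. `orderJson` re-orders keys by pre-seeding an object with `undefined` entries in the requested order and spreading the value over it; `$deepFreeze` recursively freezes (and mutates) its argument. A minimal usage sketch based on the implementations shown:

```ts
import { orderJson, $deepFreeze } from '@promptbook/utils';

const ordered = orderJson({
    value: { b: 2, a: 1, c: 3 },
    order: ['a', 'b'],
});
console.log(Object.keys(ordered)); // -> ['a', 'b', 'c'] (keys not listed in `order` keep their relative position at the end)

const frozen = $deepFreeze({ nested: { x: 1 } });
// frozen.nested.x = 2; // <- would throw in strict mode, every level is frozen
```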
@@ -590,8 +604,9 @@
  * @throws UnexpectedError if the value is not serializable as JSON
  * @public exported from `@promptbook/utils`
  */
- function checkSerializableAsJson(name, value) {
+ function checkSerializableAsJson(options) {
  var e_1, _a;
+ var value = options.value, name = options.name, message = options.message;
  if (value === undefined) {
  throw new UnexpectedError("".concat(name, " is undefined"));
  }
@@ -615,12 +630,12 @@
  }
  else if (typeof value === 'object' && Array.isArray(value)) {
  for (var i = 0; i < value.length; i++) {
- checkSerializableAsJson("".concat(name, "[").concat(i, "]"), value[i]);
+ checkSerializableAsJson({ name: "".concat(name, "[").concat(i, "]"), value: value[i], message: message });
  }
  }
  else if (typeof value === 'object') {
  if (value instanceof Date) {
- throw new UnexpectedError(spaceTrim__default["default"]("\n ".concat(name, " is Date\n\n Use `string_date_iso8601` instead\n ")));
+ throw new UnexpectedError(spaceTrim__default["default"](function (block) { return "\n `".concat(name, "` is Date\n\n Use `string_date_iso8601` instead\n\n Additional message for `").concat(name, "`:\n ").concat(block(message || '(nothing)'), "\n "); }));
  }
  else if (value instanceof Map) {
  throw new UnexpectedError("".concat(name, " is Map"));
@@ -632,7 +647,7 @@
  throw new UnexpectedError("".concat(name, " is RegExp"));
  }
  else if (value instanceof Error) {
- throw new UnexpectedError(spaceTrim__default["default"]("\n ".concat(name, " is unserialized Error\n\n Use function `serializeError`\n ")));
+ throw new UnexpectedError(spaceTrim__default["default"](function (block) { return "\n `".concat(name, "` is unserialized Error\n\n Use function `serializeError`\n\n Additional message for `").concat(name, "`:\n ").concat(block(message || '(nothing)'), "\n\n "); }));
  }
  else {
  try {
@@ -642,7 +657,7 @@
  // Note: undefined in object is serializable - it is just omited
  continue;
  }
- checkSerializableAsJson("".concat(name, ".").concat(subName), subValue);
+ checkSerializableAsJson({ name: "".concat(name, ".").concat(subName), value: subValue, message: message });
  }
  }
  catch (e_1_1) { e_1 = { error: e_1_1 }; }
@@ -659,7 +674,7 @@
  if (!(error instanceof Error)) {
  throw error;
  }
- throw new UnexpectedError(spaceTrim__default["default"](function (block) { return "\n ".concat(name, " is not serializable\n\n ").concat(block(error.toString()), "\n "); }));
+ throw new UnexpectedError(spaceTrim__default["default"](function (block) { return "\n `".concat(name, "` is not serializable\n\n ").concat(block(error.toString()), "\n\n Additional message for `").concat(name, "`:\n ").concat(block(message || '(nothing)'), "\n "); }));
  }
  /*
  TODO: [0] Is there some more elegant way to check circular references?
@@ -684,15 +699,134 @@
  }
  }
  else {
- throw new UnexpectedError("".concat(name, " is unknown"));
+ throw new UnexpectedError(spaceTrim__default["default"](function (block) { return "\n `".concat(name, "` is unknown type\n\n Additional message for `").concat(name, "`:\n ").concat(block(message || '(nothing)'), "\n "); }));
  }
  }
  /**
- * TODO: [🧠][🛣] More elegant way to tracking than passing `name`
+ * TODO: Can be return type more type-safe? like `asserts options.value is JsonValue`
  * TODO: [🧠][main] !!! In-memory cache of same values to prevent multiple checks
  * Note: [🐠] This is how `checkSerializableAsJson` + `isSerializableAsJson` together can just retun true/false or rich error message
  */

+ /**
+ * @@@
+ *
+ * @public exported from `@promptbook/utils`
+ */
+ function deepClone(objectValue) {
+ return JSON.parse(JSON.stringify(objectValue));
+ /*
+ !!!!!!!!
+ TODO: [🧠] Is there a better implementation?
+ > const propertyNames = Object.getOwnPropertyNames(objectValue);
+ > for (const propertyName of propertyNames) {
+ > const value = (objectValue as really_any)[propertyName];
+ > if (value && typeof value === 'object') {
+ > deepClone(value);
+ > }
+ > }
+ > return Object.assign({}, objectValue);
+ */
+ }
+ /**
+ * TODO: [🧠] Is there a way how to meaningfully test this utility
+ */
+
+ /**
+ * Utility to export a JSON object from a function
+ *
+ * 1) Checks if the value is serializable as JSON
+ * 2) Makes a deep clone of the object
+ * 2) Orders the object properties
+ * 2) Deeply freezes the cloned object
+ *
+ * Note: This function does not mutates the given object
+ *
+ * @returns The same type of object as the input but read-only and re-ordered
+ * @public exported from `@promptbook/utils`
+ */
+ function exportJson(options) {
+ var name = options.name, value = options.value, order = options.order, message = options.message;
+ checkSerializableAsJson({ name: name, value: value, message: message });
+ var orderedValue =
+ // TODO: Fix error "Type instantiation is excessively deep and possibly infinite."
+ // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+ // @ts-ignore
+ order === undefined
+ ? deepClone(value)
+ : orderJson({
+ value: value,
+ // <- Note: checkSerializableAsJson asserts that the value is serializable as JSON
+ order: order,
+ });
+ $deepFreeze(orderedValue);
+ return orderedValue;
+ }
+ /**
+ * TODO: [🧠] Is there a way how to meaningfully test this utility
+ */
+
+ /**
+ * Order of keys in the pipeline JSON
+ *
+ * @public exported from `@promptbook/core`
+ */
+ var ORDER_OF_PIPELINE_JSON = [
+ 'title',
+ 'pipelineUrl',
+ 'bookVersion',
+ 'description',
+ 'formfactorName',
+ 'parameters',
+ 'tasks',
+ 'personas',
+ 'preparations',
+ 'knowledgeSources',
+ 'knowledgePieces',
+ ];
+ /**
+ * Nonce which is used for replacing things in strings
+ *
+ * @private within the repository
+ */
+ var REPLACING_NONCE = 'u$k42k%!V2zo34w7Fu#@QUHYPW';
+ /**
+ * @@@
+ *
+ * @private within the repository
+ */
+ var RESERVED_PARAMETER_MISSING_VALUE = 'MISSING-' + REPLACING_NONCE;
+ /**
+ * @@@
+ *
+ * @private within the repository
+ */
+ var RESERVED_PARAMETER_RESTRICTED = 'RESTRICTED-' + REPLACING_NONCE;
+ /**
+ * The names of the parameters that are reserved for special purposes
+ *
+ * @public exported from `@promptbook/core`
+ */
+ var RESERVED_PARAMETER_NAMES = exportJson({
+ name: 'RESERVED_PARAMETER_NAMES',
+ message: "The names of the parameters that are reserved for special purposes",
+ value: [
+ 'content',
+ 'context',
+ 'knowledge',
+ 'examples',
+ 'modelName',
+ 'currentDate',
+ // <- TODO: list here all command names
+ // <- TODO: Add more like 'date', 'modelName',...
+ // <- TODO: Add [emoji] + instructions ACRY when adding new reserved parameter
+ ],
+ });
+ /**
+ * Note: [💞] Ignore a discrepancy between file name and entity name
+ */
+
+ // <- TODO: !!!!!!! Auto convert to type `import { ... } from 'type-fest';`
  /**
  * Tests if the value is [🚉] serializable as JSON
  *
@@ -714,7 +848,7 @@
  */
  function isSerializableAsJson(value) {
  try {
- checkSerializableAsJson('', value);
+ checkSerializableAsJson({ value: value });
  return true;
  }
  catch (error) {
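`checkSerializableAsJson` now takes a single options object (`{ name, value, message }`) instead of positional `(name, value)` arguments, and the optional `message` is woven into every error it throws; `exportJson` composes the check with `deepClone`, `orderJson`, and `$deepFreeze`. A sketch of the new call shapes, following the implementations above (the profile values are invented for illustration):

```ts
import { checkSerializableAsJson, exportJson, isSerializableAsJson } from '@promptbook/utils';

// Throws UnexpectedError mentioning both the name and the extra message on failure:
checkSerializableAsJson({
    name: 'userProfile',
    value: { name: 'Alice', registeredAt: '2024-01-01' /* <- ISO string, not a Date */ },
    message: 'Profile received from the API',
});

// Boolean variant built on the same check:
console.log(isSerializableAsJson({ fine: true })); // true
console.log(isSerializableAsJson({ bad: new Date() })); // false (Date is rejected)

// Validated, re-ordered, deeply frozen copy:
const exported = exportJson({
    name: 'userProfile',
    value: { b: 2, a: 1 },
    order: ['a', 'b'], // <- optional; omitting it just clones and freezes
});
```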
@@ -1512,66 +1646,6 @@
  * @@@ write how to combine multiple interceptors
  */

- /**
- * @@@
- *
- * @public exported from `@promptbook/utils`
- */
- function deepClone(objectValue) {
- return JSON.parse(JSON.stringify(objectValue));
- /*
- TODO: [🧠] Is there a better implementation?
- > const propertyNames = Object.getOwnPropertyNames(objectValue);
- > for (const propertyName of propertyNames) {
- > const value = (objectValue as really_any)[propertyName];
- > if (value && typeof value === 'object') {
- > deepClone(value);
- > }
- > }
- > return Object.assign({}, objectValue);
- */
- }
- /**
- * TODO: [🧠] Is there a way how to meaningfully test this utility
- */
-
- /**
- * @@@
- *
- * Note: `$` is used to indicate that this function is not a pure function - it mutates given object
- * Note: This function mutates the object and returns the original (but mutated-deep-freezed) object
- *
- * @returns The same object as the input, but deeply frozen
- * @public exported from `@promptbook/utils`
- */
- function $deepFreeze(objectValue) {
- var e_1, _a;
- if (Array.isArray(objectValue)) {
- return Object.freeze(objectValue.map(function (item) { return $deepFreeze(item); }));
- }
- var propertyNames = Object.getOwnPropertyNames(objectValue);
- try {
- for (var propertyNames_1 = __values(propertyNames), propertyNames_1_1 = propertyNames_1.next(); !propertyNames_1_1.done; propertyNames_1_1 = propertyNames_1.next()) {
- var propertyName = propertyNames_1_1.value;
- var value = objectValue[propertyName];
- if (value && typeof value === 'object') {
- $deepFreeze(value);
- }
- }
- }
- catch (e_1_1) { e_1 = { error: e_1_1 }; }
- finally {
- try {
- if (propertyNames_1_1 && !propertyNames_1_1.done && (_a = propertyNames_1.return)) _a.call(propertyNames_1);
- }
- finally { if (e_1) throw e_1.error; }
- }
- return Object.freeze(objectValue);
- }
- /**
- * TODO: [🧠] Is there a way how to meaningfully test this utility
- */
-
  /**
  * Represents the usage with no resources consumed
  *
@@ -2590,6 +2664,100 @@
  * TODO: [🧠] Maybe clear `sourceFile` or clear when exposing through API or remote server
  */

+ /**
+ * This error type indicates that some tools are missing for pipeline execution or preparation
+ *
+ * @public exported from `@promptbook/core`
+ */
+ var MissingToolsError = /** @class */ (function (_super) {
+ __extends(MissingToolsError, _super);
+ function MissingToolsError(message) {
+ var _this = _super.call(this, spaceTrim.spaceTrim(function (block) { return "\n ".concat(block(message), "\n\n Note: You have probbably forgot to provide some tools for pipeline execution or preparation\n\n "); })) || this;
+ _this.name = 'MissingToolsError';
+ Object.setPrototypeOf(_this, MissingToolsError.prototype);
+ return _this;
+ }
+ return MissingToolsError;
+ }(Error));
+
+ /**
+ * Async version of Array.forEach
+ *
+ * @param array - Array to iterate over
+ * @param options - Options for the function
+ * @param callbackfunction - Function to call for each item
+ * @public exported from `@promptbook/utils`
+ * @deprecated [🪂] Use queues instead
+ */
+ function forEachAsync(array, options, callbackfunction) {
+ return __awaiter(this, void 0, void 0, function () {
+ var _a, maxParallelCount, index, runningTasks, tasks, _loop_1, _b, _c, item, e_1_1;
+ var e_1, _d;
+ return __generator(this, function (_e) {
+ switch (_e.label) {
+ case 0:
+ _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? Infinity : _a;
+ index = 0;
+ runningTasks = [];
+ tasks = [];
+ _loop_1 = function (item) {
+ var currentIndex, task;
+ return __generator(this, function (_f) {
+ switch (_f.label) {
+ case 0:
+ currentIndex = index++;
+ task = callbackfunction(item, currentIndex, array);
+ tasks.push(task);
+ runningTasks.push(task);
+ /* not await */ Promise.resolve(task).then(function () {
+ runningTasks = runningTasks.filter(function (t) { return t !== task; });
+ });
+ if (!(maxParallelCount < runningTasks.length)) return [3 /*break*/, 2];
+ return [4 /*yield*/, Promise.race(runningTasks)];
+ case 1:
+ _f.sent();
+ _f.label = 2;
+ case 2: return [2 /*return*/];
+ }
+ });
+ };
+ _e.label = 1;
+ case 1:
+ _e.trys.push([1, 6, 7, 8]);
+ _b = __values(array), _c = _b.next();
+ _e.label = 2;
+ case 2:
+ if (!!_c.done) return [3 /*break*/, 5];
+ item = _c.value;
+ return [5 /*yield**/, _loop_1(item)];
+ case 3:
+ _e.sent();
+ _e.label = 4;
+ case 4:
+ _c = _b.next();
+ return [3 /*break*/, 2];
+ case 5: return [3 /*break*/, 8];
+ case 6:
+ e_1_1 = _e.sent();
+ e_1 = { error: e_1_1 };
+ return [3 /*break*/, 8];
+ case 7:
+ try {
+ if (_c && !_c.done && (_d = _b.return)) _d.call(_b);
+ }
+ finally { if (e_1) throw e_1.error; }
+ return [7 /*endfinally*/];
+ case 8: return [4 /*yield*/, Promise.all(tasks)];
+ case 9:
+ _e.sent();
+ return [2 /*return*/];
+ }
+ });
+ });
+ }
+
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sourceFile:"./books/prepare-knowledge-from-markdown.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sourceFile:"./books/prepare-knowledge-keywords.book.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sourceFile:"./books/prepare-knowledge-title.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book.md",formfactorName:"GENERIC",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## 
Example\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sourceFile:"./books/prepare-persona.book.md"}];
+
  /**
  * Prettify the html code
  *
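`MissingToolsError` and the (deprecated) `forEachAsync` are moved earlier in the bundle here. `forEachAsync` runs the callback for every item while capping in-flight promises at `maxParallelCount`: once the cap is exceeded it awaits `Promise.race` of the running tasks, and it awaits `Promise.all` of everything at the end. A minimal sketch of those semantics (the URLs are placeholders):

```ts
import { forEachAsync } from '@promptbook/utils';

const urls = ['https://example.com/a', 'https://example.com/b', 'https://example.com/c'];

// At most 2 fetches are in flight at any moment; resolves after all finish
await forEachAsync(urls, { maxParallelCount: 2 }, async (url, index) => {
    const response = await fetch(url);
    console.log(index, url, response.status);
});
```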
@@ -2636,6 +2804,7 @@
  /**
  * Converts promptbook in JSON format to string format
  *
+ * @deprecated TODO: [🥍][🧠] Backup original files in `PipelineJson` same as in Promptbook.studio
  * @param pipelineJson Promptbook in JSON format (.book.json)
  * @returns Promptbook in string format (.book.md)
  * @public exported from `@promptbook/core`
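`pipelineJsonToString` is newly flagged `@deprecated` (pending the planned source-backup mechanism in `PipelineJson`), but per its JSDoc it still converts a compiled `.book.json` pipeline back to its `.book.md` string form. A usage sketch, assuming the export from `@promptbook/core` as documented above:

```ts
import { pipelineJsonToString } from '@promptbook/core';

// pipelineJson would typically come from compilePipeline or a .book.json file
declare const pipelineJson: Parameters<typeof pipelineJsonToString>[0];

const bookMarkdown: string = pipelineJsonToString(pipelineJson);
```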
@@ -2772,166 +2941,72 @@
  var _p = __read(_o.value, 2), unit = _p[0], _q = _p[1], min = _q.min, max = _q.max;
  if (min === max) {
  commands_1.push("EXPECT EXACTLY ".concat(min, " ").concat(capitalize(unit + (min > 1 ? 's' : ''))));
- }
- else {
- if (min !== undefined) {
- commands_1.push("EXPECT MIN ".concat(min, " ").concat(capitalize(unit + (min > 1 ? 's' : ''))));
- } /* not else */
- if (max !== undefined) {
- commands_1.push("EXPECT MAX ".concat(max, " ").concat(capitalize(unit + (max > 1 ? 's' : ''))));
- }
- }
- }
- }
- catch (e_6_1) { e_6 = { error: e_6_1 }; }
- finally {
- try {
- if (_o && !_o.done && (_f = _m.return)) _f.call(_m);
- }
- finally { if (e_6) throw e_6.error; }
- }
- } /* not else */
- if (format) {
- if (format === 'JSON') {
- // TODO: @deprecated remove
- commands_1.push("FORMAT JSON");
- }
- } /* not else */
- pipelineString += '\n\n';
- pipelineString += commands_1.map(function (command) { return "- ".concat(command); }).join('\n');
- pipelineString += '\n\n';
- pipelineString += '```' + contentLanguage;
- pipelineString += '\n';
- pipelineString += spaceTrim__default["default"](content);
- // <- TODO: [main] !!! Escape
- // <- TODO: [🧠] Some clear strategy how to spaceTrim the blocks
- pipelineString += '\n';
- pipelineString += '```';
- pipelineString += '\n\n';
- pipelineString += "`-> {".concat(resultingParameterName, "}`"); // <- TODO: [main] !!! If the parameter here has description, add it and use taskParameterJsonToString
- }
- }
- catch (e_3_1) { e_3 = { error: e_3_1 }; }
- finally {
- try {
- if (tasks_1_1 && !tasks_1_1.done && (_c = tasks_1.return)) _c.call(tasks_1);
- }
- finally { if (e_3) throw e_3.error; }
- }
- return pipelineString;
- }
- /**
- * @private internal utility of `pipelineJsonToString`
- */
- function taskParameterJsonToString(taskParameterJson) {
- var name = taskParameterJson.name, description = taskParameterJson.description;
- var parameterString = "{".concat(name, "}");
- if (description) {
- parameterString = "".concat(parameterString, " ").concat(description);
- }
- return parameterString;
- }
- /**
- * TODO: [🛋] Implement new features and commands into `pipelineJsonToString` + `taskParameterJsonToString` , use `stringifyCommand`
- * TODO: [🧠] Is there a way to auto-detect missing features in pipelineJsonToString
- * TODO: [🏛] Maybe make some markdown builder
- * TODO: [🏛] Escape all
- * TODO: [🧠] Should be in generated .book.md file GENERATOR_WARNING
- */
-
- /**
- * This error type indicates that some tools are missing for pipeline execution or preparation
- *
- * @public exported from `@promptbook/core`
- */
- var MissingToolsError = /** @class */ (function (_super) {
- __extends(MissingToolsError, _super);
- function MissingToolsError(message) {
- var _this = _super.call(this, spaceTrim.spaceTrim(function (block) { return "\n ".concat(block(message), "\n\n Note: You have probbably forgot to provide some tools for pipeline execution or preparation\n\n "); })) || this;
- _this.name = 'MissingToolsError';
- Object.setPrototypeOf(_this, MissingToolsError.prototype);
- return _this;
- }
- return MissingToolsError;
- }(Error));
-
- /**
- * Async version of Array.forEach
- *
- * @param array - Array to iterate over
- * @param options - Options for the function
- * @param callbackfunction - Function to call for each item
- * @public exported from `@promptbook/utils`
- * @deprecated [🪂] Use queues instead
- */
- function forEachAsync(array, options, callbackfunction) {
- return __awaiter(this, void 0, void 0, function () {
- var _a, maxParallelCount, index, runningTasks, tasks, _loop_1, _b, _c, item, e_1_1;
- var e_1, _d;
- return __generator(this, function (_e) {
- switch (_e.label) {
- case 0:
- _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? Infinity : _a;
- index = 0;
- runningTasks = [];
- tasks = [];
- _loop_1 = function (item) {
- var currentIndex, task;
- return __generator(this, function (_f) {
- switch (_f.label) {
- case 0:
- currentIndex = index++;
- task = callbackfunction(item, currentIndex, array);
- tasks.push(task);
- runningTasks.push(task);
- /* not await */ Promise.resolve(task).then(function () {
- runningTasks = runningTasks.filter(function (t) { return t !== task; });
- });
- if (!(maxParallelCount < runningTasks.length)) return [3 /*break*/, 2];
- return [4 /*yield*/, Promise.race(runningTasks)];
- case 1:
- _f.sent();
- _f.label = 2;
- case 2: return [2 /*return*/];
- }
- });
- };
- _e.label = 1;
- case 1:
- _e.trys.push([1, 6, 7, 8]);
- _b = __values(array), _c = _b.next();
- _e.label = 2;
- case 2:
- if (!!_c.done) return [3 /*break*/, 5];
- item = _c.value;
- return [5 /*yield**/, _loop_1(item)];
- case 3:
- _e.sent();
- _e.label = 4;
- case 4:
- _c = _b.next();
- return [3 /*break*/, 2];
- case 5: return [3 /*break*/, 8];
- case 6:
- e_1_1 = _e.sent();
- e_1 = { error: e_1_1 };
- return [3 /*break*/, 8];
- case 7:
+ }
+ else {
+ if (min !== undefined) {
+ commands_1.push("EXPECT MIN ".concat(min, " ").concat(capitalize(unit + (min > 1 ? 's' : ''))));
+ } /* not else */
+ if (max !== undefined) {
+ commands_1.push("EXPECT MAX ".concat(max, " ").concat(capitalize(unit + (max > 1 ? 's' : ''))));
+ }
+ }
+ }
+ }
+ catch (e_6_1) { e_6 = { error: e_6_1 }; }
+ finally {
  try {
- if (_c && !_c.done && (_d = _b.return)) _d.call(_b);
+ if (_o && !_o.done && (_f = _m.return)) _f.call(_m);
  }
- finally { if (e_1) throw e_1.error; }
- return [7 /*endfinally*/];
- case 8: return [4 /*yield*/, Promise.all(tasks)];
- case 9:
- _e.sent();
- return [2 /*return*/];
- }
- });
- });
+ finally { if (e_6) throw e_6.error; }
+ }
+ } /* not else */
+ if (format) {
+ if (format === 'JSON') {
+ // TODO: @deprecated remove
+ commands_1.push("FORMAT JSON");
+ }
+ } /* not else */
+ pipelineString += '\n\n';
+ pipelineString += commands_1.map(function (command) { return "- ".concat(command); }).join('\n');
+ pipelineString += '\n\n';
+ pipelineString += '```' + contentLanguage;
+ pipelineString += '\n';
+ pipelineString += spaceTrim__default["default"](content);
+ // <- TODO: [main] !!! Escape
+ // <- TODO: [🧠] Some clear strategy how to spaceTrim the blocks
+ pipelineString += '\n';
+ pipelineString += '```';
+ pipelineString += '\n\n';
+ pipelineString += "`-> {".concat(resultingParameterName, "}`"); // <- TODO: [main] !!! If the parameter here has description, add it and use taskParameterJsonToString
+ }
+ }
+ catch (e_3_1) { e_3 = { error: e_3_1 }; }
+ finally {
+ try {
+ if (tasks_1_1 && !tasks_1_1.done && (_c = tasks_1.return)) _c.call(tasks_1);
+ }
+ finally { if (e_3) throw e_3.error; }
+ }
+ return pipelineString;
  }
-
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./books/prepare-knowledge-from-markdown.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./books/prepare-knowledge-keywords.book.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./books/prepare-knowledge-title.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book.md",formfactorName:"GENERIC",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## 
Example\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./books/prepare-persona.book.md"}];
+ /**
+ * @private internal utility of `pipelineJsonToString`
+ */
+ function taskParameterJsonToString(taskParameterJson) {
+ var name = taskParameterJson.name, description = taskParameterJson.description;
+ var parameterString = "{".concat(name, "}");
+ if (description) {
+ parameterString = "".concat(parameterString, " ").concat(description);
+ }
+ return parameterString;
+ }
+ /**
+ * TODO: [🛋] Implement new features and commands into `pipelineJsonToString` + `taskParameterJsonToString` , use `stringifyCommand`
+ * TODO: [🧠] Is there a way to auto-detect missing features in pipelineJsonToString
+ * TODO: [🏛] Maybe make some markdown builder
+ * TODO: [🏛] Escape all
+ * TODO: [🧠] Should be in generated .book.md file GENERATOR_WARNING
+ */

  /**
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
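The moved block above shows how `pipelineJsonToString` renders task expectations back into commands: equal `min`/`max` become a single `EXPECT EXACTLY` line, otherwise independent `EXPECT MIN` / `EXPECT MAX` lines, with the unit passed through `capitalize` and a plural `s`. A distilled sketch of that mapping (simplified: the `capitalize`/pluralization helper is omitted, and a `min !== undefined` guard is added for clarity):

```ts
// Given a task expectation { min, max } for some unit:
function expectationToCommands(unit: string, min?: number, max?: number): string[] {
    const commands: string[] = [];
    if (min === max && min !== undefined) {
        commands.push(`EXPECT EXACTLY ${min} ${unit}`);
    } else {
        if (min !== undefined) commands.push(`EXPECT MIN ${min} ${unit}`);
        if (max !== undefined) commands.push(`EXPECT MAX ${max} ${unit}`);
    }
    return commands;
}

console.log(expectationToCommands('Words', 1, 8)); // ['EXPECT MIN 1 Words', 'EXPECT MAX 8 Words']
```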
@@ -3176,6 +3251,21 @@
  }
  };
  try {
+ /*
+ TODO: [🧠][🅾] Should be empty pipeline valid or not
+ // Note: Check that pipeline has some tasks
+ if (pipeline.tasks.length === 0) {
+ throw new PipelineLogicError(
+ spaceTrim(
+ (block) => `
+ Pipeline must have at least one task
+
+ ${block(pipelineIdentification)}
+ `,
+ ),
+ );
+ }
+ */
  // Note: Check each parameter individually
  for (var _d = __values(pipeline.parameters), _e = _d.next(); !_e.done; _e = _d.next()) {
  var parameter = _e.value;
@@ -3336,6 +3426,9 @@
  while (unresovedTasks.length > 0) {
  _loop_3();
  }
+ // Note: Check that formfactor is corresponding to the pipeline interface
+ // TODO: !!!!!! Implement this
+ // pipeline.formfactorName
  }
  /**
  * TODO: !! [🧞‍♀️] Do not allow joker + foreach
@@ -3419,29 +3512,10 @@
  return parameterNames;
  }

- /**
- * @@@
- * @@@
- *
- * Note: This function mutates the object and returns the original (but mutated-deep-freezed) object
- *
- * @param name - Name of the object for debugging purposes
- * @param objectValue - Object to be deeply frozen
- * @returns The same object as the input, but deeply frozen
- * @private this is in comparison to `deepFreeze` a more specific utility and maybe not very good practice to use without specific reason and considerations
- */
- function $asDeeplyFrozenSerializableJson(name, objectValue) {
- checkSerializableAsJson(name, objectValue);
- return $deepFreeze(objectValue);
- }
- /**
- * TODO: [🧠][🛣] More elegant way to tracking than passing `name`
- * TODO: [🧠] Is there a way how to meaningfully test this utility
- */
-
  /**
  * Unprepare just strips the preparation data of the pipeline
  *
+ * @deprecated In future version this function will be removed or deprecated
  * @public exported from `@promptbook/core`
  */
  function unpreparePipeline(pipeline) {
@@ -3456,7 +3530,12 @@
  delete taskUnprepared.preparedContent;
  return taskUnprepared;
  });
- return $asDeeplyFrozenSerializableJson('Unprepared PipelineJson', __assign(__assign({}, pipeline), { tasks: tasks, knowledgeSources: knowledgeSources, knowledgePieces: [], personas: personas, preparations: [] }));
+ return exportJson({
+ name: 'pipelineJson',
+ message: "Result of `unpreparePipeline`",
+ order: ORDER_OF_PIPELINE_JSON,
+ value: __assign(__assign({}, pipeline), { tasks: tasks, knowledgeSources: knowledgeSources, knowledgePieces: [], personas: personas, preparations: [] }),
+ });
  }
  /**
  * TODO: [🧿] Maybe do same process with same granularity and subfinctions as `preparePipeline`
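The removed `$asDeeplyFrozenSerializableJson(name, value)` helper is replaced throughout this release by `exportJson`, which additionally clones (so the input is not mutated), applies a key order, and carries a debugging `message`. `unpreparePipeline` above is the canonical pattern; a sketch of the migration, using the `ORDER_OF_PIPELINE_JSON` constant added earlier in this diff:

```ts
import { exportJson } from '@promptbook/utils';
import { ORDER_OF_PIPELINE_JSON } from '@promptbook/core';

declare const pipelineJson: Record<string, unknown>; // <- stands in for a real PipelineJson

// 0.79.x:  $asDeeplyFrozenSerializableJson('Unprepared PipelineJson', pipelineJson)
// 0.80.x:
const unprepared = exportJson({
    name: 'pipelineJson',
    message: 'Result of `unpreparePipeline`',
    order: ORDER_OF_PIPELINE_JSON,
    value: pipelineJson,
});
```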
@@ -5524,16 +5603,21 @@
  // Note: Wait a short time to prevent race conditions
  _g.sent();
  _g.label = 6;
- case 6: return [2 /*return*/, $asDeeplyFrozenSerializableJson("Unuccessful PipelineExecutorResult (with missing parameter {".concat(parameter.name, "}) PipelineExecutorResult"), {
- isSuccessful: false,
- errors: __spreadArray([
- new PipelineExecutionError("Parameter `{".concat(parameter.name, "}` is required as an input parameter"))
- ], __read(errors), false).map(serializeError),
- warnings: [],
- executionReport: executionReport,
- outputParameters: {},
- usage: ZERO_USAGE,
- preparedPipeline: preparedPipeline,
+ case 6: return [2 /*return*/, exportJson({
+ name: "executionReport",
+ message: "Unuccessful PipelineExecutorResult (with missing parameter {".concat(parameter.name, "}) PipelineExecutorResult",
+ order: [],
+ value: {
+ isSuccessful: false,
+ errors: __spreadArray([
+ new PipelineExecutionError("Parameter `{".concat(parameter.name, "}` is required as an input parameter"))
+ ], __read(errors), false).map(serializeError),
+ warnings: [],
+ executionReport: executionReport,
+ outputParameters: {},
+ usage: ZERO_USAGE,
+ preparedPipeline: preparedPipeline,
+ },
  })];
  case 7:
  _b = _a.next();
@@ -5572,16 +5656,21 @@
  // Note: Wait a short time to prevent race conditions
  _h.sent();
  _h.label = 3;
- case 3: return [2 /*return*/, { value: $asDeeplyFrozenSerializableJson(spaceTrim.spaceTrim(function (block) { return "\n Unuccessful PipelineExecutorResult (with extra parameter {".concat(parameter.name, "}) PipelineExecutorResult\n\n ").concat(block(pipelineIdentification), "\n "); }), {
- isSuccessful: false,
- errors: __spreadArray([
- new PipelineExecutionError(spaceTrim.spaceTrim(function (block) { return "\n Parameter `{".concat(parameter.name, "}` is passed as input parameter but it is not input\n\n ").concat(block(pipelineIdentification), "\n "); }))
- ], __read(errors), false).map(serializeError),
- warnings: warnings.map(serializeError),
- executionReport: executionReport,
- outputParameters: {},
- usage: ZERO_USAGE,
- preparedPipeline: preparedPipeline,
+ case 3: return [2 /*return*/, { value: exportJson({
+ name: 'pipelineExecutorResult',
+ message: spaceTrim.spaceTrim(function (block) { return "\n Unuccessful PipelineExecutorResult (with extra parameter {".concat(parameter.name, "}) PipelineExecutorResult\n\n ").concat(block(pipelineIdentification), "\n "); }),
+ order: [],
+ value: {
+ isSuccessful: false,
+ errors: __spreadArray([
+ new PipelineExecutionError(spaceTrim.spaceTrim(function (block) { return "\n Parameter `{".concat(parameter.name, "}` is passed as input parameter but it is not input\n\n ").concat(block(pipelineIdentification), "\n "); }))
+ ], __read(errors), false).map(serializeError),
+ warnings: warnings.map(serializeError),
+ executionReport: executionReport,
+ outputParameters: {},
+ usage: ZERO_USAGE,
+ preparedPipeline: preparedPipeline,
+ },
  }) }];
  case 4: return [2 /*return*/];
  }
@@ -5735,14 +5824,19 @@
  // Note: Wait a short time to prevent race conditions
  _g.sent();
  _g.label = 27;
- case 27: return [2 /*return*/, $asDeeplyFrozenSerializableJson('Unuccessful PipelineExecutorResult (with misc errors) PipelineExecutorResult', {
- isSuccessful: false,
- errors: __spreadArray([error_1], __read(errors), false).map(serializeError),
- warnings: warnings.map(serializeError),
- usage: usage_1,
- executionReport: executionReport,
- outputParameters: outputParameters_1,
- preparedPipeline: preparedPipeline,
+ case 27: return [2 /*return*/, exportJson({
+ name: 'pipelineExecutorResult',
+ message: "Unuccessful PipelineExecutorResult (with misc errors) PipelineExecutorResult",
+ order: [],
+ value: {
+ isSuccessful: false,
+ errors: __spreadArray([error_1], __read(errors), false).map(serializeError),
+ warnings: warnings.map(serializeError),
+ usage: usage_1,
+ executionReport: executionReport,
+ outputParameters: outputParameters_1,
+ preparedPipeline: preparedPipeline,
+ },
  })];
  case 28:
  usage = addUsage.apply(void 0, __spreadArray([], __read(executionReport.promptExecutions.map(function (_a) {
@@ -5763,14 +5857,19 @@
  // Note: Wait a short time to prevent race conditions
  _g.sent();
  _g.label = 30;
- case 30: return [2 /*return*/, $asDeeplyFrozenSerializableJson('Successful PipelineExecutorResult', {
- isSuccessful: true,
- errors: errors.map(serializeError),
- warnings: warnings.map(serializeError),
- usage: usage,
- executionReport: executionReport,
- outputParameters: outputParameters,
- preparedPipeline: preparedPipeline,
+ case 30: return [2 /*return*/, exportJson({
+ name: 'pipelineExecutorResult',
+ message: "Successful PipelineExecutorResult",
+ order: [],
+ value: {
+ isSuccessful: true,
+ errors: errors.map(serializeError),
+ warnings: warnings.map(serializeError),
+ usage: usage,
+ executionReport: executionReport,
+ outputParameters: outputParameters,
+ preparedPipeline: preparedPipeline,
+ },
  })];
  }
  });
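All four `PipelineExecutorResult` return paths now go through `exportJson` (with `order: []`), so results come back deeply frozen as before, plus the serializability check and a diagnostic `message`. Code that mutated the result in place will throw in strict mode; a hedged sketch of the safe pattern, using the `deepClone` utility from this same release (the result shape is simplified here):

```ts
import { deepClone } from '@promptbook/utils';

declare const result: { outputParameters: Record<string, string> }; // <- frozen PipelineExecutorResult, shape simplified

// result.outputParameters.extra = '...'; // <- TypeError in strict mode, the result is deeply frozen

const mutable = deepClone(result); // JSON round-trip clone, as implemented above
mutable.outputParameters.extra = '...'; // fine on the clone
```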
@@ -6372,36 +6471,6 @@
  * [ ] One piece can have multiple sources
  */

- /**
- * @@@
- *
- * Note: It is usefull @@@
- *
- * @param pipeline
- * @public exported from `@promptbook/utils`
- */
- function clonePipeline(pipeline) {
- // Note: Not using spread operator (...) because @@@
- var pipelineUrl = pipeline.pipelineUrl, sourceFile = pipeline.sourceFile, title = pipeline.title, bookVersion = pipeline.bookVersion, description = pipeline.description, formfactorName = pipeline.formfactorName, parameters = pipeline.parameters, tasks = pipeline.tasks, knowledgeSources = pipeline.knowledgeSources, knowledgePieces = pipeline.knowledgePieces, personas = pipeline.personas, preparations = pipeline.preparations;
- return {
- pipelineUrl: pipelineUrl,
- sourceFile: sourceFile,
- title: title,
- bookVersion: bookVersion,
- description: description,
- formfactorName: formfactorName,
- parameters: parameters,
- tasks: tasks,
- knowledgeSources: knowledgeSources,
- knowledgePieces: knowledgePieces,
- personas: personas,
- preparations: preparations,
- };
- }
- /**
- * TODO: [🍙] Make some standard order of json properties
- */
-
  /**
  * @@@
  *
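`clonePipeline` (previously `@public exported from @promptbook/utils`) is removed here along with its field-by-field copy. The diff names no one-to-one replacement; the generic `deepClone` introduced earlier is a plausible stand-in, though note it copies deeply via a JSON round-trip rather than copying field references:

```ts
import { deepClone } from '@promptbook/utils';

declare const pipeline: { title: string; tasks: Array<unknown> }; // <- stands in for PipelineJson

// clonePipeline(pipeline) copied a fixed list of fields by reference;
// deepClone is an assumed substitute that clones the whole structure instead:
const copy = deepClone(pipeline);
```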
@@ -6442,6 +6511,7 @@
  });
  }
  /**
+ * TODO: [😂] Adding knowledge should be convert to async high-level abstractions, simmilar thing with expectations to sync high-level abstractions
  * TODO: [🧠] Add context to each task (if missing)
  * TODO: [🧠] What is better name `prepareTask` or `prepareTaskAndParameters`
  * TODO: [♨][main] !!! Prepare index the examples and maybe tasks
@@ -6512,168 +6582,49 @@
6512
6582
  });
6513
6583
  }); })];
6514
6584
  case 1:
6515
- _c.sent();
6516
- knowledgeSourcesPrepared = knowledgeSources.map(function (source) { return (__assign(__assign({}, source), { preparationIds: [/* TODO: [🧊] -> */ currentPreparation.id] })); });
6517
- return [4 /*yield*/, prepareKnowledgePieces(knowledgeSources /* <- TODO: [🧊] {knowledgeSources, knowledgePieces} */, __assign(__assign({}, tools), { llm: llmToolsWithUsage }), __assign(__assign({}, options), { rootDirname: rootDirname, maxParallelCount: maxParallelCount /* <- TODO: [🪂] */, isVerbose: isVerbose }))];
6518
- case 2:
6519
- partialknowledgePiecesPrepared = _c.sent();
6520
- knowledgePiecesPrepared = partialknowledgePiecesPrepared.map(function (piece) { return (__assign(__assign({}, piece), { preparationIds: [/* TODO: [🧊] -> */ currentPreparation.id] })); });
6521
- return [4 /*yield*/, prepareTasks({
6522
- parameters: parameters,
6523
- tasks: tasks,
6524
- knowledgePiecesCount: knowledgePiecesPrepared.length,
6525
- }, __assign(__assign({}, tools), { llm: llmToolsWithUsage }), {
6526
- rootDirname: rootDirname,
6527
- maxParallelCount: maxParallelCount /* <- TODO: [🪂] */,
6528
- isVerbose: isVerbose,
6529
- })];
6530
- case 3:
6531
- tasksPrepared = (_c.sent()).tasksPrepared;
6532
- // ----- /Tasks preparation -----
6533
- // Note: Count total usage
6534
- currentPreparation.usage = llmToolsWithUsage.getTotalUsage();
6535
- return [2 /*return*/, $asDeeplyFrozenSerializableJson('Prepared PipelineJson', __assign(__assign({}, clonePipeline(pipeline)), { tasks: __spreadArray([], __read(tasksPrepared), false),
6536
- // <- TODO: [🪓] Here should be no need for spreading new array, just ` tasks: tasksPrepared`
6537
- knowledgeSources: knowledgeSourcesPrepared, knowledgePieces: knowledgePiecesPrepared, personas: preparedPersonas, preparations: __spreadArray([], __read(preparations), false) }))];
6538
- }
6539
- });
6540
- });
6541
- }
6542
- /**
6543
- * TODO: Write tests for `preparePipeline`
6544
- * TODO: [🏏] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
6545
- * TODO: [🧊] In future one preparation can take data from previous preparation and save tokens and time
6546
- * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
6547
- * TODO: [🧠][♏] Maybe if expecting JSON (In Anthropic Claude and other models without non-json) and its not specified in prompt content, append the instructions
6548
- * @see https://docs.anthropic.com/en/docs/test-and-evaluate/strengthen-guardrails/increase-consistency#specify-the-desired-output-format
6549
- */
6550
-
6551
- /**
6552
- * @@@
6553
- *
6554
- * @deprecated https://github.com/webgptorg/promptbook/pull/186
6555
- * @see https://github.com/webgptorg/promptbook/discussions/171
6556
- *
6557
- * @public exported from `@promptbook/core`
6558
- */
6559
- function getPipelineInterface(pipeline) {
6560
- var e_1, _a, e_2, _b;
6561
- var pipelineInterface = {
6562
- inputParameters: [],
6563
- outputParameters: [],
6564
- };
6565
- try {
6566
- for (var _c = __values(pipeline.parameters), _d = _c.next(); !_d.done; _d = _c.next()) {
6567
- var parameter = _d.value;
6568
- var isInput = parameter.isInput, isOutput = parameter.isOutput;
6569
- if (isInput) {
6570
- pipelineInterface.inputParameters.push(deepClone(parameter));
6571
- }
6572
- if (isOutput) {
6573
- pipelineInterface.outputParameters.push(deepClone(parameter));
6574
- }
6575
- }
6576
- }
6577
- catch (e_1_1) { e_1 = { error: e_1_1 }; }
6578
- finally {
6579
- try {
6580
- if (_d && !_d.done && (_a = _c.return)) _a.call(_c);
6581
- }
6582
- finally { if (e_1) throw e_1.error; }
6583
- }
6584
- try {
6585
- for (var _e = __values(['inputParameters', 'outputParameters']), _f = _e.next(); !_f.done; _f = _e.next()) {
6586
- var key = _f.value;
6587
- pipelineInterface[key].sort(function (_a, _b) {
6588
- var name1 = _a.name;
6589
- var name2 = _b.name;
6590
- return name1.localeCompare(name2);
6591
- });
6592
- }
6593
- }
6594
- catch (e_2_1) { e_2 = { error: e_2_1 }; }
6595
- finally {
6596
- try {
6597
- if (_f && !_f.done && (_b = _e.return)) _b.call(_e);
6598
- }
6599
- finally { if (e_2) throw e_2.error; }
6600
- }
6601
- return $deepFreeze(pipelineInterface);
6602
- }
6603
-
6604
- /**
6605
- * @@@
6606
- *
6607
- * @deprecated https://github.com/webgptorg/promptbook/pull/186
6608
- * @see https://github.com/webgptorg/promptbook/discussions/171
6609
- *
6610
- * @public exported from `@promptbook/core`
6611
- */
6612
- function isPipelineInterfacesEqual(pipelineInterface1, pipelineInterface2) {
6613
- var e_1, _a, e_2, _b;
6614
- try {
6615
- for (var _c = __values(['inputParameters', 'outputParameters']), _d = _c.next(); !_d.done; _d = _c.next()) {
6616
- var whichParameters = _d.value;
6617
- var parameters1 = pipelineInterface1[whichParameters]; // <- Note: `isPipelineInterfacesEqual` is just temporary solution, no need to fix this
6618
- var parameters2 = pipelineInterface2[whichParameters];
6619
- if (parameters1.length !== parameters2.length) {
6620
- return false;
6621
- }
6622
- var _loop_1 = function (parameter) {
6623
- var matchingParameter = parameters2.find(function (_a) {
6624
- var name = _a.name;
6625
- return name === parameter.name;
6626
- });
6627
- if (!matchingParameter) {
6628
- return { value: false };
6629
- }
6630
- // Note: Do not compare description, it is not relevant for compatibility
6631
- if (matchingParameter.isInput !== parameter.isInput) {
6632
- return { value: false };
6633
- }
6634
- if (matchingParameter.isOutput !== parameter.isOutput) {
6635
- return { value: false };
6636
- }
6637
- };
6638
- try {
6639
- for (var parameters1_1 = (e_2 = void 0, __values(parameters1)), parameters1_1_1 = parameters1_1.next(); !parameters1_1_1.done; parameters1_1_1 = parameters1_1.next()) {
6640
- var parameter = parameters1_1_1.value;
6641
- var state_1 = _loop_1(parameter);
6642
- if (typeof state_1 === "object")
6643
- return state_1.value;
6644
- }
6645
- }
6646
- catch (e_2_1) { e_2 = { error: e_2_1 }; }
6647
- finally {
6648
- try {
6649
- if (parameters1_1_1 && !parameters1_1_1.done && (_b = parameters1_1.return)) _b.call(parameters1_1);
6650
- }
6651
- finally { if (e_2) throw e_2.error; }
6585
+ _c.sent();
6586
+ knowledgeSourcesPrepared = knowledgeSources.map(function (source) { return (__assign(__assign({}, source), { preparationIds: [/* TODO: [🧊] -> */ currentPreparation.id] })); });
6587
+ return [4 /*yield*/, prepareKnowledgePieces(knowledgeSources /* <- TODO: [🧊] {knowledgeSources, knowledgePieces} */, __assign(__assign({}, tools), { llm: llmToolsWithUsage }), __assign(__assign({}, options), { rootDirname: rootDirname, maxParallelCount: maxParallelCount /* <- TODO: [🪂] */, isVerbose: isVerbose }))];
6588
+ case 2:
6589
+ partialknowledgePiecesPrepared = _c.sent();
6590
+ knowledgePiecesPrepared = partialknowledgePiecesPrepared.map(function (piece) { return (__assign(__assign({}, piece), { preparationIds: [/* TODO: [🧊] -> */ currentPreparation.id] })); });
6591
+ return [4 /*yield*/, prepareTasks({
6592
+ parameters: parameters,
6593
+ tasks: tasks,
6594
+ knowledgePiecesCount: knowledgePiecesPrepared.length,
6595
+ }, __assign(__assign({}, tools), { llm: llmToolsWithUsage }), {
6596
+ rootDirname: rootDirname,
6597
+ maxParallelCount: maxParallelCount /* <- TODO: [🪂] */,
6598
+ isVerbose: isVerbose,
6599
+ })];
6600
+ case 3:
6601
+ tasksPrepared = (_c.sent()).tasksPrepared;
6602
+ // ----- /Tasks preparation -----
6603
+ // TODO: [😂] Use all `AsyncHighLevelAbstraction`s here
6604
+ // Note: Count total usage
6605
+ currentPreparation.usage = llmToolsWithUsage.getTotalUsage();
6606
+ return [2 /*return*/, exportJson({
6607
+ name: 'pipelineJson',
6608
+ message: "Result of `preparePipeline`",
6609
+ order: ORDER_OF_PIPELINE_JSON,
6610
+ value: __assign(__assign({}, pipeline), {
6611
+ // <- TODO: Probably deeply clone the pipeline because `$exportJson` freezes the subobjects
6612
+ knowledgeSources: knowledgeSourcesPrepared, knowledgePieces: knowledgePiecesPrepared, tasks: __spreadArray([], __read(tasksPrepared), false),
6613
+ // <- TODO: [🪓] Here should be no need for spreading new array, just ` tasks: tasksPrepared`
6614
+ personas: preparedPersonas, preparations: __spreadArray([], __read(preparations), false) }),
6615
+ })];
6652
6616
  }
6653
- }
6654
- }
6655
- catch (e_1_1) { e_1 = { error: e_1_1 }; }
6656
- finally {
6657
- try {
6658
- if (_d && !_d.done && (_a = _c.return)) _a.call(_c);
6659
- }
6660
- finally { if (e_1) throw e_1.error; }
6661
- }
6662
- return true;
6617
+ });
6618
+ });
6663
6619
  }
6664
-
6665
6620
  /**
6666
- * @@@
6667
- *
6668
- * @deprecated https://github.com/webgptorg/promptbook/pull/186
6669
- * @see https://github.com/webgptorg/promptbook/discussions/171
6670
- *
6671
- * @public exported from `@promptbook/core`
6621
+ * TODO: Write tests for `preparePipeline`
6622
+ * TODO: [🏏] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
6623
+ * TODO: [🧊] In future one preparation can take data from previous preparation and save tokens and time
6624
+ * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
6625
+ * TODO: [🧠][♏] Maybe if expecting JSON (In Anthropic Claude and other models without non-json) and its not specified in prompt content, append the instructions
6626
+ * @see https://docs.anthropic.com/en/docs/test-and-evaluate/strengthen-guardrails/increase-consistency#specify-the-desired-output-format
6672
6627
  */
6673
- function isPipelineImplementingInterface(options) {
6674
- var pipeline = options.pipeline, pipelineInterface = options.pipelineInterface;
6675
- return isPipelineInterfacesEqual(getPipelineInterface(pipeline), pipelineInterface);
6676
- }
6677
6628
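A minimal usage sketch of the new `exportJson` helper, inferred from the call sites in this diff (the exact freezing and key-ordering semantics are assumptions; `pipelineJson` stands for any prepared pipeline object):

    var prepared = exportJson({
        name: 'pipelineJson', // <- identifier used in diagnostics
        message: 'Result of `preparePipeline`', // <- human-readable context
        order: ORDER_OF_PIPELINE_JSON, // <- preferred key order of the serialized result
        value: pipelineJson, // <- the object to freeze and return
    });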
 
6678
6629
  /**
6679
6630
  * All available task types
@@ -6925,7 +6876,8 @@
6925
6876
  expectResultingParameterName();
6926
6877
  var parameter = $pipelineJson.parameters.find(function (param) { return param.name === $taskJson.resultingParameterName; });
6927
6878
  if (parameter === undefined) {
6928
- throw new ParseError("Can not find parameter {".concat($taskJson.resultingParameterName, "} to assign example value on it"));
6879
+ // TODO: !!!!!! Change to logic error for higher level abstractions to work
6880
+ throw new ParseError("Parameter `{".concat($taskJson.resultingParameterName, "}` is not defined so can not define example value of it"));
6929
6881
  }
6930
6882
  parameter.exampleValues = parameter.exampleValues || [];
6931
6883
  parameter.exampleValues.push($taskJson.content);
@@ -8343,7 +8295,7 @@
8343
8295
  * Note: `$` is used to indicate that this function mutates given `pipelineJson`
8344
8296
  */
8345
8297
  $applyToPipelineJson: function (command, $pipelineJson) {
8346
- // Note: [🍣] Do nothing, its application is implemented separately in `pipelineStringToJsonSync`
8298
+ // Note: [🍣] Do nothing, its application is implemented separately in `precompilePipeline`
8347
8299
  },
8348
8300
  /**
8349
8301
  * Apply the PARAMETER command to the `pipelineJson`
@@ -8351,7 +8303,7 @@
8351
8303
  * Note: `$` is used to indicate that this function mutates given `taskJson`
8352
8304
  */
8353
8305
  $applyToTaskJson: function (command, $taskJson, $pipelineJson) {
8354
- // Note: [🍣] Do nothing, its application is implemented separately in `pipelineStringToJsonSync`
8306
+ // Note: [🍣] Do nothing, its application is implemented separately in `precompilePipeline`
8355
8307
  },
8356
8308
  /**
8357
8309
  * Converts the PARAMETER command back to string
@@ -8859,7 +8811,7 @@
8859
8811
  * @returns the parser for the command
8860
8812
  * @throws {UnexpectedError} if the parser is not found
8861
8813
  *
8862
- * @private within the pipelineStringToJson
8814
+ * @private within the compilePipeline
8863
8815
  */
8864
8816
  function getParserForCommand(command) {
8865
8817
  var commandParser = COMMANDS.find(function (commandParser) { return commandParser.name === command.type; });
@@ -8895,7 +8847,7 @@
8895
8847
  * @returns parsed command object
8896
8848
  * @throws {ParseError} if the command is invalid
8897
8849
  *
8898
- * @private within the pipelineStringToJson
8850
+ * @private within the compilePipeline
8899
8851
  */
8900
8852
  function parseCommand(raw, usagePlace) {
8901
8853
  if (raw.includes('\n') || raw.includes('\r')) {
@@ -9033,6 +8985,295 @@
9033
8985
  return null;
9034
8986
  }
9035
8987
 
8988
+ /**
8989
+ * @@@
8990
+ *
8991
+ * @deprecated https://github.com/webgptorg/promptbook/pull/186
8992
+ * @see https://github.com/webgptorg/promptbook/discussions/171
8993
+ *
8994
+ * @public exported from `@promptbook/core`
8995
+ */
8996
+ function getPipelineInterface(pipeline) {
8997
+ var e_1, _a, e_2, _b;
8998
+ var pipelineInterface = {
8999
+ inputParameters: [],
9000
+ outputParameters: [],
9001
+ };
9002
+ try {
9003
+ for (var _c = __values(pipeline.parameters), _d = _c.next(); !_d.done; _d = _c.next()) {
9004
+ var parameter = _d.value;
9005
+ var isInput = parameter.isInput, isOutput = parameter.isOutput;
9006
+ if (isInput) {
9007
+ pipelineInterface.inputParameters.push(deepClone(parameter));
9008
+ }
9009
+ if (isOutput) {
9010
+ pipelineInterface.outputParameters.push(deepClone(parameter));
9011
+ }
9012
+ }
9013
+ }
9014
+ catch (e_1_1) { e_1 = { error: e_1_1 }; }
9015
+ finally {
9016
+ try {
9017
+ if (_d && !_d.done && (_a = _c.return)) _a.call(_c);
9018
+ }
9019
+ finally { if (e_1) throw e_1.error; }
9020
+ }
9021
+ try {
9022
+ for (var _e = __values(['inputParameters', 'outputParameters']), _f = _e.next(); !_f.done; _f = _e.next()) {
9023
+ var key = _f.value;
9024
+ pipelineInterface[key].sort(function (_a, _b) {
9025
+ var name1 = _a.name;
9026
+ var name2 = _b.name;
9027
+ return name1.localeCompare(name2);
9028
+ });
9029
+ }
9030
+ }
9031
+ catch (e_2_1) { e_2 = { error: e_2_1 }; }
9032
+ finally {
9033
+ try {
9034
+ if (_f && !_f.done && (_b = _e.return)) _b.call(_e);
9035
+ }
9036
+ finally { if (e_2) throw e_2.error; }
9037
+ }
9038
+ return exportJson({
9039
+ name: "pipelineInterface",
9040
+ message: "Result of `getPipelineInterface`",
9041
+ order: ['inputParameters', 'outputParameters'],
9042
+ value: pipelineInterface,
9043
+ });
9044
+ }
9045
+
9046
+ /**
9047
+ * @@@
9048
+ *
9049
+ * @deprecated https://github.com/webgptorg/promptbook/pull/186
9050
+ * @see https://github.com/webgptorg/promptbook/discussions/171
9051
+ *
9052
+ * @public exported from `@promptbook/core`
9053
+ */
9054
+ function isPipelineInterfacesEqual(pipelineInterface1, pipelineInterface2) {
9055
+ var e_1, _a, e_2, _b;
9056
+ try {
9057
+ for (var _c = __values(['inputParameters', 'outputParameters']), _d = _c.next(); !_d.done; _d = _c.next()) {
9058
+ var whichParameters = _d.value;
9059
+ var parameters1 = pipelineInterface1[whichParameters]; // <- Note: `isPipelineInterfacesEqual` is just a temporary solution, no need to fix this
9060
+ var parameters2 = pipelineInterface2[whichParameters];
9061
+ if (parameters1.length !== parameters2.length) {
9062
+ return false;
9063
+ }
9064
+ var _loop_1 = function (parameter) {
9065
+ var matchingParameter = parameters2.find(function (_a) {
9066
+ var name = _a.name;
9067
+ return name === parameter.name;
9068
+ });
9069
+ if (!matchingParameter) {
9070
+ return { value: false };
9071
+ }
9072
+ // Note: Do not compare description, it is not relevant for compatibility
9073
+ if (matchingParameter.isInput !== parameter.isInput) {
9074
+ return { value: false };
9075
+ }
9076
+ if (matchingParameter.isOutput !== parameter.isOutput) {
9077
+ return { value: false };
9078
+ }
9079
+ };
9080
+ try {
9081
+ for (var parameters1_1 = (e_2 = void 0, __values(parameters1)), parameters1_1_1 = parameters1_1.next(); !parameters1_1_1.done; parameters1_1_1 = parameters1_1.next()) {
9082
+ var parameter = parameters1_1_1.value;
9083
+ var state_1 = _loop_1(parameter);
9084
+ if (typeof state_1 === "object")
9085
+ return state_1.value;
9086
+ }
9087
+ }
9088
+ catch (e_2_1) { e_2 = { error: e_2_1 }; }
9089
+ finally {
9090
+ try {
9091
+ if (parameters1_1_1 && !parameters1_1_1.done && (_b = parameters1_1.return)) _b.call(parameters1_1);
9092
+ }
9093
+ finally { if (e_2) throw e_2.error; }
9094
+ }
9095
+ }
9096
+ }
9097
+ catch (e_1_1) { e_1 = { error: e_1_1 }; }
9098
+ finally {
9099
+ try {
9100
+ if (_d && !_d.done && (_a = _c.return)) _a.call(_c);
9101
+ }
9102
+ finally { if (e_1) throw e_1.error; }
9103
+ }
9104
+ return true;
9105
+ }
9106
+
9107
+ /**
9108
+ * @@@
9109
+ *
9110
+ * @deprecated https://github.com/webgptorg/promptbook/pull/186
9111
+ * @see https://github.com/webgptorg/promptbook/discussions/171
9112
+ *
9113
+ * @public exported from `@promptbook/core`
9114
+ */
9115
+ function isPipelineImplementingInterface(options) {
9116
+ var pipeline = options.pipeline, pipelineInterface = options.pipelineInterface;
9117
+ return isPipelineInterfacesEqual(getPipelineInterface(pipeline), pipelineInterface);
9118
+ }
9119
+
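A hedged usage sketch of the (deprecated) interface helpers above; `pipelineA` and `pipelineB` are hypothetical compiled pipelines:

    var interfaceA = getPipelineInterface(pipelineA);
    var interfaceB = getPipelineInterface(pipelineB);
    var areCompatible = isPipelineInterfacesEqual(interfaceA, interfaceB); // <- compares parameter names and isInput/isOutput, not descriptions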
9120
+ /**
9121
+ * Set the formfactor based on the pipeline interface
9122
+ *
9123
+ * @private
9124
+ */
9125
+ var ImplicitFormfactorHla = {
9126
+ type: 'SYNC',
9127
+ $applyToPipelineJson: function ($pipelineJson) {
9128
+ var e_1, _a;
9129
+ if ($pipelineJson.formfactorName !== undefined) {
9130
+ // Note: When formfactor is already set, do nothing
9131
+ return;
9132
+ }
9133
+ try {
9134
+ for (var _b = __values(FORMFACTOR_DEFINITIONS.filter(function (_a) {
9135
+ var name = _a.name;
9136
+ return name !== 'GENERIC';
9137
+ })), _c = _b.next(); !_c.done; _c = _b.next()) {
9138
+ var formfactorDefinition = _c.value;
9139
+ // <- Note: [♓️][💩] This is the order of the formfactors; add some explicit priority
9140
+ var name_1 = formfactorDefinition.name, pipelineInterface = formfactorDefinition.pipelineInterface;
9141
+ var isCompatible = isPipelineImplementingInterface({
9142
+ pipeline: __assign({ formfactorName: name_1 }, $pipelineJson),
9143
+ pipelineInterface: pipelineInterface,
9144
+ });
9145
+ /*/
9146
+ console.log({
9147
+ subject: `${$pipelineJson.title} implements ${name}`,
9148
+ pipelineTitle: $pipelineJson.title,
9149
+ formfactorName: name,
9150
+ isCompatible,
9151
+ formfactorInterface: pipelineInterface,
9152
+ pipelineInterface: getPipelineInterface($pipelineJson as PipelineJson),
9153
+ });
9154
+ /**/
9155
+ if (isCompatible) {
9156
+ $pipelineJson.formfactorName = name_1;
9157
+ return;
9158
+ }
9159
+ }
9160
+ }
9161
+ catch (e_1_1) { e_1 = { error: e_1_1 }; }
9162
+ finally {
9163
+ try {
9164
+ if (_c && !_c.done && (_a = _b.return)) _a.call(_b);
9165
+ }
9166
+ finally { if (e_1) throw e_1.error; }
9167
+ }
9168
+ },
9169
+ };
9170
+
9171
+ /**
9172
+ * Allow defining a chatbot without needing to write the full interface
9173
+ *
9174
+ * @private
9175
+ */
9176
+ var QuickChatbotHla = {
9177
+ type: 'SYNC',
9178
+ $applyToPipelineJson: function ($pipelineJson) {
9179
+ if ($pipelineJson.tasks.length !== 0) {
9180
+ // Note: When there are already tasks, do nothing
9181
+ return;
9182
+ }
9183
+ if ($pipelineJson.parameters.length !== 0) {
9184
+ // Note: When there are already parameters, do nothing
9185
+ return;
9186
+ }
9187
+ if ($pipelineJson.personas.length === 0) {
9188
+ // Note: When no personas defined, do nothing
9189
+ return;
9190
+ }
9191
+ var personaName = $pipelineJson.personas[0].name;
9192
+ $pipelineJson.formfactorName = 'CHATBOT';
9193
+ $pipelineJson.parameters.push({
9194
+ name: 'previousTitle',
9195
+ description: 'Previous title of the conversation',
9196
+ isInput: true,
9197
+ isOutput: false,
9198
+ }, {
9199
+ name: 'previousConversationSummary',
9200
+ description: 'Previous conversation summary',
9201
+ isInput: true,
9202
+ isOutput: false,
9203
+ }, {
9204
+ name: 'userMessage',
9205
+ description: 'User message',
9206
+ isInput: true,
9207
+ isOutput: false,
9208
+ }, {
9209
+ name: 'title',
9210
+ description: 'Title of the conversation',
9211
+ isInput: false,
9212
+ isOutput: true,
9213
+ }, {
9214
+ name: 'conversationSummary',
9215
+ description: 'Summary of the conversation',
9216
+ isInput: false,
9217
+ isOutput: true,
9218
+ }, {
9219
+ name: 'chatbotResponse',
9220
+ description: 'Chatbot response',
9221
+ isInput: false,
9222
+ isOutput: true,
9223
+ exampleValues: ["Hello, I am Pavol's virtual avatar. How can I help you?"],
9224
+ });
9225
+ // TODO: !!!!!! spaceTrim
9226
+ $pipelineJson.tasks.push({
9227
+ taskType: 'PROMPT_TASK',
9228
+ name: 'create-an-answer',
9229
+ title: 'Create an answer',
9230
+ content: 'Write a response to the user message:\n\n**Question from user**\n\n> {userMessage}\n\n**Previous conversation**\n\n> {previousConversationSummary}',
9231
+ resultingParameterName: 'chatbotResponse',
9232
+ personaName: personaName,
9233
+ dependentParameterNames: ['userMessage', 'previousConversationSummary' /* !!!!!!, 'knowledge'*/],
9234
+ // !!!!!! preparedContent: '{content}\n\n## Knowledge\n\n{knowledge}',
9235
+ }, {
9236
+ taskType: 'PROMPT_TASK',
9237
+ name: 'summarize-the-conversation',
9238
+ title: 'Summarize the conversation',
9239
+ content: 'Summarize the conversation in a few words:\n\n## Rules\n\n- Summarise the text of the conversation in a few words\n- Convert the text to its basic idea\n- Imagine you are writing the headline or subject line of an email\n- Respond with a few words of summary only\n\n## Conversation\n\n**User:**\n\n> {userMessage}\n\n**You:**\n\n> {chatbotResponse}',
9240
+ resultingParameterName: 'conversationSummary',
9241
+ personaName: personaName,
9242
+ expectations: {
9243
+ words: {
9244
+ min: 1,
9245
+ max: 10,
9246
+ },
9247
+ },
9248
+ dependentParameterNames: ['userMessage', 'chatbotResponse' /* !!!!!!, 'knowledge'*/],
9249
+ // !!!!!! preparedContent: '{content}\n\n## Knowledge\n\n{knowledge}',
9250
+ }, {
9251
+ taskType: 'SIMPLE_TASK',
9252
+ name: 'title',
9253
+ title: 'Title',
9254
+ content: '{conversationSummary}',
9255
+ resultingParameterName: 'title',
9256
+ dependentParameterNames: ['conversationSummary' /* !!!!!!, 'knowledge'*/],
9257
+ // !!!!!! preparedContent: '{content}\n\n## Knowledge\n\n{knowledge}',
9258
+ });
9259
+ },
9260
+ };
9261
+
9262
+ /**
9263
+ * All high-level abstractions
9264
+ *
9265
+ * @private internal index of `precompilePipeline` (= used for sync) and `preparePipeline` (= used for async)
9266
+ */
9267
+ var HIGH_LEVEL_ABSTRACTIONS = [
9268
+ ImplicitFormfactorHla,
9269
+ QuickChatbotHla,
9270
+ // <- Note: [♓️][💩] This is the order of application of the high-level abstractions on the pipeline JSON
9271
+ ];
9272
+ /**
9273
+ * TODO: Test that all sync high-level abstractions are before async high-level abstractions
9274
+ * Note: [💞] Ignore a discrepancy between file name and entity name
9275
+ */
9276
+
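For orientation, a minimal sketch of the shape a SYNC high-level abstraction takes, mirroring `ImplicitFormfactorHla` and `QuickChatbotHla` above (`ExampleHla` is hypothetical):

    var ExampleHla = {
        type: 'SYNC',
        $applyToPipelineJson: function ($pipelineJson) {
            // Note: `$` marks that the given `$pipelineJson` is mutated
            if ($pipelineJson.title === undefined) {
                $pipelineJson.title = 'Untitled'; // <- hypothetical default, for illustration only
            }
        },
    };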
9036
9277
  /**
9037
9278
  * Supported script languages
9038
9279
  *
@@ -9271,8 +9512,8 @@
9271
9512
  * Compile pipeline from string (markdown) format to JSON format synchronously
9272
9513
  *
9273
9514
  * Note: There are 3 similar functions:
9274
- * - `pipelineStringToJson` **(preferred)** - which propperly compiles the promptbook and use embedding for external knowledge
9275
- * - `pipelineStringToJsonSync` - use only if you need to compile promptbook synchronously and it contains NO external knowledge
9515
+ * - `compilePipeline` **(preferred)** - which properly compiles the promptbook and uses embeddings for external knowledge
9516
+ * - `precompilePipeline` - use it only if you need to compile a promptbook synchronously and it contains NO external knowledge
9276
9517
  * - `preparePipeline` - just one step in the compilation process
9277
9518
  *
9278
9519
  * Note: This function does not validate logic of the pipeline only the parsing
@@ -9283,7 +9524,7 @@
9283
9524
  * @throws {ParseError} if the promptbook string is not valid
9284
9525
  * @public exported from `@promptbook/core`
9285
9526
  */
9286
- function pipelineStringToJsonSync(pipelineString) {
9527
+ function precompilePipeline(pipelineString) {
9287
9528
  var e_1, _a, e_2, _b, e_3, _c, e_4, _d, e_5, _e, e_6, _f;
9288
9529
  var $pipelineJson = {
9289
9530
  title: DEFAULT_TITLE,
@@ -9383,7 +9624,7 @@
9383
9624
  }
9384
9625
  try {
9385
9626
  commandParser.$applyToPipelineJson(command, $pipelineJson);
9386
- // <- Note: [🦦] Its strange that this assertion must be here, [🦦][4] should do this assertion implicitelly
9627
+ // <- Note: [🦦] Its strange that this assertion must be here, [🦦][4] should do this assertion implicitly
9387
9628
  }
9388
9629
  catch (error) {
9389
9630
  if (!(error instanceof ParseError)) {
@@ -9440,10 +9681,10 @@
9440
9681
  return nameWithSuffix;
9441
9682
  };
9442
9683
  var _loop_2 = function (section) {
9443
- var e_7, _o, e_8, _p;
9684
+ var e_7, _q, e_8, _r;
9444
9685
  // TODO: Parse section's description (the content out of the codeblock and lists)
9445
9686
  var listItems_2 = extractAllListItemsFromMarkdown(section.content);
9446
- var _q = extractOneBlockFromMarkdown(section.content), language = _q.language, content = _q.content;
9687
+ var _s = extractOneBlockFromMarkdown(section.content), language = _s.language, content = _s.content;
9447
9688
  // TODO: [🎾][1] DRY description
9448
9689
  var description_1 = section.content;
9449
9690
  // Note: Remove codeblocks - TODO: [🎾]
@@ -9491,7 +9732,7 @@
9491
9732
  }
9492
9733
  try {
9493
9734
  commandParser.$applyToTaskJson(
9494
- // <- Note: [🦦] Its strange that this assertion must be here, [🦦][4] should do this assertion implicitelly
9735
+ // <- Note: [🦦] Its strange that this assertion must be here, [🦦][4] should do this assertion implicitly
9495
9736
  command, $taskJson, $pipelineJson);
9496
9737
  }
9497
9738
  catch (error) {
@@ -9508,14 +9749,14 @@
9508
9749
  try {
9509
9750
  // TODO [♓️] List commands and before apply order them to achieve order-agnostic commands
9510
9751
  for (var commands_1 = (e_7 = void 0, __values(commands)), commands_1_1 = commands_1.next(); !commands_1_1.done; commands_1_1 = commands_1.next()) {
9511
- var _r = commands_1_1.value, listItem = _r.listItem, command = _r.command;
9752
+ var _t = commands_1_1.value, listItem = _t.listItem, command = _t.command;
9512
9753
  _loop_4(listItem, command);
9513
9754
  }
9514
9755
  }
9515
9756
  catch (e_7_1) { e_7 = { error: e_7_1 }; }
9516
9757
  finally {
9517
9758
  try {
9518
- if (commands_1_1 && !commands_1_1.done && (_o = commands_1.return)) _o.call(commands_1);
9759
+ if (commands_1_1 && !commands_1_1.done && (_q = commands_1.return)) _q.call(commands_1);
9519
9760
  }
9520
9761
  finally { if (e_7) throw e_7.error; }
9521
9762
  }
@@ -9531,8 +9772,8 @@
9531
9772
  }
9532
9773
  $taskJson.dependentParameterNames = Array.from(extractParameterNamesFromTask($taskJson));
9533
9774
  try {
9534
- for (var _s = (e_8 = void 0, __values($taskJson.dependentParameterNames)), _t = _s.next(); !_t.done; _t = _s.next()) {
9535
- var parameterName = _t.value;
9775
+ for (var _u = (e_8 = void 0, __values($taskJson.dependentParameterNames)), _v = _u.next(); !_v.done; _v = _u.next()) {
9776
+ var parameterName = _v.value;
9536
9777
  // TODO: [🧠] This definition should be made first in the task
9537
9778
  defineParam({
9538
9779
  parameterName: parameterName,
@@ -9546,7 +9787,7 @@
9546
9787
  catch (e_8_1) { e_8 = { error: e_8_1 }; }
9547
9788
  finally {
9548
9789
  try {
9549
- if (_t && !_t.done && (_p = _s.return)) _p.call(_s);
9790
+ if (_v && !_v.done && (_r = _u.return)) _r.call(_u);
9550
9791
  }
9551
9792
  finally { if (e_8) throw e_8.error; }
9552
9793
  }
@@ -9677,53 +9918,39 @@
9677
9918
  });
9678
9919
  try {
9679
9920
  // =============================================================
9680
- // Note: 9️⃣ Implicit and default formfactor
9681
- for (var FORMFACTOR_DEFINITIONS_1 = __values(FORMFACTOR_DEFINITIONS), FORMFACTOR_DEFINITIONS_1_1 = FORMFACTOR_DEFINITIONS_1.next(); !FORMFACTOR_DEFINITIONS_1_1.done; FORMFACTOR_DEFINITIONS_1_1 = FORMFACTOR_DEFINITIONS_1.next()) {
9682
- var formfactorDefinition = FORMFACTOR_DEFINITIONS_1_1.value;
9683
- // <- Note: [♓️][💩] This is the order of the formfactors, make some explicit priority
9684
- var name_2 = formfactorDefinition.name, pipelineInterface = formfactorDefinition.pipelineInterface;
9685
- // Note: Skip GENERIC formfactor, it will be used as a fallback if no other formfactor is compatible
9686
- if (name_2 === 'GENERIC') {
9687
- continue;
9688
- }
9689
- var isCompatible = isPipelineImplementingInterface({
9690
- pipeline: __assign({ formfactorName: name_2 }, $pipelineJson),
9691
- pipelineInterface: pipelineInterface,
9692
- });
9693
- /*/
9694
- console.log({
9695
- subject: `${$pipelineJson.title} implements ${name}`,
9696
- pipelineTitle: $pipelineJson.title,
9697
- formfactorName: name,
9698
- isCompatible,
9699
- formfactorInterface: pipelineInterface,
9700
- pipelineInterface: getPipelineInterface($pipelineJson as PipelineJson),
9701
- });
9702
- /**/
9703
- if (isCompatible) {
9704
- $pipelineJson.formfactorName = name_2;
9705
- break;
9706
- }
9921
+ // Note: 9️⃣ Apply sync high-level abstractions
9922
+ for (var _o = __values(HIGH_LEVEL_ABSTRACTIONS.filter(function (_a) {
9923
+ var type = _a.type;
9924
+ return type === 'SYNC';
9925
+ })), _p = _o.next(); !_p.done; _p = _o.next()) {
9926
+ var highLevelAbstraction = _p.value;
9927
+ highLevelAbstraction.$applyToPipelineJson($pipelineJson);
9707
9928
  }
9708
9929
  }
9709
9930
  catch (e_6_1) { e_6 = { error: e_6_1 }; }
9710
9931
  finally {
9711
9932
  try {
9712
- if (FORMFACTOR_DEFINITIONS_1_1 && !FORMFACTOR_DEFINITIONS_1_1.done && (_f = FORMFACTOR_DEFINITIONS_1.return)) _f.call(FORMFACTOR_DEFINITIONS_1);
9933
+ if (_p && !_p.done && (_f = _o.return)) _f.call(_o);
9713
9934
  }
9714
9935
  finally { if (e_6) throw e_6.error; }
9715
9936
  }
9937
+ // =============================================================
9938
+ // Note: 🔟 Default formfactor
9716
9939
  // Note: [🔆] If formfactor is still not set, set it to 'GENERIC'
9717
9940
  if ($pipelineJson.formfactorName === undefined) {
9718
9941
  $pipelineJson.formfactorName = 'GENERIC';
9719
9942
  }
9720
9943
  // =============================================================
9721
9944
  // TODO: [🍙] Maybe do reorder of `$pipelineJson` here
9722
- return $asDeeplyFrozenSerializableJson('pipelineJson', __assign({ title: DEFAULT_TITLE, pipelineUrl: undefined, bookVersion: undefined, description: undefined, formfactorName: 'GENERIC',
9723
- // <- Note: [🔆] Setting `formfactorName` is redundant to satisfy the typescript
9724
- parameters: [], tasks: [], knowledgeSources: [], knowledgePieces: [], personas: [], preparations: [] }, $pipelineJson));
9945
+ return exportJson({
9946
+ name: 'pipelineJson',
9947
+ message: "Result of `precompilePipeline`",
9948
+ order: ORDER_OF_PIPELINE_JSON,
9949
+ value: __assign({ formfactorName: 'GENERIC' }, $pipelineJson),
9950
+ });
9725
9951
  }
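A hedged usage sketch of the renamed sync compiler; per the hunk above, 'GENERIC' is the fallback when no command or sync high-level abstraction sets a formfactor (`pipelineString` is a hypothetical promptbook source):

    var pipelineJson = precompilePipeline(pipelineString); // <- throws ParseError when the promptbook string is not valid
    console.info(pipelineJson.formfactorName); // <- 'GENERIC' unless set explicitly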
9726
9952
  /**
9953
+ * TODO: [🧠] Maybe more things here can be refactored as high-level abstractions
9727
9954
  * TODO: [main] !!!! Warn if used only sync version
9728
9955
  * TODO: [🚞] Report here line/column of error
9729
9956
  * TODO: Use spaceTrim more effectively
@@ -9738,8 +9965,8 @@
9738
9965
  * Compile pipeline from string (markdown) format to JSON format
9739
9966
  *
9740
9967
  * Note: There are 3 similar functions:
9741
- * - `pipelineStringToJson` **(preferred)** - which propperly compiles the promptbook and use embedding for external knowledge
9742
- * - `pipelineStringToJsonSync` - use only if you need to compile promptbook synchronously and it contains NO external knowledge
9968
+ * - `compilePipeline` **(preferred)** - which properly compiles the promptbook and uses embeddings for external knowledge
9969
+ * - `precompilePipeline` - use it only if you need to compile a promptbook synchronously and it contains NO external knowledge
9743
9970
  * - `preparePipeline` - just one step in the compilation process
9744
9971
  *
9745
9972
  * Note: This function does not validate logic of the pipeline only the parsing
@@ -9752,13 +9979,13 @@
9752
9979
  * @throws {ParseError} if the promptbook string is not valid
9753
9980
  * @public exported from `@promptbook/core`
9754
9981
  */
9755
- function pipelineStringToJson(pipelineString, tools, options) {
9982
+ function compilePipeline(pipelineString, tools, options) {
9756
9983
  return __awaiter(this, void 0, void 0, function () {
9757
9984
  var pipelineJson;
9758
9985
  return __generator(this, function (_a) {
9759
9986
  switch (_a.label) {
9760
9987
  case 0:
9761
- pipelineJson = pipelineStringToJsonSync(pipelineString);
9988
+ pipelineJson = precompilePipeline(pipelineString);
9762
9989
  if (!(tools !== undefined && tools.llm !== undefined)) return [3 /*break*/, 2];
9763
9990
  return [4 /*yield*/, preparePipeline(pipelineJson, tools, options || {
9764
9991
  rootDirname: null,
@@ -9767,7 +9994,7 @@
9767
9994
  pipelineJson = _a.sent();
9768
9995
  _a.label = 2;
9769
9996
  case 2:
9770
- // Note: No need to use `$asDeeplyFrozenSerializableJson` because `pipelineStringToJsonSync` and `preparePipeline` already do that
9997
+ // Note: No need to use `$exportJson` because `precompilePipeline` and `preparePipeline` already do that
9771
9998
  return [2 /*return*/, pipelineJson];
9772
9999
  }
9773
10000
  });
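A hedged usage sketch of the renamed async compiler (assumes an async context; as the hunk above shows, preparation runs only when `tools.llm` is provided):

    var pipeline = await compilePipeline(pipelineString, tools, { rootDirname: null });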
@@ -10894,7 +11121,7 @@
10894
11121
  return [4 /*yield*/, promises.readFile(fileName, 'utf-8')];
10895
11122
  case 2:
10896
11123
  pipelineString = (_e.sent());
10897
- return [4 /*yield*/, pipelineStringToJson(pipelineString, tools, {
11124
+ return [4 /*yield*/, compilePipeline(pipelineString, tools, {
10898
11125
  rootDirname: rootDirname,
10899
11126
  })];
10900
11127
  case 3:
@@ -11391,7 +11618,7 @@
11391
11618
  case 0:
11392
11619
  isGraphAdded = options.isGraphAdded, isPrettifyed = options.isPrettifyed;
11393
11620
  if (!isGraphAdded) return [3 /*break*/, 2];
11394
- return [4 /*yield*/, pipelineStringToJson(pipelineString)];
11621
+ return [4 /*yield*/, compilePipeline(pipelineString)];
11395
11622
  case 1:
11396
11623
  pipelineJson = _a.sent();
11397
11624
  promptbookMermaid_1 = renderPromptbookMermaid(pipelineJson, {
@@ -11835,9 +12062,125 @@
11835
12062
  return executionReportString;
11836
12063
  }
11837
12064
  /**
11838
- * TODO: Add mermaid chart for every report
11839
- * TODO: [🧠] Allow to filter out some parts of the report by options
11840
- * TODO: [🧠] Should be in generated file GENERATOR_WARNING
12065
+ * TODO: Add mermaid chart for every report
12066
+ * TODO: [🧠] Allow to filter out some parts of the report by options
12067
+ * TODO: [🧠] Should be in generated file GENERATOR_WARNING
12068
+ */
12069
+
12070
+ /**
12071
+ * Run the interactive chatbot in CLI
12072
+ *
12073
+ * @returns Never-ending promise or process exit
12074
+ * @private internal function of `promptbookCli` and `initializeRunCommand`
12075
+ */
12076
+ function runInteractiveChatbot(options) {
12077
+ var _a;
12078
+ return __awaiter(this, void 0, void 0, function () {
12079
+ var pipeline, pipelineExecutor, isVerbose, ongoingParameters, initialMessage, _loop_1, state_1;
12080
+ return __generator(this, function (_b) {
12081
+ switch (_b.label) {
12082
+ case 0:
12083
+ pipeline = options.pipeline, pipelineExecutor = options.pipelineExecutor, isVerbose = options.isVerbose;
12084
+ ongoingParameters = {
12085
+ /**
12086
+ * Title of the conversation
12087
+ */
12088
+ title: '',
12089
+ /**
12090
+ * Summary of the conversation
12091
+ */
12092
+ conversationSummary: '',
12093
+ /**
12094
+ * Chatbot response
12095
+ */
12096
+ chatbotResponse: '',
12097
+ };
12098
+ if (isVerbose) {
12099
+ console.info(colors__default["default"].gray('--- Running interactive chatbot ---'));
12100
+ }
12101
+ initialMessage = (((_a = pipeline.parameters.find(function (_a) {
12102
+ var name = _a.name;
12103
+ return name === 'chatbotResponse';
12104
+ })) === null || _a === void 0 ? void 0 : _a.exampleValues) || [])[0];
12105
+ if (initialMessage) {
12106
+ console.info("\n");
12107
+ console.info(spaceTrim__default["default"](function (block) { return "\n\n ".concat(colors__default["default"].bold(colors__default["default"].green('Chatbot:')), "\n ").concat(block(colors__default["default"].green(initialMessage)), "\n\n "); }));
12108
+ }
12109
+ _loop_1 = function () {
12110
+ var title_1, conversationSummary_1, response, userMessage_1, inputParameters, result_1, error_1;
12111
+ return __generator(this, function (_c) {
12112
+ switch (_c.label) {
12113
+ case 0:
12114
+ _c.trys.push([0, 4, , 5]);
12115
+ return [4 /*yield*/, waitasecond.forTime(100)];
12116
+ case 1:
12117
+ _c.sent();
12118
+ title_1 = ongoingParameters.title, conversationSummary_1 = ongoingParameters.conversationSummary;
12119
+ console.info("\n");
12120
+ if (title_1 !== '' &&
12121
+ just(false) /* <- TODO: [⛲️] Some better way to show the title of the ongoing conversation */) {
12122
+ console.info(colors__default["default"].gray("--- ".concat(title_1, " ---")));
12123
+ }
12124
+ else {
12125
+ console.info(colors__default["default"].gray("---"));
12126
+ }
12127
+ return [4 /*yield*/, prompts__default["default"]({
12128
+ type: 'text',
12129
+ name: 'userMessage',
12130
+ message: 'User message',
12131
+ hint: spaceTrim__default["default"](function (block) { return "\n Type \"exit\" to exit,\n\n previousTitle\n ".concat(block(title_1), "\n\n previousConversationSummary\n ").concat(block(conversationSummary_1), "\n\n "); }),
12132
+ })];
12133
+ case 2:
12134
+ response = _c.sent();
12135
+ userMessage_1 = response.userMessage;
12136
+ if (userMessage_1 === 'exit' || userMessage_1 === 'quit' || userMessage_1 === undefined) {
12137
+ return [2 /*return*/, { value: process.exit(0) }];
12138
+ }
12139
+ console.info("\n");
12140
+ console.info(spaceTrim__default["default"](function (block) { return "\n\n ".concat(colors__default["default"].bold(colors__default["default"].blue('User:')), "\n ").concat(block(colors__default["default"].blue(userMessage_1)), "\n\n "); }));
12141
+ inputParameters = {
12142
+ previousTitle: title_1,
12143
+ previousConversationSummary: conversationSummary_1,
12144
+ userMessage: userMessage_1,
12145
+ };
12146
+ return [4 /*yield*/, pipelineExecutor(inputParameters)];
12147
+ case 3:
12148
+ result_1 = _c.sent();
12149
+ assertsExecutionSuccessful(result_1);
12150
+ console.info("\n");
12151
+ console.info(spaceTrim__default["default"](function (block) { return "\n\n ".concat(colors__default["default"].bold(colors__default["default"].green('Chatbot:')), "\n ").concat(block(colors__default["default"].green(result_1.outputParameters.chatbotResponse)), "\n\n "); }));
12152
+ ongoingParameters = result_1.outputParameters;
12153
+ return [3 /*break*/, 5];
12154
+ case 4:
12155
+ error_1 = _c.sent();
12156
+ if (!(error_1 instanceof Error)) {
12157
+ throw error_1;
12158
+ }
12159
+ // TODO: Allow resurrecting the chatbot after an error - prompt the user to continue
12160
+ console.error(colors__default["default"].red(error_1.stack || error_1.message));
12161
+ return [2 /*return*/, { value: process.exit(1) }];
12162
+ case 5: return [2 /*return*/];
12163
+ }
12164
+ });
12165
+ };
12166
+ _b.label = 1;
12167
+ case 1:
12168
+ if (!just(true)) return [3 /*break*/, 3];
12169
+ return [5 /*yield**/, _loop_1()];
12170
+ case 2:
12171
+ state_1 = _b.sent();
12172
+ if (typeof state_1 === "object")
12173
+ return [2 /*return*/, state_1.value];
12174
+ return [3 /*break*/, 1];
12175
+ case 3: return [2 /*return*/];
12176
+ }
12177
+ });
12178
+ });
12179
+ }
12180
+ /**
12181
+ * TODO: Saving reports from the chatbot conversation
12182
+ * TODO: [⛲️] This is the right place to start implementing INK
12183
+ * Note: [🟡] Code in this file should never be published outside of `@promptbook/cli`
11841
12184
  */
11842
12185
 
11843
12186
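A hedged sketch of one executor round-trip matching the chat loop above; the parameter names follow the CHATBOT interface that `QuickChatbotHla` defines:

    var result = await pipelineExecutor({
        previousTitle: '', // <- empty on the first turn
        previousConversationSummary: '',
        userMessage: 'Hello!',
    });
    assertsExecutionSuccessful(result); // <- throws when the execution failed
    console.info(result.outputParameters.chatbotResponse);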
  /**
@@ -11856,15 +12199,16 @@
11856
12199
  runCommand.option('-r, --reload', "Call LLM models even if same prompt with result is in the cache", false);
11857
12200
  runCommand.option('-v, --verbose', "Is output verbose", false);
11858
12201
  runCommand.option('--no-interactive', "Input is not interactive, if true you need to pass all the input parameters through --json");
12202
+ runCommand.option('--no-formfactor', "When set, behavior of the interactive mode is not changed by the formfactor of the pipeline");
11859
12203
  runCommand.option('-j, --json <json>', "Pass all or some input parameters as JSON record, if used the output is also returned as JSON");
11860
12204
  runCommand.option('-s, --save-report <path>', "Save report to file");
11861
12205
  runCommand.action(function (filePathRaw, options) { return __awaiter(_this, void 0, void 0, function () {
11862
- var isCacheReloaded, isInteractive, json, isVerbose, saveReport, inputParameters, prepareAndScrapeOptions, fs, filePath, filePathCandidates, filePathCandidates_1, filePathCandidates_1_1, filePathCandidate, e_1_1, llm, executables, tools, pipelineString, pipeline, error_1, pipelineExecutor, questions, response, result, isSuccessful, errors, warnings, outputParameters, executionReport, executionReportString, _a, _b, error, _c, _d, warning, _e, _f, key, value, separator;
12206
+ var isCacheReloaded, isInteractive, isFormfactorUsed, json, isVerbose, saveReport, inputParameters, prepareAndScrapeOptions, fs, filePath, filePathCandidates, filePathCandidates_1, filePathCandidates_1_1, filePathCandidate, e_1_1, llm, executables, tools, pipelineString, pipeline, error_1, pipelineExecutor, questions, response, result, isSuccessful, errors, warnings, outputParameters, executionReport, executionReportString, _a, _b, error, _c, _d, warning, _e, _f, key, value, separator;
11863
12207
  var e_1, _g, _h, e_2, _j, e_3, _k, e_4, _l;
11864
12208
  return __generator(this, function (_m) {
11865
12209
  switch (_m.label) {
11866
12210
  case 0:
11867
- isCacheReloaded = options.reload, isInteractive = options.interactive, json = options.json, isVerbose = options.verbose, saveReport = options.saveReport;
12211
+ isCacheReloaded = options.reload, isInteractive = options.interactive, isFormfactorUsed = options.formfactor, json = options.json, isVerbose = options.verbose, saveReport = options.saveReport;
11868
12212
  if (saveReport && !saveReport.endsWith('.json') && !saveReport.endsWith('.md')) {
11869
12213
  console.error(colors__default["default"].red("Report file must be .json or .md"));
11870
12214
  return [2 /*return*/, process.exit(1)];
@@ -11933,7 +12277,7 @@
11933
12277
  if (!error.message.includes('No LLM tools')) {
11934
12278
  throw error;
11935
12279
  }
11936
- console.error(colors__default["default"].red(spaceTrim__default["default"](function (block) { return "\n You need to configure LLM tools first\n\n 1) Create .env file at the root of your project\n 2) Configure API keys for LLM tools\n \n For example:\n ".concat(block($llmToolsMetadataRegister
12280
+ console.error(colors__default["default"].red(spaceTrim__default["default"](function (block) { return "\n You need to configure LLM tools first\n\n 1) Create .env file at the root of your project\n 2) Configure API keys for LLM tools\n\n For example:\n ".concat(block($llmToolsMetadataRegister
11937
12281
  .list()
11938
12282
  .map(function (_a) {
11939
12283
  var title = _a.title, envVariables = _a.envVariables;
@@ -11968,7 +12312,7 @@
11968
12312
  _m.label = 12;
11969
12313
  case 12:
11970
12314
  _m.trys.push([12, 14, , 15]);
11971
- return [4 /*yield*/, pipelineStringToJson(pipelineString, tools)];
12315
+ return [4 /*yield*/, compilePipeline(pipelineString, tools)];
11972
12316
  case 13:
11973
12317
  pipeline = _m.sent();
11974
12318
  return [3 /*break*/, 15];
@@ -11996,6 +12340,10 @@
11996
12340
  // <- TODO: Why "LLM execution failed undefinedx"
11997
12341
  maxParallelCount: 1, // <- TODO: Pass CLI argument
11998
12342
  });
12343
+ // TODO: Make a better system for formfactors and interactive mode - here is just a quick hardcoded solution for the chatbot
12344
+ if (isInteractive === true && isFormfactorUsed === true && pipeline.formfactorName === 'CHATBOT') {
12345
+ return [2 /*return*/, /* not await */ runInteractiveChatbot({ pipeline: pipeline, pipelineExecutor: pipelineExecutor, isVerbose: isVerbose })];
12346
+ }
11999
12347
  if (isVerbose) {
12000
12348
  console.info(colors__default["default"].gray('--- Getting input parameters ---'));
12001
12349
  }
@@ -12228,7 +12576,7 @@
12228
12576
  return [4 /*yield*/, promises.readFile(filename, 'utf-8')];
12229
12577
  case 7:
12230
12578
  pipelineMarkdown = (_f.sent());
12231
- return [4 /*yield*/, pipelineStringToJson(pipelineMarkdown, tools)];
12579
+ return [4 /*yield*/, compilePipeline(pipelineMarkdown, tools)];
12232
12580
  case 8:
12233
12581
  pipeline = _f.sent();
12234
12582
  if (isVerbose) {
@@ -12456,7 +12804,7 @@
12456
12804
  isAnonymous: true,
12457
12805
  userId: this.options.userId,
12458
12806
  llmToolsConfiguration: this.options.llmToolsConfiguration,
12459
- } /* <- TODO: [🤛] */);
12807
+ } /* <- Note: [🤛] */);
12460
12808
  }
12461
12809
  else {
12462
12810
  socket.emit('listModels-request', {
@@ -12464,7 +12812,7 @@
12464
12812
  appId: this.options.appId,
12465
12813
  userId: this.options.userId,
12466
12814
  customOptions: this.options.customOptions,
12467
- } /* <- TODO: [🤛] */);
12815
+ } /* <- Note: [🤛] */);
12468
12816
  }
12469
12817
  return [4 /*yield*/, new Promise(function (resolve, reject) {
12470
12818
  socket.on('listModels-response', function (response) {
@@ -12552,7 +12900,7 @@
12552
12900
  userId: this.options.userId,
12553
12901
  llmToolsConfiguration: this.options.llmToolsConfiguration,
12554
12902
  prompt: prompt,
12555
- } /* <- TODO: [🤛] */);
12903
+ } /* <- Note: [🤛] */);
12556
12904
  }
12557
12905
  else {
12558
12906
  socket.emit('prompt-request', {
@@ -12561,7 +12909,7 @@
12561
12909
  userId: this.options.userId,
12562
12910
  customOptions: this.options.customOptions,
12563
12911
  prompt: prompt,
12564
- } /* <- TODO: [🤛] */);
12912
+ } /* <- Note: [🤛] */);
12565
12913
  }
12566
12914
  return [4 /*yield*/, new Promise(function (resolve, reject) {
12567
12915
  socket.on('prompt-response', function (response) {
@@ -12584,7 +12932,7 @@
12584
12932
  return RemoteLlmExecutionTools;
12585
12933
  }());
12586
12934
  /**
12587
- * TODO: Maybe use `$asDeeplyFrozenSerializableJson`
12935
+ * TODO: Maybe use `$exportJson`
12588
12936
  * TODO: [🧠][🛍] Maybe not `isAnonymous: boolean` BUT `mode: 'ANONYMOUS'|'COLLECTION'`
12589
12937
  * TODO: [🍓] Allow to list compatible models with each variant
12590
12938
  * TODO: [🗯] RemoteLlmExecutionTools should extend Destroyable and implement IDestroyable
@@ -12610,72 +12958,75 @@
12610
12958
  * @see https://docs.anthropic.com/en/docs/models-overview
12611
12959
  * @public exported from `@promptbook/anthropic-claude`
12612
12960
  */
12613
- var ANTHROPIC_CLAUDE_MODELS = $asDeeplyFrozenSerializableJson('ANTHROPIC_CLAUDE_MODELS', [
12614
- {
12615
- modelVariant: 'CHAT',
12616
- modelTitle: 'Claude 3.5 Sonnet',
12617
- modelName: 'claude-3-5-sonnet-20240620',
12618
- pricing: {
12619
- prompt: computeUsage("$3.00 / 1M tokens"),
12620
- output: computeUsage("$15.00 / 1M tokens"),
12961
+ var ANTHROPIC_CLAUDE_MODELS = exportJson({
12962
+ name: 'ANTHROPIC_CLAUDE_MODELS',
12963
+ value: [
12964
+ {
12965
+ modelVariant: 'CHAT',
12966
+ modelTitle: 'Claude 3.5 Sonnet',
12967
+ modelName: 'claude-3-5-sonnet-20240620',
12968
+ pricing: {
12969
+ prompt: computeUsage("$3.00 / 1M tokens"),
12970
+ output: computeUsage("$15.00 / 1M tokens"),
12971
+ },
12621
12972
  },
12622
- },
12623
- {
12624
- modelVariant: 'CHAT',
12625
- modelTitle: 'Claude 3 Opus',
12626
- modelName: 'claude-3-opus-20240229',
12627
- pricing: {
12628
- prompt: computeUsage("$15.00 / 1M tokens"),
12629
- output: computeUsage("$75.00 / 1M tokens"),
12973
+ {
12974
+ modelVariant: 'CHAT',
12975
+ modelTitle: 'Claude 3 Opus',
12976
+ modelName: 'claude-3-opus-20240229',
12977
+ pricing: {
12978
+ prompt: computeUsage("$15.00 / 1M tokens"),
12979
+ output: computeUsage("$75.00 / 1M tokens"),
12980
+ },
12630
12981
  },
12631
- },
12632
- {
12633
- modelVariant: 'CHAT',
12634
- modelTitle: 'Claude 3 Sonnet',
12635
- modelName: 'claude-3-sonnet-20240229',
12636
- pricing: {
12637
- prompt: computeUsage("$3.00 / 1M tokens"),
12638
- output: computeUsage("$15.00 / 1M tokens"),
12982
+ {
12983
+ modelVariant: 'CHAT',
12984
+ modelTitle: 'Claude 3 Sonnet',
12985
+ modelName: 'claude-3-sonnet-20240229',
12986
+ pricing: {
12987
+ prompt: computeUsage("$3.00 / 1M tokens"),
12988
+ output: computeUsage("$15.00 / 1M tokens"),
12989
+ },
12639
12990
  },
12640
- },
12641
- {
12642
- modelVariant: 'CHAT',
12643
- modelTitle: 'Claude 3 Haiku',
12644
- modelName: ' claude-3-haiku-20240307',
12645
- pricing: {
12646
- prompt: computeUsage("$0.25 / 1M tokens"),
12647
- output: computeUsage("$1.25 / 1M tokens"),
12991
+ {
12992
+ modelVariant: 'CHAT',
12993
+ modelTitle: 'Claude 3 Haiku',
12994
+ modelName: 'claude-3-haiku-20240307',
12995
+ pricing: {
12996
+ prompt: computeUsage("$0.25 / 1M tokens"),
12997
+ output: computeUsage("$1.25 / 1M tokens"),
12998
+ },
12648
12999
  },
12649
- },
12650
- {
12651
- modelVariant: 'CHAT',
12652
- modelTitle: 'Claude 2.1',
12653
- modelName: 'claude-2.1',
12654
- pricing: {
12655
- prompt: computeUsage("$8.00 / 1M tokens"),
12656
- output: computeUsage("$24.00 / 1M tokens"),
13000
+ {
13001
+ modelVariant: 'CHAT',
13002
+ modelTitle: 'Claude 2.1',
13003
+ modelName: 'claude-2.1',
13004
+ pricing: {
13005
+ prompt: computeUsage("$8.00 / 1M tokens"),
13006
+ output: computeUsage("$24.00 / 1M tokens"),
13007
+ },
12657
13008
  },
12658
- },
12659
- {
12660
- modelVariant: 'CHAT',
12661
- modelTitle: 'Claude 2',
12662
- modelName: 'claude-2.0',
12663
- pricing: {
12664
- prompt: computeUsage("$8.00 / 1M tokens"),
12665
- output: computeUsage("$24.00 / 1M tokens"),
13009
+ {
13010
+ modelVariant: 'CHAT',
13011
+ modelTitle: 'Claude 2',
13012
+ modelName: 'claude-2.0',
13013
+ pricing: {
13014
+ prompt: computeUsage("$8.00 / 1M tokens"),
13015
+ output: computeUsage("$24.00 / 1M tokens"),
13016
+ },
12666
13017
  },
12667
- },
12668
- {
12669
- modelVariant: 'CHAT',
12670
- modelTitle: ' Claude Instant 1.2',
12671
- modelName: 'claude-instant-1.2',
12672
- pricing: {
12673
- prompt: computeUsage("$0.80 / 1M tokens"),
12674
- output: computeUsage("$2.40 / 1M tokens"),
13018
+ {
13019
+ modelVariant: 'CHAT',
13020
+ modelTitle: 'Claude Instant 1.2',
13021
+ modelName: 'claude-instant-1.2',
13022
+ pricing: {
13023
+ prompt: computeUsage("$0.80 / 1M tokens"),
13024
+ output: computeUsage("$2.40 / 1M tokens"),
13025
+ },
12675
13026
  },
12676
- },
12677
- // TODO: [main] !!! Claude 1 and 2 has also completion versions - ask Hoagy
12678
- ]);
13027
+ // TODO: [main] !!! Claude 1 and 2 also have completion versions - ask Hoagy
13028
+ ],
13029
+ });
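A hedged migration sketch from the removed `$asDeeplyFrozenSerializableJson` to `exportJson`, using a hypothetical model list (`order` and `message` appear to be optional at this call site):

    var EXAMPLE_MODELS = exportJson({
        name: 'EXAMPLE_MODELS', // <- hypothetical list, mirroring ANTHROPIC_CLAUDE_MODELS above
        value: [
            {
                modelVariant: 'CHAT',
                modelTitle: 'Example Model', // <- placeholder, not a real model
                modelName: 'example-model-1',
                pricing: {
                    prompt: computeUsage('$1.00 / 1M tokens'),
                    output: computeUsage('$2.00 / 1M tokens'),
                },
            },
        ],
    });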
12679
13030
  /**
12680
13031
  * Note: [🤖] Add models of new variant
12681
13032
  * TODO: [🧠][main] !!! Add embedding models OR Anthropic has only chat+completion models?
@@ -12895,18 +13246,23 @@
12895
13246
  // eslint-disable-next-line prefer-const
12896
13247
  complete = $getCurrentDate();
12897
13248
  usage = computeAnthropicClaudeUsage(rawPromptContent || '', resultContent || '', rawResponse);
12898
- return [2 /*return*/, $asDeeplyFrozenSerializableJson('AnthropicClaudeExecutionTools ChatPromptResult', {
12899
- content: resultContent,
12900
- modelName: rawResponse.model,
12901
- timing: {
12902
- start: start,
12903
- complete: complete,
13249
+ return [2 /*return*/, exportJson({
13250
+ name: 'promptResult',
13251
+ message: "Result of `AzureOpenAiExecutionTools.callChatModel`",
13252
+ order: [],
13253
+ value: {
13254
+ content: resultContent,
13255
+ modelName: rawResponse.model,
13256
+ timing: {
13257
+ start: start,
13258
+ complete: complete,
13259
+ },
13260
+ usage: usage,
13261
+ rawPromptContent: rawPromptContent,
13262
+ rawRequest: rawRequest,
13263
+ rawResponse: rawResponse,
13264
+ // <- [🗯]
12904
13265
  },
12905
- usage: usage,
12906
- rawPromptContent: rawPromptContent,
12907
- rawRequest: rawRequest,
12908
- rawResponse: rawResponse,
12909
- // <- [🗯]
12910
13266
  })];
12911
13267
  }
12912
13268
  });
@@ -12976,7 +13332,7 @@
12976
13332
 
12977
13333
 
12978
13334
 
12979
- return $asDeeplyFrozenSerializableJson('AnthropicClaudeExecutionTools CompletionPromptResult',{
13335
+ return exportJson({ name: 'promptResult', message: "Result of \`AnthropicClaudeExecutionTools.callCompletionModel\`", order: [], value: {
12980
13336
  content: resultContent,
12981
13337
  modelName: rawResponse.model || model,
12982
13338
  timing: {
@@ -13145,381 +13501,384 @@
13145
13501
  * @see https://openai.com/api/pricing/
13146
13502
  * @public exported from `@promptbook/openai`
13147
13503
  */
13148
- var OPENAI_MODELS = $asDeeplyFrozenSerializableJson('OPENAI_MODELS', [
13149
- /*/
13504
+ var OPENAI_MODELS = exportJson({
13505
+ name: 'OPENAI_MODELS',
13506
+ value: [
13507
+ /*/
13508
+ {
13509
+ modelTitle: 'dall-e-3',
13510
+ modelName: 'dall-e-3',
13511
+ },
13512
+ /**/
13513
+ /*/
13514
+ {
13515
+ modelTitle: 'whisper-1',
13516
+ modelName: 'whisper-1',
13517
+ },
13518
+ /**/
13519
+ /**/
13520
+ {
13521
+ modelVariant: 'COMPLETION',
13522
+ modelTitle: 'davinci-002',
13523
+ modelName: 'davinci-002',
13524
+ pricing: {
13525
+ prompt: computeUsage("$2.00 / 1M tokens"),
13526
+ output: computeUsage("$2.00 / 1M tokens"), // <- not sure
13527
+ },
13528
+ },
13529
+ /**/
13530
+ /*/
13150
13531
  {
13151
- modelTitle: 'dall-e-3',
13152
- modelName: 'dall-e-3',
13532
+ modelTitle: 'dall-e-2',
13533
+ modelName: 'dall-e-2',
13153
13534
  },
13154
13535
  /**/
13155
- /*/
13536
+ /**/
13537
+ {
13538
+ modelVariant: 'CHAT',
13539
+ modelTitle: 'gpt-3.5-turbo-16k',
13540
+ modelName: 'gpt-3.5-turbo-16k',
13541
+ pricing: {
13542
+ prompt: computeUsage("$3.00 / 1M tokens"),
13543
+ output: computeUsage("$4.00 / 1M tokens"),
13544
+ },
13545
+ },
13546
+ /**/
13547
+ /*/
13156
13548
  {
13157
- modelTitle: 'whisper-1',
13158
- modelName: 'whisper-1',
13549
+ modelTitle: 'tts-1-hd-1106',
13550
+ modelName: 'tts-1-hd-1106',
13159
13551
  },
13160
- /**/
13161
- /**/
13162
- {
13163
- modelVariant: 'COMPLETION',
13164
- modelTitle: 'davinci-002',
13165
- modelName: 'davinci-002',
13166
- pricing: {
13167
- prompt: computeUsage("$2.00 / 1M tokens"),
13168
- output: computeUsage("$2.00 / 1M tokens"), // <- not sure
13169
- },
13170
- },
13171
- /**/
13172
- /*/
13173
- {
13174
- modelTitle: 'dall-e-2',
13175
- modelName: 'dall-e-2',
13176
- },
13177
- /**/
13178
- /**/
13179
- {
13180
- modelVariant: 'CHAT',
13181
- modelTitle: 'gpt-3.5-turbo-16k',
13182
- modelName: 'gpt-3.5-turbo-16k',
13183
- pricing: {
13184
- prompt: computeUsage("$3.00 / 1M tokens"),
13185
- output: computeUsage("$4.00 / 1M tokens"),
13186
- },
13187
- },
13188
- /**/
13189
- /*/
13190
- {
13191
- modelTitle: 'tts-1-hd-1106',
13192
- modelName: 'tts-1-hd-1106',
13193
- },
13194
- /**/
13195
- /*/
13196
- {
13197
- modelTitle: 'tts-1-hd',
13198
- modelName: 'tts-1-hd',
13199
- },
13200
- /**/
13201
- /**/
13202
- {
13203
- modelVariant: 'CHAT',
13204
- modelTitle: 'gpt-4',
13205
- modelName: 'gpt-4',
13206
- pricing: {
13207
- prompt: computeUsage("$30.00 / 1M tokens"),
13208
- output: computeUsage("$60.00 / 1M tokens"),
13552
+ /**/
13553
+ /*/
13554
+ {
13555
+ modelTitle: 'tts-1-hd',
13556
+ modelName: 'tts-1-hd',
13557
+ },
13558
+ /**/
13559
+ /**/
13560
+ {
13561
+ modelVariant: 'CHAT',
13562
+ modelTitle: 'gpt-4',
13563
+ modelName: 'gpt-4',
13564
+ pricing: {
13565
+ prompt: computeUsage("$30.00 / 1M tokens"),
13566
+ output: computeUsage("$60.00 / 1M tokens"),
13567
+ },
13209
13568
  },
13210
- },
13211
- /**/
13212
- /**/
13213
- {
13214
- modelVariant: 'CHAT',
13215
- modelTitle: 'gpt-4-32k',
13216
- modelName: 'gpt-4-32k',
13217
- pricing: {
13218
- prompt: computeUsage("$60.00 / 1M tokens"),
13219
- output: computeUsage("$120.00 / 1M tokens"),
13569
+ /**/
13570
+ /**/
13571
+ {
13572
+ modelVariant: 'CHAT',
13573
+ modelTitle: 'gpt-4-32k',
13574
+ modelName: 'gpt-4-32k',
13575
+ pricing: {
13576
+ prompt: computeUsage("$60.00 / 1M tokens"),
13577
+ output: computeUsage("$120.00 / 1M tokens"),
13578
+ },
13220
13579
  },
13221
- },
13222
- /**/
13223
- /*/
13224
- {
13225
- modelVariant: 'CHAT',
13226
- modelTitle: 'gpt-4-0613',
13227
- modelName: 'gpt-4-0613',
13228
- pricing: {
13229
- prompt: computeUsage(` / 1M tokens`),
13230
- output: computeUsage(` / 1M tokens`),
13580
+ /**/
13581
+ /*/
13582
+ {
13583
+ modelVariant: 'CHAT',
13584
+ modelTitle: 'gpt-4-0613',
13585
+ modelName: 'gpt-4-0613',
13586
+ pricing: {
13587
+ prompt: computeUsage(` / 1M tokens`),
13588
+ output: computeUsage(` / 1M tokens`),
13589
+ },
13231
13590
  },
13232
- },
13233
- /**/
13234
- /**/
13235
- {
13236
- modelVariant: 'CHAT',
13237
- modelTitle: 'gpt-4-turbo-2024-04-09',
13238
- modelName: 'gpt-4-turbo-2024-04-09',
13239
- pricing: {
13240
- prompt: computeUsage("$10.00 / 1M tokens"),
13241
- output: computeUsage("$30.00 / 1M tokens"),
13591
+ /**/
13592
+ /**/
13593
+ {
13594
+ modelVariant: 'CHAT',
13595
+ modelTitle: 'gpt-4-turbo-2024-04-09',
13596
+ modelName: 'gpt-4-turbo-2024-04-09',
13597
+ pricing: {
13598
+ prompt: computeUsage("$10.00 / 1M tokens"),
13599
+ output: computeUsage("$30.00 / 1M tokens"),
13600
+ },
13242
13601
  },
13243
- },
13244
- /**/
13245
- /**/
13246
- {
13247
- modelVariant: 'CHAT',
13248
- modelTitle: 'gpt-3.5-turbo-1106',
13249
- modelName: 'gpt-3.5-turbo-1106',
13250
- pricing: {
13251
- prompt: computeUsage("$1.00 / 1M tokens"),
13252
- output: computeUsage("$2.00 / 1M tokens"),
13602
+ /**/
13603
+ /**/
13604
+ {
13605
+ modelVariant: 'CHAT',
13606
+ modelTitle: 'gpt-3.5-turbo-1106',
13607
+ modelName: 'gpt-3.5-turbo-1106',
13608
+ pricing: {
13609
+ prompt: computeUsage("$1.00 / 1M tokens"),
13610
+ output: computeUsage("$2.00 / 1M tokens"),
13611
+ },
13253
13612
  },
13254
- },
13255
- /**/
13256
- /**/
13257
- {
13258
- modelVariant: 'CHAT',
13259
- modelTitle: 'gpt-4-turbo',
13260
- modelName: 'gpt-4-turbo',
13261
- pricing: {
13262
- prompt: computeUsage("$10.00 / 1M tokens"),
13263
- output: computeUsage("$30.00 / 1M tokens"),
13613
+ /**/
13614
+ /**/
13615
+ {
13616
+ modelVariant: 'CHAT',
13617
+ modelTitle: 'gpt-4-turbo',
13618
+ modelName: 'gpt-4-turbo',
13619
+ pricing: {
13620
+ prompt: computeUsage("$10.00 / 1M tokens"),
13621
+ output: computeUsage("$30.00 / 1M tokens"),
13622
+ },
13264
13623
  },
13265
- },
13266
- /**/
13267
- /**/
13268
- {
13269
- modelVariant: 'COMPLETION',
13270
- modelTitle: 'gpt-3.5-turbo-instruct-0914',
13271
- modelName: 'gpt-3.5-turbo-instruct-0914',
13272
- pricing: {
13273
- prompt: computeUsage("$1.50 / 1M tokens"),
13274
- output: computeUsage("$2.00 / 1M tokens"), // <- For gpt-3.5-turbo-instruct
13624
+ /**/
13625
+ /**/
13626
+ {
13627
+ modelVariant: 'COMPLETION',
13628
+ modelTitle: 'gpt-3.5-turbo-instruct-0914',
13629
+ modelName: 'gpt-3.5-turbo-instruct-0914',
13630
+ pricing: {
13631
+ prompt: computeUsage("$1.50 / 1M tokens"),
13632
+ output: computeUsage("$2.00 / 1M tokens"), // <- For gpt-3.5-turbo-instruct
13633
+ },
13275
13634
  },
13276
- },
13277
- /**/
13278
- /**/
13279
- {
13280
- modelVariant: 'COMPLETION',
13281
- modelTitle: 'gpt-3.5-turbo-instruct',
13282
- modelName: 'gpt-3.5-turbo-instruct',
13283
- pricing: {
13284
- prompt: computeUsage("$1.50 / 1M tokens"),
13285
- output: computeUsage("$2.00 / 1M tokens"),
13635
+ /**/
13636
+ /**/
13637
+ {
13638
+ modelVariant: 'COMPLETION',
13639
+ modelTitle: 'gpt-3.5-turbo-instruct',
13640
+ modelName: 'gpt-3.5-turbo-instruct',
13641
+ pricing: {
13642
+ prompt: computeUsage("$1.50 / 1M tokens"),
13643
+ output: computeUsage("$2.00 / 1M tokens"),
13644
+ },
13286
13645
  },
13287
- },
13288
- /**/
13289
- /*/
13290
- {
13291
- modelTitle: 'tts-1',
13292
- modelName: 'tts-1',
13293
- },
13294
- /**/
13295
- /**/
13296
- {
13297
- modelVariant: 'CHAT',
13298
- modelTitle: 'gpt-3.5-turbo',
13299
- modelName: 'gpt-3.5-turbo',
13300
- pricing: {
13301
- prompt: computeUsage("$3.00 / 1M tokens"),
13302
- output: computeUsage("$6.00 / 1M tokens"), // <- Not sure, refer to gpt-3.5-turbo in Fine-tuning models
13646
+ /**/
13647
+ /*/
13648
+ {
13649
+ modelTitle: 'tts-1',
13650
+ modelName: 'tts-1',
13651
+ },
13652
+ /**/
13653
+ /**/
13654
+ {
13655
+ modelVariant: 'CHAT',
13656
+ modelTitle: 'gpt-3.5-turbo',
13657
+ modelName: 'gpt-3.5-turbo',
13658
+ pricing: {
13659
+ prompt: computeUsage("$3.00 / 1M tokens"),
13660
+ output: computeUsage("$6.00 / 1M tokens"), // <- Not sure, refer to gpt-3.5-turbo in Fine-tuning models
13661
+ },
13303
13662
  },
13304
- },
13305
- /**/
13306
- /**/
13307
- {
13308
- modelVariant: 'CHAT',
13309
- modelTitle: 'gpt-3.5-turbo-0301',
13310
- modelName: 'gpt-3.5-turbo-0301',
13311
- pricing: {
13312
- prompt: computeUsage("$1.50 / 1M tokens"),
13313
- output: computeUsage("$2.00 / 1M tokens"),
13663
+ /**/
13664
+ /**/
13665
+ {
13666
+ modelVariant: 'CHAT',
13667
+ modelTitle: 'gpt-3.5-turbo-0301',
13668
+ modelName: 'gpt-3.5-turbo-0301',
13669
+ pricing: {
13670
+ prompt: computeUsage("$1.50 / 1M tokens"),
13671
+ output: computeUsage("$2.00 / 1M tokens"),
13672
+ },
13314
13673
  },
13315
- },
13316
- /**/
13317
- /**/
13318
- {
13319
- modelVariant: 'COMPLETION',
13320
- modelTitle: 'babbage-002',
13321
- modelName: 'babbage-002',
13322
- pricing: {
13323
- prompt: computeUsage("$0.40 / 1M tokens"),
13324
- output: computeUsage("$0.40 / 1M tokens"), // <- Not sure
13674
+ /**/
13675
+ /**/
13676
+ {
13677
+ modelVariant: 'COMPLETION',
13678
+ modelTitle: 'babbage-002',
13679
+ modelName: 'babbage-002',
13680
+ pricing: {
13681
+ prompt: computeUsage("$0.40 / 1M tokens"),
13682
+ output: computeUsage("$0.40 / 1M tokens"), // <- Not sure
13683
+ },
13325
13684
  },
13326
- },
13327
- /**/
13328
- /**/
13329
- {
13330
- modelVariant: 'CHAT',
13331
- modelTitle: 'gpt-4-1106-preview',
13332
- modelName: 'gpt-4-1106-preview',
13333
- pricing: {
13334
- prompt: computeUsage("$10.00 / 1M tokens"),
13335
- output: computeUsage("$30.00 / 1M tokens"),
13685
+ /**/
13686
+ /**/
13687
+ {
13688
+ modelVariant: 'CHAT',
13689
+ modelTitle: 'gpt-4-1106-preview',
13690
+ modelName: 'gpt-4-1106-preview',
13691
+ pricing: {
13692
+ prompt: computeUsage("$10.00 / 1M tokens"),
13693
+ output: computeUsage("$30.00 / 1M tokens"),
13694
+ },
13336
13695
  },
13337
- },
13338
- /**/
13339
- /**/
13340
- {
13341
- modelVariant: 'CHAT',
13342
- modelTitle: 'gpt-4-0125-preview',
13343
- modelName: 'gpt-4-0125-preview',
13344
- pricing: {
13345
- prompt: computeUsage("$10.00 / 1M tokens"),
13346
- output: computeUsage("$30.00 / 1M tokens"),
13696
+ /**/
13697
+ /**/
13698
+ {
13699
+ modelVariant: 'CHAT',
13700
+ modelTitle: 'gpt-4-0125-preview',
13701
+ modelName: 'gpt-4-0125-preview',
13702
+ pricing: {
13703
+ prompt: computeUsage("$10.00 / 1M tokens"),
13704
+ output: computeUsage("$30.00 / 1M tokens"),
13705
+ },
13347
13706
  },
13348
- },
13349
- /**/
13350
- /*/
13351
- {
13352
- modelTitle: 'tts-1-1106',
13353
- modelName: 'tts-1-1106',
13354
- },
13355
- /**/
13356
- /**/
13357
- {
13358
- modelVariant: 'CHAT',
13359
- modelTitle: 'gpt-3.5-turbo-0125',
13360
- modelName: 'gpt-3.5-turbo-0125',
13361
- pricing: {
13362
- prompt: computeUsage("$0.50 / 1M tokens"),
13363
- output: computeUsage("$1.50 / 1M tokens"),
13707
+ /**/
13708
+ /*/
13709
+ {
13710
+ modelTitle: 'tts-1-1106',
13711
+ modelName: 'tts-1-1106',
13712
+ },
13713
+ /**/
13714
+ /**/
13715
+ {
13716
+ modelVariant: 'CHAT',
13717
+ modelTitle: 'gpt-3.5-turbo-0125',
13718
+ modelName: 'gpt-3.5-turbo-0125',
13719
+ pricing: {
13720
+ prompt: computeUsage("$0.50 / 1M tokens"),
13721
+ output: computeUsage("$1.50 / 1M tokens"),
13722
+ },
13364
13723
  },
13365
- },
13366
- /**/
13367
- /**/
13368
- {
13369
- modelVariant: 'CHAT',
13370
- modelTitle: 'gpt-4-turbo-preview',
13371
- modelName: 'gpt-4-turbo-preview',
13372
- pricing: {
13373
- prompt: computeUsage("$10.00 / 1M tokens"),
13374
- output: computeUsage("$30.00 / 1M tokens"), // <- Not sure, just for gpt-4-turbo
13724
+ /**/
13725
+ /**/
13726
+ {
13727
+ modelVariant: 'CHAT',
13728
+ modelTitle: 'gpt-4-turbo-preview',
13729
+ modelName: 'gpt-4-turbo-preview',
13730
+ pricing: {
13731
+ prompt: computeUsage("$10.00 / 1M tokens"),
13732
+ output: computeUsage("$30.00 / 1M tokens"), // <- Not sure, just for gpt-4-turbo
13733
+ },
13375
13734
  },
13376
- },
13377
- /**/
13378
- /**/
13379
- {
13380
- modelVariant: 'EMBEDDING',
13381
- modelTitle: 'text-embedding-3-large',
13382
- modelName: 'text-embedding-3-large',
13383
- pricing: {
13384
- prompt: computeUsage("$0.13 / 1M tokens"),
13385
- // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
13386
- output: 0, // <- Note: [🆖] In Embedding models you don't pay for output
13735
+ /**/
13736
+ /**/
13737
+ {
13738
+ modelVariant: 'EMBEDDING',
13739
+ modelTitle: 'text-embedding-3-large',
13740
+ modelName: 'text-embedding-3-large',
13741
+ pricing: {
13742
+ prompt: computeUsage("$0.13 / 1M tokens"),
13743
+ // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
13744
+ output: 0, // <- Note: [🆖] In Embedding models you don't pay for output
13745
+ },
13387
13746
  },
13388
- },
13389
- /**/
13390
- /**/
13391
- {
13392
- modelVariant: 'EMBEDDING',
13393
- modelTitle: 'text-embedding-3-small',
13394
- modelName: 'text-embedding-3-small',
13395
- pricing: {
13396
- prompt: computeUsage("$0.02 / 1M tokens"),
13397
- // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
13398
- output: 0, // <- Note: [🆖] In Embedding models you don't pay for output
13747
+ /**/
13748
+ /**/
13749
+ {
13750
+ modelVariant: 'EMBEDDING',
13751
+ modelTitle: 'text-embedding-3-small',
13752
+ modelName: 'text-embedding-3-small',
13753
+ pricing: {
13754
+ prompt: computeUsage("$0.02 / 1M tokens"),
13755
+ // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
13756
+ output: 0, // <- Note: [🆖] In Embedding models you don't pay for output
13757
+ },
13399
13758
  },
13400
- },
13401
- /**/
13402
- /**/
13403
- {
13404
- modelVariant: 'CHAT',
13405
- modelTitle: 'gpt-3.5-turbo-0613',
13406
- modelName: 'gpt-3.5-turbo-0613',
13407
- pricing: {
13408
- prompt: computeUsage("$1.50 / 1M tokens"),
13409
- output: computeUsage("$2.00 / 1M tokens"),
13759
+ /**/
13760
+ /**/
13761
+ {
13762
+ modelVariant: 'CHAT',
13763
+ modelTitle: 'gpt-3.5-turbo-0613',
13764
+ modelName: 'gpt-3.5-turbo-0613',
13765
+ pricing: {
13766
+ prompt: computeUsage("$1.50 / 1M tokens"),
13767
+ output: computeUsage("$2.00 / 1M tokens"),
13768
+ },
13410
13769
  },
13411
- },
13412
- /**/
13413
- /**/
13414
- {
13415
- modelVariant: 'EMBEDDING',
13416
- modelTitle: 'text-embedding-ada-002',
13417
- modelName: 'text-embedding-ada-002',
13418
- pricing: {
13419
- prompt: computeUsage("$0.1 / 1M tokens"),
13420
- // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
13421
- output: 0, // <- Note: [🆖] In Embedding models you don't pay for output
13770
+ /**/
13771
+ /**/
13772
+ {
13773
+ modelVariant: 'EMBEDDING',
13774
+ modelTitle: 'text-embedding-ada-002',
13775
+ modelName: 'text-embedding-ada-002',
13776
+ pricing: {
13777
+ prompt: computeUsage("$0.1 / 1M tokens"),
13778
+ // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
13779
+ output: 0, // <- Note: [🆖] In Embedding models you don't pay for output
13780
+ },
13422
13781
  },
13423
- },
13424
- /**/
13425
- /*/
13426
- {
13427
- modelVariant: 'CHAT',
13428
- modelTitle: 'gpt-4-1106-vision-preview',
13429
- modelName: 'gpt-4-1106-vision-preview',
13430
- },
13431
- /**/
13432
- /*/
13433
- {
13434
- modelVariant: 'CHAT',
13435
- modelTitle: 'gpt-4-vision-preview',
13436
- modelName: 'gpt-4-vision-preview',
13437
- pricing: {
13438
- prompt: computeUsage(`$10.00 / 1M tokens`),
13439
- output: computeUsage(`$30.00 / 1M tokens`),
13782
+ /**/
13783
+ /*/
13784
+ {
13785
+ modelVariant: 'CHAT',
13786
+ modelTitle: 'gpt-4-1106-vision-preview',
13787
+ modelName: 'gpt-4-1106-vision-preview',
13440
13788
  },
13441
- },
13442
- /**/
13443
- /**/
13444
- {
13445
- modelVariant: 'CHAT',
13446
- modelTitle: 'gpt-4o-2024-05-13',
13447
- modelName: 'gpt-4o-2024-05-13',
13448
- pricing: {
13449
- prompt: computeUsage("$5.00 / 1M tokens"),
13450
- output: computeUsage("$15.00 / 1M tokens"),
13789
+ /**/
13790
+ /*/
13791
+ {
13792
+ modelVariant: 'CHAT',
13793
+ modelTitle: 'gpt-4-vision-preview',
13794
+ modelName: 'gpt-4-vision-preview',
13795
+ pricing: {
13796
+ prompt: computeUsage(`$10.00 / 1M tokens`),
13797
+ output: computeUsage(`$30.00 / 1M tokens`),
13798
+ },
13799
+ },
13800
+ /**/
13801
+ /**/
13802
+ {
13803
+ modelVariant: 'CHAT',
13804
+ modelTitle: 'gpt-4o-2024-05-13',
13805
+ modelName: 'gpt-4o-2024-05-13',
13806
+ pricing: {
13807
+ prompt: computeUsage("$5.00 / 1M tokens"),
13808
+ output: computeUsage("$15.00 / 1M tokens"),
13809
+ },
13810
+ //TODO: [main] !!! Add gpt-4o-mini-2024-07-18 and all others to be up to date
13451
13811
  },
13452
- //TODO: [main] !!! Add gpt-4o-mini-2024-07-18 and all others to be up to date
13453
- },
13454
- /**/
13455
- /**/
13456
- {
13457
- modelVariant: 'CHAT',
13458
- modelTitle: 'gpt-4o',
13459
- modelName: 'gpt-4o',
13460
- pricing: {
13461
- prompt: computeUsage("$5.00 / 1M tokens"),
13462
- output: computeUsage("$15.00 / 1M tokens"),
13812
+ /**/
13813
+ /**/
13814
+ {
13815
+ modelVariant: 'CHAT',
13816
+ modelTitle: 'gpt-4o',
13817
+ modelName: 'gpt-4o',
13818
+ pricing: {
13819
+ prompt: computeUsage("$5.00 / 1M tokens"),
13820
+ output: computeUsage("$15.00 / 1M tokens"),
13821
+ },
13463
13822
  },
13464
- },
13465
- /**/
13466
- /**/
13467
- {
13468
- modelVariant: 'CHAT',
13469
- modelTitle: 'o1-preview',
13470
- modelName: 'o1-preview',
13471
- pricing: {
13472
- prompt: computeUsage("$15.00 / 1M tokens"),
13473
- output: computeUsage("$60.00 / 1M tokens"),
13823
+ /**/
13824
+ /**/
13825
+ {
13826
+ modelVariant: 'CHAT',
13827
+ modelTitle: 'o1-preview',
13828
+ modelName: 'o1-preview',
13829
+ pricing: {
13830
+ prompt: computeUsage("$15.00 / 1M tokens"),
13831
+ output: computeUsage("$60.00 / 1M tokens"),
13832
+ },
13474
13833
  },
13475
- },
13476
- /**/
13477
- /**/
13478
- {
13479
- modelVariant: 'CHAT',
13480
- modelTitle: 'o1-preview-2024-09-12',
13481
- modelName: 'o1-preview-2024-09-12',
13482
- // <- TODO: [💩] Some better system to organize these date suffixes and versions
13483
- pricing: {
13484
- prompt: computeUsage("$15.00 / 1M tokens"),
13485
- output: computeUsage("$60.00 / 1M tokens"),
13834
+ /**/
13835
+ /**/
13836
+ {
13837
+ modelVariant: 'CHAT',
13838
+ modelTitle: 'o1-preview-2024-09-12',
13839
+ modelName: 'o1-preview-2024-09-12',
13840
+ // <- TODO: [💩] Some better system to organize these date suffixes and versions
13841
+ pricing: {
13842
+ prompt: computeUsage("$15.00 / 1M tokens"),
13843
+ output: computeUsage("$60.00 / 1M tokens"),
13844
+ },
13486
13845
  },
13487
- },
13488
- /**/
13489
- /**/
13490
- {
13491
- modelVariant: 'CHAT',
13492
- modelTitle: 'o1-mini',
13493
- modelName: 'o1-mini',
13494
- pricing: {
13495
- prompt: computeUsage("$3.00 / 1M tokens"),
13496
- output: computeUsage("$12.00 / 1M tokens"),
13846
+ /**/
13847
+ /**/
13848
+ {
13849
+ modelVariant: 'CHAT',
13850
+ modelTitle: 'o1-mini',
13851
+ modelName: 'o1-mini',
13852
+ pricing: {
13853
+ prompt: computeUsage("$3.00 / 1M tokens"),
13854
+ output: computeUsage("$12.00 / 1M tokens"),
13855
+ },
13497
13856
  },
13498
- },
13499
- /**/
13500
- /**/
13501
- {
13502
- modelVariant: 'CHAT',
13503
- modelTitle: 'o1-mini-2024-09-12',
13504
- modelName: 'o1-mini-2024-09-12',
13505
- pricing: {
13506
- prompt: computeUsage("$3.00 / 1M tokens"),
13507
- output: computeUsage("$12.00 / 1M tokens"),
13857
+ /**/
13858
+ /**/
13859
+ {
13860
+ modelVariant: 'CHAT',
13861
+ modelTitle: 'o1-mini-2024-09-12',
13862
+ modelName: 'o1-mini-2024-09-12',
13863
+ pricing: {
13864
+ prompt: computeUsage("$3.00 / 1M tokens"),
13865
+ output: computeUsage("$12.00 / 1M tokens"),
13866
+ },
13508
13867
  },
13509
- },
13510
- /**/
13511
- /**/
13512
- {
13513
- modelVariant: 'CHAT',
13514
- modelTitle: 'gpt-3.5-turbo-16k-0613',
13515
- modelName: 'gpt-3.5-turbo-16k-0613',
13516
- pricing: {
13517
- prompt: computeUsage("$3.00 / 1M tokens"),
13518
- output: computeUsage("$4.00 / 1M tokens"),
13868
+ /**/
13869
+ /**/
13870
+ {
13871
+ modelVariant: 'CHAT',
13872
+ modelTitle: 'gpt-3.5-turbo-16k-0613',
13873
+ modelName: 'gpt-3.5-turbo-16k-0613',
13874
+ pricing: {
13875
+ prompt: computeUsage("$3.00 / 1M tokens"),
13876
+ output: computeUsage("$4.00 / 1M tokens"),
13877
+ },
13519
13878
  },
13520
- },
13521
- /**/
13522
- ]);
13879
+ /**/
13880
+ ],
13881
+ });
13523
13882
  /**
13524
13883
  * Note: [🤖] Add models of new variant
13525
13884
  * TODO: [🧠] Some mechanism to propagate unsureness
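Note on the model list above: the paired `/**/` markers act as comment toggles. Turning an opening `/**/` into `/*/` comments out everything up to the next `/**/`, which is how entries such as `tts-1`, `tts-1-1106` and the vision previews are kept in the source but disabled. Each active entry prices prompts and completions through `computeUsage`, which evidently turns a human-readable string such as `"$1.00 / 1M tokens"` into a per-token dollar rate. A minimal sketch of that conversion, assuming the `"$<amount> / 1M tokens"` format used throughout the list (the parsing logic here is an illustration, not the library's actual implementation):

    /**
     * Converts a price string like "$1.00 / 1M tokens" into dollars per single token.
     * Assumes the "$<amount> / 1M tokens" format seen in the model list above.
     */
    function computeUsage(priceString: string): number {
        const match = /^\$([0-9.]+)\s*\/\s*1M tokens$/.exec(priceString.trim());
        if (match === null) {
            throw new Error(`Unexpected price format: "${priceString}"`);
        }
        return Number(match[1]) / 1_000_000; // <- Dollars per one token
    }

    computeUsage('$10.00 / 1M tokens'); // -> 0.00001 ($ per token)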
@@ -13694,18 +14053,23 @@
13694
14053
  input: __assign({ tokensCount: uncertainNumber((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.promptTokens) }, computeUsageCounts(prompt.content)),
13695
14054
  output: __assign({ tokensCount: uncertainNumber((_c = rawResponse.usage) === null || _c === void 0 ? void 0 : _c.completionTokens) }, computeUsageCounts(prompt.content)),
13696
14055
  };
13697
- return [2 /*return*/, $asDeeplyFrozenSerializableJson('AzureOpenAiExecutionTools ChatPromptResult', {
13698
- content: resultContent,
13699
- modelName: modelName,
13700
- timing: {
13701
- start: start,
13702
- complete: complete,
14056
+ return [2 /*return*/, exportJson({
14057
+ name: 'promptResult',
14058
+ message: "Result of `AzureOpenAiExecutionTools.callChatModel`",
14059
+ order: [],
14060
+ value: {
14061
+ content: resultContent,
14062
+ modelName: modelName,
14063
+ timing: {
14064
+ start: start,
14065
+ complete: complete,
14066
+ },
14067
+ usage: usage,
14068
+ rawPromptContent: rawPromptContent,
14069
+ rawRequest: rawRequest,
14070
+ rawResponse: __assign(__assign({}, rawResponse), { created: rawResponse.created.toISOString() }),
14071
+ // <- [🗯]
13703
14072
  },
13704
- usage: usage,
13705
- rawPromptContent: rawPromptContent,
13706
- rawRequest: rawRequest,
13707
- rawResponse: __assign(__assign({}, rawResponse), { created: rawResponse.created.toISOString() }),
13708
- // <- [🗯]
13709
14073
  })];
13710
14074
  case 4:
13711
14075
  error_1 = _d.sent();
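This hunk shows a change repeated for every provider below: results were previously frozen and returned through `$asDeeplyFrozenSerializableJson(title, value)` and are now returned through `exportJson`, which takes a single self-describing options object with a `name`, a human-readable `message`, an optional `order` list and the `value` itself. Note also that `rawResponse.created` (a `Date`) is converted with `toISOString()` so the exported value stays JSON-serializable. A side-by-side sketch of the two call shapes as they appear in this diff (the import path and the exact semantics of `order` are assumptions):

    import { exportJson } from '@promptbook/utils'; // <- Assumed import path

    const value = {
        content: 'Hello!',
        modelName: 'gpt-4o',
        timing: { start: '2024-01-01T00:00:00.000Z', complete: '2024-01-01T00:00:01.000Z' },
    };

    // 0.79.x style (removed): a title string followed by the value
    // $asDeeplyFrozenSerializableJson('AzureOpenAiExecutionTools ChatPromptResult', value);

    // 0.80.x style (added): one options object describing the exported value
    const result = exportJson({
        name: 'promptResult',
        message: 'Result of `AzureOpenAiExecutionTools.callChatModel`',
        order: [], // <- Presumably the preferred key order; empty in these calls
        value,
    });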
@@ -13787,18 +14151,23 @@
13787
14151
  input: __assign({ tokensCount: uncertainNumber((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.promptTokens) }, computeUsageCounts(prompt.content)),
13788
14152
  output: __assign({ tokensCount: uncertainNumber((_c = rawResponse.usage) === null || _c === void 0 ? void 0 : _c.completionTokens) }, computeUsageCounts(prompt.content)),
13789
14153
  };
13790
- return [2 /*return*/, $asDeeplyFrozenSerializableJson('AzureOpenAiExecutionTools CompletionPromptResult', {
13791
- content: resultContent,
13792
- modelName: modelName,
13793
- timing: {
13794
- start: start,
13795
- complete: complete,
14154
+ return [2 /*return*/, exportJson({
14155
+ name: 'promptResult',
14156
+ message: "Result of `AzureOpenAiExecutionTools.callCompletionModel`",
14157
+ order: [],
14158
+ value: {
14159
+ content: resultContent,
14160
+ modelName: modelName,
14161
+ timing: {
14162
+ start: start,
14163
+ complete: complete,
14164
+ },
14165
+ usage: usage,
14166
+ rawPromptContent: rawPromptContent,
14167
+ rawRequest: rawRequest,
14168
+ rawResponse: __assign(__assign({}, rawResponse), { created: rawResponse.created.toISOString() }),
14169
+ // <- [🗯]
13796
14170
  },
13797
- usage: usage,
13798
- rawPromptContent: rawPromptContent,
13799
- rawRequest: rawRequest,
13800
- rawResponse: __assign(__assign({}, rawResponse), { created: rawResponse.created.toISOString() }),
13801
- // <- [🗯]
13802
14171
  })];
13803
14172
  case 4:
13804
14173
  error_2 = _d.sent();
@@ -14013,7 +14382,7 @@
14013
14382
  return modelVariant === 'CHAT';
14014
14383
  })) === null || _a === void 0 ? void 0 : _a.modelName);
14015
14384
  if (!modelName) {
14016
- throw new PipelineExecutionError(spaceTrim__default["default"]("\n Can not determine which model to use.\n\n You need to provide at least one of:\n 1) In `createExecutionToolsFromVercelProvider` options, provide `availableModels` with at least one model\n 2) In `prompt.modelRequirements`, provide `modelName` with the name of the model to use\n \n "));
14385
+ throw new PipelineExecutionError(spaceTrim__default["default"]("\n Can not determine which model to use.\n\n You need to provide at least one of:\n 1) In `createExecutionToolsFromVercelProvider` options, provide `availableModels` with at least one model\n 2) In `prompt.modelRequirements`, provide `modelName` with the name of the model to use\n\n "));
14017
14386
  }
14018
14387
  return [4 /*yield*/, vercelProvider.chat(modelName, __assign({ user: (userId === null || userId === void 0 ? void 0 : userId.toString()) || undefined }, additionalChatSettings))];
14019
14388
  case 1:
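Apart from trimmed trailing whitespace in the error string, this hunk is unchanged, and the message documents the model-resolution rule of the Vercel adapter: a chat model name must come either from `availableModels` passed in the `createExecutionToolsFromVercelProvider` options or from `prompt.modelRequirements.modelName`. A sketch of the two ways to satisfy it (option names follow the error message and the surrounding code; the full options shape and import paths are assumptions):

    import { openai } from '@ai-sdk/openai'; // <- Any Vercel AI SDK provider with a `.chat()` method
    import { createExecutionToolsFromVercelProvider } from '@promptbook/vercel'; // <- Assumed import path

    // Option 1: declare at least one CHAT model when creating the tools
    const tools = createExecutionToolsFromVercelProvider({
        vercelProvider: openai,
        availableModels: [{ modelVariant: 'CHAT', modelTitle: 'gpt-4o', modelName: 'gpt-4o' }],
    });

    // Option 2: pin the model on each prompt instead
    const prompt = {
        content: 'Hello!',
        modelRequirements: { modelVariant: 'CHAT', modelName: 'gpt-4o' },
    };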
@@ -14076,18 +14445,22 @@
14076
14445
  }
14077
14446
  complete = $getCurrentDate();
14078
14447
  usage = UNCERTAIN_USAGE;
14079
- return [2 /*return*/, $asDeeplyFrozenSerializableJson('createExecutionToolsFromVercelProvider ChatPromptResult', {
14080
- content: rawResponse.text,
14081
- modelName: modelName,
14082
- timing: {
14083
- start: start,
14084
- complete: complete,
14448
+ return [2 /*return*/, exportJson({
14449
+ name: 'promptResult',
14450
+ message: "Result of `createExecutionToolsFromVercelProvider.callChatModel`",
14451
+ value: {
14452
+ content: rawResponse.text,
14453
+ modelName: modelName,
14454
+ timing: {
14455
+ start: start,
14456
+ complete: complete,
14457
+ },
14458
+ usage: usage,
14459
+ rawPromptContent: rawPromptContent,
14460
+ rawRequest: rawRequest,
14461
+ rawResponse: asSerializable(rawResponse),
14462
+ // <- [🗯]
14085
14463
  },
14086
- usage: usage,
14087
- rawPromptContent: rawPromptContent,
14088
- rawRequest: rawRequest,
14089
- rawResponse: asSerializable(rawResponse),
14090
- // <- [🗯]
14091
14464
  })];
14092
14465
  }
14093
14466
  });
@@ -14454,18 +14827,23 @@
14454
14827
  if (resultContent === null) {
14455
14828
  throw new PipelineExecutionError('No response message from OpenAI');
14456
14829
  }
14457
- return [2 /*return*/, $asDeeplyFrozenSerializableJson('OpenAiExecutionTools ChatPromptResult', {
14458
- content: resultContent,
14459
- modelName: rawResponse.model || modelName,
14460
- timing: {
14461
- start: start,
14462
- complete: complete,
14830
+ return [2 /*return*/, exportJson({
14831
+ name: 'promptResult',
14832
+ message: "Result of `OpenAiExecutionTools.callChatModel`",
14833
+ order: [],
14834
+ value: {
14835
+ content: resultContent,
14836
+ modelName: rawResponse.model || modelName,
14837
+ timing: {
14838
+ start: start,
14839
+ complete: complete,
14840
+ },
14841
+ usage: usage,
14842
+ rawPromptContent: rawPromptContent,
14843
+ rawRequest: rawRequest,
14844
+ rawResponse: rawResponse,
14845
+ // <- [🗯]
14463
14846
  },
14464
- usage: usage,
14465
- rawPromptContent: rawPromptContent,
14466
- rawRequest: rawRequest,
14467
- rawResponse: rawResponse,
14468
- // <- [🗯]
14469
14847
  })];
14470
14848
  }
14471
14849
  });
@@ -14530,18 +14908,23 @@
14530
14908
  // eslint-disable-next-line prefer-const
14531
14909
  complete = $getCurrentDate();
14532
14910
  usage = computeOpenAiUsage(content || '', resultContent || '', rawResponse);
14533
- return [2 /*return*/, $asDeeplyFrozenSerializableJson('OpenAiExecutionTools CompletionPromptResult', {
14534
- content: resultContent,
14535
- modelName: rawResponse.model || modelName,
14536
- timing: {
14537
- start: start,
14538
- complete: complete,
14911
+ return [2 /*return*/, exportJson({
14912
+ name: 'promptResult',
14913
+ message: "Result of `OpenAiExecutionTools.callCompletionModel`",
14914
+ order: [],
14915
+ value: {
14916
+ content: resultContent,
14917
+ modelName: rawResponse.model || modelName,
14918
+ timing: {
14919
+ start: start,
14920
+ complete: complete,
14921
+ },
14922
+ usage: usage,
14923
+ rawPromptContent: rawPromptContent,
14924
+ rawRequest: rawRequest,
14925
+ rawResponse: rawResponse,
14926
+ // <- [🗯]
14539
14927
  },
14540
- usage: usage,
14541
- rawPromptContent: rawPromptContent,
14542
- rawRequest: rawRequest,
14543
- rawResponse: rawResponse,
14544
- // <- [🗯]
14545
14928
  })];
14546
14929
  }
14547
14930
  });
@@ -14598,18 +14981,23 @@
14598
14981
  usage = computeOpenAiUsage(content || '', '',
14599
14982
  // <- Note: Embedding does not have result content
14600
14983
  rawResponse);
14601
- return [2 /*return*/, $asDeeplyFrozenSerializableJson('OpenAiExecutionTools EmbeddingPromptResult', {
14602
- content: resultContent,
14603
- modelName: rawResponse.model || modelName,
14604
- timing: {
14605
- start: start,
14606
- complete: complete,
14984
+ return [2 /*return*/, exportJson({
14985
+ name: 'promptResult',
14986
+ message: "Result of `OpenAiExecutionTools.callEmbeddingModel`",
14987
+ order: [],
14988
+ value: {
14989
+ content: resultContent,
14990
+ modelName: rawResponse.model || modelName,
14991
+ timing: {
14992
+ start: start,
14993
+ complete: complete,
14994
+ },
14995
+ usage: usage,
14996
+ rawPromptContent: rawPromptContent,
14997
+ rawRequest: rawRequest,
14998
+ rawResponse: rawResponse,
14999
+ // <- [🗯]
14607
15000
  },
14608
- usage: usage,
14609
- rawPromptContent: rawPromptContent,
14610
- rawRequest: rawRequest,
14611
- rawResponse: rawResponse,
14612
- // <- [🗯]
14613
15001
  })];
14614
15002
  }
14615
15003
  });
@@ -14805,20 +15193,25 @@
14805
15193
  if (resultContent === null) {
14806
15194
  throw new PipelineExecutionError('No response message from OpenAI');
14807
15195
  }
14808
- return [2 /*return*/, $asDeeplyFrozenSerializableJson('OpenAiAssistantExecutionTools ChatPromptResult', {
14809
- content: resultContent,
14810
- modelName: 'assistant',
14811
- // <- TODO: [🥘] Detect used model in assistant
14812
- // ?> model: rawResponse.model || modelName,
14813
- timing: {
14814
- start: start,
14815
- complete: complete,
15196
+ return [2 /*return*/, exportJson({
15197
+ name: 'promptResult',
15198
+ message: "Result of `OpenAiAssistantExecutionTools.callChatModel`",
15199
+ order: [],
15200
+ value: {
15201
+ content: resultContent,
15202
+ modelName: 'assistant',
15203
+ // <- TODO: [🥘] Detect used model in assistant
15204
+ // ?> model: rawResponse.model || modelName,
15205
+ timing: {
15206
+ start: start,
15207
+ complete: complete,
15208
+ },
15209
+ usage: usage,
15210
+ rawPromptContent: rawPromptContent,
15211
+ rawRequest: rawRequest,
15212
+ rawResponse: rawResponse,
15213
+ // <- [🗯]
14816
15214
  },
14817
- usage: usage,
14818
- rawPromptContent: rawPromptContent,
14819
- rawRequest: rawRequest,
14820
- rawResponse: rawResponse,
14821
- // <- [🗯]
14822
15215
  })];
14823
15216
  }
14824
15217
  });
@@ -15142,7 +15535,7 @@
15142
15535
  documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/@@',
15143
15536
  isAvilableInBrowser: true,
15144
15537
  requiredExecutables: [],
15145
- }); /* <- TODO: [🤛] */
15538
+ }); /* <- Note: [🤛] */
15146
15539
  /**
15147
15540
  * Registration of known scraper metadata
15148
15541
  *
@@ -15339,7 +15732,7 @@
15339
15732
  documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/@@',
15340
15733
  isAvilableInBrowser: false,
15341
15734
  requiredExecutables: ['Pandoc'],
15342
- }); /* <- TODO: [🤛] */
15735
+ }); /* <- Note: [🤛] */
15343
15736
  /**
15344
15737
  * Registration of known scraper metadata
15345
15738
  *
@@ -15506,7 +15899,7 @@
15506
15899
  'LibreOffice',
15507
15900
  // <- TODO: [🧠] Should be 'LibreOffice' here, its dependency of dependency
15508
15901
  ],
15509
- }); /* <- TODO: [🤛] */
15902
+ }); /* <- Note: [🤛] */
15510
15903
  /**
15511
15904
  * Registration of known scraper metadata
15512
15905
  *
@@ -15672,7 +16065,7 @@
15672
16065
  */
15673
16066
  var createLegacyDocumentScraper = Object.assign(function (tools, options) {
15674
16067
  return new LegacyDocumentScraper(tools, options);
15675
- }, legacyDocumentScraperMetadata); /* <- TODO: [🤛] */
16068
+ }, legacyDocumentScraperMetadata); /* <- Note: [🤛] */
15676
16069
  /**
15677
16070
  * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
15678
16071
  */
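The scraper factories in the surrounding hunks (unchanged here except that the trailing `TODO: [🤛]` comments become `Note: [🤛]`) all follow one pattern: `Object.assign` merges the static scraper metadata onto the factory function itself, so a single value works both as a callable constructor and as a metadata record for the scraper registry. A reduced sketch of the pattern, with field names taken from this diff and the metadata type itself assumed:

    type ScraperMetadata = {
        documentationUrl: string;
        isAvilableInBrowser: boolean; // <- Spelled as in the package itself
        requiredExecutables: string[];
    };

    class LegacyDocumentScraper {
        public constructor(public readonly tools: unknown, public readonly options: unknown) {}
    }

    const legacyDocumentScraperMetadata: ScraperMetadata = {
        documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/@@',
        isAvilableInBrowser: false,
        requiredExecutables: ['LibreOffice'],
    };

    // One value acts both as a factory and as a registry metadata record:
    const createLegacyDocumentScraper = Object.assign(
        (tools: unknown, options: unknown) => new LegacyDocumentScraper(tools, options),
        legacyDocumentScraperMetadata,
    );

    createLegacyDocumentScraper(undefined, undefined); // -> a LegacyDocumentScraper instance
    createLegacyDocumentScraper.requiredExecutables;   // -> ['LibreOffice'], readable without instantiating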
@@ -15698,7 +16091,7 @@
15698
16091
  */
15699
16092
  var createDocumentScraper = Object.assign(function (tools, options) {
15700
16093
  return new DocumentScraper(tools, options);
15701
- }, documentScraperMetadata); /* <- TODO: [🤛] */
16094
+ }, documentScraperMetadata); /* <- Note: [🤛] */
15702
16095
  /**
15703
16096
  * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
15704
16097
  */
@@ -15724,7 +16117,7 @@
15724
16117
  */
15725
16118
  var createMarkdownScraper = Object.assign(function (tools, options) {
15726
16119
  return new MarkdownScraper(tools, options);
15727
- }, markdownScraperMetadata); /* <- TODO: [🤛] */
16120
+ }, markdownScraperMetadata); /* <- Note: [🤛] */
15728
16121
  /**
15729
16122
  * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
15730
16123
  */
@@ -15756,7 +16149,7 @@
15756
16149
  documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/@@',
15757
16150
  isAvilableInBrowser: true,
15758
16151
  requiredExecutables: [],
15759
- }); /* <- TODO: [🤛] */
16152
+ }); /* <- Note: [🤛] */
15760
16153
  /**
15761
16154
  * Registration of known scraper metadata
15762
16155
  *
@@ -15836,7 +16229,7 @@
15836
16229
  */
15837
16230
  var createPdfScraper = Object.assign(function (tools, options) {
15838
16231
  return new PdfScraper(tools, options);
15839
- }, pdfScraperMetadata); /* <- TODO: [🤛] */
16232
+ }, pdfScraperMetadata); /* <- Note: [🤛] */
15840
16233
  /**
15841
16234
  * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
15842
16235
  */
@@ -15868,7 +16261,7 @@
15868
16261
  documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/@@',
15869
16262
  isAvilableInBrowser: false,
15870
16263
  requiredExecutables: [],
15871
- }); /* <- TODO: [🤛] */
16264
+ }); /* <- Note: [🤛] */
15872
16265
  /**
15873
16266
  * Registration of known scraper metadata
15874
16267
  *
@@ -16037,7 +16430,7 @@
16037
16430
  */
16038
16431
  var createWebsiteScraper = Object.assign(function (tools, options) {
16039
16432
  return new WebsiteScraper(tools, options);
16040
- }, websiteScraperMetadata); /* <- TODO: [🤛] */
16433
+ }, websiteScraperMetadata); /* <- Note: [🤛] */
16041
16434
  /**
16042
16435
  * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
16043
16436
  */