@promptbook/cli 0.78.4 → 0.80.0-0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36) hide show
  1. package/README.md +4 -0
  2. package/esm/index.es.js +1332 -751
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/books/index.d.ts +6 -6
  5. package/esm/typings/src/_packages/core.index.d.ts +4 -2
  6. package/esm/typings/src/_packages/types.index.d.ts +10 -0
  7. package/esm/typings/src/_packages/utils.index.d.ts +4 -0
  8. package/esm/typings/src/cli/cli-commands/runInteractiveChatbot.d.ts +32 -0
  9. package/esm/typings/src/commands/_common/types/CommandParser.d.ts +5 -2
  10. package/esm/typings/src/config.d.ts +0 -25
  11. package/esm/typings/src/constants.d.ts +35 -0
  12. package/esm/typings/src/conversion/pipelineJsonToString.d.ts +1 -0
  13. package/esm/typings/src/conversion/pipelineStringToJsonSync.d.ts +1 -0
  14. package/esm/typings/src/formfactors/generator/GeneratorFormfactorDefinition.d.ts +6 -1
  15. package/esm/typings/src/formfactors/index.d.ts +12 -2
  16. package/esm/typings/src/formfactors/matcher/MatcherFormfactorDefinition.d.ts +6 -1
  17. package/esm/typings/src/high-level-abstractions/_common/HighLevelAbstraction.d.ts +20 -0
  18. package/esm/typings/src/high-level-abstractions/implicit-formfactor/ImplicitFormfactorHla.d.ts +10 -0
  19. package/esm/typings/src/high-level-abstractions/index.d.ts +44 -0
  20. package/esm/typings/src/high-level-abstractions/quick-chatbot/QuickChatbotHla.d.ts +10 -0
  21. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +1 -1
  22. package/esm/typings/src/llm-providers/remote/startRemoteServer.d.ts +1 -1
  23. package/esm/typings/src/prepare/prepareTasks.d.ts +1 -0
  24. package/esm/typings/src/types/typeAliases.d.ts +1 -1
  25. package/esm/typings/src/utils/normalization/orderJson.d.ts +21 -0
  26. package/esm/typings/src/utils/normalization/orderJson.test.d.ts +4 -0
  27. package/esm/typings/src/utils/organization/keepTypeImported.d.ts +9 -0
  28. package/esm/typings/src/utils/serialization/$deepFreeze.d.ts +1 -1
  29. package/esm/typings/src/utils/serialization/checkSerializableAsJson.d.ts +20 -2
  30. package/esm/typings/src/utils/serialization/deepClone.test.d.ts +1 -0
  31. package/esm/typings/src/utils/serialization/exportJson.d.ts +29 -0
  32. package/esm/typings/src/utils/serialization/isSerializableAsJson.d.ts +2 -1
  33. package/package.json +2 -1
  34. package/umd/index.umd.js +1332 -751
  35. package/umd/index.umd.js.map +1 -1
  36. package/esm/typings/src/utils/serialization/$asDeeplyFrozenSerializableJson.d.ts +0 -17
package/esm/index.es.js CHANGED
@@ -37,7 +37,7 @@ var BOOK_LANGUAGE_VERSION = '1.0.0';
37
37
  *
38
38
  * @see https://github.com/webgptorg/promptbook
39
39
  */
40
- var PROMPTBOOK_ENGINE_VERSION = '0.78.3';
40
+ var PROMPTBOOK_ENGINE_VERSION = '0.79.0';
41
41
  /**
42
42
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
43
43
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -308,41 +308,6 @@ var DEFAULT_SCRAPE_CACHE_DIRNAME = './.promptbook/scrape-cache';
308
308
  * @public exported from `@promptbook/core`
309
309
  */
310
310
  var DEFAULT_PIPELINE_COLLECTION_BASE_FILENAME = "index";
311
- /**
312
- * Nonce which is used for replacing things in strings
313
- *
314
- * @private within the repository
315
- */
316
- var REPLACING_NONCE = 'u$k42k%!V2zo34w7Fu#@QUHYPW';
317
- /**
318
- * The names of the parameters that are reserved for special purposes
319
- *
320
- * @public exported from `@promptbook/core`
321
- */
322
- var RESERVED_PARAMETER_NAMES =
323
- /* !!!!!! $asDeeplyFrozenSerializableJson('RESERVED_PARAMETER_NAMES', _____ as const); */ [
324
- 'content',
325
- 'context',
326
- 'knowledge',
327
- 'examples',
328
- 'modelName',
329
- 'currentDate',
330
- // <- TODO: list here all command names
331
- // <- TODO: Add more like 'date', 'modelName',...
332
- // <- TODO: Add [emoji] + instructions ACRY when adding new reserved parameter
333
- ];
334
- /**
335
- * @@@
336
- *
337
- * @private within the repository
338
- */
339
- var RESERVED_PARAMETER_MISSING_VALUE = 'MISSING-' + REPLACING_NONCE;
340
- /**
341
- * @@@
342
- *
343
- * @private within the repository
344
- */
345
- var RESERVED_PARAMETER_RESTRICTED = 'RESTRICTED-' + REPLACING_NONCE;
346
311
  /**
347
312
  * The thresholds for the relative time in the `moment` NPM package.
348
313
  *
@@ -398,7 +363,6 @@ var IS_PIPELINE_LOGIC_VALIDATED = just(
398
363
  // Note: In normal situations, we check the pipeline logic:
399
364
  true);
400
365
  /**
401
- * TODO: Extract `constants.ts` from `config.ts`
402
366
  * Note: [💞] Ignore a discrepancy between file name and entity name
403
367
  * TODO: [🧠][🧜‍♂️] Maybe join remoteUrl and path into single value
404
368
  */
@@ -524,6 +488,56 @@ function $provideFilesystemForNode(options) {
524
488
  * Note: [🟢] Code in this file should never be never released in packages that could be imported into browser environment
525
489
  */
526
490
 
491
+ /**
492
+ * Orders JSON object by keys
493
+ *
494
+ * @returns The same type of object as the input re-ordered
495
+ * @public exported from `@promptbook/utils`
496
+ */
497
+ function orderJson(options) {
498
+ var value = options.value, order = options.order;
499
+ var orderedValue = __assign(__assign({}, (order === undefined ? {} : Object.fromEntries(order.map(function (key) { return [key, undefined]; })))), value);
500
+ return orderedValue;
501
+ }
502
+
503
+ /**
504
+ * Freezes the given object and all its nested objects recursively
505
+ *
506
+ * Note: `$` is used to indicate that this function is not a pure function - it mutates given object
507
+ * Note: This function mutates the object and returns the original (but mutated-deep-frozen) object
508
+ *
509
+ * @returns The same object as the input, but deeply frozen
510
+ * @public exported from `@promptbook/utils`
511
+ */
512
+ function $deepFreeze(objectValue) {
513
+ var e_1, _a;
514
+ if (Array.isArray(objectValue)) {
515
+ return Object.freeze(objectValue.map(function (item) { return $deepFreeze(item); }));
516
+ }
517
+ var propertyNames = Object.getOwnPropertyNames(objectValue);
518
+ try {
519
+ for (var propertyNames_1 = __values(propertyNames), propertyNames_1_1 = propertyNames_1.next(); !propertyNames_1_1.done; propertyNames_1_1 = propertyNames_1.next()) {
520
+ var propertyName = propertyNames_1_1.value;
521
+ var value = objectValue[propertyName];
522
+ if (value && typeof value === 'object') {
523
+ $deepFreeze(value);
524
+ }
525
+ }
526
+ }
527
+ catch (e_1_1) { e_1 = { error: e_1_1 }; }
528
+ finally {
529
+ try {
530
+ if (propertyNames_1_1 && !propertyNames_1_1.done && (_a = propertyNames_1.return)) _a.call(propertyNames_1);
531
+ }
532
+ finally { if (e_1) throw e_1.error; }
533
+ }
534
+ Object.freeze(objectValue);
535
+ return objectValue;
536
+ }
537
+ /**
538
+ * TODO: [🧠] Is there a way how to meaningfully test this utility
539
+ */
540
+
527
541
  /**
528
542
  * Make error report URL for the given error
529
543
  *
@@ -578,8 +592,9 @@ var UnexpectedError = /** @class */ (function (_super) {
578
592
  * @throws UnexpectedError if the value is not serializable as JSON
579
593
  * @public exported from `@promptbook/utils`
580
594
  */
581
- function checkSerializableAsJson(name, value) {
595
+ function checkSerializableAsJson(options) {
582
596
  var e_1, _a;
597
+ var value = options.value, name = options.name, message = options.message;
583
598
  if (value === undefined) {
584
599
  throw new UnexpectedError("".concat(name, " is undefined"));
585
600
  }
@@ -603,12 +618,12 @@ function checkSerializableAsJson(name, value) {
603
618
  }
604
619
  else if (typeof value === 'object' && Array.isArray(value)) {
605
620
  for (var i = 0; i < value.length; i++) {
606
- checkSerializableAsJson("".concat(name, "[").concat(i, "]"), value[i]);
621
+ checkSerializableAsJson({ name: "".concat(name, "[").concat(i, "]"), value: value[i], message: message });
607
622
  }
608
623
  }
609
624
  else if (typeof value === 'object') {
610
625
  if (value instanceof Date) {
611
- throw new UnexpectedError(spaceTrim("\n ".concat(name, " is Date\n\n Use `string_date_iso8601` instead\n ")));
626
+ throw new UnexpectedError(spaceTrim(function (block) { return "\n `".concat(name, "` is Date\n\n Use `string_date_iso8601` instead\n\n Additional message for `").concat(name, "`:\n ").concat(block(message || '(nothing)'), "\n "); }));
612
627
  }
613
628
  else if (value instanceof Map) {
614
629
  throw new UnexpectedError("".concat(name, " is Map"));
@@ -620,7 +635,7 @@ function checkSerializableAsJson(name, value) {
620
635
  throw new UnexpectedError("".concat(name, " is RegExp"));
621
636
  }
622
637
  else if (value instanceof Error) {
623
- throw new UnexpectedError(spaceTrim("\n ".concat(name, " is unserialized Error\n\n Use function `serializeError`\n ")));
638
+ throw new UnexpectedError(spaceTrim(function (block) { return "\n `".concat(name, "` is unserialized Error\n\n Use function `serializeError`\n\n Additional message for `").concat(name, "`:\n ").concat(block(message || '(nothing)'), "\n\n "); }));
624
639
  }
625
640
  else {
626
641
  try {
@@ -630,7 +645,7 @@ function checkSerializableAsJson(name, value) {
630
645
  // Note: undefined in object is serializable - it is just omitted
631
646
  continue;
632
647
  }
633
- checkSerializableAsJson("".concat(name, ".").concat(subName), subValue);
648
+ checkSerializableAsJson({ name: "".concat(name, ".").concat(subName), value: subValue, message: message });
634
649
  }
635
650
  }
636
651
  catch (e_1_1) { e_1 = { error: e_1_1 }; }
@@ -647,7 +662,7 @@ function checkSerializableAsJson(name, value) {
647
662
  if (!(error instanceof Error)) {
648
663
  throw error;
649
664
  }
650
- throw new UnexpectedError(spaceTrim(function (block) { return "\n ".concat(name, " is not serializable\n\n ").concat(block(error.toString()), "\n "); }));
665
+ throw new UnexpectedError(spaceTrim(function (block) { return "\n `".concat(name, "` is not serializable\n\n ").concat(block(error.toString()), "\n\n Additional message for `").concat(name, "`:\n ").concat(block(message || '(nothing)'), "\n "); }));
651
666
  }
652
667
  /*
653
668
  TODO: [0] Is there some more elegant way to check circular references?
@@ -672,15 +687,134 @@ function checkSerializableAsJson(name, value) {
672
687
  }
673
688
  }
674
689
  else {
675
- throw new UnexpectedError("".concat(name, " is unknown"));
690
+ throw new UnexpectedError(spaceTrim(function (block) { return "\n `".concat(name, "` is unknown type\n\n Additional message for `").concat(name, "`:\n ").concat(block(message || '(nothing)'), "\n "); }));
676
691
  }
677
692
  }
678
693
  /**
679
- * TODO: [🧠][🛣] More elegant way to tracking than passing `name`
694
+ * TODO: Can be return type more type-safe? like `asserts options.value is JsonValue`
680
695
  * TODO: [🧠][main] !!! In-memory cache of same values to prevent multiple checks
681
696
  * Note: [🐠] This is how `checkSerializableAsJson` + `isSerializableAsJson` together can just return true/false or rich error message
682
697
  */
683
698
 
699
+ /**
700
+ * @@@
701
+ *
702
+ * @public exported from `@promptbook/utils`
703
+ */
704
+ function deepClone(objectValue) {
705
+ return JSON.parse(JSON.stringify(objectValue));
706
+ /*
707
+ !!!!!!!!
708
+ TODO: [🧠] Is there a better implementation?
709
+ > const propertyNames = Object.getOwnPropertyNames(objectValue);
710
+ > for (const propertyName of propertyNames) {
711
+ > const value = (objectValue as really_any)[propertyName];
712
+ > if (value && typeof value === 'object') {
713
+ > deepClone(value);
714
+ > }
715
+ > }
716
+ > return Object.assign({}, objectValue);
717
+ */
718
+ }
719
+ /**
720
+ * TODO: [🧠] Is there a way how to meaningfully test this utility
721
+ */
722
+
723
+ /**
724
+ * Utility to export a JSON object from a function
725
+ *
726
+ * 1) Checks if the value is serializable as JSON
727
+ * 2) Makes a deep clone of the object
728
+ * 2) Orders the object properties
729
+ * 2) Deeply freezes the cloned object
730
+ *
731
+ * Note: This function does not mutate the given object
732
+ *
733
+ * @returns The same type of object as the input but read-only and re-ordered
734
+ * @public exported from `@promptbook/utils`
735
+ */
736
+ function exportJson(options) {
737
+ var name = options.name, value = options.value, order = options.order, message = options.message;
738
+ checkSerializableAsJson({ name: name, value: value, message: message });
739
+ var orderedValue =
740
+ // TODO: Fix error "Type instantiation is excessively deep and possibly infinite."
741
+ // eslint-disable-next-line @typescript-eslint/ban-ts-comment
742
+ // @ts-ignore
743
+ order === undefined
744
+ ? deepClone(value)
745
+ : orderJson({
746
+ value: value,
747
+ // <- Note: checkSerializableAsJson asserts that the value is serializable as JSON
748
+ order: order,
749
+ });
750
+ $deepFreeze(orderedValue);
751
+ return orderedValue;
752
+ }
753
+ /**
754
+ * TODO: [🧠] Is there a way how to meaningfully test this utility
755
+ */
756
+
757
+ /**
758
+ * Order of keys in the pipeline JSON
759
+ *
760
+ * @public exported from `@promptbook/core`
761
+ */
762
+ var ORDER_OF_PIPELINE_JSON = [
763
+ 'title',
764
+ 'pipelineUrl',
765
+ 'bookVersion',
766
+ 'description',
767
+ 'formfactorName',
768
+ 'parameters',
769
+ 'tasks',
770
+ 'personas',
771
+ 'preparations',
772
+ 'knowledgeSources',
773
+ 'knowledgePieces',
774
+ ];
775
+ /**
776
+ * Nonce which is used for replacing things in strings
777
+ *
778
+ * @private within the repository
779
+ */
780
+ var REPLACING_NONCE = 'u$k42k%!V2zo34w7Fu#@QUHYPW';
781
+ /**
782
+ * @@@
783
+ *
784
+ * @private within the repository
785
+ */
786
+ var RESERVED_PARAMETER_MISSING_VALUE = 'MISSING-' + REPLACING_NONCE;
787
+ /**
788
+ * @@@
789
+ *
790
+ * @private within the repository
791
+ */
792
+ var RESERVED_PARAMETER_RESTRICTED = 'RESTRICTED-' + REPLACING_NONCE;
793
+ /**
794
+ * The names of the parameters that are reserved for special purposes
795
+ *
796
+ * @public exported from `@promptbook/core`
797
+ */
798
+ var RESERVED_PARAMETER_NAMES = exportJson({
799
+ name: 'RESERVED_PARAMETER_NAMES',
800
+ message: "The names of the parameters that are reserved for special purposes",
801
+ value: [
802
+ 'content',
803
+ 'context',
804
+ 'knowledge',
805
+ 'examples',
806
+ 'modelName',
807
+ 'currentDate',
808
+ // <- TODO: list here all command names
809
+ // <- TODO: Add more like 'date', 'modelName',...
810
+ // <- TODO: Add [emoji] + instructions ACRY when adding new reserved parameter
811
+ ],
812
+ });
813
+ /**
814
+ * Note: [💞] Ignore a discrepancy between file name and entity name
815
+ */
816
+
817
+ // <- TODO: !!!!!!! Auto convert to type `import { ... } from 'type-fest';`
684
818
  /**
685
819
  * Tests if the value is [🚉] serializable as JSON
686
820
  *
@@ -702,7 +836,7 @@ function checkSerializableAsJson(name, value) {
702
836
  */
703
837
  function isSerializableAsJson(value) {
704
838
  try {
705
- checkSerializableAsJson('', value);
839
+ checkSerializableAsJson({ value: value });
706
840
  return true;
707
841
  }
708
842
  catch (error) {
@@ -1500,63 +1634,6 @@ function cacheLlmTools(llmTools, options) {
1500
1634
  * @@@ write how to combine multiple interceptors
1501
1635
  */
1502
1636
 
1503
- /**
1504
- * @@@
1505
- *
1506
- * @public exported from `@promptbook/utils`
1507
- */
1508
- function deepClone(objectValue) {
1509
- return JSON.parse(JSON.stringify(objectValue));
1510
- /*
1511
- TODO: [🧠] Is there a better implementation?
1512
- > const propertyNames = Object.getOwnPropertyNames(objectValue);
1513
- > for (const propertyName of propertyNames) {
1514
- > const value = (objectValue as really_any)[propertyName];
1515
- > if (value && typeof value === 'object') {
1516
- > deepClone(value);
1517
- > }
1518
- > }
1519
- > return Object.assign({}, objectValue);
1520
- */
1521
- }
1522
- /**
1523
- * TODO: [🧠] Is there a way how to meaningfully test this utility
1524
- */
1525
-
1526
- /**
1527
- * @@@
1528
- *
1529
- * Note: `$` is used to indicate that this function is not a pure function - it mutates given object
1530
- * Note: This function mutates the object and returns the original (but mutated-deep-freezed) object
1531
- *
1532
- * @returns The same object as the input, but deeply frozen
1533
- * @public exported from `@promptbook/utils`
1534
- */
1535
- function $deepFreeze(objectValue) {
1536
- var e_1, _a;
1537
- var propertyNames = Object.getOwnPropertyNames(objectValue);
1538
- try {
1539
- for (var propertyNames_1 = __values(propertyNames), propertyNames_1_1 = propertyNames_1.next(); !propertyNames_1_1.done; propertyNames_1_1 = propertyNames_1.next()) {
1540
- var propertyName = propertyNames_1_1.value;
1541
- var value = objectValue[propertyName];
1542
- if (value && typeof value === 'object') {
1543
- $deepFreeze(value);
1544
- }
1545
- }
1546
- }
1547
- catch (e_1_1) { e_1 = { error: e_1_1 }; }
1548
- finally {
1549
- try {
1550
- if (propertyNames_1_1 && !propertyNames_1_1.done && (_a = propertyNames_1.return)) _a.call(propertyNames_1);
1551
- }
1552
- finally { if (e_1) throw e_1.error; }
1553
- }
1554
- return Object.freeze(objectValue);
1555
- }
1556
- /**
1557
- * TODO: [🧠] Is there a way how to meaningfully test this utility
1558
- */
1559
-
1560
1637
  /**
1561
1638
  * Represents the usage with no resources consumed
1562
1639
  *
@@ -2621,6 +2698,7 @@ function capitalize(word) {
2621
2698
  /**
2622
2699
  * Converts promptbook in JSON format to string format
2623
2700
  *
2701
+ * @deprecated TODO: [🥍][🧠] Backup original files in `PipelineJson` same as in Promptbook.studio
2624
2702
  * @param pipelineJson Promptbook in JSON format (.book.json)
2625
2703
  * @returns Promptbook in string format (.book.md)
2626
2704
  * @public exported from `@promptbook/core`
@@ -2916,7 +2994,7 @@ function forEachAsync(array, options, callbackfunction) {
2916
2994
  });
2917
2995
  }
2918
2996
 
2919
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./books/prepare-knowledge-from-markdown.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> 
{knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./books/prepare-knowledge-keywords.book.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./books/prepare-knowledge-title.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book.md",formfactorName:"GENERIC",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON 
object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./books/prepare-persona.book.md"}];
2997
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sourceFile:"./books/prepare-knowledge-from-markdown.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> 
{knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sourceFile:"./books/prepare-knowledge-keywords.book.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sourceFile:"./books/prepare-knowledge-title.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book.md",formfactorName:"GENERIC",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON 
object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sourceFile:"./books/prepare-persona.book.md"}];
2920
2998
 
2921
2999
  /**
2922
3000
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
@@ -3161,6 +3239,21 @@ function validatePipelineCore(pipeline) {
3161
3239
  }
3162
3240
  };
3163
3241
  try {
3242
+ /*
3243
+ TODO: [🧠][🅾] Should be empty pipeline valid or not
3244
+ // Note: Check that pipeline has some tasks
3245
+ if (pipeline.tasks.length === 0) {
3246
+ throw new PipelineLogicError(
3247
+ spaceTrim(
3248
+ (block) => `
3249
+ Pipeline must have at least one task
3250
+
3251
+ ${block(pipelineIdentification)}
3252
+ `,
3253
+ ),
3254
+ );
3255
+ }
3256
+ */
3164
3257
  // Note: Check each parameter individually
3165
3258
  for (var _d = __values(pipeline.parameters), _e = _d.next(); !_e.done; _e = _d.next()) {
3166
3259
  var parameter = _e.value;
@@ -3321,6 +3414,9 @@ function validatePipelineCore(pipeline) {
3321
3414
  while (unresovedTasks.length > 0) {
3322
3415
  _loop_3();
3323
3416
  }
3417
+ // Note: Check that formfactor is corresponding to the pipeline interface
3418
+ // TODO: !!!!!! Implement this
3419
+ // pipeline.formfactorName
3324
3420
  }
3325
3421
  /**
3326
3422
  * TODO: !! [🧞‍♀️] Do not allow joker + foreach
@@ -3404,26 +3500,6 @@ function extractParameterNames(template) {
3404
3500
  return parameterNames;
3405
3501
  }
3406
3502
 
3407
- /**
3408
- * @@@
3409
- * @@@
3410
- *
3411
- * Note: This function mutates the object and returns the original (but mutated-deep-freezed) object
3412
- *
3413
- * @param name - Name of the object for debugging purposes
3414
- * @param objectValue - Object to be deeply frozen
3415
- * @returns The same object as the input, but deeply frozen
3416
- * @private this is in comparison to `deepFreeze` a more specific utility and maybe not very good practice to use without specific reason and considerations
3417
- */
3418
- function $asDeeplyFrozenSerializableJson(name, objectValue) {
3419
- checkSerializableAsJson(name, objectValue);
3420
- return $deepFreeze(objectValue);
3421
- }
3422
- /**
3423
- * TODO: [🧠][🛣] More elegant way to tracking than passing `name`
3424
- * TODO: [🧠] Is there a way how to meaningfully test this utility
3425
- */
3426
-
3427
3503
  /**
3428
3504
  * Unprepare just strips the preparation data of the pipeline
3429
3505
  *
@@ -3441,7 +3517,12 @@ function unpreparePipeline(pipeline) {
3441
3517
  delete taskUnprepared.preparedContent;
3442
3518
  return taskUnprepared;
3443
3519
  });
3444
- return $asDeeplyFrozenSerializableJson('Unprepared PipelineJson', __assign(__assign({}, pipeline), { tasks: tasks, knowledgeSources: knowledgeSources, knowledgePieces: [], personas: personas, preparations: [] }));
3520
+ return exportJson({
3521
+ name: 'pipelineJson',
3522
+ message: "Result of `unpreparePipeline`",
3523
+ order: ORDER_OF_PIPELINE_JSON,
3524
+ value: __assign(__assign({}, pipeline), { tasks: tasks, knowledgeSources: knowledgeSources, knowledgePieces: [], personas: personas, preparations: [] }),
3525
+ });
3445
3526
  }
3446
3527
  /**
3447
3528
  * TODO: [🧿] Maybe do same process with same granularity and subfunctions as `preparePipeline`
@@ -5509,16 +5590,21 @@ function executePipeline(options) {
5509
5590
  // Note: Wait a short time to prevent race conditions
5510
5591
  _g.sent();
5511
5592
  _g.label = 6;
5512
- case 6: return [2 /*return*/, $asDeeplyFrozenSerializableJson("Unuccessful PipelineExecutorResult (with missing parameter {".concat(parameter.name, "}) PipelineExecutorResult"), {
5513
- isSuccessful: false,
5514
- errors: __spreadArray([
5515
- new PipelineExecutionError("Parameter `{".concat(parameter.name, "}` is required as an input parameter"))
5516
- ], __read(errors), false).map(serializeError),
5517
- warnings: [],
5518
- executionReport: executionReport,
5519
- outputParameters: {},
5520
- usage: ZERO_USAGE,
5521
- preparedPipeline: preparedPipeline,
5593
+ case 6: return [2 /*return*/, exportJson({
5594
+ name: "executionReport",
5595
+ message: "Unuccessful PipelineExecutorResult (with missing parameter {".concat(parameter.name, "}) PipelineExecutorResult"),
5596
+ order: [],
5597
+ value: {
5598
+ isSuccessful: false,
5599
+ errors: __spreadArray([
5600
+ new PipelineExecutionError("Parameter `{".concat(parameter.name, "}` is required as an input parameter"))
5601
+ ], __read(errors), false).map(serializeError),
5602
+ warnings: [],
5603
+ executionReport: executionReport,
5604
+ outputParameters: {},
5605
+ usage: ZERO_USAGE,
5606
+ preparedPipeline: preparedPipeline,
5607
+ },
5522
5608
  })];
5523
5609
  case 7:
5524
5610
  _b = _a.next();
@@ -5557,16 +5643,21 @@ function executePipeline(options) {
5557
5643
  // Note: Wait a short time to prevent race conditions
5558
5644
  _h.sent();
5559
5645
  _h.label = 3;
5560
- case 3: return [2 /*return*/, { value: $asDeeplyFrozenSerializableJson(spaceTrim$1(function (block) { return "\n Unuccessful PipelineExecutorResult (with extra parameter {".concat(parameter.name, "}) PipelineExecutorResult\n\n ").concat(block(pipelineIdentification), "\n "); }), {
5561
- isSuccessful: false,
5562
- errors: __spreadArray([
5563
- new PipelineExecutionError(spaceTrim$1(function (block) { return "\n Parameter `{".concat(parameter.name, "}` is passed as input parameter but it is not input\n\n ").concat(block(pipelineIdentification), "\n "); }))
5564
- ], __read(errors), false).map(serializeError),
5565
- warnings: warnings.map(serializeError),
5566
- executionReport: executionReport,
5567
- outputParameters: {},
5568
- usage: ZERO_USAGE,
5569
- preparedPipeline: preparedPipeline,
5646
+ case 3: return [2 /*return*/, { value: exportJson({
5647
+ name: 'pipelineExecutorResult',
5648
+ message: spaceTrim$1(function (block) { return "\n Unuccessful PipelineExecutorResult (with extra parameter {".concat(parameter.name, "}) PipelineExecutorResult\n\n ").concat(block(pipelineIdentification), "\n "); }),
5649
+ order: [],
5650
+ value: {
5651
+ isSuccessful: false,
5652
+ errors: __spreadArray([
5653
+ new PipelineExecutionError(spaceTrim$1(function (block) { return "\n Parameter `{".concat(parameter.name, "}` is passed as input parameter but it is not input\n\n ").concat(block(pipelineIdentification), "\n "); }))
5654
+ ], __read(errors), false).map(serializeError),
5655
+ warnings: warnings.map(serializeError),
5656
+ executionReport: executionReport,
5657
+ outputParameters: {},
5658
+ usage: ZERO_USAGE,
5659
+ preparedPipeline: preparedPipeline,
5660
+ },
5570
5661
  }) }];
5571
5662
  case 4: return [2 /*return*/];
5572
5663
  }
@@ -5720,14 +5811,19 @@ function executePipeline(options) {
5720
5811
  // Note: Wait a short time to prevent race conditions
5721
5812
  _g.sent();
5722
5813
  _g.label = 27;
5723
- case 27: return [2 /*return*/, $asDeeplyFrozenSerializableJson('Unuccessful PipelineExecutorResult (with misc errors) PipelineExecutorResult', {
5724
- isSuccessful: false,
5725
- errors: __spreadArray([error_1], __read(errors), false).map(serializeError),
5726
- warnings: warnings.map(serializeError),
5727
- usage: usage_1,
5728
- executionReport: executionReport,
5729
- outputParameters: outputParameters_1,
5730
- preparedPipeline: preparedPipeline,
5814
+ case 27: return [2 /*return*/, exportJson({
5815
+ name: 'pipelineExecutorResult',
5816
+ message: "Unuccessful PipelineExecutorResult (with misc errors) PipelineExecutorResult",
5817
+ order: [],
5818
+ value: {
5819
+ isSuccessful: false,
5820
+ errors: __spreadArray([error_1], __read(errors), false).map(serializeError),
5821
+ warnings: warnings.map(serializeError),
5822
+ usage: usage_1,
5823
+ executionReport: executionReport,
5824
+ outputParameters: outputParameters_1,
5825
+ preparedPipeline: preparedPipeline,
5826
+ },
5731
5827
  })];
5732
5828
  case 28:
5733
5829
  usage = addUsage.apply(void 0, __spreadArray([], __read(executionReport.promptExecutions.map(function (_a) {
@@ -5748,14 +5844,19 @@ function executePipeline(options) {
5748
5844
  // Note: Wait a short time to prevent race conditions
5749
5845
  _g.sent();
5750
5846
  _g.label = 30;
5751
- case 30: return [2 /*return*/, $asDeeplyFrozenSerializableJson('Successful PipelineExecutorResult', {
5752
- isSuccessful: true,
5753
- errors: errors.map(serializeError),
5754
- warnings: warnings.map(serializeError),
5755
- usage: usage,
5756
- executionReport: executionReport,
5757
- outputParameters: outputParameters,
5758
- preparedPipeline: preparedPipeline,
5847
+ case 30: return [2 /*return*/, exportJson({
5848
+ name: 'pipelineExecutorResult',
5849
+ message: "Successful PipelineExecutorResult",
5850
+ order: [],
5851
+ value: {
5852
+ isSuccessful: true,
5853
+ errors: errors.map(serializeError),
5854
+ warnings: warnings.map(serializeError),
5855
+ usage: usage,
5856
+ executionReport: executionReport,
5857
+ outputParameters: outputParameters,
5858
+ preparedPipeline: preparedPipeline,
5859
+ },
5759
5860
  })];
5760
5861
  }
5761
5862
  });
@@ -6357,36 +6458,6 @@ TODO: [🧊] This is how it can look in future
6357
6458
  * [ ] One piece can have multiple sources
6358
6459
  */
6359
6460
 
6360
- /**
6361
- * @@@
6362
- *
6363
- * Note: It is usefull @@@
6364
- *
6365
- * @param pipeline
6366
- * @public exported from `@promptbook/utils`
6367
- */
6368
- function clonePipeline(pipeline) {
6369
- // Note: Not using spread operator (...) because @@@
6370
- var pipelineUrl = pipeline.pipelineUrl, sourceFile = pipeline.sourceFile, title = pipeline.title, bookVersion = pipeline.bookVersion, description = pipeline.description, formfactorName = pipeline.formfactorName, parameters = pipeline.parameters, tasks = pipeline.tasks, knowledgeSources = pipeline.knowledgeSources, knowledgePieces = pipeline.knowledgePieces, personas = pipeline.personas, preparations = pipeline.preparations;
6371
- return {
6372
- pipelineUrl: pipelineUrl,
6373
- sourceFile: sourceFile,
6374
- title: title,
6375
- bookVersion: bookVersion,
6376
- description: description,
6377
- formfactorName: formfactorName,
6378
- parameters: parameters,
6379
- tasks: tasks,
6380
- knowledgeSources: knowledgeSources,
6381
- knowledgePieces: knowledgePieces,
6382
- personas: personas,
6383
- preparations: preparations,
6384
- };
6385
- }
6386
- /**
6387
- * TODO: [🍙] Make some standard order of json properties
6388
- */
6389
-
6390
6461
  /**
6391
6462
  * @@@
6392
6463
  *
@@ -6427,6 +6498,7 @@ function prepareTasks(pipeline, tools, options) {
6427
6498
  });
6428
6499
  }
6429
6500
  /**
6501
+ * TODO: [😂] Adding knowledge should be converted to async high-level abstractions, similar thing with expectations to sync high-level abstractions
6430
6502
  * TODO: [🧠] Add context to each task (if missing)
6431
6503
  * TODO: [🧠] What is better name `prepareTask` or `prepareTaskAndParameters`
6432
6504
  * TODO: [♨][main] !!! Prepare index the examples and maybe tasks
@@ -6515,11 +6587,19 @@ function preparePipeline(pipeline, tools, options) {
6515
6587
  case 3:
6516
6588
  tasksPrepared = (_c.sent()).tasksPrepared;
6517
6589
  // ----- /Tasks preparation -----
6590
+ // TODO: [😂] Use here all `AsyncHighLevelAbstraction`
6518
6591
  // Note: Count total usage
6519
6592
  currentPreparation.usage = llmToolsWithUsage.getTotalUsage();
6520
- return [2 /*return*/, $asDeeplyFrozenSerializableJson('Prepared PipelineJson', __assign(__assign({}, clonePipeline(pipeline)), { tasks: __spreadArray([], __read(tasksPrepared), false),
6521
- // <- TODO: [🪓] Here should be no need for spreading new array, just ` tasks: tasksPrepared`
6522
- knowledgeSources: knowledgeSourcesPrepared, knowledgePieces: knowledgePiecesPrepared, personas: preparedPersonas, preparations: __spreadArray([], __read(preparations), false) }))];
6593
+ return [2 /*return*/, exportJson({
6594
+ name: 'pipelineJson',
6595
+ message: "Result of `preparePipeline`",
6596
+ order: ORDER_OF_PIPELINE_JSON,
6597
+ value: __assign(__assign({}, pipeline), {
6598
+ // <- TODO: Probably deeply clone the pipeline because `$exportJson` freezes the subobjects
6599
+ knowledgeSources: knowledgeSourcesPrepared, knowledgePieces: knowledgePiecesPrepared, tasks: __spreadArray([], __read(tasksPrepared), false),
6600
+ // <- TODO: [🪓] Here should be no need for spreading new array, just ` tasks: tasksPrepared`
6601
+ personas: preparedPersonas, preparations: __spreadArray([], __read(preparations), false) }),
6602
+ })];
6523
6603
  }
6524
6604
  });
6525
6605
  });
@@ -6783,7 +6863,8 @@ var sectionCommandParser = {
6783
6863
  expectResultingParameterName();
6784
6864
  var parameter = $pipelineJson.parameters.find(function (param) { return param.name === $taskJson.resultingParameterName; });
6785
6865
  if (parameter === undefined) {
6786
- throw new ParseError("Can not find parameter {".concat($taskJson.resultingParameterName, "} to assign example value on it"));
6866
+ // TODO: !!!!!! Change to logic error for higher level abstractions to work
6867
+ throw new ParseError("Parameter `{".concat($taskJson.resultingParameterName, "}` is not defined so can not define example value of it"));
6787
6868
  }
6788
6869
  parameter.exampleValues = parameter.exampleValues || [];
6789
6870
  parameter.exampleValues.push($taskJson.content);
@@ -7676,7 +7757,13 @@ var GeneratorFormfactorDefinition = {
7676
7757
  documentationUrl: "https://github.com/webgptorg/promptbook/discussions/184",
7677
7758
  pipelineInterface: {
7678
7759
  inputParameters: [
7679
- /* @@@ */
7760
+ /* @@@ */
7761
+ {
7762
+ name: 'nonce',
7763
+ description: 'Just to prevent GENERATOR to be set as implicit formfactor',
7764
+ isInput: true,
7765
+ isOutput: false,
7766
+ },
7680
7767
  ],
7681
7768
  outputParameters: [
7682
7769
  /* @@@ */
@@ -7722,7 +7809,13 @@ var MatcherFormfactorDefinition = {
7722
7809
  documentationUrl: "https://github.com/webgptorg/promptbook/discussions/177",
7723
7810
  pipelineInterface: {
7724
7811
  inputParameters: [
7725
- /* @@@ */
7812
+ /* @@@ */
7813
+ {
7814
+ name: 'nonce',
7815
+ description: 'Just to prevent EXPERIMENTAL_MATCHER to be set as implicit formfactor',
7816
+ isInput: true,
7817
+ isOutput: false,
7818
+ },
7726
7819
  ],
7727
7820
  outputParameters: [
7728
7821
  /* @@@ */
@@ -7871,6 +7964,9 @@ var formfactorCommandParser = {
7871
7964
  * Note: `$` is used to indicate that this function mutates given `pipelineJson`
7872
7965
  */
7873
7966
  $applyToPipelineJson: function (command, $pipelineJson) {
7967
+ if ($pipelineJson.formfactorName !== undefined && $pipelineJson.formfactorName !== command.formfactorName) {
7968
+ throw new ParseError(spaceTrim("\n Redefinition of `FORMFACTOR` in the pipeline head\n\n You have used:\n 1) FORMFACTOR `".concat($pipelineJson.formfactorName, "`\n 2) FORMFACTOR `").concat(command.formfactorName, "`\n ")));
7969
+ }
7874
7970
  $pipelineJson.formfactorName = command.formfactorName;
7875
7971
  },
7876
7972
  /**
@@ -8058,7 +8154,7 @@ var modelCommandParser = {
8058
8154
  // <- TODO: [🚎][💩] Some better way how to get warnings from pipeline parsing / logic
8059
8155
  }
8060
8156
  else {
8061
- throw new ParseError(spaceTrim("\n Redefinition of MODEL `".concat(command.key, "` in the pipeline head\n\n You have used:\n - MODEL ").concat(command.key, " ").concat($pipelineJson.defaultModelRequirements[command.key], "\n - MODEL ").concat(command.key, " ").concat(command.value, "\n ")));
8157
+ throw new ParseError(spaceTrim("\n Redefinition of `MODEL ".concat(command.key, "` in the pipeline head\n\n You have used:\n 1) `MODEL ").concat(command.key, " ").concat($pipelineJson.defaultModelRequirements[command.key], "`\n 2) `MODEL ").concat(command.key, " ").concat(command.value, "`\n ")));
8062
8158
  }
8063
8159
  }
8064
8160
  $pipelineJson.defaultModelRequirements[command.key] = command.value;
@@ -8877,9 +8973,298 @@ function parseCommandVariant(input) {
8877
8973
  }
8878
8974
 
8879
8975
  /**
8880
- * Supported script languages
8976
+ * @@@
8881
8977
  *
8882
- * @private internal base for `ScriptLanguage`
8978
+ * @deprecated https://github.com/webgptorg/promptbook/pull/186
8979
+ * @see https://github.com/webgptorg/promptbook/discussions/171
8980
+ *
8981
+ * @public exported from `@promptbook/core`
8982
+ */
8983
+ function getPipelineInterface(pipeline) {
8984
+ var e_1, _a, e_2, _b;
8985
+ var pipelineInterface = {
8986
+ inputParameters: [],
8987
+ outputParameters: [],
8988
+ };
8989
+ try {
8990
+ for (var _c = __values(pipeline.parameters), _d = _c.next(); !_d.done; _d = _c.next()) {
8991
+ var parameter = _d.value;
8992
+ var isInput = parameter.isInput, isOutput = parameter.isOutput;
8993
+ if (isInput) {
8994
+ pipelineInterface.inputParameters.push(deepClone(parameter));
8995
+ }
8996
+ if (isOutput) {
8997
+ pipelineInterface.outputParameters.push(deepClone(parameter));
8998
+ }
8999
+ }
9000
+ }
9001
+ catch (e_1_1) { e_1 = { error: e_1_1 }; }
9002
+ finally {
9003
+ try {
9004
+ if (_d && !_d.done && (_a = _c.return)) _a.call(_c);
9005
+ }
9006
+ finally { if (e_1) throw e_1.error; }
9007
+ }
9008
+ try {
9009
+ for (var _e = __values(['inputParameters', 'outputParameters']), _f = _e.next(); !_f.done; _f = _e.next()) {
9010
+ var key = _f.value;
9011
+ pipelineInterface[key].sort(function (_a, _b) {
9012
+ var name1 = _a.name;
9013
+ var name2 = _b.name;
9014
+ return name1.localeCompare(name2);
9015
+ });
9016
+ }
9017
+ }
9018
+ catch (e_2_1) { e_2 = { error: e_2_1 }; }
9019
+ finally {
9020
+ try {
9021
+ if (_f && !_f.done && (_b = _e.return)) _b.call(_e);
9022
+ }
9023
+ finally { if (e_2) throw e_2.error; }
9024
+ }
9025
+ return exportJson({
9026
+ name: "pipelineInterface",
9027
+ message: "Result of `getPipelineInterface`",
9028
+ order: ['inputParameters', 'outputParameters'],
9029
+ value: pipelineInterface,
9030
+ });
9031
+ }
9032
+
9033
+ /**
9034
+ * @@@
9035
+ *
9036
+ * @deprecated https://github.com/webgptorg/promptbook/pull/186
9037
+ * @see https://github.com/webgptorg/promptbook/discussions/171
9038
+ *
9039
+ * @public exported from `@promptbook/core`
9040
+ */
9041
+ function isPipelineInterfacesEqual(pipelineInterface1, pipelineInterface2) {
9042
+ var e_1, _a, e_2, _b;
9043
+ try {
9044
+ for (var _c = __values(['inputParameters', 'outputParameters']), _d = _c.next(); !_d.done; _d = _c.next()) {
9045
+ var whichParameters = _d.value;
9046
+ var parameters1 = pipelineInterface1[whichParameters]; // <- Note: `isPipelineInterfacesEqual` is just temporary solution, no need to fix this
9047
+ var parameters2 = pipelineInterface2[whichParameters];
9048
+ if (parameters1.length !== parameters2.length) {
9049
+ return false;
9050
+ }
9051
+ var _loop_1 = function (parameter) {
9052
+ var matchingParameter = parameters2.find(function (_a) {
9053
+ var name = _a.name;
9054
+ return name === parameter.name;
9055
+ });
9056
+ if (!matchingParameter) {
9057
+ return { value: false };
9058
+ }
9059
+ // Note: Do not compare description, it is not relevant for compatibility
9060
+ if (matchingParameter.isInput !== parameter.isInput) {
9061
+ return { value: false };
9062
+ }
9063
+ if (matchingParameter.isOutput !== parameter.isOutput) {
9064
+ return { value: false };
9065
+ }
9066
+ };
9067
+ try {
9068
+ for (var parameters1_1 = (e_2 = void 0, __values(parameters1)), parameters1_1_1 = parameters1_1.next(); !parameters1_1_1.done; parameters1_1_1 = parameters1_1.next()) {
9069
+ var parameter = parameters1_1_1.value;
9070
+ var state_1 = _loop_1(parameter);
9071
+ if (typeof state_1 === "object")
9072
+ return state_1.value;
9073
+ }
9074
+ }
9075
+ catch (e_2_1) { e_2 = { error: e_2_1 }; }
9076
+ finally {
9077
+ try {
9078
+ if (parameters1_1_1 && !parameters1_1_1.done && (_b = parameters1_1.return)) _b.call(parameters1_1);
9079
+ }
9080
+ finally { if (e_2) throw e_2.error; }
9081
+ }
9082
+ }
9083
+ }
9084
+ catch (e_1_1) { e_1 = { error: e_1_1 }; }
9085
+ finally {
9086
+ try {
9087
+ if (_d && !_d.done && (_a = _c.return)) _a.call(_c);
9088
+ }
9089
+ finally { if (e_1) throw e_1.error; }
9090
+ }
9091
+ return true;
9092
+ }
9093
+
9094
+ /**
9095
+ * @@@
9096
+ *
9097
+ * @deprecated https://github.com/webgptorg/promptbook/pull/186
9098
+ * @see https://github.com/webgptorg/promptbook/discussions/171
9099
+ *
9100
+ * @public exported from `@promptbook/core`
9101
+ */
9102
+ function isPipelineImplementingInterface(options) {
9103
+ var pipeline = options.pipeline, pipelineInterface = options.pipelineInterface;
9104
+ return isPipelineInterfacesEqual(getPipelineInterface(pipeline), pipelineInterface);
9105
+ }
9106
+
9107
+ /**
9108
+ * Set formfactor based on the pipeline interface e
9109
+ *
9110
+ * @private
9111
+ */
9112
+ var ImplicitFormfactorHla = {
9113
+ type: 'SYNC',
9114
+ $applyToPipelineJson: function ($pipelineJson) {
9115
+ var e_1, _a;
9116
+ if ($pipelineJson.formfactorName !== undefined) {
9117
+ // Note: When formfactor is already set, do nothing
9118
+ return;
9119
+ }
9120
+ try {
9121
+ for (var _b = __values(FORMFACTOR_DEFINITIONS.filter(function (_a) {
9122
+ var name = _a.name;
9123
+ return name !== 'GENERIC';
9124
+ })), _c = _b.next(); !_c.done; _c = _b.next()) {
9125
+ var formfactorDefinition = _c.value;
9126
+ // <- Note: [♓️][💩] This is the order of the formfactors, make some explicit priority
9127
+ var name_1 = formfactorDefinition.name, pipelineInterface = formfactorDefinition.pipelineInterface;
9128
+ var isCompatible = isPipelineImplementingInterface({
9129
+ pipeline: __assign({ formfactorName: name_1 }, $pipelineJson),
9130
+ pipelineInterface: pipelineInterface,
9131
+ });
9132
+ /*/
9133
+ console.log({
9134
+ subject: `${$pipelineJson.title} implements ${name}`,
9135
+ pipelineTitle: $pipelineJson.title,
9136
+ formfactorName: name,
9137
+ isCompatible,
9138
+ formfactorInterface: pipelineInterface,
9139
+ pipelineInterface: getPipelineInterface($pipelineJson as PipelineJson),
9140
+ });
9141
+ /**/
9142
+ if (isCompatible) {
9143
+ $pipelineJson.formfactorName = name_1;
9144
+ return;
9145
+ }
9146
+ }
9147
+ }
9148
+ catch (e_1_1) { e_1 = { error: e_1_1 }; }
9149
+ finally {
9150
+ try {
9151
+ if (_c && !_c.done && (_a = _b.return)) _a.call(_b);
9152
+ }
9153
+ finally { if (e_1) throw e_1.error; }
9154
+ }
9155
+ },
9156
+ };
9157
+
9158
+ /**
9159
+ * Allow to define chatbot with no need to write full interface
9160
+ *
9161
+ * @private
9162
+ */
9163
+ var QuickChatbotHla = {
9164
+ type: 'SYNC',
9165
+ $applyToPipelineJson: function ($pipelineJson) {
9166
+ if ($pipelineJson.tasks.length !== 0) {
9167
+ // Note: When there are already tasks, do nothing
9168
+ return;
9169
+ }
9170
+ if ($pipelineJson.parameters.length !== 0) {
9171
+ // Note: When there are already parameters, do nothing
9172
+ return;
9173
+ }
9174
+ if ($pipelineJson.personas.length === 0) {
9175
+ // Note: When no personas defined, do nothing
9176
+ return;
9177
+ }
9178
+ var personaName = $pipelineJson.personas[0].name;
9179
+ $pipelineJson.formfactorName = 'CHATBOT';
9180
+ $pipelineJson.parameters.push({
9181
+ name: 'previousTitle',
9182
+ description: 'Previous title of the conversation',
9183
+ isInput: true,
9184
+ isOutput: false,
9185
+ }, {
9186
+ name: 'previousConversationSummary',
9187
+ description: 'Previous conversation summary',
9188
+ isInput: true,
9189
+ isOutput: false,
9190
+ }, {
9191
+ name: 'userMessage',
9192
+ description: 'User message',
9193
+ isInput: true,
9194
+ isOutput: false,
9195
+ }, {
9196
+ name: 'title',
9197
+ description: 'Title of the conversation',
9198
+ isInput: false,
9199
+ isOutput: true,
9200
+ }, {
9201
+ name: 'conversationSummary',
9202
+ description: 'Summary of the conversation',
9203
+ isInput: false,
9204
+ isOutput: true,
9205
+ }, {
9206
+ name: 'chatbotResponse',
9207
+ description: 'Chatbot response',
9208
+ isInput: false,
9209
+ isOutput: true,
9210
+ exampleValues: ['Hello, I am a Pavol`s virtual avatar. How can I help you?'],
9211
+ });
9212
+ // TODO: !!!!!! spaceTrim
9213
+ $pipelineJson.tasks.push({
9214
+ taskType: 'PROMPT_TASK',
9215
+ name: 'create-an-answer',
9216
+ title: 'Create an answer',
9217
+ content: 'Write a response to the user message:\n\n**Question from user**\n\n> {userMessage}\n\n**Previous conversation**\n\n> {previousConversationSummary}',
9218
+ resultingParameterName: 'chatbotResponse',
9219
+ personaName: personaName,
9220
+ dependentParameterNames: ['userMessage', 'previousConversationSummary' /* !!!!!!, 'knowledge'*/],
9221
+ // !!!!!! preparedContent: '{content}\n\n## Knowledge\n\n{knowledge}',
9222
+ }, {
9223
+ taskType: 'PROMPT_TASK',
9224
+ name: 'summarize-the-conversation',
9225
+ title: 'Summarize the conversation',
9226
+ content: 'Summarize the conversation in a few words:\n\n## Rules\n\n- Summarise the text of the conversation in a few words\n- Convert the text to its basic idea\n- Imagine you are writing the headline or subject line of an email\n- Respond with a few words of summary only\n\n## Conversation\n\n**User:**\n\n> {userMessage}\n\n**You:**\n\n> {chatbotResponse}',
9227
+ resultingParameterName: 'conversationSummary',
9228
+ personaName: personaName,
9229
+ expectations: {
9230
+ words: {
9231
+ min: 1,
9232
+ max: 10,
9233
+ },
9234
+ },
9235
+ dependentParameterNames: ['userMessage', 'chatbotResponse' /* !!!!!!, 'knowledge'*/],
9236
+ // !!!!!! preparedContent: '{content}\n\n## Knowledge\n\n{knowledge}',
9237
+ }, {
9238
+ taskType: 'SIMPLE_TASK',
9239
+ name: 'title',
9240
+ title: 'Title',
9241
+ content: '{conversationSummary}',
9242
+ resultingParameterName: 'title',
9243
+ dependentParameterNames: ['conversationSummary' /* !!!!!!, 'knowledge'*/],
9244
+ // !!!!!! preparedContent: '{content}\n\n## Knowledge\n\n{knowledge}',
9245
+ });
9246
+ },
9247
+ };
9248
+
9249
+ /**
9250
+ * All high-level abstractions
9251
+ *
9252
+ * @private internal index of `pipelineStringToJsonSync` (= used for sync) and `preparePipeline` (= used for async)
9253
+ */
9254
+ var HIGH_LEVEL_ABSTRACTIONS = [
9255
+ ImplicitFormfactorHla,
9256
+ QuickChatbotHla,
9257
+ // <- Note: [♓️][💩] This is the order of the application of high-level abstractions application on pipeline JSON
9258
+ ];
9259
+ /**
9260
+ * TODO: Test that all sync high-level abstractions are before async high-level abstractions
9261
+ * Note: [💞] Ignore a discrepancy between file name and entity name
9262
+ */
9263
+
9264
+ /**
9265
+ * Supported script languages
9266
+ *
9267
+ * @private internal base for `ScriptLanguage`
8883
9268
  */
8884
9269
  var SUPPORTED_SCRIPT_LANGUAGES = ['javascript', 'typescript', 'python'];
8885
9270
  // <- TODO: [🏥] DRY
@@ -9127,20 +9512,15 @@ function removeContentComments(content) {
9127
9512
  * @public exported from `@promptbook/core`
9128
9513
  */
9129
9514
  function pipelineStringToJsonSync(pipelineString) {
9130
- var e_1, _a, e_2, _b, e_3, _c, e_4, _d, e_5, _e;
9515
+ var e_1, _a, e_2, _b, e_3, _c, e_4, _d, e_5, _e, e_6, _f;
9131
9516
  var $pipelineJson = {
9132
- title: undefined /* <- Note: [🍙] Putting here placeholder to keep `title` on top at final JSON */,
9133
- pipelineUrl: undefined /* <- Note: Putting here placeholder to keep `pipelineUrl` on top at final JSON */,
9134
- bookVersion: undefined /* <- Note: By default no explicit version */,
9135
- description: undefined /* <- Note: [🍙] Putting here placeholder to keep `description` on top at final JSON */,
9136
- formfactorName: 'GENERIC',
9517
+ title: DEFAULT_TITLE,
9137
9518
  parameters: [],
9138
9519
  tasks: [],
9139
9520
  knowledgeSources: [],
9140
9521
  knowledgePieces: [],
9141
9522
  personas: [],
9142
9523
  preparations: [],
9143
- // <- TODO: [🍙] Some standard order of properties
9144
9524
  };
9145
9525
  function getPipelineIdentification() {
9146
9526
  // Note: This is a 😐 implementation of [🚞]
@@ -9156,7 +9536,7 @@ function pipelineStringToJsonSync(pipelineString) {
9156
9536
  // =============================================================
9157
9537
  // Note: 1️⃣ Parsing of the markdown into object
9158
9538
  if (pipelineString.startsWith('#!')) {
9159
- var _f = __read(pipelineString.split('\n')), shebangLine_1 = _f[0], restLines = _f.slice(1);
9539
+ var _g = __read(pipelineString.split('\n')), shebangLine_1 = _g[0], restLines = _g.slice(1);
9160
9540
  if (!(shebangLine_1 || '').includes('ptbk')) {
9161
9541
  throw new ParseError(spaceTrim$1(function (block) { return "\n It seems that you try to parse a book file which has non-standard shebang line for book files:\n Shebang line must contain 'ptbk'\n\n You have:\n ".concat(block(shebangLine_1 || '(empty line)'), "\n\n It should look like this:\n #!/usr/bin/env ptbk\n\n ").concat(block(getPipelineIdentification()), "\n "); }));
9162
9542
  }
@@ -9166,7 +9546,7 @@ function pipelineStringToJsonSync(pipelineString) {
9166
9546
  pipelineString = flattenMarkdown(pipelineString) /* <- Note: [🥞] */;
9167
9547
  pipelineString = pipelineString.replaceAll(/`\{(?<parameterName>[a-z0-9_]+)\}`/gi, '{$<parameterName>}');
9168
9548
  pipelineString = pipelineString.replaceAll(/`->\s+\{(?<parameterName>[a-z0-9_]+)\}`/gi, '-> {$<parameterName>}');
9169
- var _g = __read(splitMarkdownIntoSections(pipelineString).map(parseMarkdownSection)), pipelineHead = _g[0], pipelineSections = _g.slice(1); /* <- Note: [🥞] */
9549
+ var _h = __read(splitMarkdownIntoSections(pipelineString).map(parseMarkdownSection)), pipelineHead = _h[0], pipelineSections = _h.slice(1); /* <- Note: [🥞] */
9170
9550
  if (pipelineHead === undefined) {
9171
9551
  throw new UnexpectedError(spaceTrim$1(function (block) { return "\n Pipeline head is not defined\n\n ".concat(block(getPipelineIdentification()), "\n\n This should never happen, because the pipeline already flattened\n "); }));
9172
9552
  }
@@ -9231,7 +9611,7 @@ function pipelineStringToJsonSync(pipelineString) {
9231
9611
  }
9232
9612
  try {
9233
9613
  commandParser.$applyToPipelineJson(command, $pipelineJson);
9234
- // <- Note: [🦦] Its strange that this assertion must be here, [🦦][4] should do this assertion implicitelly
9614
+ // <- Note: [🦦] Its strange that this assertion must be here, [🦦][4] should do this assertion implicitly
9235
9615
  }
9236
9616
  catch (error) {
9237
9617
  if (!(error instanceof ParseError)) {
@@ -9288,10 +9668,10 @@ function pipelineStringToJsonSync(pipelineString) {
9288
9668
  return nameWithSuffix;
9289
9669
  };
9290
9670
  var _loop_2 = function (section) {
9291
- var e_6, _m, e_7, _o;
9671
+ var e_7, _q, e_8, _r;
9292
9672
  // TODO: Parse section's description (the content out of the codeblock and lists)
9293
9673
  var listItems_2 = extractAllListItemsFromMarkdown(section.content);
9294
- var _p = extractOneBlockFromMarkdown(section.content), language = _p.language, content = _p.content;
9674
+ var _s = extractOneBlockFromMarkdown(section.content), language = _s.language, content = _s.content;
9295
9675
  // TODO: [🎾][1] DRY description
9296
9676
  var description_1 = section.content;
9297
9677
  // Note: Remove codeblocks - TODO: [🎾]
@@ -9339,7 +9719,7 @@ function pipelineStringToJsonSync(pipelineString) {
9339
9719
  }
9340
9720
  try {
9341
9721
  commandParser.$applyToTaskJson(
9342
- // <- Note: [🦦] Its strange that this assertion must be here, [🦦][4] should do this assertion implicitelly
9722
+ // <- Note: [🦦] Its strange that this assertion must be here, [🦦][4] should do this assertion implicitly
9343
9723
  command, $taskJson, $pipelineJson);
9344
9724
  }
9345
9725
  catch (error) {
@@ -9355,17 +9735,17 @@ function pipelineStringToJsonSync(pipelineString) {
9355
9735
  };
9356
9736
  try {
9357
9737
  // TODO [♓️] List commands and before apply order them to achieve order-agnostic commands
9358
- for (var commands_1 = (e_6 = void 0, __values(commands)), commands_1_1 = commands_1.next(); !commands_1_1.done; commands_1_1 = commands_1.next()) {
9359
- var _q = commands_1_1.value, listItem = _q.listItem, command = _q.command;
9738
+ for (var commands_1 = (e_7 = void 0, __values(commands)), commands_1_1 = commands_1.next(); !commands_1_1.done; commands_1_1 = commands_1.next()) {
9739
+ var _t = commands_1_1.value, listItem = _t.listItem, command = _t.command;
9360
9740
  _loop_4(listItem, command);
9361
9741
  }
9362
9742
  }
9363
- catch (e_6_1) { e_6 = { error: e_6_1 }; }
9743
+ catch (e_7_1) { e_7 = { error: e_7_1 }; }
9364
9744
  finally {
9365
9745
  try {
9366
- if (commands_1_1 && !commands_1_1.done && (_m = commands_1.return)) _m.call(commands_1);
9746
+ if (commands_1_1 && !commands_1_1.done && (_q = commands_1.return)) _q.call(commands_1);
9367
9747
  }
9368
- finally { if (e_6) throw e_6.error; }
9748
+ finally { if (e_7) throw e_7.error; }
9369
9749
  }
9370
9750
  // TODO: [🍧] Should be done in SECTION command
9371
9751
  if ($taskJson.taskType === 'SCRIPT_TASK') {
@@ -9379,8 +9759,8 @@ function pipelineStringToJsonSync(pipelineString) {
9379
9759
  }
9380
9760
  $taskJson.dependentParameterNames = Array.from(extractParameterNamesFromTask($taskJson));
9381
9761
  try {
9382
- for (var _r = (e_7 = void 0, __values($taskJson.dependentParameterNames)), _s = _r.next(); !_s.done; _s = _r.next()) {
9383
- var parameterName = _s.value;
9762
+ for (var _u = (e_8 = void 0, __values($taskJson.dependentParameterNames)), _v = _u.next(); !_v.done; _v = _u.next()) {
9763
+ var parameterName = _v.value;
9384
9764
  // TODO: [🧠] This definition should be made first in the task
9385
9765
  defineParam({
9386
9766
  parameterName: parameterName,
@@ -9391,12 +9771,12 @@ function pipelineStringToJsonSync(pipelineString) {
9391
9771
  });
9392
9772
  }
9393
9773
  }
9394
- catch (e_7_1) { e_7 = { error: e_7_1 }; }
9774
+ catch (e_8_1) { e_8 = { error: e_8_1 }; }
9395
9775
  finally {
9396
9776
  try {
9397
- if (_s && !_s.done && (_o = _r.return)) _o.call(_r);
9777
+ if (_v && !_v.done && (_r = _u.return)) _r.call(_u);
9398
9778
  }
9399
- finally { if (e_7) throw e_7.error; }
9779
+ finally { if (e_8) throw e_8.error; }
9400
9780
  }
9401
9781
  /*
9402
9782
  // TODO: [🍧] This should be checked in `MODEL` command + better error message
@@ -9445,18 +9825,22 @@ function pipelineStringToJsonSync(pipelineString) {
9445
9825
  var isThisParameterResulting = $pipelineJson.tasks.some(function (task) { return task.resultingParameterName === parameter.name; });
9446
9826
  if (!isThisParameterResulting) {
9447
9827
  parameter.isInput = true;
9828
+ // <- TODO: [💔] Why this is making typescript error in vscode but not in cli
9829
+ // > Type 'true' is not assignable to type 'false'.ts(2322)
9830
+ // > (property) isInput: false
9831
+ // > The parameter is input of the pipeline The parameter is NOT input of the pipeline
9448
9832
  }
9449
9833
  };
9450
9834
  try {
9451
- for (var _h = __values($pipelineJson.parameters), _j = _h.next(); !_j.done; _j = _h.next()) {
9452
- var parameter = _j.value;
9835
+ for (var _j = __values($pipelineJson.parameters), _k = _j.next(); !_k.done; _k = _j.next()) {
9836
+ var parameter = _k.value;
9453
9837
  _loop_3(parameter);
9454
9838
  }
9455
9839
  }
9456
9840
  catch (e_4_1) { e_4 = { error: e_4_1 }; }
9457
9841
  finally {
9458
9842
  try {
9459
- if (_j && !_j.done && (_d = _h.return)) _d.call(_h);
9843
+ if (_k && !_k.done && (_d = _j.return)) _d.call(_j);
9460
9844
  }
9461
9845
  finally { if (e_4) throw e_4.error; }
9462
9846
  }
@@ -9465,17 +9849,18 @@ function pipelineStringToJsonSync(pipelineString) {
9465
9849
  // Note: 7️⃣ Mark all non-INPUT parameters as OUTPUT if any OUTPUT is not set
9466
9850
  if ($pipelineJson.parameters.every(function (parameter) { return !parameter.isOutput; })) {
9467
9851
  try {
9468
- for (var _k = __values($pipelineJson.parameters), _l = _k.next(); !_l.done; _l = _k.next()) {
9469
- var parameter = _l.value;
9852
+ for (var _l = __values($pipelineJson.parameters), _m = _l.next(); !_m.done; _m = _l.next()) {
9853
+ var parameter = _m.value;
9470
9854
  if (!parameter.isInput) {
9471
9855
  parameter.isOutput = true;
9856
+ // <- TODO: [💔]
9472
9857
  }
9473
9858
  }
9474
9859
  }
9475
9860
  catch (e_5_1) { e_5 = { error: e_5_1 }; }
9476
9861
  finally {
9477
9862
  try {
9478
- if (_l && !_l.done && (_e = _k.return)) _e.call(_k);
9863
+ if (_m && !_m.done && (_e = _l.return)) _e.call(_l);
9479
9864
  }
9480
9865
  finally { if (e_5) throw e_5.error; }
9481
9866
  }
@@ -9483,7 +9868,7 @@ function pipelineStringToJsonSync(pipelineString) {
9483
9868
  // =============================================================
9484
9869
  // Note: 8️⃣ Cleanup of undefined values
9485
9870
  $pipelineJson.tasks.forEach(function (tasks) {
9486
- var e_8, _a;
9871
+ var e_9, _a;
9487
9872
  try {
9488
9873
  for (var _b = __values(Object.entries(tasks)), _c = _b.next(); !_c.done; _c = _b.next()) {
9489
9874
  var _d = __read(_c.value, 2), key = _d[0], value = _d[1];
@@ -9492,16 +9877,16 @@ function pipelineStringToJsonSync(pipelineString) {
9492
9877
  }
9493
9878
  }
9494
9879
  }
9495
- catch (e_8_1) { e_8 = { error: e_8_1 }; }
9880
+ catch (e_9_1) { e_9 = { error: e_9_1 }; }
9496
9881
  finally {
9497
9882
  try {
9498
9883
  if (_c && !_c.done && (_a = _b.return)) _a.call(_b);
9499
9884
  }
9500
- finally { if (e_8) throw e_8.error; }
9885
+ finally { if (e_9) throw e_9.error; }
9501
9886
  }
9502
9887
  });
9503
9888
  $pipelineJson.parameters.forEach(function (parameter) {
9504
- var e_9, _a;
9889
+ var e_10, _a;
9505
9890
  try {
9506
9891
  for (var _b = __values(Object.entries(parameter)), _c = _b.next(); !_c.done; _c = _b.next()) {
9507
9892
  var _d = __read(_c.value, 2), key = _d[0], value = _d[1];
@@ -9510,19 +9895,49 @@ function pipelineStringToJsonSync(pipelineString) {
9510
9895
  }
9511
9896
  }
9512
9897
  }
9513
- catch (e_9_1) { e_9 = { error: e_9_1 }; }
9898
+ catch (e_10_1) { e_10 = { error: e_10_1 }; }
9514
9899
  finally {
9515
9900
  try {
9516
9901
  if (_c && !_c.done && (_a = _b.return)) _a.call(_b);
9517
9902
  }
9518
- finally { if (e_9) throw e_9.error; }
9903
+ finally { if (e_10) throw e_10.error; }
9519
9904
  }
9520
9905
  });
9906
+ try {
9907
+ // =============================================================
9908
+ // Note: 9️⃣ Apply sync high-level abstractions
9909
+ for (var _o = __values(HIGH_LEVEL_ABSTRACTIONS.filter(function (_a) {
9910
+ var type = _a.type;
9911
+ return type === 'SYNC';
9912
+ })), _p = _o.next(); !_p.done; _p = _o.next()) {
9913
+ var highLevelAbstraction = _p.value;
9914
+ highLevelAbstraction.$applyToPipelineJson($pipelineJson);
9915
+ }
9916
+ }
9917
+ catch (e_6_1) { e_6 = { error: e_6_1 }; }
9918
+ finally {
9919
+ try {
9920
+ if (_p && !_p.done && (_f = _o.return)) _f.call(_o);
9921
+ }
9922
+ finally { if (e_6) throw e_6.error; }
9923
+ }
9924
+ // =============================================================
9925
+ // Note: 🔟 Default formfactor
9926
+ // Note: [🔆] If formfactor is still not set, set it to 'GENERIC'
9927
+ if ($pipelineJson.formfactorName === undefined) {
9928
+ $pipelineJson.formfactorName = 'GENERIC';
9929
+ }
9521
9930
  // =============================================================
9522
9931
  // TODO: [🍙] Maybe do reorder of `$pipelineJson` here
9523
- return $asDeeplyFrozenSerializableJson('pipelineJson', $pipelineJson);
9932
+ return exportJson({
9933
+ name: 'pipelineJson',
9934
+ message: "Result of `pipelineStringToJsonSync`",
9935
+ order: ORDER_OF_PIPELINE_JSON,
9936
+ value: __assign({ formfactorName: 'GENERIC' }, $pipelineJson),
9937
+ });
9524
9938
  }
9525
9939
  /**
9940
+ * TODO: [🧠] Maybe more things here can be refactored as high-level abstractions
9526
9941
  * TODO: [main] !!!! Warn if used only sync version
9527
9942
  * TODO: [🚞] Report here line/column of error
9528
9943
  * TODO: Use spaceTrim more effectively
@@ -9566,7 +9981,7 @@ function pipelineStringToJson(pipelineString, tools, options) {
9566
9981
  pipelineJson = _a.sent();
9567
9982
  _a.label = 2;
9568
9983
  case 2:
9569
- // Note: No need to use `$asDeeplyFrozenSerializableJson` because `pipelineStringToJsonSync` and `preparePipeline` already do that
9984
+ // Note: No need to use `$exportJson` because `pipelineStringToJsonSync` and `preparePipeline` already do that
9570
9985
  return [2 /*return*/, pipelineJson];
9571
9986
  }
9572
9987
  });
@@ -10712,7 +11127,7 @@ function createCollectionFromDirectory(path, tools, options) {
10712
11127
  return [3 /*break*/, 7];
10713
11128
  case 6:
10714
11129
  if (isVerbose) {
10715
- console.info(colors.gray("Skipped file ".concat(fileName.split('\\').join('/'), " \u2013\u2060\u2060\u2060\u2060\u2060\u2060\u2060\u2060\u2060\u2060\u2060\u2060\u2060\u2060\u2060\u2060 Not a pipeline")));
11130
+ console.info(colors.gray("Skipped file ".concat(fileName.split('\\').join('/'), " \u2013\u2060\u2060\u2060\u2060\u2060\u2060\u2060\u2060\u2060\u2060\u2060\u2060\u2060\u2060\u2060\u2060 Not a book")));
10716
11131
  }
10717
11132
  _e.label = 7;
10718
11133
  case 7:
@@ -10759,7 +11174,7 @@ function createCollectionFromDirectory(path, tools, options) {
10759
11174
  if (!(error_1 instanceof Error)) {
10760
11175
  throw error_1;
10761
11176
  }
10762
- wrappedErrorMessage = spaceTrim(function (block) { return "\n ".concat(error_1.name, " in pipeline ").concat(fileName.split('\\').join('/'), "\u2060:\n\n ").concat(block(error_1.message), "\n\n "); });
11177
+ wrappedErrorMessage = spaceTrim(function (block) { return "\n ".concat(error_1.name, " in pipeline ").concat(fileName.split('\\').join('/'), "\u2060:\n\n Original error message:\n ").concat(block(error_1.message), "\n\n Original stack trace:\n ").concat(block(error_1.stack || ''), "\n\n ---\n\n "); }) + '\n';
10763
11178
  if (isCrashedOnError) {
10764
11179
  throw new CollectionError(wrappedErrorMessage);
10765
11180
  }
@@ -11639,6 +12054,122 @@ function executionReportJsonToString(executionReportJson, options) {
11639
12054
  * TODO: [🧠] Should be in generated file GENERATOR_WARNING
11640
12055
  */
11641
12056
 
12057
+ /**
12058
+ * Run the interactive chatbot in CLI
12059
+ *
12060
+ * @returns Never-ending promise or process exit
12061
+ * @private internal function of `promptbookCli` and `initializeRunCommand`
12062
+ */
12063
+ function runInteractiveChatbot(options) {
12064
+ var _a;
12065
+ return __awaiter(this, void 0, void 0, function () {
12066
+ var pipeline, pipelineExecutor, isVerbose, ongoingParameters, initialMessage, _loop_1, state_1;
12067
+ return __generator(this, function (_b) {
12068
+ switch (_b.label) {
12069
+ case 0:
12070
+ pipeline = options.pipeline, pipelineExecutor = options.pipelineExecutor, isVerbose = options.isVerbose;
12071
+ ongoingParameters = {
12072
+ /**
12073
+ * Title of the conversation
12074
+ */
12075
+ title: '',
12076
+ /**
12077
+ * Summary of the conversation
12078
+ */
12079
+ conversationSummary: '',
12080
+ /**
12081
+ * Chatbot response
12082
+ */
12083
+ chatbotResponse: '',
12084
+ };
12085
+ if (isVerbose) {
12086
+ console.info(colors.gray('--- Running interactive chatbot ---'));
12087
+ }
12088
+ initialMessage = (((_a = pipeline.parameters.find(function (_a) {
12089
+ var name = _a.name;
12090
+ return name === 'chatbotResponse';
12091
+ })) === null || _a === void 0 ? void 0 : _a.exampleValues) || [])[0];
12092
+ if (initialMessage) {
12093
+ console.info("\n");
12094
+ console.info(spaceTrim(function (block) { return "\n\n ".concat(colors.bold(colors.green('Chatbot:')), "\n ").concat(block(colors.green(initialMessage)), "\n\n "); }));
12095
+ }
12096
+ _loop_1 = function () {
12097
+ var title_1, conversationSummary_1, response, userMessage_1, inputParameters, result_1, error_1;
12098
+ return __generator(this, function (_c) {
12099
+ switch (_c.label) {
12100
+ case 0:
12101
+ _c.trys.push([0, 4, , 5]);
12102
+ return [4 /*yield*/, forTime(100)];
12103
+ case 1:
12104
+ _c.sent();
12105
+ title_1 = ongoingParameters.title, conversationSummary_1 = ongoingParameters.conversationSummary;
12106
+ console.info("\n");
12107
+ if (title_1 !== '' &&
12108
+ just(false) /* <- TODO: [⛲️] Some better way how to show the title of ongoing conversation */) {
12109
+ console.info(colors.gray("--- ".concat(title_1, " ---")));
12110
+ }
12111
+ else {
12112
+ console.info(colors.gray("---"));
12113
+ }
12114
+ return [4 /*yield*/, prompts({
12115
+ type: 'text',
12116
+ name: 'userMessage',
12117
+ message: 'User message',
12118
+ hint: spaceTrim(function (block) { return "\n Type \"exit\" to exit,\n\n previousTitle\n ".concat(block(title_1), "\n\n previousConversationSummary\n ").concat(block(conversationSummary_1), "\n\n "); }),
12119
+ })];
12120
+ case 2:
12121
+ response = _c.sent();
12122
+ userMessage_1 = response.userMessage;
12123
+ if (userMessage_1 === 'exit' || userMessage_1 === 'quit' || userMessage_1 === undefined) {
12124
+ return [2 /*return*/, { value: process.exit(0) }];
12125
+ }
12126
+ console.info("\n");
12127
+ console.info(spaceTrim(function (block) { return "\n\n ".concat(colors.bold(colors.blue('User:')), "\n ").concat(block(colors.blue(userMessage_1)), "\n\n "); }));
12128
+ inputParameters = {
12129
+ previousTitle: title_1,
12130
+ previousConversationSummary: conversationSummary_1,
12131
+ userMessage: userMessage_1,
12132
+ };
12133
+ return [4 /*yield*/, pipelineExecutor(inputParameters)];
12134
+ case 3:
12135
+ result_1 = _c.sent();
12136
+ assertsExecutionSuccessful(result_1);
12137
+ console.info("\n");
12138
+ console.info(spaceTrim(function (block) { return "\n\n ".concat(colors.bold(colors.green('Chatbot:')), "\n ").concat(block(colors.green(result_1.outputParameters.chatbotResponse)), "\n\n "); }));
12139
+ ongoingParameters = result_1.outputParameters;
12140
+ return [3 /*break*/, 5];
12141
+ case 4:
12142
+ error_1 = _c.sent();
12143
+ if (!(error_1 instanceof Error)) {
12144
+ throw error_1;
12145
+ }
12146
+ // TODO: Allow to ressurect the chatbot after an error - prompt the user to continue
12147
+ console.error(colors.red(error_1.stack || error_1.message));
12148
+ return [2 /*return*/, { value: process.exit(1) }];
12149
+ case 5: return [2 /*return*/];
12150
+ }
12151
+ });
12152
+ };
12153
+ _b.label = 1;
12154
+ case 1:
12155
+ if (!just(true)) return [3 /*break*/, 3];
12156
+ return [5 /*yield**/, _loop_1()];
12157
+ case 2:
12158
+ state_1 = _b.sent();
12159
+ if (typeof state_1 === "object")
12160
+ return [2 /*return*/, state_1.value];
12161
+ return [3 /*break*/, 1];
12162
+ case 3: return [2 /*return*/];
12163
+ }
12164
+ });
12165
+ });
12166
+ }
12167
+ /**
12168
+ * TODO: Saving reports from the chatbot conversation
12169
+ * TODO: [⛲️] This is the right place to start implementing INK
12170
+ * Note: [🟡] Code in this file should never be published outside of `@promptbook/cli`
12171
+ */
12172
+
11642
12173
  /**
11643
12174
  * Initializes `run` command for Promptbook CLI utilities
11644
12175
  *
@@ -11655,15 +12186,16 @@ function initializeRunCommand(program) {
11655
12186
  runCommand.option('-r, --reload', "Call LLM models even if same prompt with result is in the cache", false);
11656
12187
  runCommand.option('-v, --verbose', "Is output verbose", false);
11657
12188
  runCommand.option('--no-interactive', "Input is not interactive, if true you need to pass all the input parameters through --json");
12189
+ runCommand.option('--no-formfactor', "When set, behavior of the interactive mode is not changed by the formfactor of the pipeline");
11658
12190
  runCommand.option('-j, --json <json>', "Pass all or some input parameters as JSON record, if used the output is also returned as JSON");
11659
12191
  runCommand.option('-s, --save-report <path>', "Save report to file");
11660
12192
  runCommand.action(function (filePathRaw, options) { return __awaiter(_this, void 0, void 0, function () {
11661
- var isCacheReloaded, isInteractive, json, isVerbose, saveReport, inputParameters, prepareAndScrapeOptions, fs, filePath, filePathCandidates, filePathCandidates_1, filePathCandidates_1_1, filePathCandidate, e_1_1, llm, executables, tools, pipelineString, pipeline, error_1, pipelineExecutor, questions, response, result, isSuccessful, errors, warnings, outputParameters, executionReport, executionReportString, _a, _b, error, _c, _d, warning, _e, _f, key, value, separator;
12193
+ var isCacheReloaded, isInteractive, isFormfactorUsed, json, isVerbose, saveReport, inputParameters, prepareAndScrapeOptions, fs, filePath, filePathCandidates, filePathCandidates_1, filePathCandidates_1_1, filePathCandidate, e_1_1, llm, executables, tools, pipelineString, pipeline, error_1, pipelineExecutor, questions, response, result, isSuccessful, errors, warnings, outputParameters, executionReport, executionReportString, _a, _b, error, _c, _d, warning, _e, _f, key, value, separator;
11662
12194
  var e_1, _g, _h, e_2, _j, e_3, _k, e_4, _l;
11663
12195
  return __generator(this, function (_m) {
11664
12196
  switch (_m.label) {
11665
12197
  case 0:
11666
- isCacheReloaded = options.reload, isInteractive = options.interactive, json = options.json, isVerbose = options.verbose, saveReport = options.saveReport;
12198
+ isCacheReloaded = options.reload, isInteractive = options.interactive, isFormfactorUsed = options.formfactor, json = options.json, isVerbose = options.verbose, saveReport = options.saveReport;
11667
12199
  if (saveReport && !saveReport.endsWith('.json') && !saveReport.endsWith('.md')) {
11668
12200
  console.error(colors.red("Report file must be .json or .md"));
11669
12201
  return [2 /*return*/, process.exit(1)];
@@ -11732,7 +12264,7 @@ function initializeRunCommand(program) {
11732
12264
  if (!error.message.includes('No LLM tools')) {
11733
12265
  throw error;
11734
12266
  }
11735
- console.error(colors.red(spaceTrim(function (block) { return "\n You need to configure LLM tools first\n\n 1) Create .env file at the root of your project\n 2) Configure API keys for LLM tools\n \n For example:\n ".concat(block($llmToolsMetadataRegister
12267
+ console.error(colors.red(spaceTrim(function (block) { return "\n You need to configure LLM tools first\n\n 1) Create .env file at the root of your project\n 2) Configure API keys for LLM tools\n\n For example:\n ".concat(block($llmToolsMetadataRegister
11736
12268
  .list()
11737
12269
  .map(function (_a) {
11738
12270
  var title = _a.title, envVariables = _a.envVariables;
@@ -11795,6 +12327,10 @@ function initializeRunCommand(program) {
11795
12327
  // <- TODO: Why "LLM execution failed undefinedx"
11796
12328
  maxParallelCount: 1, // <- TODO: Pass CLI argument
11797
12329
  });
12330
+ // TODO: Make some better system for formfactors and interactive mode - here is just a quick hardcoded solution for chatbot
12331
+ if (isInteractive === true && isFormfactorUsed === true && pipeline.formfactorName === 'CHATBOT') {
12332
+ return [2 /*return*/, /* not await */ runInteractiveChatbot({ pipeline: pipeline, pipelineExecutor: pipelineExecutor, isVerbose: isVerbose })];
12333
+ }
11798
12334
  if (isVerbose) {
11799
12335
  console.info(colors.gray('--- Getting input parameters ---'));
11800
12336
  }
@@ -12255,7 +12791,7 @@ var RemoteLlmExecutionTools = /** @class */ (function () {
12255
12791
  isAnonymous: true,
12256
12792
  userId: this.options.userId,
12257
12793
  llmToolsConfiguration: this.options.llmToolsConfiguration,
12258
- } /* <- TODO: [🤛] */);
12794
+ } /* <- Note: [🤛] */);
12259
12795
  }
12260
12796
  else {
12261
12797
  socket.emit('listModels-request', {
@@ -12263,7 +12799,7 @@ var RemoteLlmExecutionTools = /** @class */ (function () {
12263
12799
  appId: this.options.appId,
12264
12800
  userId: this.options.userId,
12265
12801
  customOptions: this.options.customOptions,
12266
- } /* <- TODO: [🤛] */);
12802
+ } /* <- Note: [🤛] */);
12267
12803
  }
12268
12804
  return [4 /*yield*/, new Promise(function (resolve, reject) {
12269
12805
  socket.on('listModels-response', function (response) {
@@ -12351,7 +12887,7 @@ var RemoteLlmExecutionTools = /** @class */ (function () {
12351
12887
  userId: this.options.userId,
12352
12888
  llmToolsConfiguration: this.options.llmToolsConfiguration,
12353
12889
  prompt: prompt,
12354
- } /* <- TODO: [🤛] */);
12890
+ } /* <- Note: [🤛] */);
12355
12891
  }
12356
12892
  else {
12357
12893
  socket.emit('prompt-request', {
@@ -12360,7 +12896,7 @@ var RemoteLlmExecutionTools = /** @class */ (function () {
12360
12896
  userId: this.options.userId,
12361
12897
  customOptions: this.options.customOptions,
12362
12898
  prompt: prompt,
12363
- } /* <- TODO: [🤛] */);
12899
+ } /* <- Note: [🤛] */);
12364
12900
  }
12365
12901
  return [4 /*yield*/, new Promise(function (resolve, reject) {
12366
12902
  socket.on('prompt-response', function (response) {
@@ -12383,7 +12919,7 @@ var RemoteLlmExecutionTools = /** @class */ (function () {
12383
12919
  return RemoteLlmExecutionTools;
12384
12920
  }());
12385
12921
  /**
12386
- * TODO: Maybe use `$asDeeplyFrozenSerializableJson`
12922
+ * TODO: Maybe use `$exportJson`
12387
12923
  * TODO: [🧠][🛍] Maybe not `isAnonymous: boolean` BUT `mode: 'ANONYMOUS'|'COLLECTION'`
12388
12924
  * TODO: [🍓] Allow to list compatible models with each variant
12389
12925
  * TODO: [🗯] RemoteLlmExecutionTools should extend Destroyable and implement IDestroyable
@@ -12409,72 +12945,75 @@ function computeUsage(value) {
12409
12945
  * @see https://docs.anthropic.com/en/docs/models-overview
12410
12946
  * @public exported from `@promptbook/anthropic-claude`
12411
12947
  */
12412
- var ANTHROPIC_CLAUDE_MODELS = $asDeeplyFrozenSerializableJson('ANTHROPIC_CLAUDE_MODELS', [
12413
- {
12414
- modelVariant: 'CHAT',
12415
- modelTitle: 'Claude 3.5 Sonnet',
12416
- modelName: 'claude-3-5-sonnet-20240620',
12417
- pricing: {
12418
- prompt: computeUsage("$3.00 / 1M tokens"),
12419
- output: computeUsage("$15.00 / 1M tokens"),
12948
+ var ANTHROPIC_CLAUDE_MODELS = exportJson({
12949
+ name: 'ANTHROPIC_CLAUDE_MODELS',
12950
+ value: [
12951
+ {
12952
+ modelVariant: 'CHAT',
12953
+ modelTitle: 'Claude 3.5 Sonnet',
12954
+ modelName: 'claude-3-5-sonnet-20240620',
12955
+ pricing: {
12956
+ prompt: computeUsage("$3.00 / 1M tokens"),
12957
+ output: computeUsage("$15.00 / 1M tokens"),
12958
+ },
12420
12959
  },
12421
- },
12422
- {
12423
- modelVariant: 'CHAT',
12424
- modelTitle: 'Claude 3 Opus',
12425
- modelName: 'claude-3-opus-20240229',
12426
- pricing: {
12427
- prompt: computeUsage("$15.00 / 1M tokens"),
12428
- output: computeUsage("$75.00 / 1M tokens"),
12960
+ {
12961
+ modelVariant: 'CHAT',
12962
+ modelTitle: 'Claude 3 Opus',
12963
+ modelName: 'claude-3-opus-20240229',
12964
+ pricing: {
12965
+ prompt: computeUsage("$15.00 / 1M tokens"),
12966
+ output: computeUsage("$75.00 / 1M tokens"),
12967
+ },
12429
12968
  },
12430
- },
12431
- {
12432
- modelVariant: 'CHAT',
12433
- modelTitle: 'Claude 3 Sonnet',
12434
- modelName: 'claude-3-sonnet-20240229',
12435
- pricing: {
12436
- prompt: computeUsage("$3.00 / 1M tokens"),
12437
- output: computeUsage("$15.00 / 1M tokens"),
12969
+ {
12970
+ modelVariant: 'CHAT',
12971
+ modelTitle: 'Claude 3 Sonnet',
12972
+ modelName: 'claude-3-sonnet-20240229',
12973
+ pricing: {
12974
+ prompt: computeUsage("$3.00 / 1M tokens"),
12975
+ output: computeUsage("$15.00 / 1M tokens"),
12976
+ },
12438
12977
  },
12439
- },
12440
- {
12441
- modelVariant: 'CHAT',
12442
- modelTitle: 'Claude 3 Haiku',
12443
- modelName: ' claude-3-haiku-20240307',
12444
- pricing: {
12445
- prompt: computeUsage("$0.25 / 1M tokens"),
12446
- output: computeUsage("$1.25 / 1M tokens"),
12978
+ {
12979
+ modelVariant: 'CHAT',
12980
+ modelTitle: 'Claude 3 Haiku',
12981
+ modelName: ' claude-3-haiku-20240307',
12982
+ pricing: {
12983
+ prompt: computeUsage("$0.25 / 1M tokens"),
12984
+ output: computeUsage("$1.25 / 1M tokens"),
12985
+ },
12447
12986
  },
12448
- },
12449
- {
12450
- modelVariant: 'CHAT',
12451
- modelTitle: 'Claude 2.1',
12452
- modelName: 'claude-2.1',
12453
- pricing: {
12454
- prompt: computeUsage("$8.00 / 1M tokens"),
12455
- output: computeUsage("$24.00 / 1M tokens"),
12987
+ {
12988
+ modelVariant: 'CHAT',
12989
+ modelTitle: 'Claude 2.1',
12990
+ modelName: 'claude-2.1',
12991
+ pricing: {
12992
+ prompt: computeUsage("$8.00 / 1M tokens"),
12993
+ output: computeUsage("$24.00 / 1M tokens"),
12994
+ },
12456
12995
  },
12457
- },
12458
- {
12459
- modelVariant: 'CHAT',
12460
- modelTitle: 'Claude 2',
12461
- modelName: 'claude-2.0',
12462
- pricing: {
12463
- prompt: computeUsage("$8.00 / 1M tokens"),
12464
- output: computeUsage("$24.00 / 1M tokens"),
12996
+ {
12997
+ modelVariant: 'CHAT',
12998
+ modelTitle: 'Claude 2',
12999
+ modelName: 'claude-2.0',
13000
+ pricing: {
13001
+ prompt: computeUsage("$8.00 / 1M tokens"),
13002
+ output: computeUsage("$24.00 / 1M tokens"),
13003
+ },
12465
13004
  },
12466
- },
12467
- {
12468
- modelVariant: 'CHAT',
12469
- modelTitle: ' Claude Instant 1.2',
12470
- modelName: 'claude-instant-1.2',
12471
- pricing: {
12472
- prompt: computeUsage("$0.80 / 1M tokens"),
12473
- output: computeUsage("$2.40 / 1M tokens"),
13005
+ {
13006
+ modelVariant: 'CHAT',
13007
+ modelTitle: ' Claude Instant 1.2',
13008
+ modelName: 'claude-instant-1.2',
13009
+ pricing: {
13010
+ prompt: computeUsage("$0.80 / 1M tokens"),
13011
+ output: computeUsage("$2.40 / 1M tokens"),
13012
+ },
12474
13013
  },
12475
- },
12476
- // TODO: [main] !!! Claude 1 and 2 has also completion versions - ask Hoagy
12477
- ]);
13014
+ // TODO: [main] !!! Claude 1 and 2 has also completion versions - ask Hoagy
13015
+ ],
13016
+ });
12478
13017
  /**
12479
13018
  * Note: [🤖] Add models of new variant
12480
13019
  * TODO: [🧠][main] !!! Add embedding models OR Anthropic has only chat+completion models?
@@ -12694,18 +13233,23 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
12694
13233
  // eslint-disable-next-line prefer-const
12695
13234
  complete = $getCurrentDate();
12696
13235
  usage = computeAnthropicClaudeUsage(rawPromptContent || '', resultContent || '', rawResponse);
12697
- return [2 /*return*/, $asDeeplyFrozenSerializableJson('AnthropicClaudeExecutionTools ChatPromptResult', {
12698
- content: resultContent,
12699
- modelName: rawResponse.model,
12700
- timing: {
12701
- start: start,
12702
- complete: complete,
13236
+ return [2 /*return*/, exportJson({
13237
+ name: 'promptResult',
13238
+ message: "Result of `AzureOpenAiExecutionTools.callChatModel`",
13239
+ order: [],
13240
+ value: {
13241
+ content: resultContent,
13242
+ modelName: rawResponse.model,
13243
+ timing: {
13244
+ start: start,
13245
+ complete: complete,
13246
+ },
13247
+ usage: usage,
13248
+ rawPromptContent: rawPromptContent,
13249
+ rawRequest: rawRequest,
13250
+ rawResponse: rawResponse,
13251
+ // <- [🗯]
12703
13252
  },
12704
- usage: usage,
12705
- rawPromptContent: rawPromptContent,
12706
- rawRequest: rawRequest,
12707
- rawResponse: rawResponse,
12708
- // <- [🗯]
12709
13253
  })];
12710
13254
  }
12711
13255
  });
@@ -12775,7 +13319,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
12775
13319
 
12776
13320
 
12777
13321
 
12778
- return $asDeeplyFrozenSerializableJson('AnthropicClaudeExecutionTools CompletionPromptResult',{
13322
+ return $exportJson({ name: 'promptResult',message: Result of \`AzureOpenAiExecutionTools callChatModel\`, order: [],value:{
12779
13323
  content: resultContent,
12780
13324
  modelName: rawResponse.model || model,
12781
13325
  timing: {
@@ -12944,381 +13488,384 @@ var _AzureOpenAiMetadataRegistration = $llmToolsMetadataRegister.register({
12944
13488
  * @see https://openai.com/api/pricing/
12945
13489
  * @public exported from `@promptbook/openai`
12946
13490
  */
12947
- var OPENAI_MODELS = $asDeeplyFrozenSerializableJson('OPENAI_MODELS', [
12948
- /*/
13491
+ var OPENAI_MODELS = exportJson({
13492
+ name: 'OPENAI_MODELS',
13493
+ value: [
13494
+ /*/
13495
+ {
13496
+ modelTitle: 'dall-e-3',
13497
+ modelName: 'dall-e-3',
13498
+ },
13499
+ /**/
13500
+ /*/
13501
+ {
13502
+ modelTitle: 'whisper-1',
13503
+ modelName: 'whisper-1',
13504
+ },
13505
+ /**/
13506
+ /**/
13507
+ {
13508
+ modelVariant: 'COMPLETION',
13509
+ modelTitle: 'davinci-002',
13510
+ modelName: 'davinci-002',
13511
+ pricing: {
13512
+ prompt: computeUsage("$2.00 / 1M tokens"),
13513
+ output: computeUsage("$2.00 / 1M tokens"), // <- not sure
13514
+ },
13515
+ },
13516
+ /**/
13517
+ /*/
13518
+ {
13519
+ modelTitle: 'dall-e-2',
13520
+ modelName: 'dall-e-2',
13521
+ },
13522
+ /**/
13523
+ /**/
13524
+ {
13525
+ modelVariant: 'CHAT',
13526
+ modelTitle: 'gpt-3.5-turbo-16k',
13527
+ modelName: 'gpt-3.5-turbo-16k',
13528
+ pricing: {
13529
+ prompt: computeUsage("$3.00 / 1M tokens"),
13530
+ output: computeUsage("$4.00 / 1M tokens"),
13531
+ },
13532
+ },
13533
+ /**/
13534
+ /*/
12949
13535
  {
12950
- modelTitle: 'dall-e-3',
12951
- modelName: 'dall-e-3',
13536
+ modelTitle: 'tts-1-hd-1106',
13537
+ modelName: 'tts-1-hd-1106',
12952
13538
  },
12953
13539
  /**/
12954
- /*/
13540
+ /*/
12955
13541
  {
12956
- modelTitle: 'whisper-1',
12957
- modelName: 'whisper-1',
13542
+ modelTitle: 'tts-1-hd',
13543
+ modelName: 'tts-1-hd',
12958
13544
  },
12959
- /**/
12960
- /**/
12961
- {
12962
- modelVariant: 'COMPLETION',
12963
- modelTitle: 'davinci-002',
12964
- modelName: 'davinci-002',
12965
- pricing: {
12966
- prompt: computeUsage("$2.00 / 1M tokens"),
12967
- output: computeUsage("$2.00 / 1M tokens"), // <- not sure
12968
- },
12969
- },
12970
- /**/
12971
- /*/
12972
- {
12973
- modelTitle: 'dall-e-2',
12974
- modelName: 'dall-e-2',
12975
- },
12976
- /**/
12977
- /**/
12978
- {
12979
- modelVariant: 'CHAT',
12980
- modelTitle: 'gpt-3.5-turbo-16k',
12981
- modelName: 'gpt-3.5-turbo-16k',
12982
- pricing: {
12983
- prompt: computeUsage("$3.00 / 1M tokens"),
12984
- output: computeUsage("$4.00 / 1M tokens"),
12985
- },
12986
- },
12987
- /**/
12988
- /*/
12989
- {
12990
- modelTitle: 'tts-1-hd-1106',
12991
- modelName: 'tts-1-hd-1106',
12992
- },
12993
- /**/
12994
- /*/
12995
- {
12996
- modelTitle: 'tts-1-hd',
12997
- modelName: 'tts-1-hd',
12998
- },
12999
- /**/
13000
- /**/
13001
- {
13002
- modelVariant: 'CHAT',
13003
- modelTitle: 'gpt-4',
13004
- modelName: 'gpt-4',
13005
- pricing: {
13006
- prompt: computeUsage("$30.00 / 1M tokens"),
13007
- output: computeUsage("$60.00 / 1M tokens"),
13545
+ /**/
13546
+ /**/
13547
+ {
13548
+ modelVariant: 'CHAT',
13549
+ modelTitle: 'gpt-4',
13550
+ modelName: 'gpt-4',
13551
+ pricing: {
13552
+ prompt: computeUsage("$30.00 / 1M tokens"),
13553
+ output: computeUsage("$60.00 / 1M tokens"),
13554
+ },
13008
13555
  },
13009
- },
13010
- /**/
13011
- /**/
13012
- {
13013
- modelVariant: 'CHAT',
13014
- modelTitle: 'gpt-4-32k',
13015
- modelName: 'gpt-4-32k',
13016
- pricing: {
13017
- prompt: computeUsage("$60.00 / 1M tokens"),
13018
- output: computeUsage("$120.00 / 1M tokens"),
13556
+ /**/
13557
+ /**/
13558
+ {
13559
+ modelVariant: 'CHAT',
13560
+ modelTitle: 'gpt-4-32k',
13561
+ modelName: 'gpt-4-32k',
13562
+ pricing: {
13563
+ prompt: computeUsage("$60.00 / 1M tokens"),
13564
+ output: computeUsage("$120.00 / 1M tokens"),
13565
+ },
13019
13566
  },
13020
- },
13021
- /**/
13022
- /*/
13023
- {
13024
- modelVariant: 'CHAT',
13025
- modelTitle: 'gpt-4-0613',
13026
- modelName: 'gpt-4-0613',
13027
- pricing: {
13028
- prompt: computeUsage(` / 1M tokens`),
13029
- output: computeUsage(` / 1M tokens`),
13567
+ /**/
13568
+ /*/
13569
+ {
13570
+ modelVariant: 'CHAT',
13571
+ modelTitle: 'gpt-4-0613',
13572
+ modelName: 'gpt-4-0613',
13573
+ pricing: {
13574
+ prompt: computeUsage(` / 1M tokens`),
13575
+ output: computeUsage(` / 1M tokens`),
13576
+ },
13030
13577
  },
13031
- },
13032
- /**/
13033
- /**/
13034
- {
13035
- modelVariant: 'CHAT',
13036
- modelTitle: 'gpt-4-turbo-2024-04-09',
13037
- modelName: 'gpt-4-turbo-2024-04-09',
13038
- pricing: {
13039
- prompt: computeUsage("$10.00 / 1M tokens"),
13040
- output: computeUsage("$30.00 / 1M tokens"),
13578
+ /**/
13579
+ /**/
13580
+ {
13581
+ modelVariant: 'CHAT',
13582
+ modelTitle: 'gpt-4-turbo-2024-04-09',
13583
+ modelName: 'gpt-4-turbo-2024-04-09',
13584
+ pricing: {
13585
+ prompt: computeUsage("$10.00 / 1M tokens"),
13586
+ output: computeUsage("$30.00 / 1M tokens"),
13587
+ },
13041
13588
  },
13042
- },
13043
- /**/
13044
- /**/
13045
- {
13046
- modelVariant: 'CHAT',
13047
- modelTitle: 'gpt-3.5-turbo-1106',
13048
- modelName: 'gpt-3.5-turbo-1106',
13049
- pricing: {
13050
- prompt: computeUsage("$1.00 / 1M tokens"),
13051
- output: computeUsage("$2.00 / 1M tokens"),
13589
+ /**/
13590
+ /**/
13591
+ {
13592
+ modelVariant: 'CHAT',
13593
+ modelTitle: 'gpt-3.5-turbo-1106',
13594
+ modelName: 'gpt-3.5-turbo-1106',
13595
+ pricing: {
13596
+ prompt: computeUsage("$1.00 / 1M tokens"),
13597
+ output: computeUsage("$2.00 / 1M tokens"),
13598
+ },
13052
13599
  },
13053
- },
13054
- /**/
13055
- /**/
13056
- {
13057
- modelVariant: 'CHAT',
13058
- modelTitle: 'gpt-4-turbo',
13059
- modelName: 'gpt-4-turbo',
13060
- pricing: {
13061
- prompt: computeUsage("$10.00 / 1M tokens"),
13062
- output: computeUsage("$30.00 / 1M tokens"),
13600
+ /**/
13601
+ /**/
13602
+ {
13603
+ modelVariant: 'CHAT',
13604
+ modelTitle: 'gpt-4-turbo',
13605
+ modelName: 'gpt-4-turbo',
13606
+ pricing: {
13607
+ prompt: computeUsage("$10.00 / 1M tokens"),
13608
+ output: computeUsage("$30.00 / 1M tokens"),
13609
+ },
13063
13610
  },
13064
- },
13065
- /**/
13066
- /**/
13067
- {
13068
- modelVariant: 'COMPLETION',
13069
- modelTitle: 'gpt-3.5-turbo-instruct-0914',
13070
- modelName: 'gpt-3.5-turbo-instruct-0914',
13071
- pricing: {
13072
- prompt: computeUsage("$1.50 / 1M tokens"),
13073
- output: computeUsage("$2.00 / 1M tokens"), // <- For gpt-3.5-turbo-instruct
13611
+ /**/
13612
+ /**/
13613
+ {
13614
+ modelVariant: 'COMPLETION',
13615
+ modelTitle: 'gpt-3.5-turbo-instruct-0914',
13616
+ modelName: 'gpt-3.5-turbo-instruct-0914',
13617
+ pricing: {
13618
+ prompt: computeUsage("$1.50 / 1M tokens"),
13619
+ output: computeUsage("$2.00 / 1M tokens"), // <- For gpt-3.5-turbo-instruct
13620
+ },
13074
13621
  },
13075
- },
13076
- /**/
13077
- /**/
13078
- {
13079
- modelVariant: 'COMPLETION',
13080
- modelTitle: 'gpt-3.5-turbo-instruct',
13081
- modelName: 'gpt-3.5-turbo-instruct',
13082
- pricing: {
13083
- prompt: computeUsage("$1.50 / 1M tokens"),
13084
- output: computeUsage("$2.00 / 1M tokens"),
13622
+ /**/
13623
+ /**/
13624
+ {
13625
+ modelVariant: 'COMPLETION',
13626
+ modelTitle: 'gpt-3.5-turbo-instruct',
13627
+ modelName: 'gpt-3.5-turbo-instruct',
13628
+ pricing: {
13629
+ prompt: computeUsage("$1.50 / 1M tokens"),
13630
+ output: computeUsage("$2.00 / 1M tokens"),
13631
+ },
13085
13632
  },
13086
- },
13087
- /**/
13088
- /*/
13089
- {
13090
- modelTitle: 'tts-1',
13091
- modelName: 'tts-1',
13092
- },
13093
- /**/
13094
- /**/
13095
- {
13096
- modelVariant: 'CHAT',
13097
- modelTitle: 'gpt-3.5-turbo',
13098
- modelName: 'gpt-3.5-turbo',
13099
- pricing: {
13100
- prompt: computeUsage("$3.00 / 1M tokens"),
13101
- output: computeUsage("$6.00 / 1M tokens"), // <- Not sure, refer to gpt-3.5-turbo in Fine-tuning models
13633
+ /**/
13634
+ /*/
13635
+ {
13636
+ modelTitle: 'tts-1',
13637
+ modelName: 'tts-1',
13638
+ },
13639
+ /**/
13640
+ /**/
13641
+ {
13642
+ modelVariant: 'CHAT',
13643
+ modelTitle: 'gpt-3.5-turbo',
13644
+ modelName: 'gpt-3.5-turbo',
13645
+ pricing: {
13646
+ prompt: computeUsage("$3.00 / 1M tokens"),
13647
+ output: computeUsage("$6.00 / 1M tokens"), // <- Not sure, refer to gpt-3.5-turbo in Fine-tuning models
13648
+ },
13102
13649
  },
13103
- },
13104
- /**/
13105
- /**/
13106
- {
13107
- modelVariant: 'CHAT',
13108
- modelTitle: 'gpt-3.5-turbo-0301',
13109
- modelName: 'gpt-3.5-turbo-0301',
13110
- pricing: {
13111
- prompt: computeUsage("$1.50 / 1M tokens"),
13112
- output: computeUsage("$2.00 / 1M tokens"),
13650
+ /**/
13651
+ /**/
13652
+ {
13653
+ modelVariant: 'CHAT',
13654
+ modelTitle: 'gpt-3.5-turbo-0301',
13655
+ modelName: 'gpt-3.5-turbo-0301',
13656
+ pricing: {
13657
+ prompt: computeUsage("$1.50 / 1M tokens"),
13658
+ output: computeUsage("$2.00 / 1M tokens"),
13659
+ },
13113
13660
  },
13114
- },
13115
- /**/
13116
- /**/
13117
- {
13118
- modelVariant: 'COMPLETION',
13119
- modelTitle: 'babbage-002',
13120
- modelName: 'babbage-002',
13121
- pricing: {
13122
- prompt: computeUsage("$0.40 / 1M tokens"),
13123
- output: computeUsage("$0.40 / 1M tokens"), // <- Not sure
13661
+ /**/
13662
+ /**/
13663
+ {
13664
+ modelVariant: 'COMPLETION',
13665
+ modelTitle: 'babbage-002',
13666
+ modelName: 'babbage-002',
13667
+ pricing: {
13668
+ prompt: computeUsage("$0.40 / 1M tokens"),
13669
+ output: computeUsage("$0.40 / 1M tokens"), // <- Not sure
13670
+ },
13124
13671
  },
13125
- },
13126
- /**/
13127
- /**/
13128
- {
13129
- modelVariant: 'CHAT',
13130
- modelTitle: 'gpt-4-1106-preview',
13131
- modelName: 'gpt-4-1106-preview',
13132
- pricing: {
13133
- prompt: computeUsage("$10.00 / 1M tokens"),
13134
- output: computeUsage("$30.00 / 1M tokens"),
13672
+ /**/
13673
+ /**/
13674
+ {
13675
+ modelVariant: 'CHAT',
13676
+ modelTitle: 'gpt-4-1106-preview',
13677
+ modelName: 'gpt-4-1106-preview',
13678
+ pricing: {
13679
+ prompt: computeUsage("$10.00 / 1M tokens"),
13680
+ output: computeUsage("$30.00 / 1M tokens"),
13681
+ },
13135
13682
  },
13136
- },
13137
- /**/
13138
- /**/
13139
- {
13140
- modelVariant: 'CHAT',
13141
- modelTitle: 'gpt-4-0125-preview',
13142
- modelName: 'gpt-4-0125-preview',
13143
- pricing: {
13144
- prompt: computeUsage("$10.00 / 1M tokens"),
13145
- output: computeUsage("$30.00 / 1M tokens"),
13683
+ /**/
13684
+ /**/
13685
+ {
13686
+ modelVariant: 'CHAT',
13687
+ modelTitle: 'gpt-4-0125-preview',
13688
+ modelName: 'gpt-4-0125-preview',
13689
+ pricing: {
13690
+ prompt: computeUsage("$10.00 / 1M tokens"),
13691
+ output: computeUsage("$30.00 / 1M tokens"),
13692
+ },
13146
13693
  },
13147
- },
13148
- /**/
13149
- /*/
13150
- {
13151
- modelTitle: 'tts-1-1106',
13152
- modelName: 'tts-1-1106',
13153
- },
13154
- /**/
13155
- /**/
13156
- {
13157
- modelVariant: 'CHAT',
13158
- modelTitle: 'gpt-3.5-turbo-0125',
13159
- modelName: 'gpt-3.5-turbo-0125',
13160
- pricing: {
13161
- prompt: computeUsage("$0.50 / 1M tokens"),
13162
- output: computeUsage("$1.50 / 1M tokens"),
13694
+ /**/
13695
+ /*/
13696
+ {
13697
+ modelTitle: 'tts-1-1106',
13698
+ modelName: 'tts-1-1106',
13699
+ },
13700
+ /**/
13701
+ /**/
13702
+ {
13703
+ modelVariant: 'CHAT',
13704
+ modelTitle: 'gpt-3.5-turbo-0125',
13705
+ modelName: 'gpt-3.5-turbo-0125',
13706
+ pricing: {
13707
+ prompt: computeUsage("$0.50 / 1M tokens"),
13708
+ output: computeUsage("$1.50 / 1M tokens"),
13709
+ },
13163
13710
  },
13164
- },
13165
- /**/
13166
- /**/
13167
- {
13168
- modelVariant: 'CHAT',
13169
- modelTitle: 'gpt-4-turbo-preview',
13170
- modelName: 'gpt-4-turbo-preview',
13171
- pricing: {
13172
- prompt: computeUsage("$10.00 / 1M tokens"),
13173
- output: computeUsage("$30.00 / 1M tokens"), // <- Not sure, just for gpt-4-turbo
13711
+ /**/
13712
+ /**/
13713
+ {
13714
+ modelVariant: 'CHAT',
13715
+ modelTitle: 'gpt-4-turbo-preview',
13716
+ modelName: 'gpt-4-turbo-preview',
13717
+ pricing: {
13718
+ prompt: computeUsage("$10.00 / 1M tokens"),
13719
+ output: computeUsage("$30.00 / 1M tokens"), // <- Not sure, just for gpt-4-turbo
13720
+ },
13174
13721
  },
13175
- },
13176
- /**/
13177
- /**/
13178
- {
13179
- modelVariant: 'EMBEDDING',
13180
- modelTitle: 'text-embedding-3-large',
13181
- modelName: 'text-embedding-3-large',
13182
- pricing: {
13183
- prompt: computeUsage("$0.13 / 1M tokens"),
13184
- // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
13185
- output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
13722
+ /**/
13723
+ /**/
13724
+ {
13725
+ modelVariant: 'EMBEDDING',
13726
+ modelTitle: 'text-embedding-3-large',
13727
+ modelName: 'text-embedding-3-large',
13728
+ pricing: {
13729
+ prompt: computeUsage("$0.13 / 1M tokens"),
13730
+ // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
13731
+ output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
13732
+ },
13186
13733
  },
13187
- },
13188
- /**/
13189
- /**/
13190
- {
13191
- modelVariant: 'EMBEDDING',
13192
- modelTitle: 'text-embedding-3-small',
13193
- modelName: 'text-embedding-3-small',
13194
- pricing: {
13195
- prompt: computeUsage("$0.02 / 1M tokens"),
13196
- // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
13197
- output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
13734
+ /**/
13735
+ /**/
13736
+ {
13737
+ modelVariant: 'EMBEDDING',
13738
+ modelTitle: 'text-embedding-3-small',
13739
+ modelName: 'text-embedding-3-small',
13740
+ pricing: {
13741
+ prompt: computeUsage("$0.02 / 1M tokens"),
13742
+ // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
13743
+ output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
13744
+ },
13198
13745
  },
13199
- },
13200
- /**/
13201
- /**/
13202
- {
13203
- modelVariant: 'CHAT',
13204
- modelTitle: 'gpt-3.5-turbo-0613',
13205
- modelName: 'gpt-3.5-turbo-0613',
13206
- pricing: {
13207
- prompt: computeUsage("$1.50 / 1M tokens"),
13208
- output: computeUsage("$2.00 / 1M tokens"),
13746
+ /**/
13747
+ /**/
13748
+ {
13749
+ modelVariant: 'CHAT',
13750
+ modelTitle: 'gpt-3.5-turbo-0613',
13751
+ modelName: 'gpt-3.5-turbo-0613',
13752
+ pricing: {
13753
+ prompt: computeUsage("$1.50 / 1M tokens"),
13754
+ output: computeUsage("$2.00 / 1M tokens"),
13755
+ },
13209
13756
  },
13210
- },
13211
- /**/
13212
- /**/
13213
- {
13214
- modelVariant: 'EMBEDDING',
13215
- modelTitle: 'text-embedding-ada-002',
13216
- modelName: 'text-embedding-ada-002',
13217
- pricing: {
13218
- prompt: computeUsage("$0.1 / 1M tokens"),
13219
- // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
13220
- output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
13757
+ /**/
13758
+ /**/
13759
+ {
13760
+ modelVariant: 'EMBEDDING',
13761
+ modelTitle: 'text-embedding-ada-002',
13762
+ modelName: 'text-embedding-ada-002',
13763
+ pricing: {
13764
+ prompt: computeUsage("$0.1 / 1M tokens"),
13765
+ // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
13766
+ output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
13767
+ },
13221
13768
  },
13222
- },
13223
- /**/
13224
- /*/
13225
- {
13226
- modelVariant: 'CHAT',
13227
- modelTitle: 'gpt-4-1106-vision-preview',
13228
- modelName: 'gpt-4-1106-vision-preview',
13229
- },
13230
- /**/
13231
- /*/
13232
- {
13233
- modelVariant: 'CHAT',
13234
- modelTitle: 'gpt-4-vision-preview',
13235
- modelName: 'gpt-4-vision-preview',
13236
- pricing: {
13237
- prompt: computeUsage(`$10.00 / 1M tokens`),
13238
- output: computeUsage(`$30.00 / 1M tokens`),
13769
+ /**/
13770
+ /*/
13771
+ {
13772
+ modelVariant: 'CHAT',
13773
+ modelTitle: 'gpt-4-1106-vision-preview',
13774
+ modelName: 'gpt-4-1106-vision-preview',
13239
13775
  },
13240
- },
13241
- /**/
13242
- /**/
13243
- {
13244
- modelVariant: 'CHAT',
13245
- modelTitle: 'gpt-4o-2024-05-13',
13246
- modelName: 'gpt-4o-2024-05-13',
13247
- pricing: {
13248
- prompt: computeUsage("$5.00 / 1M tokens"),
13249
- output: computeUsage("$15.00 / 1M tokens"),
13776
+ /**/
13777
+ /*/
13778
+ {
13779
+ modelVariant: 'CHAT',
13780
+ modelTitle: 'gpt-4-vision-preview',
13781
+ modelName: 'gpt-4-vision-preview',
13782
+ pricing: {
13783
+ prompt: computeUsage(`$10.00 / 1M tokens`),
13784
+ output: computeUsage(`$30.00 / 1M tokens`),
13785
+ },
13786
+ },
13787
+ /**/
13788
+ /**/
13789
+ {
13790
+ modelVariant: 'CHAT',
13791
+ modelTitle: 'gpt-4o-2024-05-13',
13792
+ modelName: 'gpt-4o-2024-05-13',
13793
+ pricing: {
13794
+ prompt: computeUsage("$5.00 / 1M tokens"),
13795
+ output: computeUsage("$15.00 / 1M tokens"),
13796
+ },
13797
+ //TODO: [main] !!! Add gpt-4o-mini-2024-07-18 and all others to be up to date
13250
13798
  },
13251
- //TODO: [main] !!! Add gpt-4o-mini-2024-07-18 and all others to be up to date
13252
- },
13253
- /**/
13254
- /**/
13255
- {
13256
- modelVariant: 'CHAT',
13257
- modelTitle: 'gpt-4o',
13258
- modelName: 'gpt-4o',
13259
- pricing: {
13260
- prompt: computeUsage("$5.00 / 1M tokens"),
13261
- output: computeUsage("$15.00 / 1M tokens"),
13799
+ /**/
13800
+ /**/
13801
+ {
13802
+ modelVariant: 'CHAT',
13803
+ modelTitle: 'gpt-4o',
13804
+ modelName: 'gpt-4o',
13805
+ pricing: {
13806
+ prompt: computeUsage("$5.00 / 1M tokens"),
13807
+ output: computeUsage("$15.00 / 1M tokens"),
13808
+ },
13262
13809
  },
13263
- },
13264
- /**/
13265
- /**/
13266
- {
13267
- modelVariant: 'CHAT',
13268
- modelTitle: 'o1-preview',
13269
- modelName: 'o1-preview',
13270
- pricing: {
13271
- prompt: computeUsage("$15.00 / 1M tokens"),
13272
- output: computeUsage("$60.00 / 1M tokens"),
13810
+ /**/
13811
+ /**/
13812
+ {
13813
+ modelVariant: 'CHAT',
13814
+ modelTitle: 'o1-preview',
13815
+ modelName: 'o1-preview',
13816
+ pricing: {
13817
+ prompt: computeUsage("$15.00 / 1M tokens"),
13818
+ output: computeUsage("$60.00 / 1M tokens"),
13819
+ },
13273
13820
  },
13274
- },
13275
- /**/
13276
- /**/
13277
- {
13278
- modelVariant: 'CHAT',
13279
- modelTitle: 'o1-preview-2024-09-12',
13280
- modelName: 'o1-preview-2024-09-12',
13281
- // <- TODO: [💩] Some better system to organize theese date suffixes and versions
13282
- pricing: {
13283
- prompt: computeUsage("$15.00 / 1M tokens"),
13284
- output: computeUsage("$60.00 / 1M tokens"),
13821
+ /**/
13822
+ /**/
13823
+ {
13824
+ modelVariant: 'CHAT',
13825
+ modelTitle: 'o1-preview-2024-09-12',
13826
+ modelName: 'o1-preview-2024-09-12',
13827
+ // <- TODO: [💩] Some better system to organize theese date suffixes and versions
13828
+ pricing: {
13829
+ prompt: computeUsage("$15.00 / 1M tokens"),
13830
+ output: computeUsage("$60.00 / 1M tokens"),
13831
+ },
13285
13832
  },
13286
- },
13287
- /**/
13288
- /**/
13289
- {
13290
- modelVariant: 'CHAT',
13291
- modelTitle: 'o1-mini',
13292
- modelName: 'o1-mini',
13293
- pricing: {
13294
- prompt: computeUsage("$3.00 / 1M tokens"),
13295
- output: computeUsage("$12.00 / 1M tokens"),
13833
+ /**/
13834
+ /**/
13835
+ {
13836
+ modelVariant: 'CHAT',
13837
+ modelTitle: 'o1-mini',
13838
+ modelName: 'o1-mini',
13839
+ pricing: {
13840
+ prompt: computeUsage("$3.00 / 1M tokens"),
13841
+ output: computeUsage("$12.00 / 1M tokens"),
13842
+ },
13296
13843
  },
13297
- },
13298
- /**/
13299
- /**/
13300
- {
13301
- modelVariant: 'CHAT',
13302
- modelTitle: 'o1-mini-2024-09-12',
13303
- modelName: 'o1-mini-2024-09-12',
13304
- pricing: {
13305
- prompt: computeUsage("$3.00 / 1M tokens"),
13306
- output: computeUsage("$12.00 / 1M tokens"),
13844
+ /**/
13845
+ /**/
13846
+ {
13847
+ modelVariant: 'CHAT',
13848
+ modelTitle: 'o1-mini-2024-09-12',
13849
+ modelName: 'o1-mini-2024-09-12',
13850
+ pricing: {
13851
+ prompt: computeUsage("$3.00 / 1M tokens"),
13852
+ output: computeUsage("$12.00 / 1M tokens"),
13853
+ },
13307
13854
  },
13308
- },
13309
- /**/
13310
- /**/
13311
- {
13312
- modelVariant: 'CHAT',
13313
- modelTitle: 'gpt-3.5-turbo-16k-0613',
13314
- modelName: 'gpt-3.5-turbo-16k-0613',
13315
- pricing: {
13316
- prompt: computeUsage("$3.00 / 1M tokens"),
13317
- output: computeUsage("$4.00 / 1M tokens"),
13855
+ /**/
13856
+ /**/
13857
+ {
13858
+ modelVariant: 'CHAT',
13859
+ modelTitle: 'gpt-3.5-turbo-16k-0613',
13860
+ modelName: 'gpt-3.5-turbo-16k-0613',
13861
+ pricing: {
13862
+ prompt: computeUsage("$3.00 / 1M tokens"),
13863
+ output: computeUsage("$4.00 / 1M tokens"),
13864
+ },
13318
13865
  },
13319
- },
13320
- /**/
13321
- ]);
13866
+ /**/
13867
+ ],
13868
+ });
13322
13869
  /**
13323
13870
  * Note: [🤖] Add models of new variant
13324
13871
  * TODO: [🧠] Some mechanism to propagate unsureness
@@ -13493,18 +14040,23 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
13493
14040
  input: __assign({ tokensCount: uncertainNumber((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.promptTokens) }, computeUsageCounts(prompt.content)),
13494
14041
  output: __assign({ tokensCount: uncertainNumber((_c = rawResponse.usage) === null || _c === void 0 ? void 0 : _c.completionTokens) }, computeUsageCounts(prompt.content)),
13495
14042
  };
13496
- return [2 /*return*/, $asDeeplyFrozenSerializableJson('AzureOpenAiExecutionTools ChatPromptResult', {
13497
- content: resultContent,
13498
- modelName: modelName,
13499
- timing: {
13500
- start: start,
13501
- complete: complete,
14043
+ return [2 /*return*/, exportJson({
14044
+ name: 'promptResult',
14045
+ message: "Result of `AzureOpenAiExecutionTools.callChatModel`",
14046
+ order: [],
14047
+ value: {
14048
+ content: resultContent,
14049
+ modelName: modelName,
14050
+ timing: {
14051
+ start: start,
14052
+ complete: complete,
14053
+ },
14054
+ usage: usage,
14055
+ rawPromptContent: rawPromptContent,
14056
+ rawRequest: rawRequest,
14057
+ rawResponse: __assign(__assign({}, rawResponse), { created: rawResponse.created.toISOString() }),
14058
+ // <- [🗯]
13502
14059
  },
13503
- usage: usage,
13504
- rawPromptContent: rawPromptContent,
13505
- rawRequest: rawRequest,
13506
- rawResponse: __assign(__assign({}, rawResponse), { created: rawResponse.created.toISOString() }),
13507
- // <- [🗯]
13508
14060
  })];
13509
14061
  case 4:
13510
14062
  error_1 = _d.sent();
@@ -13586,18 +14138,23 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
13586
14138
  input: __assign({ tokensCount: uncertainNumber((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.promptTokens) }, computeUsageCounts(prompt.content)),
13587
14139
  output: __assign({ tokensCount: uncertainNumber((_c = rawResponse.usage) === null || _c === void 0 ? void 0 : _c.completionTokens) }, computeUsageCounts(prompt.content)),
13588
14140
  };
13589
- return [2 /*return*/, $asDeeplyFrozenSerializableJson('AzureOpenAiExecutionTools CompletionPromptResult', {
13590
- content: resultContent,
13591
- modelName: modelName,
13592
- timing: {
13593
- start: start,
13594
- complete: complete,
14141
+ return [2 /*return*/, exportJson({
14142
+ name: 'promptResult',
14143
+ message: "Result of `AzureOpenAiExecutionTools.callCompletionModel`",
14144
+ order: [],
14145
+ value: {
14146
+ content: resultContent,
14147
+ modelName: modelName,
14148
+ timing: {
14149
+ start: start,
14150
+ complete: complete,
14151
+ },
14152
+ usage: usage,
14153
+ rawPromptContent: rawPromptContent,
14154
+ rawRequest: rawRequest,
14155
+ rawResponse: __assign(__assign({}, rawResponse), { created: rawResponse.created.toISOString() }),
14156
+ // <- [🗯]
13595
14157
  },
13596
- usage: usage,
13597
- rawPromptContent: rawPromptContent,
13598
- rawRequest: rawRequest,
13599
- rawResponse: __assign(__assign({}, rawResponse), { created: rawResponse.created.toISOString() }),
13600
- // <- [🗯]
13601
14158
  })];
13602
14159
  case 4:
13603
14160
  error_2 = _d.sent();
@@ -13812,7 +14369,7 @@ function createExecutionToolsFromVercelProvider(options) {
13812
14369
  return modelVariant === 'CHAT';
13813
14370
  })) === null || _a === void 0 ? void 0 : _a.modelName);
13814
14371
  if (!modelName) {
13815
- throw new PipelineExecutionError(spaceTrim("\n Can not determine which model to use.\n\n You need to provide at least one of:\n 1) In `createExecutionToolsFromVercelProvider` options, provide `availableModels` with at least one model\n 2) In `prompt.modelRequirements`, provide `modelName` with the name of the model to use\n \n "));
14372
+ throw new PipelineExecutionError(spaceTrim("\n Can not determine which model to use.\n\n You need to provide at least one of:\n 1) In `createExecutionToolsFromVercelProvider` options, provide `availableModels` with at least one model\n 2) In `prompt.modelRequirements`, provide `modelName` with the name of the model to use\n\n "));
13816
14373
  }
13817
14374
  return [4 /*yield*/, vercelProvider.chat(modelName, __assign({ user: (userId === null || userId === void 0 ? void 0 : userId.toString()) || undefined }, additionalChatSettings))];
13818
14375
  case 1:
@@ -13875,18 +14432,22 @@ function createExecutionToolsFromVercelProvider(options) {
13875
14432
  }
13876
14433
  complete = $getCurrentDate();
13877
14434
  usage = UNCERTAIN_USAGE;
13878
- return [2 /*return*/, $asDeeplyFrozenSerializableJson('createExecutionToolsFromVercelProvider ChatPromptResult', {
13879
- content: rawResponse.text,
13880
- modelName: modelName,
13881
- timing: {
13882
- start: start,
13883
- complete: complete,
14435
+ return [2 /*return*/, exportJson({
14436
+ name: 'promptResult',
14437
+ message: "Result of `createExecutionToolsFromVercelProvider.callChatModel`",
14438
+ value: {
14439
+ content: rawResponse.text,
14440
+ modelName: modelName,
14441
+ timing: {
14442
+ start: start,
14443
+ complete: complete,
14444
+ },
14445
+ usage: usage,
14446
+ rawPromptContent: rawPromptContent,
14447
+ rawRequest: rawRequest,
14448
+ rawResponse: asSerializable(rawResponse),
14449
+ // <- [🗯]
13884
14450
  },
13885
- usage: usage,
13886
- rawPromptContent: rawPromptContent,
13887
- rawRequest: rawRequest,
13888
- rawResponse: asSerializable(rawResponse),
13889
- // <- [🗯]
13890
14451
  })];
13891
14452
  }
13892
14453
  });
@@ -14253,18 +14814,23 @@ var OpenAiExecutionTools = /** @class */ (function () {
14253
14814
  if (resultContent === null) {
14254
14815
  throw new PipelineExecutionError('No response message from OpenAI');
14255
14816
  }
14256
- return [2 /*return*/, $asDeeplyFrozenSerializableJson('OpenAiExecutionTools ChatPromptResult', {
14257
- content: resultContent,
14258
- modelName: rawResponse.model || modelName,
14259
- timing: {
14260
- start: start,
14261
- complete: complete,
14817
+ return [2 /*return*/, exportJson({
14818
+ name: 'promptResult',
14819
+ message: "Result of `OpenAiExecutionTools.callChatModel`",
14820
+ order: [],
14821
+ value: {
14822
+ content: resultContent,
14823
+ modelName: rawResponse.model || modelName,
14824
+ timing: {
14825
+ start: start,
14826
+ complete: complete,
14827
+ },
14828
+ usage: usage,
14829
+ rawPromptContent: rawPromptContent,
14830
+ rawRequest: rawRequest,
14831
+ rawResponse: rawResponse,
14832
+ // <- [🗯]
14262
14833
  },
14263
- usage: usage,
14264
- rawPromptContent: rawPromptContent,
14265
- rawRequest: rawRequest,
14266
- rawResponse: rawResponse,
14267
- // <- [🗯]
14268
14834
  })];
14269
14835
  }
14270
14836
  });
@@ -14329,18 +14895,23 @@ var OpenAiExecutionTools = /** @class */ (function () {
14329
14895
  // eslint-disable-next-line prefer-const
14330
14896
  complete = $getCurrentDate();
14331
14897
  usage = computeOpenAiUsage(content || '', resultContent || '', rawResponse);
14332
- return [2 /*return*/, $asDeeplyFrozenSerializableJson('OpenAiExecutionTools CompletionPromptResult', {
14333
- content: resultContent,
14334
- modelName: rawResponse.model || modelName,
14335
- timing: {
14336
- start: start,
14337
- complete: complete,
14898
+ return [2 /*return*/, exportJson({
14899
+ name: 'promptResult',
14900
+ message: "Result of `OpenAiExecutionTools.callCompletionModel`",
14901
+ order: [],
14902
+ value: {
14903
+ content: resultContent,
14904
+ modelName: rawResponse.model || modelName,
14905
+ timing: {
14906
+ start: start,
14907
+ complete: complete,
14908
+ },
14909
+ usage: usage,
14910
+ rawPromptContent: rawPromptContent,
14911
+ rawRequest: rawRequest,
14912
+ rawResponse: rawResponse,
14913
+ // <- [🗯]
14338
14914
  },
14339
- usage: usage,
14340
- rawPromptContent: rawPromptContent,
14341
- rawRequest: rawRequest,
14342
- rawResponse: rawResponse,
14343
- // <- [🗯]
14344
14915
  })];
14345
14916
  }
14346
14917
  });
@@ -14397,18 +14968,23 @@ var OpenAiExecutionTools = /** @class */ (function () {
14397
14968
  usage = computeOpenAiUsage(content || '', '',
14398
14969
  // <- Note: Embedding does not have result content
14399
14970
  rawResponse);
14400
- return [2 /*return*/, $asDeeplyFrozenSerializableJson('OpenAiExecutionTools EmbeddingPromptResult', {
14401
- content: resultContent,
14402
- modelName: rawResponse.model || modelName,
14403
- timing: {
14404
- start: start,
14405
- complete: complete,
14971
+ return [2 /*return*/, exportJson({
14972
+ name: 'promptResult',
14973
+ message: "Result of `OpenAiExecutionTools.callEmbeddingModel`",
14974
+ order: [],
14975
+ value: {
14976
+ content: resultContent,
14977
+ modelName: rawResponse.model || modelName,
14978
+ timing: {
14979
+ start: start,
14980
+ complete: complete,
14981
+ },
14982
+ usage: usage,
14983
+ rawPromptContent: rawPromptContent,
14984
+ rawRequest: rawRequest,
14985
+ rawResponse: rawResponse,
14986
+ // <- [🗯]
14406
14987
  },
14407
- usage: usage,
14408
- rawPromptContent: rawPromptContent,
14409
- rawRequest: rawRequest,
14410
- rawResponse: rawResponse,
14411
- // <- [🗯]
14412
14988
  })];
14413
14989
  }
14414
14990
  });
@@ -14604,20 +15180,25 @@ var OpenAiAssistantExecutionTools = /** @class */ (function (_super) {
14604
15180
  if (resultContent === null) {
14605
15181
  throw new PipelineExecutionError('No response message from OpenAI');
14606
15182
  }
14607
- return [2 /*return*/, $asDeeplyFrozenSerializableJson('OpenAiAssistantExecutionTools ChatPromptResult', {
14608
- content: resultContent,
14609
- modelName: 'assistant',
14610
- // <- TODO: [🥘] Detect used model in assistant
14611
- // ?> model: rawResponse.model || modelName,
14612
- timing: {
14613
- start: start,
14614
- complete: complete,
15183
+ return [2 /*return*/, exportJson({
15184
+ name: 'promptResult',
15185
+ message: "Result of `OpenAiAssistantExecutionTools.callChatModel`",
15186
+ order: [],
15187
+ value: {
15188
+ content: resultContent,
15189
+ modelName: 'assistant',
15190
+ // <- TODO: [🥘] Detect used model in assistant
15191
+ // ?> model: rawResponse.model || modelName,
15192
+ timing: {
15193
+ start: start,
15194
+ complete: complete,
15195
+ },
15196
+ usage: usage,
15197
+ rawPromptContent: rawPromptContent,
15198
+ rawRequest: rawRequest,
15199
+ rawResponse: rawResponse,
15200
+ // <- [🗯]
14615
15201
  },
14616
- usage: usage,
14617
- rawPromptContent: rawPromptContent,
14618
- rawRequest: rawRequest,
14619
- rawResponse: rawResponse,
14620
- // <- [🗯]
14621
15202
  })];
14622
15203
  }
14623
15204
  });
@@ -14941,7 +15522,7 @@ var markdownScraperMetadata = $deepFreeze({
14941
15522
  documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/@@',
14942
15523
  isAvilableInBrowser: true,
14943
15524
  requiredExecutables: [],
14944
- }); /* <- TODO: [🤛] */
15525
+ }); /* <- Note: [🤛] */
14945
15526
  /**
14946
15527
  * Registration of known scraper metadata
14947
15528
  *
@@ -15138,7 +15719,7 @@ var documentScraperMetadata = $deepFreeze({
15138
15719
  documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/@@',
15139
15720
  isAvilableInBrowser: false,
15140
15721
  requiredExecutables: ['Pandoc'],
15141
- }); /* <- TODO: [🤛] */
15722
+ }); /* <- Note: [🤛] */
15142
15723
  /**
15143
15724
  * Registration of known scraper metadata
15144
15725
  *
@@ -15305,7 +15886,7 @@ var legacyDocumentScraperMetadata = $deepFreeze({
15305
15886
  'LibreOffice',
15306
15887
  // <- TODO: [🧠] Should be 'LibreOffice' here, its dependency of dependency
15307
15888
  ],
15308
- }); /* <- TODO: [🤛] */
15889
+ }); /* <- Note: [🤛] */
15309
15890
  /**
15310
15891
  * Registration of known scraper metadata
15311
15892
  *
@@ -15471,7 +16052,7 @@ var LegacyDocumentScraper = /** @class */ (function () {
15471
16052
  */
15472
16053
  var createLegacyDocumentScraper = Object.assign(function (tools, options) {
15473
16054
  return new LegacyDocumentScraper(tools, options);
15474
- }, legacyDocumentScraperMetadata); /* <- TODO: [🤛] */
16055
+ }, legacyDocumentScraperMetadata); /* <- Note: [🤛] */
15475
16056
  /**
15476
16057
  * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
15477
16058
  */
@@ -15497,7 +16078,7 @@ var _LegacyDocumentScraperRegistration = $scrapersRegister.register(createLegacy
15497
16078
  */
15498
16079
  var createDocumentScraper = Object.assign(function (tools, options) {
15499
16080
  return new DocumentScraper(tools, options);
15500
- }, documentScraperMetadata); /* <- TODO: [🤛] */
16081
+ }, documentScraperMetadata); /* <- Note: [🤛] */
15501
16082
  /**
15502
16083
  * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
15503
16084
  */
@@ -15523,7 +16104,7 @@ var _DocumentScraperRegistration = $scrapersRegister.register(createDocumentScra
15523
16104
  */
15524
16105
  var createMarkdownScraper = Object.assign(function (tools, options) {
15525
16106
  return new MarkdownScraper(tools, options);
15526
- }, markdownScraperMetadata); /* <- TODO: [🤛] */
16107
+ }, markdownScraperMetadata); /* <- Note: [🤛] */
15527
16108
  /**
15528
16109
  * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
15529
16110
  */
@@ -15555,7 +16136,7 @@ var pdfScraperMetadata = $deepFreeze({
15555
16136
  documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/@@',
15556
16137
  isAvilableInBrowser: true,
15557
16138
  requiredExecutables: [],
15558
- }); /* <- TODO: [🤛] */
16139
+ }); /* <- Note: [🤛] */
15559
16140
  /**
15560
16141
  * Registration of known scraper metadata
15561
16142
  *
@@ -15635,7 +16216,7 @@ var PdfScraper = /** @class */ (function () {
15635
16216
  */
15636
16217
  var createPdfScraper = Object.assign(function (tools, options) {
15637
16218
  return new PdfScraper(tools, options);
15638
- }, pdfScraperMetadata); /* <- TODO: [🤛] */
16219
+ }, pdfScraperMetadata); /* <- Note: [🤛] */
15639
16220
  /**
15640
16221
  * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
15641
16222
  */
@@ -15667,7 +16248,7 @@ var websiteScraperMetadata = $deepFreeze({
15667
16248
  documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/@@',
15668
16249
  isAvilableInBrowser: false,
15669
16250
  requiredExecutables: [],
15670
- }); /* <- TODO: [🤛] */
16251
+ }); /* <- Note: [🤛] */
15671
16252
  /**
15672
16253
  * Registration of known scraper metadata
15673
16254
  *
@@ -15836,7 +16417,7 @@ var WebsiteScraper = /** @class */ (function () {
15836
16417
  */
15837
16418
  var createWebsiteScraper = Object.assign(function (tools, options) {
15838
16419
  return new WebsiteScraper(tools, options);
15839
- }, websiteScraperMetadata); /* <- TODO: [🤛] */
16420
+ }, websiteScraperMetadata); /* <- Note: [🤛] */
15840
16421
  /**
15841
16422
  * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
15842
16423
  */