@promptbook/cli 0.80.0-0 → 0.80.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -23,10 +23,6 @@
 
 
 
- <blockquote style="color: #ff8811">
- <b>⚠ Warning:</b> This is a pre-release version of the library. It is not yet ready for production use. Please look at <a href="https://www.npmjs.com/package/@promptbook/core?activeTab=versions">latest stable release</a>.
- </blockquote>
-
  ## 📦 Package `@promptbook/cli`
 
  - Promptbooks are [divided into several](#-packages) packages, all are published from [single monorepo](https://github.com/webgptorg/promptbook).
package/esm/index.es.js CHANGED
@@ -37,7 +37,7 @@ var BOOK_LANGUAGE_VERSION = '1.0.0';
  *
  * @see https://github.com/webgptorg/promptbook
  */
- var PROMPTBOOK_ENGINE_VERSION = '0.79.0';
+ var PROMPTBOOK_ENGINE_VERSION = '0.80.0-1';
  /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -2652,6 +2652,100 @@ function collectionToJson(collection) {
  * TODO: [🧠] Maybe clear `sourceFile` or clear when exposing through API or remote server
  */
 
+ /**
+ * This error type indicates that some tools are missing for pipeline execution or preparation
+ *
+ * @public exported from `@promptbook/core`
+ */
+ var MissingToolsError = /** @class */ (function (_super) {
+ __extends(MissingToolsError, _super);
+ function MissingToolsError(message) {
+ var _this = _super.call(this, spaceTrim$1(function (block) { return "\n ".concat(block(message), "\n\n Note: You have probbably forgot to provide some tools for pipeline execution or preparation\n\n "); })) || this;
+ _this.name = 'MissingToolsError';
+ Object.setPrototypeOf(_this, MissingToolsError.prototype);
+ return _this;
+ }
+ return MissingToolsError;
+ }(Error));
+
+ /**
+ * Async version of Array.forEach
+ *
+ * @param array - Array to iterate over
+ * @param options - Options for the function
+ * @param callbackfunction - Function to call for each item
+ * @public exported from `@promptbook/utils`
+ * @deprecated [🪂] Use queues instead
+ */
+ function forEachAsync(array, options, callbackfunction) {
+ return __awaiter(this, void 0, void 0, function () {
+ var _a, maxParallelCount, index, runningTasks, tasks, _loop_1, _b, _c, item, e_1_1;
+ var e_1, _d;
+ return __generator(this, function (_e) {
+ switch (_e.label) {
+ case 0:
+ _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? Infinity : _a;
+ index = 0;
+ runningTasks = [];
+ tasks = [];
+ _loop_1 = function (item) {
+ var currentIndex, task;
+ return __generator(this, function (_f) {
+ switch (_f.label) {
+ case 0:
+ currentIndex = index++;
+ task = callbackfunction(item, currentIndex, array);
+ tasks.push(task);
+ runningTasks.push(task);
+ /* not await */ Promise.resolve(task).then(function () {
+ runningTasks = runningTasks.filter(function (t) { return t !== task; });
+ });
+ if (!(maxParallelCount < runningTasks.length)) return [3 /*break*/, 2];
+ return [4 /*yield*/, Promise.race(runningTasks)];
+ case 1:
+ _f.sent();
+ _f.label = 2;
+ case 2: return [2 /*return*/];
+ }
+ });
+ };
+ _e.label = 1;
+ case 1:
+ _e.trys.push([1, 6, 7, 8]);
+ _b = __values(array), _c = _b.next();
+ _e.label = 2;
+ case 2:
+ if (!!_c.done) return [3 /*break*/, 5];
+ item = _c.value;
+ return [5 /*yield**/, _loop_1(item)];
+ case 3:
+ _e.sent();
+ _e.label = 4;
+ case 4:
+ _c = _b.next();
+ return [3 /*break*/, 2];
+ case 5: return [3 /*break*/, 8];
+ case 6:
+ e_1_1 = _e.sent();
+ e_1 = { error: e_1_1 };
+ return [3 /*break*/, 8];
+ case 7:
+ try {
+ if (_c && !_c.done && (_d = _b.return)) _d.call(_b);
+ }
+ finally { if (e_1) throw e_1.error; }
+ return [7 /*endfinally*/];
+ case 8: return [4 /*yield*/, Promise.all(tasks)];
+ case 9:
+ _e.sent();
+ return [2 /*return*/];
+ }
+ });
+ });
+ }
+
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sourceFile:"./books/prepare-knowledge-from-markdown.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sourceFile:"./books/prepare-knowledge-keywords.book.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sourceFile:"./books/prepare-knowledge-title.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book.md",formfactorName:"GENERIC",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sourceFile:"./books/prepare-persona.book.md"}];
+
  /**
  * Prettify the html code
  *
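The hunk above adds `MissingToolsError`, the deprecated `forEachAsync` helper and the inlined `PipelineCollection` at an earlier position in the bundle (the next hunk removes the same definitions from their previous location, so this is a move rather than new code). For orientation, here is a minimal sketch of calling `forEachAsync` with the signature documented above; it assumes the helper is imported from `@promptbook/utils` as its `@public` annotation states, and the URLs and callback body are placeholders, not part of the package.

```js
// Minimal sketch (not from the package): iterate with a bounded number of
// concurrent callbacks; `maxParallelCount` defaults to Infinity when omitted.
// Note that `forEachAsync` is marked @deprecated in favour of queues.
import { forEachAsync } from '@promptbook/utils';

const urls = ['https://example.com/a', 'https://example.com/b', 'https://example.com/c']; // placeholders

await forEachAsync(urls, { maxParallelCount: 2 }, async (url, index) => {
    const response = await fetch(url);
    console.log(index, url, response.status);
});
```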
@@ -2902,100 +2996,6 @@ function taskParameterJsonToString(taskParameterJson) {
  * TODO: [🧠] Should be in generated .book.md file GENERATOR_WARNING
  */
 
- /**
- * This error type indicates that some tools are missing for pipeline execution or preparation
- *
- * @public exported from `@promptbook/core`
- */
- var MissingToolsError = /** @class */ (function (_super) {
- __extends(MissingToolsError, _super);
- function MissingToolsError(message) {
- var _this = _super.call(this, spaceTrim$1(function (block) { return "\n ".concat(block(message), "\n\n Note: You have probbably forgot to provide some tools for pipeline execution or preparation\n\n "); })) || this;
- _this.name = 'MissingToolsError';
- Object.setPrototypeOf(_this, MissingToolsError.prototype);
- return _this;
- }
- return MissingToolsError;
- }(Error));
-
- /**
- * Async version of Array.forEach
- *
- * @param array - Array to iterate over
- * @param options - Options for the function
- * @param callbackfunction - Function to call for each item
- * @public exported from `@promptbook/utils`
- * @deprecated [🪂] Use queues instead
- */
- function forEachAsync(array, options, callbackfunction) {
- return __awaiter(this, void 0, void 0, function () {
- var _a, maxParallelCount, index, runningTasks, tasks, _loop_1, _b, _c, item, e_1_1;
- var e_1, _d;
- return __generator(this, function (_e) {
- switch (_e.label) {
- case 0:
- _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? Infinity : _a;
- index = 0;
- runningTasks = [];
- tasks = [];
- _loop_1 = function (item) {
- var currentIndex, task;
- return __generator(this, function (_f) {
- switch (_f.label) {
- case 0:
- currentIndex = index++;
- task = callbackfunction(item, currentIndex, array);
- tasks.push(task);
- runningTasks.push(task);
- /* not await */ Promise.resolve(task).then(function () {
- runningTasks = runningTasks.filter(function (t) { return t !== task; });
- });
- if (!(maxParallelCount < runningTasks.length)) return [3 /*break*/, 2];
- return [4 /*yield*/, Promise.race(runningTasks)];
- case 1:
- _f.sent();
- _f.label = 2;
- case 2: return [2 /*return*/];
- }
- });
- };
- _e.label = 1;
- case 1:
- _e.trys.push([1, 6, 7, 8]);
- _b = __values(array), _c = _b.next();
- _e.label = 2;
- case 2:
- if (!!_c.done) return [3 /*break*/, 5];
- item = _c.value;
- return [5 /*yield**/, _loop_1(item)];
- case 3:
- _e.sent();
- _e.label = 4;
- case 4:
- _c = _b.next();
- return [3 /*break*/, 2];
- case 5: return [3 /*break*/, 8];
- case 6:
- e_1_1 = _e.sent();
- e_1 = { error: e_1_1 };
- return [3 /*break*/, 8];
- case 7:
- try {
- if (_c && !_c.done && (_d = _b.return)) _d.call(_b);
- }
- finally { if (e_1) throw e_1.error; }
- return [7 /*endfinally*/];
- case 8: return [4 /*yield*/, Promise.all(tasks)];
- case 9:
- _e.sent();
- return [2 /*return*/];
- }
- });
- });
- }
-
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sourceFile:"./books/prepare-knowledge-from-markdown.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sourceFile:"./books/prepare-knowledge-keywords.book.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sourceFile:"./books/prepare-knowledge-title.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book.md",formfactorName:"GENERIC",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sourceFile:"./books/prepare-persona.book.md"}];
-
  /**
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
  *
@@ -3503,6 +3503,7 @@ function extractParameterNames(template) {
  /**
  * Unprepare just strips the preparation data of the pipeline
  *
+ * @deprecated In future version this function will be removed or deprecated
  * @public exported from `@promptbook/core`
  */
  function unpreparePipeline(pipeline) {
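Per the doc comment above, `unpreparePipeline` strips the preparation data from an already compiled pipeline, and this release only adds the `@deprecated` notice. A hedged one-liner, assuming the `@promptbook/core` export stated in its `@public` annotation; `preparedPipelineJson` is a placeholder for a pipeline produced elsewhere:

```js
// Sketch only: drop preparation data from a compiled pipeline JSON.
// `preparedPipelineJson` is a placeholder, e.g. the result of `compilePipeline`.
import { unpreparePipeline } from '@promptbook/core';

const strippedPipelineJson = unpreparePipeline(preparedPipelineJson);
```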
@@ -8282,7 +8283,7 @@ var parameterCommandParser = {
  * Note: `$` is used to indicate that this function mutates given `pipelineJson`
  */
  $applyToPipelineJson: function (command, $pipelineJson) {
- // Note: [🍣] Do nothing, its application is implemented separately in `pipelineStringToJsonSync`
+ // Note: [🍣] Do nothing, its application is implemented separately in `precompilePipeline`
  },
  /**
  * Apply the PARAMETER command to the `pipelineJson`
@@ -8290,7 +8291,7 @@ var parameterCommandParser = {
  * Note: `$` is used to indicate that this function mutates given `taskJson`
  */
  $applyToTaskJson: function (command, $taskJson, $pipelineJson) {
- // Note: [🍣] Do nothing, its application is implemented separately in `pipelineStringToJsonSync`
+ // Note: [🍣] Do nothing, its application is implemented separately in `precompilePipeline`
  },
  /**
  * Converts the PARAMETER command back to string
@@ -8798,7 +8799,7 @@ var COMMANDS = [
  * @returns the parser for the command
  * @throws {UnexpectedError} if the parser is not found
  *
- * @private within the pipelineStringToJson
+ * @private within the compilePipeline
  */
  function getParserForCommand(command) {
  var commandParser = COMMANDS.find(function (commandParser) { return commandParser.name === command.type; });
@@ -8834,7 +8835,7 @@ function removeMarkdownFormatting(str) {
  * @returns parsed command object
  * @throws {ParseError} if the command is invalid
  *
- * @private within the pipelineStringToJson
+ * @private within the compilePipeline
  */
  function parseCommand(raw, usagePlace) {
  if (raw.includes('\n') || raw.includes('\r')) {
@@ -9249,7 +9250,7 @@ var QuickChatbotHla = {
  /**
  * All high-level abstractions
  *
- * @private internal index of `pipelineStringToJsonSync` (= used for sync) and `preparePipeline` (= used for async)
+ * @private internal index of `precompilePipeline` (= used for sync) and `preparePipeline` (= used for async)
  */
  var HIGH_LEVEL_ABSTRACTIONS = [
  ImplicitFormfactorHla,
@@ -9499,8 +9500,8 @@ function removeContentComments(content) {
  * Compile pipeline from string (markdown) format to JSON format synchronously
  *
  * Note: There are 3 similar functions:
- * - `pipelineStringToJson` **(preferred)** - which propperly compiles the promptbook and use embedding for external knowledge
- * - `pipelineStringToJsonSync` - use only if you need to compile promptbook synchronously and it contains NO external knowledge
+ * - `compilePipeline` **(preferred)** - which propperly compiles the promptbook and use embedding for external knowledge
+ * - `precompilePipeline` - use only if you need to compile promptbook synchronously and it contains NO external knowledge
  * - `preparePipeline` - just one step in the compilation process
  *
  * Note: This function does not validate logic of the pipeline only the parsing
@@ -9511,7 +9512,7 @@ function removeContentComments(content) {
  * @throws {ParseError} if the promptbook string is not valid
  * @public exported from `@promptbook/core`
  */
- function pipelineStringToJsonSync(pipelineString) {
+ function precompilePipeline(pipelineString) {
  var e_1, _a, e_2, _b, e_3, _c, e_4, _d, e_5, _e, e_6, _f;
  var $pipelineJson = {
  title: DEFAULT_TITLE,
@@ -9931,7 +9932,7 @@ function pipelineStringToJsonSync(pipelineString) {
  // TODO: [🍙] Maybe do reorder of `$pipelineJson` here
  return exportJson({
  name: 'pipelineJson',
- message: "Result of `pipelineStringToJsonSync`",
+ message: "Result of `precompilePipeline`",
  order: ORDER_OF_PIPELINE_JSON,
  value: __assign({ formfactorName: 'GENERIC' }, $pipelineJson),
  });
@@ -9952,8 +9953,8 @@ function pipelineStringToJsonSync(pipelineString) {
  * Compile pipeline from string (markdown) format to JSON format
  *
  * Note: There are 3 similar functions:
- * - `pipelineStringToJson` **(preferred)** - which propperly compiles the promptbook and use embedding for external knowledge
- * - `pipelineStringToJsonSync` - use only if you need to compile promptbook synchronously and it contains NO external knowledge
+ * - `compilePipeline` **(preferred)** - which propperly compiles the promptbook and use embedding for external knowledge
+ * - `precompilePipeline` - use only if you need to compile promptbook synchronously and it contains NO external knowledge
  * - `preparePipeline` - just one step in the compilation process
  *
  * Note: This function does not validate logic of the pipeline only the parsing
@@ -9966,13 +9967,13 @@ function pipelineStringToJsonSync(pipelineString) {
  * @throws {ParseError} if the promptbook string is not valid
  * @public exported from `@promptbook/core`
  */
- function pipelineStringToJson(pipelineString, tools, options) {
+ function compilePipeline(pipelineString, tools, options) {
  return __awaiter(this, void 0, void 0, function () {
  var pipelineJson;
  return __generator(this, function (_a) {
  switch (_a.label) {
  case 0:
- pipelineJson = pipelineStringToJsonSync(pipelineString);
+ pipelineJson = precompilePipeline(pipelineString);
  if (!(tools !== undefined && tools.llm !== undefined)) return [3 /*break*/, 2];
  return [4 /*yield*/, preparePipeline(pipelineJson, tools, options || {
  rootDirname: null,
@@ -9981,7 +9982,7 @@ function pipelineStringToJson(pipelineString, tools, options) {
  pipelineJson = _a.sent();
  _a.label = 2;
  case 2:
- // Note: No need to use `$exportJson` because `pipelineStringToJsonSync` and `preparePipeline` already do that
+ // Note: No need to use `$exportJson` because `precompilePipeline` and `preparePipeline` already do that
  return [2 /*return*/, pipelineJson];
  }
  });
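The hunks above complete the rename of the public compile entry points: `pipelineStringToJson` becomes `compilePipeline` and `pipelineStringToJsonSync` becomes `precompilePipeline`, matching the updated doc comments. A migration sketch under those assumptions; both names are taken to be exported from `@promptbook/core` as their `@public` annotations say, and `pipelineSource` and `tools` are placeholders for your own book source and execution tools:

```js
// Sketch only, not taken from the package documentation.
import { compilePipeline, precompilePipeline } from '@promptbook/core';

// Preferred async compile; when `tools.llm` is provided, `preparePipeline` also runs.
const pipelineJson = await compilePipeline(pipelineSource, tools);
// Previously: await pipelineStringToJson(pipelineSource, tools);

// Sync variant, only for pipelines with NO external knowledge.
const draftPipelineJson = precompilePipeline(pipelineSource);
// Previously: pipelineStringToJsonSync(pipelineSource);
```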
@@ -11108,7 +11109,7 @@ function createCollectionFromDirectory(path, tools, options) {
  return [4 /*yield*/, readFile(fileName, 'utf-8')];
  case 2:
  pipelineString = (_e.sent());
- return [4 /*yield*/, pipelineStringToJson(pipelineString, tools, {
+ return [4 /*yield*/, compilePipeline(pipelineString, tools, {
  rootDirname: rootDirname,
  })];
  case 3:
@@ -11605,7 +11606,7 @@ function prettifyPipelineString(pipelineString, options) {
  case 0:
  isGraphAdded = options.isGraphAdded, isPrettifyed = options.isPrettifyed;
  if (!isGraphAdded) return [3 /*break*/, 2];
- return [4 /*yield*/, pipelineStringToJson(pipelineString)];
+ return [4 /*yield*/, compilePipeline(pipelineString)];
  case 1:
  pipelineJson = _a.sent();
  promptbookMermaid_1 = renderPromptbookMermaid(pipelineJson, {
@@ -12299,7 +12300,7 @@ function initializeRunCommand(program) {
  _m.label = 12;
  case 12:
  _m.trys.push([12, 14, , 15]);
- return [4 /*yield*/, pipelineStringToJson(pipelineString, tools)];
+ return [4 /*yield*/, compilePipeline(pipelineString, tools)];
  case 13:
  pipeline = _m.sent();
  return [3 /*break*/, 15];
@@ -12563,7 +12564,7 @@ function initializeTestCommand(program) {
  return [4 /*yield*/, readFile(filename, 'utf-8')];
  case 7:
  pipelineMarkdown = (_f.sent());
- return [4 /*yield*/, pipelineStringToJson(pipelineMarkdown, tools)];
+ return [4 /*yield*/, compilePipeline(pipelineMarkdown, tools)];
  case 8:
  pipeline = _f.sent();
  if (isVerbose) {