@promptbook/core 0.81.0-5 → 0.81.0-7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/esm/index.es.js CHANGED
@@ -15,15 +15,17 @@ import sha256 from 'crypto-js/sha256';
15
15
  /**
16
16
  * The version of the Book language
17
17
  *
18
+ * @generated
18
19
  * @see https://github.com/webgptorg/book
19
20
  */
20
21
  var BOOK_LANGUAGE_VERSION = '1.0.0';
21
22
  /**
22
23
  * The version of the Promptbook engine
23
24
  *
25
+ * @generated
24
26
  * @see https://github.com/webgptorg/promptbook
25
27
  */
26
- var PROMPTBOOK_ENGINE_VERSION = '0.81.0-4';
28
+ var PROMPTBOOK_ENGINE_VERSION = '0.81.0-6';
27
29
  /**
28
30
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
29
31
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -509,6 +511,26 @@ var DEFAULT_TITLE = "Untitled";
509
511
  * @private within the repository - too low-level in comparison with other `MAX_...`
510
512
  */
511
513
  var LOOP_LIMIT = 1000;
514
+ /**
515
+ * Strings to represent various values in the context of parameter values
516
+ *
517
+ * @public exported from `@promptbook/utils`
518
+ */
519
+ var VALUE_STRINGS = {
520
+ empty: '(nothing; empty string)',
521
+ null: '(no value; null)',
522
+ undefined: '(unknown value; undefined)',
523
+ nan: '(not a number; NaN)',
524
+ infinity: '(infinity; ∞)',
525
+ negativeInfinity: '(negative infinity; -∞)',
526
+ unserializable: '(unserializable value)',
527
+ };
528
+ /**
529
+ * Small number limit
530
+ *
531
+ * @public exported from `@promptbook/utils`
532
+ */
533
+ var SMALL_NUMBER = 0.001;
512
534
  /**
513
535
  * Short time interval to prevent race conditions in milliseconds
514
536
  *
@@ -638,6 +660,12 @@ function SET_IS_VERBOSE(isVerbose) {
638
660
  * @public exported from `@promptbook/core`
639
661
  */
640
662
  var DEFAULT_IS_AUTO_INSTALLED = false;
663
+ /**
664
+ * Function name for generated function via `ptbk make` to get the pipeline collection
665
+ *
666
+ * @public exported from `@promptbook/core`
667
+ */
668
+ var DEFAULT_GET_PIPELINE_COLLECTION_FUNCTION_NAME = "getPipelineCollection";
641
669
  /**
642
670
  * @@@
643
671
  *
@@ -924,6 +952,7 @@ function exportJson(options) {
924
952
  * @public exported from `@promptbook/core`
925
953
  */
926
954
  var ORDER_OF_PIPELINE_JSON = [
955
+ // Note: [🍙] In this order will be pipeline serialized
927
956
  'title',
928
957
  'pipelineUrl',
929
958
  'bookVersion',
@@ -935,6 +964,7 @@ var ORDER_OF_PIPELINE_JSON = [
935
964
  'preparations',
936
965
  'knowledgeSources',
937
966
  'knowledgePieces',
967
+ 'sources', // <- TODO: [🧠] Where should the `sources` be
938
968
  ];
939
969
  /**
940
970
  * Nonce which is used for replacing things in strings
@@ -2464,7 +2494,7 @@ function joinLlmExecutionTools() {
2464
2494
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
2465
2495
  */
2466
2496
 
2467
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sourceFile:"./books/prepare-knowledge-from-markdown.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> 
{knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sourceFile:"./books/prepare-knowledge-keywords.book.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sourceFile:"./books/prepare-knowledge-title.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book.md",formfactorName:"GENERIC",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON 
object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sourceFile:"./books/prepare-persona.book.md"}];
2497
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book.md`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> 
{knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book.md`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just 
title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book.md`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book.md",formfactorName:"GENERIC",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to 
use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book.md`\n- INPUT PARAMETER `{availableModelNames}` List of available model names separated by comma (,)\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n\\`\\`\\`json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. 
It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelRequirements}`\n"}],sourceFile:"./books/prepare-persona.book.md"}];
2468
2498
 
2469
2499
  /**
2470
2500
  * This error indicates problems parsing the format value
@@ -3443,9 +3473,87 @@ function arrayableToArray(input) {
3443
3473
  return [input];
3444
3474
  }
3445
3475
 
3476
+ /**
3477
+ * Format either small or big number
3478
+ *
3479
+ * @public exported from `@promptbook/utils`
3480
+ */
3481
+ function numberToString(value) {
3482
+ if (value === 0) {
3483
+ return '0';
3484
+ }
3485
+ else if (Number.isNaN(value)) {
3486
+ return VALUE_STRINGS.nan;
3487
+ }
3488
+ else if (value === Infinity) {
3489
+ return VALUE_STRINGS.infinity;
3490
+ }
3491
+ else if (value === -Infinity) {
3492
+ return VALUE_STRINGS.negativeInfinity;
3493
+ }
3494
+ for (var exponent = 0; exponent < 15; exponent++) {
3495
+ var factor = Math.pow(10, exponent);
3496
+ var valueRounded = Math.round(value * factor) / factor;
3497
+ if (Math.abs(value - valueRounded) / value < SMALL_NUMBER) {
3498
+ return valueRounded.toFixed(exponent);
3499
+ }
3500
+ }
3501
+ return value.toString();
3502
+ }
3503
+
3504
+ /**
3505
+ * Function `valueToString` will convert the given value to string
3506
+ * This is useful and used in the `templateParameters` function
3507
+ *
3508
+ * Note: This function is not just calling the `toString` method
3509
+ * It's more complex and can handle this conversion specifically for LLM models
3510
+ * See `VALUE_STRINGS`
3511
+ *
3512
+ * Note: There are 2 similar functions
3513
+ * - `valueToString` converts value to string for LLM models as human-readable string
3514
+ * - `asSerializable` converts value to string to preserve full information to be able to convert it back
3515
+ *
3516
+ * @public exported from `@promptbook/utils`
3517
+ */
3518
+ function valueToString(value) {
3519
+ try {
3520
+ if (value === '') {
3521
+ return VALUE_STRINGS.empty;
3522
+ }
3523
+ else if (value === null) {
3524
+ return VALUE_STRINGS.null;
3525
+ }
3526
+ else if (value === undefined) {
3527
+ return VALUE_STRINGS.undefined;
3528
+ }
3529
+ else if (typeof value === 'string') {
3530
+ return value;
3531
+ }
3532
+ else if (typeof value === 'number') {
3533
+ return numberToString(value);
3534
+ }
3535
+ else if (value instanceof Date) {
3536
+ return value.toISOString();
3537
+ }
3538
+ else {
3539
+ return JSON.stringify(value);
3540
+ }
3541
+ }
3542
+ catch (error) {
3543
+ if (!(error instanceof Error)) {
3544
+ throw error;
3545
+ }
3546
+ console.error(error);
3547
+ return VALUE_STRINGS.unserializable;
3548
+ }
3549
+ }
3550
+
3446
3551
  /**
3447
3552
  * Replaces parameters in template with values from parameters object
3448
3553
  *
3554
+ * Note: This function does not simply place strings into the string,
3555
+ * It's more complex and can handle this operation specifically for LLM models
3556
+ *
3449
3557
  * @param template the template with parameters in {curly} braces
3450
3558
  * @param parameters the object with parameters
3451
3559
  * @returns the template with replaced parameters
@@ -3495,7 +3603,7 @@ function templateParameters(template, parameters) {
3495
3603
  if (parameterValue === undefined) {
3496
3604
  throw new PipelineExecutionError("Parameter `{".concat(parameterName, "}` is not defined"));
3497
3605
  }
3498
- parameterValue = parameterValue.toString();
3606
+ parameterValue = valueToString(parameterValue);
3499
3607
  if (parameterValue.includes('\n') && /^\s*\W{0,3}\s*$/.test(precol)) {
3500
3608
  parameterValue = parameterValue
3501
3609
  .split('\n')
@@ -9001,6 +9109,14 @@ function precompilePipeline(pipelineString) {
9001
9109
  knowledgePieces: [],
9002
9110
  personas: [],
9003
9111
  preparations: [],
9112
+ sources: [
9113
+ {
9114
+ type: 'BOOK',
9115
+ path: null,
9116
+ // <- TODO: !!!!!! Pass here path of the file
9117
+ content: pipelineString,
9118
+ },
9119
+ ],
9004
9120
  };
9005
9121
  function getPipelineIdentification() {
9006
9122
  // Note: This is a 😐 implementation of [🚞]
@@ -9408,7 +9524,6 @@ function precompilePipeline(pipelineString) {
9408
9524
  $pipelineJson.formfactorName = 'GENERIC';
9409
9525
  }
9410
9526
  // =============================================================
9411
- // TODO: [🍙] Maybe do reorder of `$pipelineJson` here
9412
9527
  return exportJson({
9413
9528
  name: 'pipelineJson',
9414
9529
  message: "Result of `precompilePipeline`",
@@ -9667,26 +9782,6 @@ function embeddingVectorToString(embeddingVector) {
9667
9782
  return "[EmbeddingVector; ".concat(embeddingVector.length, " dimensions; length: ").concat(vectorLength.toFixed(2), "; ").concat(embeddingVector.slice(0, 3).join(', '), "...]");
9668
9783
  }
9669
9784
 
9670
- /**
9671
- * Format either small or big number
9672
- *
9673
- * @private within the repository
9674
- */
9675
- function formatNumber(value) {
9676
- if (value === 0) {
9677
- return '0';
9678
- }
9679
- for (var exponent = 0; exponent < 15; exponent++) {
9680
- var factor = Math.pow(10, exponent);
9681
- var valueRounded = Math.round(value * factor) / factor;
9682
- if (Math.abs(value - valueRounded) / value <
9683
- 0.001 /* <- TODO: Pass as option, pass to executionReportJsonToString as option */) {
9684
- return valueRounded.toFixed(exponent);
9685
- }
9686
- }
9687
- return value.toString();
9688
- }
9689
-
9690
9785
  /**
9691
9786
  * Create a markdown table from a 2D array of strings
9692
9787
  *
@@ -9746,7 +9841,7 @@ function createMarkdownChart(options) {
9746
9841
  }
9747
9842
  finally { if (e_1) throw e_1.error; }
9748
9843
  }
9749
- var legend = "_Note: Each \u2588 represents ".concat(formatNumber(1 / scale), " ").concat(unitName, ", width of ").concat(valueHeader.toLowerCase(), " is ").concat(formatNumber(to - from), " ").concat(unitName, " = ").concat(width, " squares_");
9844
+ var legend = "_Note: Each \u2588 represents ".concat(numberToString(1 / scale), " ").concat(unitName, ", width of ").concat(valueHeader.toLowerCase(), " is ").concat(numberToString(to - from), " ").concat(unitName, " = ").concat(width, " squares_");
9750
9845
  return createMarkdownTable(table) + '\n\n' + legend;
9751
9846
  }
9752
9847
  /**
@@ -9865,7 +9960,7 @@ function executionReportJsonToString(executionReportJson, options) {
9865
9960
  headerList.push("COMPLETED AT ".concat(moment(completedAt).format("YYYY-MM-DD HH:mm:ss")));
9866
9961
  headerList.push("TOTAL DURATION ".concat(duration.humanize(MOMENT_ARG_THRESHOLDS)));
9867
9962
  headerList.push("TOTAL LLM DURATION ".concat(llmDuration.humanize(MOMENT_ARG_THRESHOLDS)));
9868
- headerList.push("TOTAL COST $".concat(formatNumber(cost * (1 + taxRate))) +
9963
+ headerList.push("TOTAL COST $".concat(numberToString(cost * (1 + taxRate))) +
9869
9964
  (executionsWithKnownCost.length === executionReportJson.promptExecutions.length
9870
9965
  ? ''
9871
9966
  : " *(Some cost is unknown)*") +
@@ -9923,7 +10018,7 @@ function executionReportJsonToString(executionReportJson, options) {
9923
10018
  // > taskList.push(`STARTED AT ${moment(startedAt).calendar()}`);
9924
10019
  taskList.push("DURATION ".concat(duration.humanize(MOMENT_ARG_THRESHOLDS)));
9925
10020
  if (typeof ((_g = (_f = promptExecution.result) === null || _f === void 0 ? void 0 : _f.usage) === null || _g === void 0 ? void 0 : _g.price) === 'number') {
9926
- taskList.push("COST $".concat(formatNumber(promptExecution.result.usage.price * (1 + taxRate))) +
10021
+ taskList.push("COST $".concat(numberToString(promptExecution.result.usage.price * (1 + taxRate))) +
9927
10022
  (taxRate !== 0 ? " *(with tax ".concat(taxRate * 100, "%)*") : ''));
9928
10023
  }
9929
10024
  else {
@@ -10985,5 +11080,5 @@ var PrefixStorage = /** @class */ (function () {
10985
11080
  return PrefixStorage;
10986
11081
  }());
10987
11082
 
10988
- export { $llmToolsMetadataRegister, $llmToolsRegister, $scrapersMetadataRegister, $scrapersRegister, ADMIN_EMAIL, ADMIN_GITHUB_NAME, AbstractFormatError, BOOK_LANGUAGE_VERSION, BlackholeStorage, BoilerplateError, BoilerplateFormfactorDefinition, CLAIM, CallbackInterfaceTools, ChatbotFormfactorDefinition, CollectionError, CsvFormatDefinition, CsvFormatError, DEFAULT_BOOKS_DIRNAME, DEFAULT_CSV_SETTINGS, DEFAULT_EXECUTIONS_CACHE_DIRNAME, DEFAULT_INTERMEDIATE_FILES_STRATEGY, DEFAULT_IS_AUTO_INSTALLED, DEFAULT_IS_VERBOSE, DEFAULT_MAX_EXECUTION_ATTEMPTS, DEFAULT_MAX_KNOWLEDGE_SOURCES_SCRAPING_DEPTH, DEFAULT_MAX_KNOWLEDGE_SOURCES_SCRAPING_TOTAL, DEFAULT_MAX_PARALLEL_COUNT, DEFAULT_PIPELINE_COLLECTION_BASE_FILENAME, DEFAULT_REMOTE_URL, DEFAULT_REMOTE_URL_PATH, DEFAULT_SCRAPE_CACHE_DIRNAME, DEFAULT_TITLE, EXPECTATION_UNITS, EnvironmentMismatchError, ExecutionReportStringOptionsDefaults, ExpectError, FORMFACTOR_DEFINITIONS, GENERIC_PIPELINE_INTERFACE, GeneratorFormfactorDefinition, GenericFormfactorDefinition, KnowledgeScrapeError, LOGO_DARK_SRC, LOGO_LIGHT_SRC, LimitReachedError, MANDATORY_CSV_SETTINGS, MAX_FILENAME_LENGTH, MODEL_VARIANTS, MatcherFormfactorDefinition, MemoryStorage, MissingToolsError, MultipleLlmExecutionTools, NAME, NonTaskSectionTypes, NotFoundError, NotYetImplementedError, ORDER_OF_PIPELINE_JSON, PROMPTBOOK_ENGINE_VERSION, PROMPTBOOK_ERRORS, ParseError, PipelineExecutionError, PipelineLogicError, PipelineUrlError, PrefixStorage, RESERVED_PARAMETER_NAMES, SET_IS_VERBOSE, SectionTypes, SheetsFormfactorDefinition, TaskTypes, TextFormatDefinition, TranslatorFormfactorDefinition, UNCERTAIN_USAGE, UnexpectedError, ZERO_USAGE, _AnthropicClaudeMetadataRegistration, _AzureOpenAiMetadataRegistration, _DocumentScraperMetadataRegistration, _GoogleMetadataRegistration, _LegacyDocumentScraperMetadataRegistration, _MarkdownScraperMetadataRegistration, _OpenAiAssistantMetadataRegistration, _OpenAiMetadataRegistration, _PdfScraperMetadataRegistration, 
_WebsiteScraperMetadataRegistration, addUsage, assertsExecutionSuccessful, cacheLlmTools, collectionToJson, compilePipeline, countTotalUsage, createCollectionFromJson, createCollectionFromPromise, createCollectionFromUrl, createLlmToolsFromConfiguration, createPipelineExecutor, createSubcollection, embeddingVectorToString, executionReportJsonToString, extractParameterNamesFromTask, getPipelineInterface, isPassingExpectations, isPipelineImplementingInterface, isPipelineInterfacesEqual, isPipelinePrepared, joinLlmExecutionTools, limitTotalUsage, makeKnowledgeSourceHandler, pipelineJsonToString, precompilePipeline, prepareKnowledgePieces, preparePersona, preparePipeline, prepareTasks, prettifyPipelineString, unpreparePipeline, usageToHuman, usageToWorktime, validatePipeline };
11083
+ export { $llmToolsMetadataRegister, $llmToolsRegister, $scrapersMetadataRegister, $scrapersRegister, ADMIN_EMAIL, ADMIN_GITHUB_NAME, AbstractFormatError, BOOK_LANGUAGE_VERSION, BlackholeStorage, BoilerplateError, BoilerplateFormfactorDefinition, CLAIM, CallbackInterfaceTools, ChatbotFormfactorDefinition, CollectionError, CsvFormatDefinition, CsvFormatError, DEFAULT_BOOKS_DIRNAME, DEFAULT_CSV_SETTINGS, DEFAULT_EXECUTIONS_CACHE_DIRNAME, DEFAULT_GET_PIPELINE_COLLECTION_FUNCTION_NAME, DEFAULT_INTERMEDIATE_FILES_STRATEGY, DEFAULT_IS_AUTO_INSTALLED, DEFAULT_IS_VERBOSE, DEFAULT_MAX_EXECUTION_ATTEMPTS, DEFAULT_MAX_KNOWLEDGE_SOURCES_SCRAPING_DEPTH, DEFAULT_MAX_KNOWLEDGE_SOURCES_SCRAPING_TOTAL, DEFAULT_MAX_PARALLEL_COUNT, DEFAULT_PIPELINE_COLLECTION_BASE_FILENAME, DEFAULT_REMOTE_URL, DEFAULT_REMOTE_URL_PATH, DEFAULT_SCRAPE_CACHE_DIRNAME, DEFAULT_TITLE, EXPECTATION_UNITS, EnvironmentMismatchError, ExecutionReportStringOptionsDefaults, ExpectError, FORMFACTOR_DEFINITIONS, GENERIC_PIPELINE_INTERFACE, GeneratorFormfactorDefinition, GenericFormfactorDefinition, KnowledgeScrapeError, LOGO_DARK_SRC, LOGO_LIGHT_SRC, LimitReachedError, MANDATORY_CSV_SETTINGS, MAX_FILENAME_LENGTH, MODEL_VARIANTS, MatcherFormfactorDefinition, MemoryStorage, MissingToolsError, MultipleLlmExecutionTools, NAME, NonTaskSectionTypes, NotFoundError, NotYetImplementedError, ORDER_OF_PIPELINE_JSON, PROMPTBOOK_ENGINE_VERSION, PROMPTBOOK_ERRORS, ParseError, PipelineExecutionError, PipelineLogicError, PipelineUrlError, PrefixStorage, RESERVED_PARAMETER_NAMES, SET_IS_VERBOSE, SectionTypes, SheetsFormfactorDefinition, TaskTypes, TextFormatDefinition, TranslatorFormfactorDefinition, UNCERTAIN_USAGE, UnexpectedError, ZERO_USAGE, _AnthropicClaudeMetadataRegistration, _AzureOpenAiMetadataRegistration, _DocumentScraperMetadataRegistration, _GoogleMetadataRegistration, _LegacyDocumentScraperMetadataRegistration, _MarkdownScraperMetadataRegistration, _OpenAiAssistantMetadataRegistration, _OpenAiMetadataRegistration, 
_PdfScraperMetadataRegistration, _WebsiteScraperMetadataRegistration, addUsage, assertsExecutionSuccessful, cacheLlmTools, collectionToJson, compilePipeline, countTotalUsage, createCollectionFromJson, createCollectionFromPromise, createCollectionFromUrl, createLlmToolsFromConfiguration, createPipelineExecutor, createSubcollection, embeddingVectorToString, executionReportJsonToString, extractParameterNamesFromTask, getPipelineInterface, isPassingExpectations, isPipelineImplementingInterface, isPipelineInterfacesEqual, isPipelinePrepared, joinLlmExecutionTools, limitTotalUsage, makeKnowledgeSourceHandler, pipelineJsonToString, precompilePipeline, prepareKnowledgePieces, preparePersona, preparePipeline, prepareTasks, prettifyPipelineString, unpreparePipeline, usageToHuman, usageToWorktime, validatePipeline };
10989
11084
  //# sourceMappingURL=index.es.js.map