@promptbook/markdown-utils 0.81.0-5 → 0.81.0-6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/esm/index.es.js CHANGED
@@ -20,7 +20,7 @@ var BOOK_LANGUAGE_VERSION = '1.0.0';
  *
  * @see https://github.com/webgptorg/promptbook
  */
- var PROMPTBOOK_ENGINE_VERSION = '0.81.0-4';
+ var PROMPTBOOK_ENGINE_VERSION = '0.81.0-5';
  /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -356,7 +356,7 @@ function extractJsonBlock(markdown) {
  * TODO: [🏢] Make this logic part of `JsonFormatDefinition` or `isValidJsonString`
  */
 
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sourceFile:"./books/prepare-knowledge-from-markdown.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sourceFile:"./books/prepare-knowledge-keywords.book.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sourceFile:"./books/prepare-knowledge-title.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book.md",formfactorName:"GENERIC",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## 
Example\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sourceFile:"./books/prepare-persona.book.md"}];
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book.md`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book.md`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book.md"},{title:"Prepare 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book.md`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book.md",formfactorName:"GENERIC",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book.md`\n- INPUT PARAMETER `{availableModelNames}` List of available model names separated by comma (,)\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n\\`\\`\\`json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelRequirements}`\n"}],sourceFile:"./books/prepare-persona.book.md"}];
 
  /**
  * Prettify the html code
@@ -658,6 +658,26 @@ var DEFAULT_TITLE = "Untitled";
  * @private within the repository - too low-level in comparison with other `MAX_...`
  */
  var LOOP_LIMIT = 1000;
+ /**
+ * Strings to represent various values in the context of parameter values
+ *
+ * @public exported from `@promptbook/utils`
+ */
+ var VALUE_STRINGS = {
+     empty: '(nothing; empty string)',
+     null: '(no value; null)',
+     undefined: '(unknown value; undefined)',
+     nan: '(not a number; NaN)',
+     infinity: '(infinity; ∞)',
+     negativeInfinity: '(negative infinity; -∞)',
+     unserializable: '(unserializable value)',
+ };
+ /**
+ * Small number limit
+ *
+ * @public exported from `@promptbook/utils`
+ */
+ var SMALL_NUMBER = 0.001;
  /**
  * Short time interval to prevent race conditions in milliseconds
  *
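The constants added here are read naturally in later hunks: `SMALL_NUMBER` is used as a relative-error tolerance and `VALUE_STRINGS` supplies human-readable stand-ins for edge-case parameter values. A minimal sketch, illustration only and not code from the package:

// Illustration: SMALL_NUMBER as a relative-error tolerance, VALUE_STRINGS as display strings
var closeEnough = Math.abs(0.1235 - 0.123456789) / 0.123456789 < SMALL_NUMBER; // true
var shown = VALUE_STRINGS.null; // '(no value; null)'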
@@ -1001,6 +1021,7 @@ function exportJson(options) {
  * @public exported from `@promptbook/core`
  */
  var ORDER_OF_PIPELINE_JSON = [
+ // Note: [🍙] In this order will be pipeline serialized
  'title',
  'pipelineUrl',
  'bookVersion',
@@ -1012,6 +1033,7 @@ var ORDER_OF_PIPELINE_JSON = [
  'preparations',
  'knowledgeSources',
  'knowledgePieces',
+ 'sources', // <- TODO: [🧠] Where should the `sources` be
  ];
  /**
  * Nonce which is used for replacing things in strings
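Only the key list changes in these two hunks; the serializer itself (`exportJson`, named in the hunk header) is not shown in this diff. As a generic sketch of how an ordering array like this is typically applied, with a made-up pipeline object:

// Generic sketch, not the package's exportJson implementation
function orderKeys(pipeline, order) {
    var result = {};
    order.forEach(function (key) {
        if (key in pipeline) {
            result[key] = pipeline[key];
        }
    });
    return result;
}
JSON.stringify(orderKeys({ sources: [], title: 'Example' }, ORDER_OF_PIPELINE_JSON));
// -> '{"title":"Example","sources":[]}' ('title' leads; the newly added 'sources' serializes last)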
@@ -4395,9 +4417,87 @@ function mapAvailableToExpectedParameters(options) {
  return mappedParameters;
  }
 
+ /**
+ * Format either small or big number
+ *
+ * @public exported from `@promptbook/utils`
+ */
+ function numberToString(value) {
+     if (value === 0) {
+         return '0';
+     }
+     else if (Number.isNaN(value)) {
+         return VALUE_STRINGS.nan;
+     }
+     else if (value === Infinity) {
+         return VALUE_STRINGS.infinity;
+     }
+     else if (value === -Infinity) {
+         return VALUE_STRINGS.negativeInfinity;
+     }
+     for (var exponent = 0; exponent < 15; exponent++) {
+         var factor = Math.pow(10, exponent);
+         var valueRounded = Math.round(value * factor) / factor;
+         if (Math.abs(value - valueRounded) / value < SMALL_NUMBER) {
+             return valueRounded.toFixed(exponent);
+         }
+     }
+     return value.toString();
+ }
+
+ /**
+ * Function `valueToString` will convert the given value to string
+ * This is useful and used in the `templateParameters` function
+ *
+ * Note: This function is not just calling `toString` method
+ * It's more complex and can handle this conversion specifically for LLM models
+ * See `VALUE_STRINGS`
+ *
+ * Note: There are 2 similar functions
+ * - `valueToString` converts value to string for LLM models as human-readable string
+ * - `asSerializable` converts value to string to preserve full information to be able to convert it back
+ *
+ * @public exported from `@promptbook/utils`
+ */
+ function valueToString(value) {
+     try {
+         if (value === '') {
+             return VALUE_STRINGS.empty;
+         }
+         else if (value === null) {
+             return VALUE_STRINGS.null;
+         }
+         else if (value === undefined) {
+             return VALUE_STRINGS.undefined;
+         }
+         else if (typeof value === 'string') {
+             return value;
+         }
+         else if (typeof value === 'number') {
+             return numberToString(value);
+         }
+         else if (value instanceof Date) {
+             return value.toISOString();
+         }
+         else {
+             return JSON.stringify(value);
+         }
+     }
+     catch (error) {
+         if (!(error instanceof Error)) {
+             throw error;
+         }
+         console.error(error);
+         return VALUE_STRINGS.unserializable;
+     }
+ }
+
  /**
  * Replaces parameters in template with values from parameters object
  *
+ * Note: This function is not places strings into string,
+ * It's more complex and can handle this operation specifically for LLM models
+ *
  * @param template the template with parameters in {curly} braces
  * @param parameters the object with parameters
  * @returns the template with replaced parameters
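Taken together, the two helpers added in this hunk behave roughly as follows. The import path comes from the `@public exported from @promptbook/utils` annotations above, and the expected outputs are inferred from the implementation rather than from package documentation:

// Illustration only; outputs inferred from the code in this hunk
import { numberToString, valueToString } from '@promptbook/utils';

numberToString(0.123456789);      // '0.1235' (fewest decimals within the SMALL_NUMBER relative error)
numberToString(Infinity);         // '(infinity; ∞)'
valueToString('');                // '(nothing; empty string)'
valueToString(null);              // '(no value; null)'
valueToString(42);                // '42' via numberToString
valueToString(new Date(0));       // '1970-01-01T00:00:00.000Z'
valueToString({ a: 1 });          // '{"a":1}'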
@@ -4447,7 +4547,7 @@ function templateParameters(template, parameters) {
  if (parameterValue === undefined) {
  throw new PipelineExecutionError("Parameter `{".concat(parameterName, "}` is not defined"));
  }
- parameterValue = parameterValue.toString();
+ parameterValue = valueToString(parameterValue);
  if (parameterValue.includes('\n') && /^\s*\W{0,3}\s*$/.test(precol)) {
  parameterValue = parameterValue
  .split('\n')
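The practical effect of the one-line swap above, sketched with a hypothetical call (the parameter names are invented; only the {curly} placeholder syntax and the `templateParameters(template, parameters)` signature come from the diff):

// Hypothetical usage of templateParameters after this change
var rendered = templateParameters('Value: {amount}, note: {note}', { amount: 0.123456789, note: null });
// Before: parameterValue.toString() gave '0.123456789' and threw a TypeError for null
// After:  valueToString(...) gives '0.1235' and '(no value; null)'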
@@ -6044,26 +6144,6 @@ function addAutoGeneratedSection(content, options) {
  * TODO: [🏛] This can be part of markdown builder
  */
 
- /**
- * Format either small or big number
- *
- * @private within the repository
- */
- function formatNumber(value) {
-     if (value === 0) {
-         return '0';
-     }
-     for (var exponent = 0; exponent < 15; exponent++) {
-         var factor = Math.pow(10, exponent);
-         var valueRounded = Math.round(value * factor) / factor;
-         if (Math.abs(value - valueRounded) / value <
-             0.001 /* <- TODO: Pass as option, pass to executionReportJsonToString as option */) {
-             return valueRounded.toFixed(exponent);
-         }
-     }
-     return value.toString();
- }
-
  /**
  * Create a markdown table from a 2D array of strings
  *
@@ -6123,7 +6203,7 @@ function createMarkdownChart(options) {
  }
  finally { if (e_1) throw e_1.error; }
  }
- var legend = "_Note: Each \u2588 represents ".concat(formatNumber(1 / scale), " ").concat(unitName, ", width of ").concat(valueHeader.toLowerCase(), " is ").concat(formatNumber(to - from), " ").concat(unitName, " = ").concat(width, " squares_");
+ var legend = "_Note: Each \u2588 represents ".concat(numberToString(1 / scale), " ").concat(unitName, ", width of ").concat(valueHeader.toLowerCase(), " is ").concat(numberToString(to - from), " ").concat(unitName, " = ").concat(width, " squares_");
  return createMarkdownTable(table) + '\n\n' + legend;
  }
  /**
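The final hunk simply swaps the removed private `formatNumber` for the now-public `numberToString` in the chart legend. With hypothetical values (scale, unitName, valueHeader, from, to and width are internal to `createMarkdownChart` and invented here), the rendered legend would read roughly:

// Hypothetical values; only the legend template itself comes from the diff
var scale = 8, unitName = 'seconds', valueHeader = 'Timeline', from = 0, to = 1.5, width = 12;
var legend = "_Note: Each \u2588 represents ".concat(numberToString(1 / scale), " ").concat(unitName, ", width of ").concat(valueHeader.toLowerCase(), " is ").concat(numberToString(to - from), " ").concat(unitName, " = ").concat(width, " squares_");
// -> '_Note: Each █ represents 0.125 seconds, width of timeline is 1.5 seconds = 12 squares_'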