@promptbook/markdown-utils 0.80.0 → 0.81.0-6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. package/README.md +6 -0
  2. package/esm/index.es.js +141 -36
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/books/index.d.ts +15 -0
  5. package/esm/typings/src/_packages/core.index.d.ts +2 -6
  6. package/esm/typings/src/_packages/editable.index.d.ts +10 -0
  7. package/esm/typings/src/_packages/templates.index.d.ts +4 -0
  8. package/esm/typings/src/_packages/types.index.d.ts +4 -0
  9. package/esm/typings/src/_packages/utils.index.d.ts +10 -2
  10. package/esm/typings/src/config.d.ts +26 -0
  11. package/esm/typings/src/execution/ExecutionTools.d.ts +7 -0
  12. package/esm/typings/src/execution/PromptbookFetch.d.ts +5 -0
  13. package/esm/typings/src/execution/PromptbookFetch.test-type.d.ts +5 -0
  14. package/esm/typings/src/expectations/drafts/isDomainNameFree.d.ts +2 -1
  15. package/esm/typings/src/expectations/drafts/isGithubNameFree.d.ts +2 -1
  16. package/esm/typings/src/high-level-abstractions/index.d.ts +10 -0
  17. package/esm/typings/src/other/templates/getBookTemplate.d.ts +12 -0
  18. package/esm/typings/src/other/templates/getTemplatesPipelineCollection.d.ts +10 -0
  19. package/esm/typings/src/pipeline/PipelineJson/PipelineJson.d.ts +10 -0
  20. package/esm/typings/src/scrapers/_common/utils/makeKnowledgeSourceHandler.d.ts +1 -1
  21. package/esm/typings/src/scrapers/_common/utils/scraperFetch.d.ts +7 -0
  22. package/esm/typings/src/utils/editable/types/PipelineEditableSerialized.d.ts +27 -0
  23. package/esm/typings/src/{conversion → utils/editable}/utils/removePipelineCommand.d.ts +3 -3
  24. package/esm/typings/src/{conversion → utils/editable}/utils/renamePipelineParameter.d.ts +3 -3
  25. package/esm/typings/src/{conversion → utils/editable}/utils/stringifyPipelineJson.d.ts +2 -2
  26. package/esm/typings/src/utils/parameters/numberToString.d.ts +7 -0
  27. package/esm/typings/src/utils/parameters/{replaceParameters.d.ts → templateParameters.d.ts} +6 -2
  28. package/esm/typings/src/utils/parameters/valueToString.d.ts +17 -0
  29. package/esm/typings/src/utils/parameters/valueToString.test.d.ts +1 -0
  30. package/esm/typings/src/utils/serialization/asSerializable.d.ts +4 -0
  31. package/package.json +1 -1
  32. package/umd/index.umd.js +141 -36
  33. package/umd/index.umd.js.map +1 -1
  34. package/esm/typings/src/utils/formatNumber.d.ts +0 -6
  35. /package/esm/typings/src/{conversion → utils/editable}/utils/removePipelineCommand.test.d.ts +0 -0
  36. /package/esm/typings/src/{conversion → utils/editable}/utils/renamePipelineParameter.test.d.ts +0 -0
  37. /package/esm/typings/src/{conversion → utils/editable}/utils/stringifyPipelineJson.test.d.ts +0 -0
  38. /package/esm/typings/src/utils/{formatNumber.test.d.ts → parameters/numberToString.test.d.ts} +0 -0
  39. /package/esm/typings/src/utils/parameters/{replaceParameters.test.d.ts → templateParameters.test.d.ts} +0 -0
package/umd/index.umd.js CHANGED
@@ -22,7 +22,7 @@
  *
  * @see https://github.com/webgptorg/promptbook
  */
- var PROMPTBOOK_ENGINE_VERSION = '0.80.0-1';
+ var PROMPTBOOK_ENGINE_VERSION = '0.81.0-5';
  /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -358,7 +358,7 @@
  * TODO: [🏢] Make this logic part of `JsonFormatDefinition` or `isValidJsonString`
  */

- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sourceFile:"./books/prepare-knowledge-from-markdown.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sourceFile:"./books/prepare-knowledge-keywords.book.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sourceFile:"./books/prepare-knowledge-title.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book.md",formfactorName:"GENERIC",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## 
Example\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sourceFile:"./books/prepare-persona.book.md"}];
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book.md`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book.md`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book.md"},{title:"Prepare 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book.md`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book.md",formfactorName:"GENERIC",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book.md`\n- INPUT PARAMETER `{availableModelNames}` List of available model names separated by comma (,)\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n\\`\\`\\`json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelRequirements}`\n"}],sourceFile:"./books/prepare-persona.book.md"}];

  /**
  * Prettify the html code
@@ -660,6 +660,26 @@
  * @private within the repository - too low-level in comparison with other `MAX_...`
  */
  var LOOP_LIMIT = 1000;
+ /**
+ * Strings to represent various values in the context of parameter values
+ *
+ * @public exported from `@promptbook/utils`
+ */
+ var VALUE_STRINGS = {
+ empty: '(nothing; empty string)',
+ null: '(no value; null)',
+ undefined: '(unknown value; undefined)',
+ nan: '(not a number; NaN)',
+ infinity: '(infinity; ∞)',
+ negativeInfinity: '(negative infinity; -∞)',
+ unserializable: '(unserializable value)',
+ };
+ /**
+ * Small number limit
+ *
+ * @public exported from `@promptbook/utils`
+ */
+ var SMALL_NUMBER = 0.001;
  /**
  * Short time interval to prevent race conditions in milliseconds
  *
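The new `VALUE_STRINGS` constants are the placeholder strings that parameter rendering falls back to when a value has no sensible textual form, and `SMALL_NUMBER` is the relative-error tolerance used by the new `numberToString` helper further below. A minimal sketch of the consumer-side view, assuming both constants are re-exported from `@promptbook/utils` as the JSDoc annotations state:

```ts
// Sketch only; assumes VALUE_STRINGS and SMALL_NUMBER are importable from @promptbook/utils
import { SMALL_NUMBER, VALUE_STRINGS } from '@promptbook/utils';

// Placeholder text used when a parameter value cannot be rendered directly
console.log(VALUE_STRINGS.null);      // '(no value; null)'
console.log(VALUE_STRINGS.undefined); // '(unknown value; undefined)'
console.log(VALUE_STRINGS.nan);       // '(not a number; NaN)'

// Relative-error tolerance for rounding in numberToString (see the hunk further below)
console.log(SMALL_NUMBER); // 0.001
```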
@@ -1003,6 +1023,7 @@
  * @public exported from `@promptbook/core`
  */
  var ORDER_OF_PIPELINE_JSON = [
+ // Note: [🍙] In this order will be pipeline serialized
  'title',
  'pipelineUrl',
  'bookVersion',
@@ -1014,6 +1035,7 @@
  'preparations',
  'knowledgeSources',
  'knowledgePieces',
+ 'sources', // <- TODO: [🧠] Where should the `sources` be
  ];
  /**
  * Nonce which is used for replacing things in strings
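The `sources` entry added to `ORDER_OF_PIPELINE_JSON` means a pipeline's original book sources are now kept in the serialized JSON, ordered right after `knowledgePieces` (the new `sources` arrays are visible in the `PipelineCollection` hunk above). A rough sketch of how such an order list can be applied before `JSON.stringify`; this is illustrative only, not the library's actual `stringifyPipelineJson` implementation:

```ts
// Illustrative only: reorder pipeline keys according to ORDER_OF_PIPELINE_JSON before serializing
const ORDER_OF_PIPELINE_JSON = ['title', 'pipelineUrl', 'bookVersion', /* … */ 'knowledgePieces', 'sources'];

function orderPipelineKeys(pipeline: Record<string, unknown>): Record<string, unknown> {
    const ordered: Record<string, unknown> = {};
    for (const key of ORDER_OF_PIPELINE_JSON) {
        if (key in pipeline) {
            ordered[key] = pipeline[key]; // listed keys first, in the defined order
        }
    }
    for (const key of Object.keys(pipeline)) {
        if (!(key in ordered)) {
            ordered[key] = pipeline[key]; // remaining keys keep their original order at the end
        }
    }
    return ordered;
}

// JSON.stringify preserves insertion order of string keys, so the output follows the list
const json = JSON.stringify(orderPipelineKeys({ sources: [], title: 'Example', knowledgePieces: [] }), null, 4);
```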
@@ -3402,6 +3424,30 @@
  return false;
  }

+ /**
+ * The built-in `fetch' function with a lightweight error handling wrapper as default fetch function used in Promptbook scrapers
+ *
+ * @private as default `fetch` function used in Promptbook scrapers
+ */
+ var scraperFetch = function (url, init) { return __awaiter(void 0, void 0, void 0, function () {
+ var error_1;
+ return __generator(this, function (_a) {
+ switch (_a.label) {
+ case 0:
+ _a.trys.push([0, 2, , 3]);
+ return [4 /*yield*/, fetch(url, init)];
+ case 1: return [2 /*return*/, _a.sent()];
+ case 2:
+ error_1 = _a.sent();
+ if (!(error_1 instanceof Error)) {
+ throw error_1;
+ }
+ throw new KnowledgeScrapeError(spaceTrim__default["default"](function (block) { return "\n Can not fetch \"".concat(url, "\"\n\n Fetch error:\n ").concat(block(error_1.message), "\n\n "); }));
+ case 3: return [2 /*return*/];
+ }
+ });
+ }); };
+
  /**
  * @@@
  *
@@ -3410,13 +3456,14 @@
  function makeKnowledgeSourceHandler(knowledgeSource, tools, options) {
  var _a;
  return __awaiter(this, void 0, void 0, function () {
- var sourceContent, name, _b, _c, rootDirname, url, response_1, mimeType, filename_1, fileExtension, mimeType;
- return __generator(this, function (_e) {
- switch (_e.label) {
+ var _b, fetch, sourceContent, name, _c, _d, rootDirname, url, response_1, mimeType, filename_1, fileExtension, mimeType;
+ return __generator(this, function (_f) {
+ switch (_f.label) {
  case 0:
+ _b = tools.fetch, fetch = _b === void 0 ? scraperFetch : _b;
  sourceContent = knowledgeSource.sourceContent;
  name = knowledgeSource.name;
- _b = options || {}, _c = _b.rootDirname, rootDirname = _c === void 0 ? null : _c, _b.isVerbose;
+ _c = options || {}, _d = _c.rootDirname, rootDirname = _d === void 0 ? null : _d, _c.isVerbose;
  if (!name) {
  name = sourceContentToName(sourceContent);
  }
@@ -3424,7 +3471,7 @@
  url = sourceContent;
  return [4 /*yield*/, fetch(url)];
  case 1:
- response_1 = _e.sent();
+ response_1 = _f.sent();
  mimeType = ((_a = response_1.headers.get('content-type')) === null || _a === void 0 ? void 0 : _a.split(';')[0]) || 'text/html';
  return [2 /*return*/, {
  source: name,
@@ -3481,7 +3528,7 @@
  mimeType = extensionToMimeType(fileExtension || '');
  return [4 /*yield*/, isFileExisting(filename_1, tools.fs)];
  case 3:
- if (!(_e.sent())) {
+ if (!(_f.sent())) {
  throw new NotFoundError(spaceTrim__default["default"](function (block) { return "\n Can not make source handler for file which does not exist:\n\n File:\n ".concat(block(filename_1), "\n "); }));
  }
  // TODO: [🧠][😿] Test security file - file is scoped to the project (BUT maybe do this in `filesystemTools`)
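The new `scraperFetch` wrapper above rethrows network failures as `KnowledgeScrapeError`, and `makeKnowledgeSourceHandler` now takes its fetch function from `tools.fetch`, falling back to `scraperFetch` when none is supplied. Together with the new `PromptbookFetch` typing and the extended `ExecutionTools` interface listed in the file summary, this suggests a custom fetch can be injected; a hedged sketch (the exact `ExecutionTools` shape is assumed from the typings diff, not shown here):

```ts
// Hypothetical sketch of injecting a custom fetch into the scraping tools
const tools = {
    fetch: async (url: string, init?: RequestInit): Promise<Response> => {
        console.info(`[scraper] fetching ${url}`); // e.g. add logging, caching, or a proxy here
        return fetch(url, init); // delegate to the platform fetch
    },
    // ...other execution tools (llm, fs, scrapers, userInterface, …)
};

// If `tools.fetch` is left undefined, the bundled scraperFetch wrapper is used,
// which wraps fetch failures in KnowledgeScrapeError together with the failing URL.
```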
@@ -4372,16 +4419,94 @@
  return mappedParameters;
  }

+ /**
+ * Format either small or big number
+ *
+ * @public exported from `@promptbook/utils`
+ */
+ function numberToString(value) {
+ if (value === 0) {
+ return '0';
+ }
+ else if (Number.isNaN(value)) {
+ return VALUE_STRINGS.nan;
+ }
+ else if (value === Infinity) {
+ return VALUE_STRINGS.infinity;
+ }
+ else if (value === -Infinity) {
+ return VALUE_STRINGS.negativeInfinity;
+ }
+ for (var exponent = 0; exponent < 15; exponent++) {
+ var factor = Math.pow(10, exponent);
+ var valueRounded = Math.round(value * factor) / factor;
+ if (Math.abs(value - valueRounded) / value < SMALL_NUMBER) {
+ return valueRounded.toFixed(exponent);
+ }
+ }
+ return value.toString();
+ }
+
+ /**
+ * Function `valueToString` will convert the given value to string
+ * This is useful and used in the `templateParameters` function
+ *
+ * Note: This function is not just calling `toString` method
+ * It's more complex and can handle this conversion specifically for LLM models
+ * See `VALUE_STRINGS`
+ *
+ * Note: There are 2 similar functions
+ * - `valueToString` converts value to string for LLM models as human-readable string
+ * - `asSerializable` converts value to string to preserve full information to be able to convert it back
+ *
+ * @public exported from `@promptbook/utils`
+ */
+ function valueToString(value) {
+ try {
+ if (value === '') {
+ return VALUE_STRINGS.empty;
+ }
+ else if (value === null) {
+ return VALUE_STRINGS.null;
+ }
+ else if (value === undefined) {
+ return VALUE_STRINGS.undefined;
+ }
+ else if (typeof value === 'string') {
+ return value;
+ }
+ else if (typeof value === 'number') {
+ return numberToString(value);
+ }
+ else if (value instanceof Date) {
+ return value.toISOString();
+ }
+ else {
+ return JSON.stringify(value);
+ }
+ }
+ catch (error) {
+ if (!(error instanceof Error)) {
+ throw error;
+ }
+ console.error(error);
+ return VALUE_STRINGS.unserializable;
+ }
+ }
+
  /**
  * Replaces parameters in template with values from parameters object
  *
+ * Note: This function is not places strings into string,
+ * It's more complex and can handle this operation specifically for LLM models
+ *
  * @param template the template with parameters in {curly} braces
  * @param parameters the object with parameters
  * @returns the template with replaced parameters
  * @throws {PipelineExecutionError} if parameter is not defined, not closed, or not opened
  * @public exported from `@promptbook/utils`
  */
- function replaceParameters(template, parameters) {
+ function templateParameters(template, parameters) {
  var e_1, _a;
  try {
  for (var _b = __values(Object.entries(parameters)), _c = _b.next(); !_c.done; _c = _b.next()) {
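Taken together, `numberToString` trims floating-point noise down to the `SMALL_NUMBER` relative tolerance and `valueToString` renders arbitrary parameter values as LLM-friendly text, using `VALUE_STRINGS` for the degenerate cases. A rough usage sketch of the expected behaviour, assuming both helpers are importable from `@promptbook/utils` as the JSDoc above states:

```ts
import { numberToString, valueToString } from '@promptbook/utils';

// numberToString keeps only as many decimals as the SMALL_NUMBER tolerance requires
console.log(numberToString(0));         // '0'
console.log(numberToString(0.1 + 0.2)); // '0.3' (float noise is rounded away)
console.log(numberToString(Infinity));  // '(infinity; ∞)'

// valueToString converts non-string values into human-readable prompt text
console.log(valueToString(''));          // '(nothing; empty string)'
console.log(valueToString(null));        // '(no value; null)'
console.log(valueToString(42.5));        // '42.5'
console.log(valueToString({ a: 1 }));    // '{"a":1}'
console.log(valueToString(new Date(0))); // '1970-01-01T00:00:00.000Z'
```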
@@ -4407,7 +4532,7 @@
  var loopLimit = LOOP_LIMIT;
  var _loop_1 = function () {
  if (loopLimit-- < 0) {
- throw new LimitReachedError('Loop limit reached during parameters replacement in `replaceParameters`');
+ throw new LimitReachedError('Loop limit reached during parameters replacement in `templateParameters`');
  }
  var precol = match.groups.precol;
  var parameterName = match.groups.parameterName;
@@ -4424,7 +4549,7 @@
  if (parameterValue === undefined) {
  throw new PipelineExecutionError("Parameter `{".concat(parameterName, "}` is not defined"));
  }
- parameterValue = parameterValue.toString();
+ parameterValue = valueToString(parameterValue);
  if (parameterValue.includes('\n') && /^\s*\W{0,3}\s*$/.test(precol)) {
  parameterValue = parameterValue
  .split('\n')
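With `replaceParameters` renamed to `templateParameters` and substitution now going through `valueToString`, non-string parameter values are rendered sensibly instead of relying on a bare `toString()`. A hedged usage sketch, assuming `templateParameters` is exported from `@promptbook/utils` as the JSDoc indicates and that non-string parameter values are accepted now that `valueToString` is applied:

```ts
import { templateParameters } from '@promptbook/utils';

// {curly} placeholders are replaced with stringified parameter values
const prompt = templateParameters('Summarize {title} in {maxWords} words', {
    title: 'Promptbook',
    maxWords: 10, // rendered via valueToString -> numberToString
});
console.log(prompt); // 'Summarize Promptbook in 10 words'

// A placeholder with no matching parameter throws PipelineExecutionError
// rather than being silently left in the output.
```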
@@ -4659,7 +4784,7 @@
  }
  return [3 /*break*/, 24];
  case 2:
- $ongoingTaskResult.$resultString = replaceParameters(preparedContent, parameters);
+ $ongoingTaskResult.$resultString = templateParameters(preparedContent, parameters);
  return [3 /*break*/, 25];
  case 3:
  modelRequirements = __assign(__assign({ modelVariant: 'CHAT' }, (preparedPipeline.defaultModelRequirements || {})), (task.modelRequirements || {}));
@@ -4782,8 +4907,8 @@
  _j = $ongoingTaskResult;
  return [4 /*yield*/, tools.userInterface.promptDialog($deepFreeze({
  promptTitle: task.title,
- promptMessage: replaceParameters(task.description || '', parameters),
- defaultValue: replaceParameters(preparedContent, parameters),
+ promptMessage: templateParameters(task.description || '', parameters),
+ defaultValue: templateParameters(preparedContent, parameters),
  // TODO: [🧠] !! Figure out how to define placeholder in .book.md file
  placeholder: undefined,
  priority: priority,
@@ -4907,7 +5032,7 @@
  if (!isJokerAttempt &&
  task.taskType === 'PROMPT_TASK' &&
  $ongoingTaskResult.$prompt
- // <- Note: [2] When some expected parameter is not defined, error will occur in replaceParameters
+ // <- Note: [2] When some expected parameter is not defined, error will occur in templateParameters
  // In that case we don’t want to make a report about it because it’s not a llm execution error
  ) {
  // TODO: [🧠] Maybe put other taskTypes into report
@@ -6021,26 +6146,6 @@
  * TODO: [🏛] This can be part of markdown builder
  */

- /**
- * Format either small or big number
- *
- * @private within the repository
- */
- function formatNumber(value) {
- if (value === 0) {
- return '0';
- }
- for (var exponent = 0; exponent < 15; exponent++) {
- var factor = Math.pow(10, exponent);
- var valueRounded = Math.round(value * factor) / factor;
- if (Math.abs(value - valueRounded) / value <
- 0.001 /* <- TODO: Pass as option, pass to executionReportJsonToString as option */) {
- return valueRounded.toFixed(exponent);
- }
- }
- return value.toString();
- }
-
  /**
  * Create a markdown table from a 2D array of strings
  *
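The private `formatNumber` helper is removed here; the chart-legend call site in the next hunk now uses the public `numberToString`, which applies the same 0.001 relative tolerance for finite values and additionally handles NaN and ±Infinity via `VALUE_STRINGS`. A small sketch of the migrated legend construction (assuming `numberToString` is imported from `@promptbook/utils`; the variable names are illustrative):

```ts
import { numberToString } from '@promptbook/utils';

// Before (0.80.0): the legend used the private formatNumber helper
// After (0.81.0): the same value goes through the public numberToString
const scale = 40;
const unitName = 'seconds';
const legend = `_Note: Each █ represents ${numberToString(1 / scale)} ${unitName}_`;
console.log(legend); // '_Note: Each █ represents 0.025 seconds_'
```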
@@ -6100,7 +6205,7 @@
  }
  finally { if (e_1) throw e_1.error; }
  }
- var legend = "_Note: Each \u2588 represents ".concat(formatNumber(1 / scale), " ").concat(unitName, ", width of ").concat(valueHeader.toLowerCase(), " is ").concat(formatNumber(to - from), " ").concat(unitName, " = ").concat(width, " squares_");
+ var legend = "_Note: Each \u2588 represents ".concat(numberToString(1 / scale), " ").concat(unitName, ", width of ").concat(valueHeader.toLowerCase(), " is ").concat(numberToString(to - from), " ").concat(unitName, " = ").concat(width, " squares_");
  return createMarkdownTable(table) + '\n\n' + legend;
  }
  /**