@promptbook/pdf 0.80.0 → 0.81.0-6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. package/README.md +6 -0
  2. package/esm/index.es.js +140 -15
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/books/index.d.ts +15 -0
  5. package/esm/typings/src/_packages/core.index.d.ts +2 -6
  6. package/esm/typings/src/_packages/editable.index.d.ts +10 -0
  7. package/esm/typings/src/_packages/templates.index.d.ts +4 -0
  8. package/esm/typings/src/_packages/types.index.d.ts +4 -0
  9. package/esm/typings/src/_packages/utils.index.d.ts +10 -2
  10. package/esm/typings/src/config.d.ts +26 -0
  11. package/esm/typings/src/execution/ExecutionTools.d.ts +7 -0
  12. package/esm/typings/src/execution/PromptbookFetch.d.ts +5 -0
  13. package/esm/typings/src/execution/PromptbookFetch.test-type.d.ts +5 -0
  14. package/esm/typings/src/expectations/drafts/isDomainNameFree.d.ts +2 -1
  15. package/esm/typings/src/expectations/drafts/isGithubNameFree.d.ts +2 -1
  16. package/esm/typings/src/high-level-abstractions/index.d.ts +10 -0
  17. package/esm/typings/src/other/templates/getBookTemplate.d.ts +12 -0
  18. package/esm/typings/src/other/templates/getTemplatesPipelineCollection.d.ts +10 -0
  19. package/esm/typings/src/pipeline/PipelineJson/PipelineJson.d.ts +10 -0
  20. package/esm/typings/src/scrapers/_common/utils/makeKnowledgeSourceHandler.d.ts +1 -1
  21. package/esm/typings/src/scrapers/_common/utils/scraperFetch.d.ts +7 -0
  22. package/esm/typings/src/utils/editable/types/PipelineEditableSerialized.d.ts +27 -0
  23. package/esm/typings/src/{conversion → utils/editable}/utils/removePipelineCommand.d.ts +3 -3
  24. package/esm/typings/src/{conversion → utils/editable}/utils/renamePipelineParameter.d.ts +3 -3
  25. package/esm/typings/src/{conversion → utils/editable}/utils/stringifyPipelineJson.d.ts +2 -2
  26. package/esm/typings/src/utils/parameters/numberToString.d.ts +7 -0
  27. package/esm/typings/src/utils/parameters/{replaceParameters.d.ts → templateParameters.d.ts} +6 -2
  28. package/esm/typings/src/utils/parameters/valueToString.d.ts +17 -0
  29. package/esm/typings/src/utils/parameters/valueToString.test.d.ts +1 -0
  30. package/esm/typings/src/utils/serialization/asSerializable.d.ts +4 -0
  31. package/package.json +2 -2
  32. package/umd/index.umd.js +140 -15
  33. package/umd/index.umd.js.map +1 -1
  34. package/esm/typings/src/utils/formatNumber.d.ts +0 -6
  35. /package/esm/typings/src/{conversion → utils/editable}/utils/removePipelineCommand.test.d.ts +0 -0
  36. /package/esm/typings/src/{conversion → utils/editable}/utils/renamePipelineParameter.test.d.ts +0 -0
  37. /package/esm/typings/src/{conversion → utils/editable}/utils/stringifyPipelineJson.test.d.ts +0 -0
  38. /package/esm/typings/src/utils/{formatNumber.test.d.ts → parameters/numberToString.test.d.ts} +0 -0
  39. /package/esm/typings/src/utils/parameters/{replaceParameters.test.d.ts → templateParameters.test.d.ts} +0 -0
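Items 27, 34 and 38 in the list above record two renames in the public utilities: `replaceParameters` becomes `templateParameters` and `formatNumber` is superseded by `numberToString` under `utils/parameters`. A minimal migration sketch in TypeScript, assuming both new helpers are re-exported from `@promptbook/utils` as their JSDoc annotations in the bundle below state (the commented-out old imports are only illustrative):

```ts
// Before (0.80.x) – illustrative only, assuming the old names were imported like this:
// import { replaceParameters, formatNumber } from '@promptbook/utils';

// After (0.81.0-x) – the renamed utilities:
import { templateParameters, numberToString } from '@promptbook/utils';

const greeting = templateParameters('Hello {name}', { name: 'Alice' }); // expected: 'Hello Alice'
const rounded = numberToString(Math.PI); // expected: '3.14' (shortest form within the SMALL_NUMBER tolerance)
```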
package/README.md CHANGED
@@ -23,6 +23,10 @@
 
 
 
+ <blockquote style="color: #ff8811">
+ <b>⚠ Warning:</b> This is a pre-release version of the library. It is not yet ready for production use. Please look at <a href="https://www.npmjs.com/package/@promptbook/core?activeTab=versions">latest stable release</a>.
+ </blockquote>
+
  ## 📦 Package `@promptbook/pdf`
 
  - Promptbooks are [divided into several](#-packages) packages, all are published from [single monorepo](https://github.com/webgptorg/promptbook).
@@ -249,6 +253,8 @@ Or you can install them separately:
  - **[@promptbook/documents](https://www.npmjs.com/package/@promptbook/documents)** - Read knowledge from documents like `.docx`, `.odt`,…
  - **[@promptbook/legacy-documents](https://www.npmjs.com/package/@promptbook/legacy-documents)** - Read knowledge from legacy documents like `.doc`, `.rtf`,…
  - **[@promptbook/website-crawler](https://www.npmjs.com/package/@promptbook/website-crawler)** - Crawl knowledge from the web
+ - **[@promptbook/editable](https://www.npmjs.com/package/@promptbook/editable)** - Editable book as native javascript object with imperative object API
+ - **[@promptbook/templates](https://www.npmjs.com/package/@promptbook/templates)** - Usefull templates and examples of books which can be used as a starting point
  - **[@promptbook/types](https://www.npmjs.com/package/@promptbook/types)** - Just typescript types used in the library
  - **[@promptbook/cli](https://www.npmjs.com/package/@promptbook/cli)** - Command line interface utilities for promptbooks
 
package/esm/index.es.js CHANGED
@@ -20,7 +20,7 @@ var BOOK_LANGUAGE_VERSION = '1.0.0';
  *
  * @see https://github.com/webgptorg/promptbook
  */
- var PROMPTBOOK_ENGINE_VERSION = '0.80.0-1';
+ var PROMPTBOOK_ENGINE_VERSION = '0.81.0-5';
  /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -174,7 +174,7 @@ var NotYetImplementedError = /** @class */ (function (_super) {
  function TODO_USE() {
  }
 
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sourceFile:"./books/prepare-knowledge-from-markdown.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sourceFile:"./books/prepare-knowledge-keywords.book.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sourceFile:"./books/prepare-knowledge-title.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book.md",formfactorName:"GENERIC",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## 
Example\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sourceFile:"./books/prepare-persona.book.md"}];
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book.md`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book.md`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book.md"},{title:"Prepare 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book.md",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book.md`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book.md",formfactorName:"GENERIC",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book.md`\n- INPUT PARAMETER `{availableModelNames}` List of available model names separated by comma (,)\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n\\`\\`\\`json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelRequirements}`\n"}],sourceFile:"./books/prepare-persona.book.md"}];
 
  /**
  * Prettify the html code
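Functionally, the change to `PipelineCollection` above is that every compiled pipeline now carries a `sources` array preserving the original `.book.md` text it was compiled from. A rough sketch of the entry shape, read off the values visible in the bundle (the interface name `PipelineSource` is an illustrative guess, not taken from the package typings):

```ts
// Shape inferred from the bundled data above; the interface name is illustrative.
interface PipelineSource {
    type: 'BOOK'; // only 'BOOK' entries appear in this bundle
    path: string | null; // null here because these pipelines were compiled from inline content
    content: string; // the original `.book.md` markdown source
}

// Each PipelineCollection item now includes, alongside tasks, personas and knowledge*:
//   sources: PipelineSource[]
const exampleSource: PipelineSource = {
    type: 'BOOK',
    path: null,
    content: '# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book.md`\n…',
};
```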
@@ -473,6 +473,26 @@ var ADMIN_GITHUB_NAME = 'hejny';
  * @private within the repository - too low-level in comparison with other `MAX_...`
  */
  var LOOP_LIMIT = 1000;
+ /**
+ * Strings to represent various values in the context of parameter values
+ *
+ * @public exported from `@promptbook/utils`
+ */
+ var VALUE_STRINGS = {
+ empty: '(nothing; empty string)',
+ null: '(no value; null)',
+ undefined: '(unknown value; undefined)',
+ nan: '(not a number; NaN)',
+ infinity: '(infinity; ∞)',
+ negativeInfinity: '(negative infinity; -∞)',
+ unserializable: '(unserializable value)',
+ };
+ /**
+ * Small number limit
+ *
+ * @public exported from `@promptbook/utils`
+ */
+ var SMALL_NUMBER = 0.001;
  /**
  * Short time interval to prevent race conditions in milliseconds
  *
@@ -816,6 +836,7 @@ function exportJson(options) {
  * @public exported from `@promptbook/core`
  */
  var ORDER_OF_PIPELINE_JSON = [
+ // Note: [🍙] In this order will be pipeline serialized
  'title',
  'pipelineUrl',
  'bookVersion',
@@ -827,6 +848,7 @@ var ORDER_OF_PIPELINE_JSON = [
  'preparations',
  'knowledgeSources',
  'knowledgePieces',
+ 'sources', // <- TODO: [🧠] Where should the `sources` be
  ];
  /**
  * Nonce which is used for replacing things in strings
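`ORDER_OF_PIPELINE_JSON` gains the new `sources` key, so the preserved book text survives serialization and the keys come out in a stable order. The following standalone sketch shows how such a fixed key order can be applied before stringifying; it is generic illustration code, not the package's own `stringifyPipelineJson`:

```ts
// Generic illustration of serializing an object with a fixed key order.
const ORDER_OF_PIPELINE_JSON = ['title', 'pipelineUrl', 'bookVersion', /* … */ 'knowledgeSources', 'knowledgePieces', 'sources'];

function orderKeys(pipeline: Record<string, unknown>, order: ReadonlyArray<string>): Record<string, unknown> {
    const ordered: Record<string, unknown> = {};
    for (const key of order) {
        if (key in pipeline) {
            ordered[key] = pipeline[key];
        }
    }
    for (const key of Object.keys(pipeline)) {
        if (!(key in ordered)) {
            ordered[key] = pipeline[key]; // keep any keys not covered by the explicit order at the end
        }
    }
    return ordered;
}

// JSON.stringify preserves insertion order of string keys, so the output is deterministic:
// JSON.stringify(orderKeys(pipelineJson, ORDER_OF_PIPELINE_JSON), null, 4);
```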
@@ -3218,6 +3240,30 @@ function isValidFilePath(filename) {
  return false;
  }
 
+ /**
+ * The built-in `fetch' function with a lightweight error handling wrapper as default fetch function used in Promptbook scrapers
+ *
+ * @private as default `fetch` function used in Promptbook scrapers
+ */
+ var scraperFetch = function (url, init) { return __awaiter(void 0, void 0, void 0, function () {
+ var error_1;
+ return __generator(this, function (_a) {
+ switch (_a.label) {
+ case 0:
+ _a.trys.push([0, 2, , 3]);
+ return [4 /*yield*/, fetch(url, init)];
+ case 1: return [2 /*return*/, _a.sent()];
+ case 2:
+ error_1 = _a.sent();
+ if (!(error_1 instanceof Error)) {
+ throw error_1;
+ }
+ throw new KnowledgeScrapeError(spaceTrim$1(function (block) { return "\n Can not fetch \"".concat(url, "\"\n\n Fetch error:\n ").concat(block(error_1.message), "\n\n "); }));
+ case 3: return [2 /*return*/];
+ }
+ });
+ }); };
+
  /**
  * @@@
  *
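For readability, the transpiled `scraperFetch` above is roughly equivalent to the following async/await form; this is a sketch, not the package source, and it assumes the same `KnowledgeScrapeError` and `spaceTrim` helpers that appear elsewhere in the bundle:

```ts
// Approximate async/await equivalent of the transpiled helper above (sketch only).
// `KnowledgeScrapeError` and `spaceTrim` are the identifiers used in the surrounding bundle.
const scraperFetch = async (url: string, init?: RequestInit): Promise<Response> => {
    try {
        return await fetch(url, init);
    } catch (error) {
        if (!(error instanceof Error)) {
            throw error;
        }
        // Wrap low-level network failures in a scraper-specific error with a readable message
        throw new KnowledgeScrapeError(
            spaceTrim(
                (block) => `
                    Can not fetch "${url}"

                    Fetch error:
                    ${block(error.message)}
                `,
            ),
        );
    }
};
```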
@@ -3226,13 +3272,14 @@ function isValidFilePath(filename) {
  function makeKnowledgeSourceHandler(knowledgeSource, tools, options) {
  var _a;
  return __awaiter(this, void 0, void 0, function () {
- var sourceContent, name, _b, _c, rootDirname, url, response_1, mimeType, filename_1, fileExtension, mimeType;
- return __generator(this, function (_e) {
- switch (_e.label) {
+ var _b, fetch, sourceContent, name, _c, _d, rootDirname, url, response_1, mimeType, filename_1, fileExtension, mimeType;
+ return __generator(this, function (_f) {
+ switch (_f.label) {
  case 0:
+ _b = tools.fetch, fetch = _b === void 0 ? scraperFetch : _b;
  sourceContent = knowledgeSource.sourceContent;
  name = knowledgeSource.name;
- _b = options || {}, _c = _b.rootDirname, rootDirname = _c === void 0 ? null : _c, _b.isVerbose;
+ _c = options || {}, _d = _c.rootDirname, rootDirname = _d === void 0 ? null : _d, _c.isVerbose;
  if (!name) {
  name = sourceContentToName(sourceContent);
  }
@@ -3240,7 +3287,7 @@ function makeKnowledgeSourceHandler(knowledgeSource, tools, options) {
  url = sourceContent;
  return [4 /*yield*/, fetch(url)];
  case 1:
- response_1 = _e.sent();
+ response_1 = _f.sent();
  mimeType = ((_a = response_1.headers.get('content-type')) === null || _a === void 0 ? void 0 : _a.split(';')[0]) || 'text/html';
  return [2 /*return*/, {
  source: name,
@@ -3297,7 +3344,7 @@ function makeKnowledgeSourceHandler(knowledgeSource, tools, options) {
  mimeType = extensionToMimeType(fileExtension || '');
  return [4 /*yield*/, isFileExisting(filename_1, tools.fs)];
  case 3:
- if (!(_e.sent())) {
+ if (!(_f.sent())) {
  throw new NotFoundError(spaceTrim$1(function (block) { return "\n Can not make source handler for file which does not exist:\n\n File:\n ".concat(block(filename_1), "\n "); }));
  }
  // TODO: [🧠][😿] Test security file - file is scoped to the project (BUT maybe do this in `filesystemTools`)
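Together with the new `PromptbookFetch.d.ts` and the `ExecutionTools.d.ts` additions in the file list, the hunks above show that `makeKnowledgeSourceHandler` now reads an optional `fetch` from its `tools` argument and falls back to `scraperFetch`. A hedged sketch of injecting a custom fetch; the exact `PromptbookFetch` signature is an assumption based on the `tools.fetch` destructuring visible above:

```ts
// Sketch only: the optional `fetch` member is inferred from `_b = tools.fetch, fetch = ... scraperFetch` above.
type PromptbookFetch = (url: string, init?: RequestInit) => Promise<Response>;

const loggingFetch: PromptbookFetch = async (url, init) => {
    console.info(`[scraper] fetching ${url}`);
    return fetch(url, init); // delegate to the platform fetch
};

// Passed via the tools object, it would replace the default scraperFetch, e.g.:
// await makeKnowledgeSourceHandler(knowledgeSource, { ...otherTools, fetch: loggingFetch }, { rootDirname: null });
```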
@@ -4325,16 +4372,94 @@ function extractJsonBlock(markdown) {
  * TODO: [🏢] Make this logic part of `JsonFormatDefinition` or `isValidJsonString`
  */
 
+ /**
+ * Format either small or big number
+ *
+ * @public exported from `@promptbook/utils`
+ */
+ function numberToString(value) {
+ if (value === 0) {
+ return '0';
+ }
+ else if (Number.isNaN(value)) {
+ return VALUE_STRINGS.nan;
+ }
+ else if (value === Infinity) {
+ return VALUE_STRINGS.infinity;
+ }
+ else if (value === -Infinity) {
+ return VALUE_STRINGS.negativeInfinity;
+ }
+ for (var exponent = 0; exponent < 15; exponent++) {
+ var factor = Math.pow(10, exponent);
+ var valueRounded = Math.round(value * factor) / factor;
+ if (Math.abs(value - valueRounded) / value < SMALL_NUMBER) {
+ return valueRounded.toFixed(exponent);
+ }
+ }
+ return value.toString();
+ }
+
+ /**
+ * Function `valueToString` will convert the given value to string
+ * This is useful and used in the `templateParameters` function
+ *
+ * Note: This function is not just calling `toString` method
+ * It's more complex and can handle this conversion specifically for LLM models
+ * See `VALUE_STRINGS`
+ *
+ * Note: There are 2 similar functions
+ * - `valueToString` converts value to string for LLM models as human-readable string
+ * - `asSerializable` converts value to string to preserve full information to be able to convert it back
+ *
+ * @public exported from `@promptbook/utils`
+ */
+ function valueToString(value) {
+ try {
+ if (value === '') {
+ return VALUE_STRINGS.empty;
+ }
+ else if (value === null) {
+ return VALUE_STRINGS.null;
+ }
+ else if (value === undefined) {
+ return VALUE_STRINGS.undefined;
+ }
+ else if (typeof value === 'string') {
+ return value;
+ }
+ else if (typeof value === 'number') {
+ return numberToString(value);
+ }
+ else if (value instanceof Date) {
+ return value.toISOString();
+ }
+ else {
+ return JSON.stringify(value);
+ }
+ }
+ catch (error) {
+ if (!(error instanceof Error)) {
+ throw error;
+ }
+ console.error(error);
+ return VALUE_STRINGS.unserializable;
+ }
+ }
+
  /**
  * Replaces parameters in template with values from parameters object
  *
+ * Note: This function is not places strings into string,
+ * It's more complex and can handle this operation specifically for LLM models
+ *
  * @param template the template with parameters in {curly} braces
  * @param parameters the object with parameters
  * @returns the template with replaced parameters
  * @throws {PipelineExecutionError} if parameter is not defined, not closed, or not opened
  * @public exported from `@promptbook/utils`
  */
- function replaceParameters(template, parameters) {
+ function templateParameters(template, parameters) {
  var e_1, _a;
  try {
  for (var _b = __values(Object.entries(parameters)), _c = _b.next(); !_c.done; _c = _b.next()) {
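Taken together, `valueToString` normalizes any parameter value into a human-readable string before it is spliced into a prompt, and `numberToString` picks the shortest decimal representation whose relative error stays below `SMALL_NUMBER` (0.001). Expected behaviour, read off the code added above (the commented outputs are what that implementation should produce; both helpers are marked `@public exported from @promptbook/utils`):

```ts
import { numberToString, valueToString } from '@promptbook/utils';

valueToString('');          // '(nothing; empty string)'  <- VALUE_STRINGS.empty
valueToString(null);        // '(no value; null)'
valueToString(undefined);   // '(unknown value; undefined)'
valueToString(new Date(0)); // '1970-01-01T00:00:00.000Z' <- Dates become ISO strings
valueToString({ a: 1 });    // '{"a":1}'                  <- other objects are JSON.stringify-ed

numberToString(0);          // '0'
numberToString(NaN);        // '(not a number; NaN)'
numberToString(Math.PI);    // '3.14' – rounding stops once the relative error drops below SMALL_NUMBER
```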
@@ -4360,7 +4485,7 @@ function replaceParameters(template, parameters) {
  var loopLimit = LOOP_LIMIT;
  var _loop_1 = function () {
  if (loopLimit-- < 0) {
- throw new LimitReachedError('Loop limit reached during parameters replacement in `replaceParameters`');
+ throw new LimitReachedError('Loop limit reached during parameters replacement in `templateParameters`');
  }
  var precol = match.groups.precol;
  var parameterName = match.groups.parameterName;
@@ -4377,7 +4502,7 @@ function replaceParameters(template, parameters) {
  if (parameterValue === undefined) {
  throw new PipelineExecutionError("Parameter `{".concat(parameterName, "}` is not defined"));
  }
- parameterValue = parameterValue.toString();
+ parameterValue = valueToString(parameterValue);
  if (parameterValue.includes('\n') && /^\s*\W{0,3}\s*$/.test(precol)) {
  parameterValue = parameterValue
  .split('\n')
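The switch from `parameterValue.toString()` to `valueToString(parameterValue)` means non-string parameters are now rendered through the same human-readable mapping instead of raw `toString()` (which would throw on `null` and print `[object Object]` for plain objects). A short usage sketch of `templateParameters`, based on its JSDoc above; the commented outputs are what the shown code should produce, and the loosely typed parameter values are used here only for illustration:

```ts
import { templateParameters } from '@promptbook/utils';

// Plain substitution of {curly} parameters:
templateParameters('Write a poem about {topic}', { topic: 'the sea' });
// -> 'Write a poem about the sea'

// Non-string values now go through valueToString:
templateParameters('Temperature: {temperature}', { temperature: 0.7 }); // -> 'Temperature: 0.7'
templateParameters('Value: {value}', { value: null });                  // -> 'Value: (no value; null)'

// A parameter that is not provided still throws a PipelineExecutionError:
// templateParameters('Hello {name}', {}); // throws: Parameter `{name}` is not defined
```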
@@ -4612,7 +4737,7 @@ function executeAttempts(options) {
  }
  return [3 /*break*/, 24];
  case 2:
- $ongoingTaskResult.$resultString = replaceParameters(preparedContent, parameters);
+ $ongoingTaskResult.$resultString = templateParameters(preparedContent, parameters);
  return [3 /*break*/, 25];
  case 3:
  modelRequirements = __assign(__assign({ modelVariant: 'CHAT' }, (preparedPipeline.defaultModelRequirements || {})), (task.modelRequirements || {}));
@@ -4735,8 +4860,8 @@ function executeAttempts(options) {
  _j = $ongoingTaskResult;
  return [4 /*yield*/, tools.userInterface.promptDialog($deepFreeze({
  promptTitle: task.title,
- promptMessage: replaceParameters(task.description || '', parameters),
- defaultValue: replaceParameters(preparedContent, parameters),
+ promptMessage: templateParameters(task.description || '', parameters),
+ defaultValue: templateParameters(preparedContent, parameters),
  // TODO: [🧠] !! Figure out how to define placeholder in .book.md file
  placeholder: undefined,
  priority: priority,
@@ -4860,7 +4985,7 @@ function executeAttempts(options) {
  if (!isJokerAttempt &&
  task.taskType === 'PROMPT_TASK' &&
  $ongoingTaskResult.$prompt
- // <- Note: [2] When some expected parameter is not defined, error will occur in replaceParameters
+ // <- Note: [2] When some expected parameter is not defined, error will occur in templateParameters
  // In that case we don’t want to make a report about it because it’s not a llm execution error
  ) {
  // TODO: [🧠] Maybe put other taskTypes into report